From d6286caa2c71d4c4af34ef289d4dd733547e21ee Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 16 Sep 2024 19:14:35 +0000 Subject: [PATCH 01/22] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/package.json b/components/package.json index c750020d3a..49f5862eec 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.38.2", + "version": "2.39.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 8fcc60818c..9bd09f45fa 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.38.2" +appVersion: "2.39.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.150 +version: 1.6.151-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From a64354476b21ee4265985bee4e75c076d7ce1ea9 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:07:08 +0200 Subject: [PATCH 02/22] feat(members): List global role members in Prod and ProdType (#10850) --- dojo/product/queries.py | 23 ++++++++++++++---- dojo/product/views.py | 6 +++++ dojo/product_type/queries.py | 24 +++++++++++++++---- dojo/product_type/views.py | 9 ++++++- dojo/templates/dojo/view_product_details.html | 21 ++++++++++++++-- dojo/templates/dojo/view_product_type.html | 20 ++++++++++++++-- 6 files changed, 89 insertions(+), 14 deletions(-) diff --git a/dojo/product/queries.py b/dojo/product/queries.py index 8d562c0f9a..bf856a8988 100644 --- a/dojo/product/queries.py +++ b/dojo/product/queries.py @@ -13,6 +13,7 @@ App_Analysis, DojoMeta, Engagement_Presets, + Global_Role, Languages, Product, Product_API_Scan_Configuration, @@ -71,8 +72,15 @@ def get_authorized_members_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): return Product_Member.objects.filter(product=product).order_by("user__first_name", "user__last_name").select_related("role", "user") - else: - return None + return Product_Member.objects.none() + + +def get_authorized_global_members_for_product(product, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product, permission): + return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", "user__last_name").select_related("role", "user") + return Global_Role.objects.none() def get_authorized_groups_for_product(product, permission): @@ -81,8 +89,15 @@ def get_authorized_groups_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by("group__name").select_related("role") - else: - return None + return Product_Group.objects.none() + + +def get_authorized_global_groups_for_product(product, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product, permission): + return Global_Role.objects.filter(user=None, role__isnull=False).order_by("group__name").select_related("role") + return Global_Role.objects.none() def get_authorized_product_members(permission): diff --git 
a/dojo/product/views.py b/dojo/product/views.py index e887938d45..13bcd476e5 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -92,6 +92,8 @@ Test_Type, ) from dojo.product.queries import ( + get_authorized_global_groups_for_product, + get_authorized_global_members_for_product, get_authorized_groups_for_product, get_authorized_members_for_product, get_authorized_products, @@ -213,8 +215,10 @@ def view_product(request, pid): .prefetch_related("prod_type__members") prod = get_object_or_404(prod_query, id=pid) product_members = get_authorized_members_for_product(prod, Permissions.Product_View) + global_product_members = get_authorized_global_members_for_product(prod, Permissions.Product_View) product_type_members = get_authorized_members_for_product_type(prod.prod_type, Permissions.Product_Type_View) product_groups = get_authorized_groups_for_product(prod, Permissions.Product_View) + global_product_groups = get_authorized_global_groups_for_product(prod, Permissions.Product_View) product_type_groups = get_authorized_groups_for_product_type(prod.prod_type, Permissions.Product_Type_View) personal_notifications_form = ProductNotificationsForm( instance=Notifications.objects.filter(user=request.user).filter(product=prod).first()) @@ -291,8 +295,10 @@ def view_product(request, pid): "benchmarks_percents": benchAndPercent, "benchmarks": benchmarks, "product_members": product_members, + "global_product_members": global_product_members, "product_type_members": product_type_members, "product_groups": product_groups, + "global_product_groups": global_product_groups, "product_type_groups": product_type_groups, "personal_notifications_form": personal_notifications_form, "enabled_notifications": get_enabled_notifications_list(), diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py index 737584a5b0..dacee58914 100644 --- a/dojo/product_type/queries.py +++ b/dojo/product_type/queries.py @@ -9,7 +9,7 @@ ) from dojo.authorization.roles_permissions import Permissions from dojo.group.queries import get_authorized_groups -from dojo.models import Product_Type, Product_Type_Group, Product_Type_Member +from dojo.models import Global_Role, Product_Type, Product_Type_Group, Product_Type_Member def get_authorized_product_types(permission): @@ -45,8 +45,15 @@ def get_authorized_members_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user") - else: - return None + return Product_Type_Member.objects.none() + + +def get_authorized_global_members_for_product_type(product_type, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product_type, permission): + return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", "user__last_name").select_related("role", "user") + return Global_Role.objects.none() def get_authorized_groups_for_product_type(product_type, permission): @@ -55,8 +62,15 @@ def get_authorized_groups_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group") - else: - return None + return 
Product_Type_Group.objects.none() + + +def get_authorized_global_groups_for_product_type(product_type, permission): + user = get_current_user() + + if user.is_superuser or user_has_permission(user, product_type, permission): + return Global_Role.objects.filter(user=None, role__isnull=False).order_by("group__name").select_related("role", "group") + return Global_Role.objects.none() def get_authorized_product_type_members(permission): diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py index 302aa6dbbf..2f21f81362 100644 --- a/dojo/product_type/views.py +++ b/dojo/product_type/views.py @@ -27,6 +27,8 @@ from dojo.models import Product_Type, Product_Type_Group, Product_Type_Member, Role from dojo.product.queries import get_authorized_products from dojo.product_type.queries import ( + get_authorized_global_groups_for_product_type, + get_authorized_global_members_for_product_type, get_authorized_groups_for_product_type, get_authorized_members_for_product_type, get_authorized_product_types, @@ -117,7 +119,9 @@ def view_product_type(request, ptid): page_name = _("View Product Type") pt = get_object_or_404(Product_Type, pk=ptid) members = get_authorized_members_for_product_type(pt, Permissions.Product_Type_View) + global_members = get_authorized_global_members_for_product_type(pt, Permissions.Product_Type_View) groups = get_authorized_groups_for_product_type(pt, Permissions.Product_Type_View) + global_groups = get_authorized_global_groups_for_product_type(pt, Permissions.Product_Type_View) products = get_authorized_products(Permissions.Product_View).filter(prod_type=pt) products = get_page_items(request, products, 25) add_breadcrumb(title=page_name, top_level=False, request=request) @@ -126,7 +130,10 @@ def view_product_type(request, ptid): "pt": pt, "products": products, "groups": groups, - "members": members}) + "members": members, + "global_groups": global_groups, + "global_members": global_members, + }) @user_is_authorized(Product_Type, Permissions.Product_Type_Delete, "ptid") diff --git a/dojo/templates/dojo/view_product_details.html b/dojo/templates/dojo/view_product_details.html index 0005dc3fbb..ea4514d735 100644 --- a/dojo/templates/dojo/view_product_details.html +++ b/dojo/templates/dojo/view_product_details.html @@ -297,7 +297,7 @@

{% trans "Members" %}

{% endif %} - {% if product_members or product_type_members %} + {% if product_members or product_type_members or global_product_members %}
@@ -350,6 +350,15 @@

{% trans "Members" %}

{% endfor %} + {% for member in global_product_members %} + + + + + + + {% endfor %}
{{ member.role }}
+ {{ member.user.get_full_name }}Global role{{ member.role }}
@@ -385,7 +394,7 @@

{% trans "Groups" %}

{% endif %} - {% if product_groups or product_type_groups %} + {% if product_groups or product_type_groups or global_product_groups %}
@@ -437,6 +446,14 @@

{% trans "Groups" %}

{% endfor %} + {% for type_group in global_product_groups %} + + + + + + + {% endfor %}
{{ type_group.role }}
{{ type_group.group.name }}Global role{{ type_group.role }}
diff --git a/dojo/templates/dojo/view_product_type.html b/dojo/templates/dojo/view_product_type.html index ccb90b127b..70e5058350 100644 --- a/dojo/templates/dojo/view_product_type.html +++ b/dojo/templates/dojo/view_product_type.html @@ -151,7 +151,7 @@

{% trans "Members" %}

{% endif %} - {% if members %} + {% if members or global_members %}
@@ -189,6 +189,14 @@

{% trans "Members" %}

{% endfor %} + {% for member in global_members %} + + + + + + {% endfor %}
{{ member.role }}
+ {{ member.user.get_full_name }}{{ member.role }} (Global role)
@@ -224,7 +232,7 @@

{% trans "Groups" %}

{% endif %} - {% if groups %} + {% if groups or global_groups %}
@@ -262,6 +270,14 @@

{% trans "Groups" %}

{% endfor %} + {% for group in global_groups %} + + + + + + {% endfor %}
{{ group.role }}
+ {{ group.group.name }}{{ group.role }} (Global role)
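Patch 02 above adds `get_authorized_global_members_for_product`, `get_authorized_global_groups_for_product` and their product-type counterparts, and changes the existing membership helpers to return empty querysets instead of `None`. As a rough illustration of how the new helpers can be combined with the existing per-product membership query, here is a minimal sketch; the `users_who_can_view` helper is invented for this example and is not part of the patch:

```python
from dojo.authorization.roles_permissions import Permissions
from dojo.product.queries import (
    get_authorized_global_members_for_product,
    get_authorized_members_for_product,
)


def users_who_can_view(product):
    # Direct product members, as before.
    direct = get_authorized_members_for_product(product, Permissions.Product_View)
    # Users whose Global_Role grants them access to every product.
    global_members = get_authorized_global_members_for_product(product, Permissions.Product_View)
    # Both helpers now return empty querysets rather than None when the
    # current user lacks permission, so no None checks are needed here.
    return {m.user for m in direct} | {g.user for g in global_members}
```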
From 8dfe3730f6d13850a12a3e0b95cbaeb1d6a82cfd Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Fri, 20 Sep 2024 23:04:42 +0200 Subject: [PATCH 03/22] fix(docker compose): Use 'docker compose' everywhere (#10916) * fix(docker compose): Use 'docker compose' everywhere * Apply suggestions from code review Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/support_request.md | 2 +- docker/docker-compose-check.sh | 4 +- docker/extra_settings/README.md | 2 +- .../en/contributing/how-to-write-a-parser.md | 14 ++--- .../getting_started/running-in-production.md | 4 +- .../en/getting_started/upgrading/2.23.md | 2 +- .../en/getting_started/upgrading/2.30.md | 2 +- .../en/getting_started/upgrading/_index.md | 12 ++-- docs/content/en/integrations/jira.md | 6 +- .../en/integrations/ldap-authentication.md | 2 +- docs/content/en/usage/features.md | 10 +-- readme-docs/DOCKER.md | 62 +++---------------- tests/local-integration-tests.sh | 26 ++++---- 14 files changed, 53 insertions(+), 97 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 713480dd33..ba1ba50d65 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -36,7 +36,7 @@ A clear and concise description of what you expected to happen. - DefectDojo version (see footer) or commit message: [use `git show -s --format="[%ci] %h: %s [%d]"`] **Logs** -Use `docker-compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). +Use `docker compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). **Sample scan files** If applicable, add sample scan files to help reproduce your problem. diff --git a/.github/ISSUE_TEMPLATE/support_request.md b/.github/ISSUE_TEMPLATE/support_request.md index 7eda2a58de..4dc3873471 100644 --- a/.github/ISSUE_TEMPLATE/support_request.md +++ b/.github/ISSUE_TEMPLATE/support_request.md @@ -36,7 +36,7 @@ A clear and concise description of what you expected to happen. - DefectDojo version (see footer) or commit message: [use `git show -s --format="[%ci] %h: %s [%d]"`] **Logs** -Use `docker-compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). +Use `docker compose logs` (or similar, depending on your deployment method) to get the logs and add the relevant sections here showing the error occurring (if applicable). **Sample scan files** If applicable, add sample scan files to help reproduce your problem. 
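Patch 03 switches every documented command from the legacy `docker-compose` binary to the Compose v2 `docker compose` subcommand. For tooling that still has to run on hosts where only one of the two forms is installed, a small wrapper can pick whichever is available; this is only a sketch to illustrate the difference between the two invocations, and the `compose_command` helper is not part of the patch:

```python
import shutil
import subprocess


def compose_command():
    """Return the Compose invocation available on this host.

    Prefer the Docker CLI plugin ("docker compose"); fall back to the
    legacy standalone binary ("docker-compose") if the plugin is missing.
    """
    try:
        subprocess.run(["docker", "compose", "version"],
                       check=True, capture_output=True)
        return ["docker", "compose"]
    except (OSError, subprocess.CalledProcessError):
        pass
    if shutil.which("docker-compose"):
        return ["docker-compose"]
    raise RuntimeError("Neither 'docker compose' nor 'docker-compose' is installed")


# Example: tail the uwsgi logs with whichever variant is present.
subprocess.run([*compose_command(), "logs", "uwsgi", "--tail=120"], check=True)
```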
diff --git a/docker/docker-compose-check.sh b/docker/docker-compose-check.sh index b51cf45674..d24419de2e 100755 --- a/docker/docker-compose-check.sh +++ b/docker/docker-compose-check.sh @@ -6,11 +6,11 @@ current=$(docker compose version --short) echo 'Checking docker compose version' if [[ $main -lt 2 ]]; then - echo "$current is not a supported docker-compose version, please upgrade to the minimum supported version: 2.0" + echo "$current is not a supported 'docker compose' version, please upgrade to the minimum supported version: 2.0" exit 1 elif [[ $main -eq 1 ]]; then if [[ $minor -lt 28 ]]; then - echo "$current is not supported docker-compose version, please upgrade to minimal supported version:1.28" + echo "$current is not supported 'docker compose' version, please upgrade to minimal supported version:1.28" exit 1 fi fi diff --git a/docker/extra_settings/README.md b/docker/extra_settings/README.md index e919e1917b..b3a8fc0edd 100644 --- a/docker/extra_settings/README.md +++ b/docker/extra_settings/README.md @@ -6,7 +6,7 @@ If a file if placed here, it will be copied on startup to `dojo/settings/local_s For an example, see [template-local_settings](../../dojo/settings/template-local_settings) Please note this copy action could fail if you have mounted the full `dojo/` folder, but that is owned by a different user/group. -That's why this copy action only happens in docker-compose release mode, and not in dev/debug/unit_tests/integration_tests modes. +That's why this copy action only happens in docker compose release mode, and not in dev/debug/unit_tests/integration_tests modes. For advanced usage you can also place a `settings.dist.py` or `settings.py` file. These will also be copied on startup to dojo/settings. diff --git a/docs/content/en/contributing/how-to-write-a-parser.md b/docs/content/en/contributing/how-to-write-a-parser.md index 7495f7ba88..c87846cb62 100644 --- a/docs/content/en/contributing/how-to-write-a-parser.md +++ b/docs/content/en/contributing/how-to-write-a-parser.md @@ -15,7 +15,7 @@ All commands assume that you're located at the root of the django-DefectDojo clo - Checkout `dev` and make sure you're up to date with the latest changes. - It's advised that you create a dedicated branch for your development, such as `git checkout -b parser-name`. -It is easiest to use the docker-compose deployment as it has hot-reload capbility for uWSGI. +It is easiest to use the docker compose deployment as it has hot-reload capbility for uWSGI. Set up your environment to use the debug environment: `$ docker/setEnv.sh debug` @@ -27,7 +27,7 @@ Please have a look at [DOCKER.md](https://github.com/DefectDojo/django-DefectDoj You will want to build your docker images locally, and eventually pass in your local user's `uid` to be able to write to the image (handy for database migration files). Assuming your user's `uid` is `1000`, then: {{< highlight bash >}} -$ docker-compose build --build-arg uid=1000 +$ docker compose build --build-arg uid=1000 {{< /highlight >}} ## Which files do you need to modify? @@ -279,7 +279,7 @@ This ensures the file is closed at the end of the with statement, even if an exc ### Test database -To test your unit tests locally, you first need to grant some rights. Get your MySQL root password from the docker-compose logs, login as root and issue the following commands: +To test your unit tests locally, you first need to grant some rights. 
Get your MySQL root password from the docker compose logs, login as root and issue the following commands: {{< highlight mysql >}} MYSQL> grant all privileges on test_defectdojo.* to defectdojo@'%'; @@ -291,17 +291,17 @@ MYSQL> flush privileges; This local command will launch the unit test for your new parser {{< highlight bash >}} -$ docker-compose exec uwsgi bash -c 'python manage.py test unittests.tools.. -v2' +$ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.. -v2' {{< /highlight >}} Example for the blackduck hub parser: {{< highlight bash >}} -$ docker-compose exec uwsgi bash -c 'python manage.py test unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser -v2' +$ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser -v2' {{< /highlight >}} {{% alert title="Information" color="info" %}} -If you want to run all unit tests, simply run `$ docker-compose exec uwsgi bash -c 'python manage.py test unittests -v2'` +If you want to run all unit tests, simply run `$ docker compose exec uwsgi bash -c 'python manage.py test unittests -v2'` {{% /alert %}} ### Endpoint validation @@ -330,7 +330,7 @@ In the event where you'd have to change the model, e.g. to increase a database c * Create a new migration file in dojo/db_migrations by running and including as part of your PR {{< highlight bash >}} - $ docker-compose exec uwsgi bash -c 'python manage.py makemigrations -v2' + $ docker compose exec uwsgi bash -c 'python manage.py makemigrations -v2' {{< /highlight >}} ### Accept a different type of file to upload diff --git a/docs/content/en/getting_started/running-in-production.md b/docs/content/en/getting_started/running-in-production.md index 6da16d253b..4074acb8df 100644 --- a/docs/content/en/getting_started/running-in-production.md +++ b/docs/content/en/getting_started/running-in-production.md @@ -5,7 +5,7 @@ draft: false weight: 4 --- -## Production use with docker-compose +## Production use with docker compose The docker-compose.yml file in this repository is fully functional to evaluate DefectDojo in your local environment. @@ -76,7 +76,7 @@ Dockerfile.django-* for in-file references. You can execute the following command to see the configuration: -`docker-compose exec celerybeat bash -c "celery -A dojo inspect stats"` +`docker compose exec celerybeat bash -c "celery -A dojo inspect stats"` and see what is in effect. 
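The same worker statistics can also be read programmatically through Celery's inspection API. A minimal sketch, assuming the Celery application object is importable as `dojo.celery.app` (implied by the `-A dojo` argument above) and run inside the uwsgi or celerybeat container:

```python
# e.g. docker compose exec uwsgi python manage.py shell
from dojo.celery import app  # assumed location of the Celery app

stats = app.control.inspect().stats() or {}
for worker, info in stats.items():
    print(worker, "concurrency:", info.get("pool", {}).get("max-concurrency"))
```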
#### Asynchronous Import diff --git a/docs/content/en/getting_started/upgrading/2.23.md b/docs/content/en/getting_started/upgrading/2.23.md index 5ebcc4edc6..5525d10ce0 100644 --- a/docs/content/en/getting_started/upgrading/2.23.md +++ b/docs/content/en/getting_started/upgrading/2.23.md @@ -16,6 +16,6 @@ There is a migration process built into the upgrade that will automatically conv - If your deployment uses the MySQL containerized database, please see the following updates to run DefectDojo: - Use of the helper script "dc-up": `./dc-up.sh mysql-rabbitmq` or `./dc-up.sh mysql-redis` - Use of the helper script "dc-up-d": `./dc-up-d.sh mysql-rabbitmq` or `./dc-up-d.sh mysql-redis` - - Use of Docker Compose directly: `docker-compose --profile mysql-rabbitmq --env-file ./docker/environments/mysql-rabbitmq.env up` or `docker-compose --profile mysql-redis --env-file ./docker/environments/mysql-redis.env up` + - Use of Docker Compose directly: `docker compose --profile mysql-rabbitmq --env-file ./docker/environments/mysql-rabbitmq.env up` or `docker compose --profile mysql-redis --env-file ./docker/environments/mysql-redis.env up` For all other changes, check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.23.0) for the contents of the release. diff --git a/docs/content/en/getting_started/upgrading/2.30.md b/docs/content/en/getting_started/upgrading/2.30.md index 6029febd30..b2a0bc6608 100644 --- a/docs/content/en/getting_started/upgrading/2.30.md +++ b/docs/content/en/getting_started/upgrading/2.30.md @@ -10,7 +10,7 @@ There are instructions for upgrading to 2.30.0 if you disabled `enable_auditlog` Parameter `enable_auditlog` is not possible to set through System settings anymore. If you set this parameter or you need to change it to `False` (to disable audit logging), set environmental variable `DD_ENABLE_AUDITLOG` to `False`. -If you are using docker-compose, another EnvVar should be added to the `docker-compose.yml` file in all the containers ran by the django image. This should do the trick +If you are using docker compose, another EnvVar should be added to the `docker-compose.yml` file in all the containers ran by the django image. This should do the trick ```yaml DD_ENABLE_AUDITLOG: ${DD_ENABLE_AUDITLOG:-False} ``` diff --git a/docs/content/en/getting_started/upgrading/_index.md b/docs/content/en/getting_started/upgrading/_index.md index 9a57986dee..a7f5aa3090 100644 --- a/docs/content/en/getting_started/upgrading/_index.md +++ b/docs/content/en/getting_started/upgrading/_index.md @@ -5,9 +5,9 @@ draft: false weight: 5 --- -## Docker-compose +## Docker compose -When you deploy a vanilla docker-compose, it will create a persistent +When you deploy a vanilla docker compose, it will create a persistent volume for your Postgres database. As long as your volume is there, you should not lose any data. @@ -19,7 +19,7 @@ DockerHub to update. {{% /alert %}} -The generic upgrade method for docker-compose are as follows: +The generic upgrade method for docker compose are as follows: - Pull the latest version ``` {.sourceCode .bash} @@ -46,10 +46,10 @@ The generic upgrade method for docker-compose are as follows: - Re-start DefectDojo, allowing for container recreation: `./dc-up-d.sh` - Database migrations will be run automatically by the initializer. 
- Check the output via `docker-compose logs initializer` or relevant k8s command + Check the output via `docker compose logs initializer` or relevant k8s command - If you have the initializer disabled (or if you want to be on the safe side), run the migration command: - `docker-compose exec uwsgi /bin/bash -c "python manage.py migrate"` + `docker compose exec uwsgi /bin/bash -c "python manage.py migrate"` ### Building your local images @@ -64,7 +64,7 @@ first. git merge origin/master ``` -Then replace the first step of the above generic upgrade method for docker-compose with: `docker-compose build` +Then replace the first step of the above generic upgrade method for docker compose with: `docker compose build` ## godojo installations diff --git a/docs/content/en/integrations/jira.md b/docs/content/en/integrations/jira.md index e7a19329bd..b6bc83fe20 100644 --- a/docs/content/en/integrations/jira.md +++ b/docs/content/en/integrations/jira.md @@ -167,19 +167,19 @@ optional arguments: This can be executed from the uwsgi docker container using: {{< highlight bash >}} -$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation' +$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation' {{< /highlight >}} DEBUG output can be obtains via `-v 3`, but only after increasing the logging to DEBUG level in your settings.dist.py or local_settings.py file {{< highlight bash >}} -$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation -v 3' +$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation -v 3' {{< /highlight >}} At the end of the command a semicolon seperated CSV summary will be printed. This can be captured by redirecting stdout to a file: {{< highlight bash >}} -$ docker-compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation > jira_reconciliation.csv' +$ docker compose exec uwsgi /bin/bash -c 'python manage.py jira_status_reconciliation > jira_reconciliation.csv' {{< /highlight >}} diff --git a/docs/content/en/integrations/ldap-authentication.md b/docs/content/en/integrations/ldap-authentication.md index 1769704373..307f1029a0 100644 --- a/docs/content/en/integrations/ldap-authentication.md +++ b/docs/content/en/integrations/ldap-authentication.md @@ -116,7 +116,7 @@ Read the docs for Django Authentication with LDAP here: https://django-auth-ldap #### docker-compose.yml -In order to pass the variables to the settings.dist.py file via docker, it's a good idea to add these to the docker-compose file. +In order to pass the variables to the settings.dist.py file via docker, it's a good idea to add these to the docker compose file. You can do this by adding the following variables to the environment section for the uwsgi image: ```yaml diff --git a/docs/content/en/usage/features.md b/docs/content/en/usage/features.md index f1020ffd4c..5f99f34023 100644 --- a/docs/content/en/usage/features.md +++ b/docs/content/en/usage/features.md @@ -357,7 +357,7 @@ to the hashcode configuration or calculation logic. We will mention this in the To regenerate the hashcodes, use the `dedupe` management command: {{< highlight bash >}} -docker-compose exec uwsgi ./manage.py dedupe --hash_code_only +docker compose exec uwsgi ./manage.py dedupe --hash_code_only {{< / highlight >}} This will only regenerated the hashcodes, but will not run any deduplication logic on existing findings. 
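For context on why regeneration is needed: the hash code is derived from a configurable set of finding fields, so any change to that configuration invalidates the stored values. A deliberately simplified sketch of the idea follows; the field list and helper name are illustrative only, and the real calculation lives on the `Finding` model and honours the per-parser hash code field configuration:

```python
import hashlib


def illustrative_hash_code(finding, fields=("title", "cwe", "line", "file_path", "description")):
    """Toy illustration: join the configured fields and hash them.

    The stored value depends entirely on which fields are configured, which
    is why changing that configuration requires re-running
    `dedupe --hash_code_only` over existing findings.
    """
    material = "|".join(str(finding.get(field) or "") for field in fields)
    return hashlib.sha256(material.encode("utf-8")).hexdigest()


print(illustrative_hash_code({"title": "DUMMY FINDING", "cwe": 1, "line": 100}))
```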
@@ -365,14 +365,14 @@ If you want to run deduplication again on existing findings to make sure any dup hashcode config are marked as such, run: {{< highlight bash >}} -docker-compose exec uwsgi ./manage.py dedupe +docker compose exec uwsgi ./manage.py dedupe {{< / highlight >}} The deduplication part of this command will run the deduplication for each finding in a celery task. If you want to run the deduplication in the foreground process, use: {{< highlight bash >}} -docker-compose exec uwsgi ./manage.py dedupe --dedupe_sync +docker compose exec uwsgi ./manage.py dedupe --dedupe_sync {{< / highlight >}} Please note the deduplication process is resource intensive and can take a long time to complete @@ -502,10 +502,10 @@ You can of course change this default by modifying that stanza. ### Launching from the CLI You can also invoke the SLA notification function from the CLI. For -example, if run from docker-compose: +example, if run from docker compose: {{< highlight bash >}} -$ docker-compose exec uwsgi /bin/bash -c 'python manage.py sla_notifications' +$ docker compose exec uwsgi /bin/bash -c 'python manage.py sla_notifications' {{< / highlight >}} ## Reports diff --git a/readme-docs/DOCKER.md b/readme-docs/DOCKER.md index d757f8eb81..a85d9f55f2 100644 --- a/readme-docs/DOCKER.md +++ b/readme-docs/DOCKER.md @@ -8,7 +8,7 @@ Although Docker Compose is one of the supported installation methods to deploy a # Prerequisites * Docker version - * Installing with docker-compose requires at least Docker 19.03.0 and Docker Compose 1.28.0. See "Checking Docker versions" below for version errors during running docker-compose. + * Installing with docker compose requires at least Docker 19.03.0 and Docker Compose 1.28.0. See "Checking Docker versions" below for version errors during running docker compose. * Proxies * If you're behind a corporate proxy check https://docs.docker.com/network/proxy/ . @@ -100,7 +100,7 @@ This will run the application based on merged configurations from docker-compose * Hot-reloading for the **celeryworker** container is not yet implemented. When working on deduplication for example, restart the celeryworker container with: ``` -docker-compose restart celeryworker +docker compose restart celeryworker ``` * The postgres port is forwarded to the host so that you can access your database from outside the container. @@ -126,7 +126,7 @@ To find out the admin password, check the very beginning of the console output of the initializer container by running: ```zsh -docker-compose logs initializer | grep "Admin password:" +docker compose logs initializer | grep "Admin password:" ``` Make sure you write down the first password generated as you'll need it when re-starting the application. @@ -141,7 +141,7 @@ docker exec -it django-defectdojo-uwsgi-1 ./manage.py changepassword admin ``` # Logging -For docker-compose release mode the log level is INFO. In the other modes the log level is DEBUG. Logging is configured in `settings.dist.py` and can be tuned using a `local_settings.py`, see [template for local_settings.py](dojo/settings/template-local_settings). For example the deduplication logger can be set to DEBUG in a local_settings.py file: +For docker compose release mode the log level is INFO. In the other modes the log level is DEBUG. Logging is configured in `settings.dist.py` and can be tuned using a `local_settings.py`, see [template for local_settings.py](dojo/settings/template-local_settings). 
For example the deduplication logger can be set to DEBUG in a local_settings.py file: ``` @@ -251,7 +251,7 @@ To change the port: - update `docker-compose.override.https.yml` or set DD_TLS_PORT in the environment) - restart the application -NB: some third party software may require to change the exposed port in Dockerfile.nginx as they use docker-compose declarations to discover which ports to map when publishing the application. +NB: some third party software may require to change the exposed port in Dockerfile.nginx as they use docker compose declarations to discover which ports to map when publishing the application. # Run the tests with Docker Compose @@ -324,7 +324,7 @@ docker logs -f django-defectdojo_integration-tests_1 # Checking Docker versions -Run the following to determine the versions for docker and docker-compose: +Run the following to determine the versions for docker and docker compose: ```zsh $ docker version @@ -345,58 +345,14 @@ Server: OS/Arch: linux/amd64 Experimental: false -$ docker-compose version -docker-compose version 1.18.0, build 8dd22a9 +$ docker compose version +Docker Compose version 1.18.0, build 8dd22a9 docker-py version: 2.6.1 CPython version: 2.7.13 OpenSSL version: OpenSSL 1.0.1t 3 May 2016 ``` -In this case, both docker (version 17.09.0-ce) and docker-compose (1.18.0) need to be updated. +In this case, both docker (version 17.09.0-ce) and docker compose (1.18.0) need to be updated. Follow [Docker's documentation](https://docs.docker.com/install/) for your OS to get the latest version of Docker. For the docker command, most OSes have a built-in update mechanism like "apt upgrade". -Docker Compose isn't packaged like Docker and you'll need to manually update an existing install if using Linux. For Linux, either follow the instructions in the [Docker Compose documentation](https://docs.docker.com/compose/install/) or use the shell script below. The script below will update docker-compose to the latest version automatically. 
You will need to make the script executable and have sudo privileges to upgrade docker-compose: - -```zsh -#!/bin/bash - -# Set location of docker-compose binary - shouldn't need to modify this -DESTINATION=/usr/local/bin/docker-compose - -# Get latest docker-compose version -VERSION=$(curl --silent https://api.github.com/repos/docker/compose/releases/latest | jq .name -r) - -# Output some info on what this is going to do -echo "Note: docker-compose version $VERSION will be downloaded from:" -echo "https://github.com/docker/compose/releases/download/${VERSION}/docker-compose-$(uname -s)-$(uname -m)" -echo "Enter sudo password to install docker-compose" - -# Download and install latest docker compose -sudo curl -L https://github.com/docker/compose/releases/download/${VERSION}/docker-compose-$(uname -s)-$(uname -m) -o $DESTINATION -sudo chmod +x $DESTINATION - -# Output new docker-compose version info -echo "" -docker-compose version -``` - -Running the script above will look like: - -```zsh -$ vi update-docker-compose -$ chmod u+x update-docker-compose -$ ./update-docker-compose -Note: docker-compose version 1.24.0 will be downloaded from: -https://github.com/docker/compose/releases/download/1.24.0/docker-compose-Linux-x86_64 -Enter sudo password to install docker-compose - % Total % Received % Xferd Average Speed Time Time Time Current - Dload Upload Total Spent Left Speed -100 617 0 617 0 0 1778 0 --:--:-- --:--:-- --:--:-- 1778 -100 15.4M 100 15.4M 0 0 2478k 0 0:00:06 0:00:06 --:--:-- 2910k - -docker-compose version 1.24.0, build 0aa59064 -docker-py version: 3.7.2 -CPython version: 3.6.8 -OpenSSL version: OpenSSL 1.1.0j 20 Nov 2018 -``` diff --git a/tests/local-integration-tests.sh b/tests/local-integration-tests.sh index afbb624f94..db81412532 100755 --- a/tests/local-integration-tests.sh +++ b/tests/local-integration-tests.sh @@ -12,7 +12,7 @@ echo "Running Product type integration tests" if python3 tests/regulations_test.py ; then echo "Success: Regulation integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Regulation integration test failed."; exit 1 fi @@ -20,7 +20,7 @@ echo "Running Product type integration tests" if python3 tests/product_type_test.py ; then echo "Success: Product type integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Product type integration test failed."; exit 1 fi @@ -28,7 +28,7 @@ echo "Running Product integration tests" if python3 tests/product_test.py ; then echo "Success: Product integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Product integration test failed"; exit 1 fi @@ -36,7 +36,7 @@ echo "Running Dedupe integration tests" if python3 tests/dedupe_test.py ; then echo "Success: Dedupe integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Dedupe integration test failed"; exit 1 fi @@ -44,7 +44,7 @@ echo "Running Endpoint integration tests" if python3 tests/endpoint_test.py ; then echo "Success: Endpoint integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Endpoint integration test failed"; exit 1 fi @@ -52,7 +52,7 @@ echo "Running Engagement integration tests" if python3 tests/engagement_test.py ; then echo "Success: Engagement integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker 
compose logs uwsgi --tail=120 echo "Error: Engagement integration test failed"; exit 1 fi @@ -60,7 +60,7 @@ echo "Running Environment integration tests" if python3 tests/environment_test.py ; then echo "Success: Environment integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Environment integration test failed"; exit 1 fi @@ -68,7 +68,7 @@ echo "Running Finding integration tests" if python3 tests/finding_test.py ; then echo "Success: Finding integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Finding integration test failed"; exit 1 fi @@ -76,7 +76,7 @@ echo "Running Test integration tests" if python3 tests/test_test.py ; then echo "Success: Test integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Test integration test failed"; exit 1 fi @@ -84,7 +84,7 @@ echo "Running User integration tests" if python3 tests/user_test.py ; then echo "Success: User integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: User integration test failed"; exit 1 fi @@ -92,7 +92,7 @@ echo "Running Ibm Appscan integration test" if python3 tests/ibm_appscan_test.py ; then echo "Success: Ibm AppScan integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Ibm AppScan integration test failed"; exit 1 fi @@ -100,7 +100,7 @@ echo "Running Report Builder integration tests" if python3 tests/report_builder_test.py ; then echo "Success: Report Builder integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Report Builder integration test failed."; exit 1 fi @@ -108,7 +108,7 @@ echo "Running Search integration test" if python3 tests/search_test.py ; then echo "Success: Search integration tests passed" else - docker-compose logs uwsgi --tail=120 + docker compose logs uwsgi --tail=120 echo "Error: Search integration test failed"; exit 1 fi From e948cde47659b64a8e2133efa25f6e3da2c01372 Mon Sep 17 00:00:00 2001 From: dogboat Date: Fri, 20 Sep 2024 17:09:12 -0400 Subject: [PATCH 04/22] Metrics findings tests (#10930) * metrics-findings-tests some work on updating metrics findings tests; includes some changes to metrics utils to improve readability and adjust timeframes returned for data sets * metrics-findings-tests linter fixes on metrics unittest * metrics-findings-tests linter fix on metrics utils * metrics-findings-tests tweaking tests * metrics-findings-tests undo fixtures changes * metrics-findings-tests add new test data * metrics-findings-tests fix fixtures * metrics-findings-tests update tests * metrics-findings-tests linter fixes * metrics-findings-tests move new metrics finding data to its own fixture --- .../unit_metrics_additional_data.json | 482 ++++++++++++++++++ dojo/metrics/utils.py | 72 +-- dojo/static/dojo/js/metrics.js | 16 +- unittests/test_metrics_queries.py | 114 +++-- 4 files changed, 601 insertions(+), 83 deletions(-) create mode 100644 dojo/fixtures/unit_metrics_additional_data.json diff --git a/dojo/fixtures/unit_metrics_additional_data.json b/dojo/fixtures/unit_metrics_additional_data.json new file mode 100644 index 0000000000..721e47eaac --- /dev/null +++ b/dojo/fixtures/unit_metrics_additional_data.json @@ -0,0 +1,482 @@ +[ + { + "pk": 240, + "model": "dojo.finding", + "fields": { + 
"last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null + } + }, + { + "pk": 241, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null, + "risk_accepted": true + } + }, + { + "pk": 242, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-01", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "High Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": null, + "file_path": "", + "duplicate_finding": 2, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": null, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", + "last_reviewed": null, + "risk_accepted": true + } + }, + { + "pk": 243, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-31", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "High", + "false_p": false, + "verified": false, + "severity": "High", + "title": "DUMMY FINDING", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": 
"MITIGATION", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 3, + "out_of_scope": false, + "cwe": 1, + "file_path": "", + "duplicate_finding": null, + "description": "TEST finding", + "mitigated_by": null, + "reporter": 2, + "mitigated": null, + "active": false, + "line": 100, + "under_review": false, + "defect_review_requested_by": 2, + "review_requested_by": 2, + "thread_id": 1, + "url": "http://www.example.com", + "notes": [ + 1 + ], + "dynamic_finding": false, + "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", + "last_reviewed": null, + "risk_accepted": true, + "is_mitigated": true + } + }, + { + "pk": 244, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-29", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": true, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 245, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-27", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 22, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 246, + "model": "dojo.finding", + "fields": { + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-02", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 33, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 22, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + 
"defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 247, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 12345, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-03", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "Low Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 55, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", + "last_reviewed": null + } + }, + { + "pk": 248, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 6789, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2017-12-27", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": true, + "severity": "Low", + "title": "UID Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": false, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 77, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": null, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": true, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", + "last_reviewed": null, + "is_mitigated": true + } + }, + { + "pk": 249, + "model": "dojo.finding", + "fields": { + "unique_id_from_tool": 6789, + "last_reviewed_by": null, + "reviewers": [], + "static_finding": false, + "date": "2018-01-04", + "references": "", + "files": [], + "payload": null, + "under_defect_review": false, + "impact": "Low", + "false_p": false, + "verified": false, + "severity": "Low", + "title": "UID Impact Test Finding", + "param": null, + "created": "2017-12-01T00:00:00Z", + "duplicate": true, + "mitigation": "test mitigation", + "found_by": [ + 1 + ], + "numerical_severity": "S0", + "test": 77, + "out_of_scope": false, + "cwe": null, + "file_path": "/dev/urandom", + "duplicate_finding": 224, + "description": "test finding", + "mitigated_by": null, + "reporter": 1, + "mitigated": null, + "active": false, + "line": 123, + "under_review": false, + "defect_review_requested_by": 1, + "review_requested_by": 1, + "thread_id": 11, + "url": null, + "notes": [], + "dynamic_finding": false, + "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", + "last_reviewed": null + } + } +] \ No newline at end of file diff 
--git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index 8ca345b41f..d9e01e9a1b 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -3,8 +3,7 @@ from datetime import date, datetime, timedelta from enum import Enum from functools import partial -from math import ceil -from typing import Any, Callable, NamedTuple, TypeVar, Union +from typing import Any, Callable, NamedTuple, Type, TypeVar, Union from dateutil.relativedelta import relativedelta from django.contrib import messages @@ -35,12 +34,19 @@ ) +def get_metrics_finding_filter_class() -> Type[Union[MetricsFindingFilter, MetricsFindingFilterWithoutObjectLookups]]: + if get_system_setting("filter_string_matching", False): + return MetricsFindingFilterWithoutObjectLookups + else: + return MetricsFindingFilter + + def finding_queries( prod_type: QuerySet[Product_Type], request: HttpRequest, ) -> dict[str, Any]: # Get the initial list of findings the user is authorized to see - findings_query = get_authorized_findings( + all_authorized_findings: QuerySet[Finding] = get_authorized_findings( Permissions.Finding_View, user=request.user, ).select_related( @@ -54,46 +60,47 @@ def finding_queries( "test__test_type", ) - filter_string_matching = get_system_setting("filter_string_matching", False) - finding_filter_class = MetricsFindingFilterWithoutObjectLookups if filter_string_matching else MetricsFindingFilter - findings = finding_filter_class(request.GET, queryset=findings_query) - form = findings.form - findings_qs = queryset_check(findings) - # Quick check to determine if the filters were too tight and filtered everything away - if not findings_qs.exists() and not findings_query.exists(): - findings = findings_query - findings_qs = findings if isinstance(findings, QuerySet) else findings.qs + finding_filter_class = get_metrics_finding_filter_class() + findings_filter = finding_filter_class(request.GET, queryset=all_authorized_findings) + form = findings_filter.form + filtered_findings: QuerySet[Finding] = queryset_check(findings_filter) + # Quick check to determine if the filters were too tight and filtered everything away. If so, fall back to using all + # authorized Findings instead. + if not filtered_findings.exists() and all_authorized_findings.exists(): + filtered_findings = all_authorized_findings messages.add_message( request, messages.ERROR, _("All objects have been filtered away. 
Displaying all objects"), extra_tags="alert-danger") - start_date, end_date = get_date_range(findings_qs) + start_date, end_date = get_date_range(filtered_findings) # Filter by the date ranges supplied - findings_query = findings_query.filter(date__range=[start_date, end_date]) + all_findings_within_date_range = all_authorized_findings.filter(date__range=[start_date, end_date]) # Get the list of closed and risk accepted findings - findings_closed = findings_query.filter(CLOSED_FINDINGS_QUERY) - accepted_findings = findings_query.filter(ACCEPTED_FINDINGS_QUERY) - active_findings = findings_query.filter(OPEN_FINDINGS_QUERY) + closed_filtered_findings = all_findings_within_date_range.filter(CLOSED_FINDINGS_QUERY) + accepted_filtered_findings = all_findings_within_date_range.filter(ACCEPTED_FINDINGS_QUERY) + active_filtered_findings = all_findings_within_date_range.filter(OPEN_FINDINGS_QUERY) # filter by product type if applicable if len(prod_type) > 0: - findings_query = findings_query.filter(test__engagement__product__prod_type__in=prod_type) - findings_closed = findings_closed.filter(test__engagement__product__prod_type__in=prod_type) - accepted_findings = accepted_findings.filter(test__engagement__product__prod_type__in=prod_type) - active_findings = active_findings.filter(test__engagement__product__prod_type__in=prod_type) + all_findings_within_date_range = all_findings_within_date_range.filter( + test__engagement__product__prod_type__in=prod_type) + closed_filtered_findings = closed_filtered_findings.filter(test__engagement__product__prod_type__in=prod_type) + accepted_filtered_findings = accepted_filtered_findings.filter( + test__engagement__product__prod_type__in=prod_type) + active_filtered_findings = active_filtered_findings.filter(test__engagement__product__prod_type__in=prod_type) # Get the severity counts of risk accepted findings - accepted_findings_counts = severity_count(accepted_findings, "aggregate", "severity") + accepted_findings_counts = severity_count(accepted_filtered_findings, "aggregate", "severity") weeks_between, months_between = period_deltas(start_date, end_date) query_counts_for_period = query_counts( - findings_query, - active_findings, - accepted_findings, + all_findings_within_date_range, + active_filtered_findings, + accepted_filtered_findings, start_date, MetricsType.FINDING, ) @@ -117,9 +124,9 @@ def finding_queries( )[:10] return { - "all": findings_query, - "closed": findings_closed, - "accepted": accepted_findings, + "all": filtered_findings, + "closed": closed_filtered_findings, + "accepted": accepted_filtered_findings, "accepted_count": accepted_findings_counts, "top_ten": top_ten, "monthly_counts": monthly_counts, @@ -454,13 +461,8 @@ def period_deltas(start_date, end_date): :return: A tuple of integers representing (number of weeks between the dates, number of months between the dates) """ r = relativedelta(end_date, start_date) - months_between = (r.years * 12) + r.months - # include current month - months_between += 1 - - weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) - if weeks_between <= 0: - weeks_between += 2 + months_between = max((r.years * 12) + r.months, 2) + weeks_between = max((end_date - start_date).days // 7, 2) return weeks_between, months_between diff --git a/dojo/static/dojo/js/metrics.js b/dojo/static/dojo/js/metrics.js index 2e95555d37..2fd518aa3a 100644 --- a/dojo/static/dojo/js/metrics.js +++ b/dojo/static/dojo/js/metrics.js @@ -103,11 +103,16 @@ function homepage_severity_plot(critical, high, 
medium, low) { dashboard-metrics.html */ +function getTicks(critical, high, medium, low) { + return [...new Set(critical.concat(high, medium, low).map(x => x[0]))] +} + function opened_per_month(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%y" + timeformat: "%m/%y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -153,7 +158,8 @@ function accepted_per_month(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%y" + timeformat: "%m/%y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -199,7 +205,8 @@ function opened_per_week(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%d/%Y" + timeformat: "%m/%d/%Y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 @@ -245,7 +252,8 @@ function accepted_per_week(critical, high, medium, low) { var options = { xaxes: [{ mode: 'time', - timeformat: "%m/%d/%Y" + timeformat: "%m/%d/%Y", + ticks: getTicks(critical, high, medium, low), }], yaxes: [{ min: 0 diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py index c52c602ea3..460b426e8b 100644 --- a/unittests/test_metrics_queries.py +++ b/unittests/test_metrics_queries.py @@ -20,12 +20,43 @@ def add(*args, **kwargs): pass +#### +# Test Findings data +#### +FINDING_1 = {"id": 4, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_2 = {"id": 5, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", 
"impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_3 = {"id": 6, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_4 = {"id": 7, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, 
"cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": False, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_5 = {"id": 24, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_6 = {"id": 125, "title": "Low Impact Test Finding", "date": 
date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_7 = {"id": 225, "title": "UID Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, 
"test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_8 = {"id": 240, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": True, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_9 = {"id": 241, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, 
"effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_10 = {"id": 242, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_11 = {"id": 243, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": True, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, 
"nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_12 = {"id": 244, "title": "Low Impact Test Finding", "date": date(2017, 12, 29), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_13 = {"id": 245, "title": "Low Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, 
"unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_14 = {"id": 246, "title": "Low Impact Test Finding", "date": date(2018, 1, 2), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_15 = {"id": 247, "title": "Low Impact Test Finding", "date": date(2018, 1, 3), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": 
False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_16 = {"id": 248, "title": "UID Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": True, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_17 = {"id": 249, "title": "UID Impact Test Finding", "date": date(2018, 1, 4), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": 
"6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} + + +ALL_FINDINGS = [FINDING_1, FINDING_2, FINDING_3, FINDING_4, FINDING_5, FINDING_6, FINDING_7, FINDING_8, FINDING_9, + FINDING_10, FINDING_11, FINDING_12, FINDING_13, FINDING_14, FINDING_15, FINDING_16, FINDING_17] +CLOSED_FINDINGS = [FINDING_11, FINDING_16] +ACCEPTED_FINDINGS = [FINDING_9, FINDING_10, FINDING_11] + + class FindingQueriesTest(DojoTestCase): - fixtures = ["dojo_testdata.json"] + fixtures = ["dojo_testdata.json", "unit_metrics_additional_data.json"] def setUp(self): user = User.objects.get(username="user1") - self.request = RequestFactory().get(reverse("metrics")) + self.request = RequestFactory().get(reverse("metrics"), { + "start_date": "2017-12-26", + "end_date": "2018-01-05", + }) self.request.user = user self.request._messages = MockMessages() @@ -49,14 +80,13 @@ def test_finding_queries(self, mock_timezone): mock_datetime = datetime(2020, 12, 9, tzinfo=timezone.utc) mock_timezone.return_value = mock_datetime - # Queries over Finding and Risk_Acceptance - with self.assertNumQueries(22): + # Queries over Finding + with self.assertNumQueries(27): product_types = [] finding_queries = utils.finding_queries( product_types, self.request, ) - self.assertSequenceEqual( list(finding_queries.keys()), [ @@ -73,64 +103,57 @@ def test_finding_queries(self, mock_timezone): "form", ], ) - # Assert that we get expected querysets back. This is to be used to # support refactoring, in attempt of lowering the query count. 
+ self.assertSequenceEqual(finding_queries["all"].values(), ALL_FINDINGS) + self.assertSequenceEqual(finding_queries["closed"].values(), CLOSED_FINDINGS) + self.assertSequenceEqual(finding_queries["accepted"].values(), ACCEPTED_FINDINGS) + self.assertSequenceEqual( - finding_queries["all"].values(), - [], - # [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}] - ) - self.assertSequenceEqual( - finding_queries["closed"].values(), - [], - ) - self.assertSequenceEqual( - finding_queries["accepted"].values(), - [], - ) - self.assertSequenceEqual( - list(finding_queries["accepted_count"].values()), - [0, 0, 0, 0, 0, 0], + finding_queries["accepted_count"], + {"total": 3, "critical": 0, "high": 3, "medium": 0, "low": 0, "info": 0}, ) self.assertSequenceEqual( finding_queries["top_ten"].values(), [], ) self.assertEqual( - list(finding_queries["monthly_counts"].values()), - [ - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + finding_queries["monthly_counts"], + { + "opened_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 2, "medium": 0, "low": 3, "info": 0, "total": 5, "closed": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 6, "medium": 0, "low": 6, "info": 0, "total": 12, "closed": 0}, ], - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 
12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + "active_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 2, "info": 0, "total": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, ], - [ - {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, - {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + "accepted_per_period": [ + {"epoch": 1509494400000, "grouped_date": date(2017, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1512086400000, "grouped_date": date(2017, 12, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 2, "medium": 0, "low": 0, "info": 0, "total": 2}, ], - ], + }, ) self.assertEqual( finding_queries["weekly_counts"], { "opened_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, - ], - "accepted_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1513555200000, "grouped_date": date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 2, "medium": 0, "low": 3, "info": 0, "total": 5, "closed": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 6, "medium": 0, "low": 6, "info": 0, "total": 12, "closed": 0}, ], "active_per_period": [ - {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, - {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1513555200000, "grouped_date": date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 0, "medium": 0, "low": 2, "info": 0, "total": 2}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + ], + "accepted_per_period": [ + {"epoch": 1513555200000, "grouped_date": 
date(2017, 12, 18), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1514160000000, "grouped_date": date(2017, 12, 25), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 0, "total": 1}, + {"epoch": 1514764800000, "grouped_date": date(2018, 1, 1), "critical": 0, "high": 2, "medium": 0, "low": 0, "info": 0, "total": 2}, ], }, ) @@ -224,14 +247,17 @@ def test_endpoint_queries(self): [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 5, "total": 6, "closed": 0}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, ], [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 4, "total": 5}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], [ {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 1, "total": 1}, + {"epoch": 1596240000000, "grouped_date": date(2020, 8, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], ], ) From c0b12fecfe5173ee7dfcf233775901a74d3cb740 Mon Sep 17 00:00:00 2001 From: manuelsommer <47991713+manuel-sommer@users.noreply.github.com> Date: Fri, 20 Sep 2024 23:11:00 +0200 Subject: [PATCH 05/22] :sparkles: implement krakend audit parser (#10924) * :sparkles: implement krakend audit parser * advance unittests --- .../parsers/file/krakend_audit.md | 11 ++++++ dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 2 ++ dojo/tools/krakend_audit/__init__.py | 1 + dojo/tools/krakend_audit/parser.py | 34 +++++++++++++++++++ .../scans/krakend_audit/many_findings.json | 30 ++++++++++++++++ .../scans/krakend_audit/no_findings.json | 4 +++ unittests/tools/test_krakend_audit_parser.py | 22 ++++++++++++ 8 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 docs/content/en/integrations/parsers/file/krakend_audit.md create mode 100644 dojo/tools/krakend_audit/__init__.py create mode 100644 dojo/tools/krakend_audit/parser.py create mode 100644 unittests/scans/krakend_audit/many_findings.json create mode 100644 unittests/scans/krakend_audit/no_findings.json create mode 100644 unittests/tools/test_krakend_audit_parser.py diff --git a/docs/content/en/integrations/parsers/file/krakend_audit.md b/docs/content/en/integrations/parsers/file/krakend_audit.md new file mode 100644 index 0000000000..9598ce343b --- /dev/null +++ b/docs/content/en/integrations/parsers/file/krakend_audit.md @@ -0,0 +1,11 @@ +--- +title: "KrakenD Audit Scan" +toc_hide: true +--- +Import KrakenD Audit Scan results in JSON format. You can use the following command to audit the KrakenD configuration which then can be uploaded to DefectDojo: +``` +krakend audit -c krakend.json -f "{{ marshal . 
}}" >> recommendations.json +``` + +### Sample Scan Data +Sample KrakenD Audit scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/krakend_audit). \ No newline at end of file diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index 38c8e49852..5dfa946a6c 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -702d74c8bc703d11c03cf5b3f7c4319ad0cdeaef68db6426d1112c59e59365a6 +b330f7dbd92c2df5a2a0632befc9775bef4a1c62b90375aa511957ebcd0ea82a diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index bd33f7fed8..d96733ca8f 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1280,6 +1280,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": ["title", "endpoints", "severity"], "ThreatComposer Scan": ["title", "description"], "Invicti Scan": ["title", "description", "severity"], + "KrakenD Audit Scan": ["description", "mitigation", "severity"], } # Override the hardcoded settings here via the env var @@ -1505,6 +1506,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": DEDUPE_ALGO_HASH_CODE, "ThreatComposer Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, "Invicti Scan": DEDUPE_ALGO_HASH_CODE, + "KrakenD Audit Scan": DEDUPE_ALGO_HASH_CODE, } # Override the hardcoded settings here via the env var diff --git a/dojo/tools/krakend_audit/__init__.py b/dojo/tools/krakend_audit/__init__.py new file mode 100644 index 0000000000..3ad798a42b --- /dev/null +++ b/dojo/tools/krakend_audit/__init__.py @@ -0,0 +1 @@ +__author__ = "manuel-sommer" diff --git a/dojo/tools/krakend_audit/parser.py b/dojo/tools/krakend_audit/parser.py new file mode 100644 index 0000000000..062c978e3c --- /dev/null +++ b/dojo/tools/krakend_audit/parser.py @@ -0,0 +1,34 @@ +import json + +from dojo.models import Finding + + +class KrakenDAuditParser: + def get_scan_types(self): + return ["KrakenD Audit Scan"] + + def get_label_for_scan_types(self, scan_type): + return scan_type # no custom label for now + + def get_description_for_scan_types(self, scan_type): + return "Import JSON reports of KrakenD Audit Scans." + + def get_findings(self, file, test): + data = json.load(file) + findings = [] + for recommendation in data.get("recommendations", []): + rule = recommendation.get("rule", None) + severity = recommendation.get("severity") + message = recommendation.get("message", None) + if rule is not None: + finding = Finding( + title="KrakenD" + "_" + rule, + test=test, + description="**Rule:** " + rule, + severity=severity.lower().capitalize(), + mitigation=message, + static_finding=True, + dynamic_finding=False, + ) + findings.append(finding) + return findings diff --git a/unittests/scans/krakend_audit/many_findings.json b/unittests/scans/krakend_audit/many_findings.json new file mode 100644 index 0000000000..726ae2d029 --- /dev/null +++ b/unittests/scans/krakend_audit/many_findings.json @@ -0,0 +1,30 @@ +{ + "recommendations": [ + { + "rule": "2.1.2", + "severity": "HIGH", + "message": "Enable TLS or use a terminator in front of KrakenD." + }, + { + "rule": "2.1.7", + "severity": "HIGH", + "message": "Enable HTTP security header checks (security/http)." + }, + { + "rule": "2.2.1", + "severity": "MEDIUM", + "message": "Hide the version banner in runtime." + }, + { + "rule": "3.1.1", + "severity": "LOW", + "message": "Enable a bot detector." 
+ }, + { + "rule": "4.2.1", + "severity": "MEDIUM", + "message": "Implement a telemetry system for tracing for monitoring and troubleshooting." + } + ], + "stats": {} + } \ No newline at end of file diff --git a/unittests/scans/krakend_audit/no_findings.json b/unittests/scans/krakend_audit/no_findings.json new file mode 100644 index 0000000000..cfbc08ae70 --- /dev/null +++ b/unittests/scans/krakend_audit/no_findings.json @@ -0,0 +1,4 @@ +{ + "recommendations": [], + "stats": {} + } \ No newline at end of file diff --git a/unittests/tools/test_krakend_audit_parser.py b/unittests/tools/test_krakend_audit_parser.py new file mode 100644 index 0000000000..60f44d51ec --- /dev/null +++ b/unittests/tools/test_krakend_audit_parser.py @@ -0,0 +1,22 @@ +from dojo.models import Test +from dojo.tools.krakend_audit.parser import KrakenDAuditParser +from unittests.dojo_test_case import DojoTestCase + + +class TestKrakenDAuditParser(DojoTestCase): + + def test_parse_no_findings(self): + with open("unittests/scans/krakend_audit/no_findings.json", encoding="utf-8") as testfile: + parser = KrakenDAuditParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + def test_parse_many_findings(self): + with open("unittests/scans/krakend_audit/many_findings.json", encoding="utf-8") as testfile: + parser = KrakenDAuditParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(5, len(findings)) + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual("Enable TLS or use a terminator in front of KrakenD.", finding.mitigation) From 1755effd674bdc83878162ccd7ee9909b50299b1 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:13:02 -0500 Subject: [PATCH 06/22] GHA Release: Update settings SHA when creating PR from master (#10927) --- .github/workflows/release-3-master-into-dev.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml index b5c8828ee1..cbd287d232 100644 --- a/.github/workflows/release-3-master-into-dev.yml +++ b/.github/workflows/release-3-master-into-dev.yml @@ -50,11 +50,15 @@ jobs: CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1) sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml + - name: Update settings SHA + run: sha256sum dojo/settings/settings.dist.py | cut -d ' ' -f1 > dojo/settings/.settings.dist.py.sha256sum + - name: Check numbers run: | grep version dojo/__init__.py grep appVersion helm/defectdojo/Chart.yaml grep version components/package.json + cat dojo/settings/.settings.dist.py.sha256sum - name: Create upgrade notes to documentation run: | @@ -132,11 +136,15 @@ jobs: CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1) sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 
'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml + - name: Update settings SHA + run: sha256sum dojo/settings/settings.dist.py | cut -d ' ' -f1 > dojo/settings/.settings.dist.py.sha256sum + - name: Check numbers run: | grep version dojo/__init__.py grep appVersion helm/defectdojo/Chart.yaml grep version components/package.json + cat dojo/settings/.settings.dist.py.sha256sum - name: Push version changes uses: stefanzweifel/git-auto-commit-action@v5.0.1 From b62884364cfeb44684214b3e7c1e908cc6134ff4 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:14:01 -0500 Subject: [PATCH 07/22] GHA: Remove Docker Caches (#10928) --- .../build-docker-images-for-testing.yml | 4 +--- .../release-x-manual-docker-containers.yml | 20 ------------------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index a8a570a9f8..c5753973ae 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -45,9 +45,7 @@ jobs: tags: defectdojo/defectdojo-${{ matrix.docker-image }}:${{ matrix.os }} file: Dockerfile.${{ matrix.docker-image }}-${{ matrix.os }} outputs: type=docker,dest=${{ matrix.docker-image }}-${{ matrix.os }}_img - cache-from: type=gha,scope=${{ matrix.docker-image }} - cache-to: type=gha,mode=max,scope=${{ matrix.docker-image }} - + # export docker images to be used in next jobs below - name: Upload image ${{ matrix.docker-image }} as artifact timeout-minutes: 10 diff --git a/.github/workflows/release-x-manual-docker-containers.yml b/.github/workflows/release-x-manual-docker-containers.yml index bae585d238..6f8862b621 100644 --- a/.github/workflows/release-x-manual-docker-containers.yml +++ b/.github/workflows/release-x-manual-docker-containers.yml @@ -49,18 +49,6 @@ jobs: id: buildx uses: docker/setup-buildx-action@v3 - - name: Cache Docker layers - uses: actions/cache@v4 - env: - docker-image: ${{ matrix.docker-image }} - with: - path: /tmp/.buildx-cache-${{ env.docker-image }} - key: ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name }}-${{ github.sha }}-${{ github.run_id }} - restore-keys: | - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name}}-${{ github.sha }} - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}-${{ env.workflow_name }} - ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ matrix.os }}- - - name: Build and push images with debian if: ${{ matrix.os == 'debian' }} uses: docker/build-push-action@v6 @@ -73,8 +61,6 @@ jobs: tags: ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}-${{ matrix.os }}, ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}, ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:latest file: ./Dockerfile.${{ env.docker-image }}-${{ matrix.os }} context: . - cache-from: type=local,src=/tmp/.buildx-cache-${{ env.docker-image }} - cache-to: type=local,dest=/tmp/.buildx-cache-${{ env.docker-image }} - name: Build and push images with alpine if: ${{ matrix.os == 'alpine' }} @@ -88,9 +74,3 @@ jobs: tags: ${{ env.REPO_ORG }}/defectdojo-${{ env.docker-image}}:${{ github.event.inputs.release_number }}-${{ matrix.os }} file: ./Dockerfile.${{ env.docker-image }}-${{ matrix.os }} context: . 
- cache-from: type=local,src=/tmp/.buildx-cache-${{ env.docker-image }} - cache-to: type=local,dest=/tmp/.buildx-cache-${{ env.docker-image }} -# platforms: ${{ matrix.platform }} - - - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} From aadf96b8162bfcf58556edc464354d1c0febb853 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:15:17 -0500 Subject: [PATCH 08/22] Prefetch Serialization: Add a preference during mapping (#10933) --- dojo/api_v2/prefetch/prefetcher.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/dojo/api_v2/prefetch/prefetcher.py b/dojo/api_v2/prefetch/prefetcher.py index 79a4b0e731..3596b3f940 100644 --- a/dojo/api_v2/prefetch/prefetcher.py +++ b/dojo/api_v2/prefetch/prefetcher.py @@ -3,11 +3,17 @@ from rest_framework.serializers import ModelSerializer +from dojo.models import FileUpload + from . import utils # Reduce the scope of search for serializers. SERIALIZER_DEFS_MODULE = "dojo.api_v2.serializers" +preferred_serializers = { + FileUpload: "FileSerializer", +} + class _Prefetcher: @staticmethod @@ -31,7 +37,11 @@ def _is_model_serializer(obj): for _, serializer in available_serializers: model = serializer.Meta.model - serializers[model] = serializer + if model in preferred_serializers: + if serializer.__name__ == preferred_serializers[model]: + serializers[model] = serializer + else: + serializers[model] = serializer # We add object->None to have a more uniform processing later on serializers[object] = None From 4d9bf9a5841d2594067110b5a17620b2efd8c70c Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:15:54 -0500 Subject: [PATCH 09/22] Manage Images: Do not display thumbnail for PDF (#10932) --- dojo/templatetags/display_tags.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 19a63541d3..6dcbcb2873 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -69,10 +69,10 @@ "mark_finding_duplicate": "Mark as duplicate", } -supported_file_formats = [ +supported_thumbnail_file_formats = [ "apng", "avif", "gif", "jpg", "jpeg", "jfif", "pjpeg", "pjp", - "png", "svg", "webp", "pdf", + "png", "svg", "webp", ] @@ -860,7 +860,7 @@ def jira_change(obj): def get_thumbnail(file): from pathlib import Path file_format = Path(file.file.url).suffix[1:] - return file_format in supported_file_formats + return file_format in supported_thumbnail_file_formats @register.filter From 072a18557bfd2897270a6cdbb1ae5bd87308d836 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:16:37 -0500 Subject: [PATCH 10/22] Prefetching: Add swagger docs for models already supporting prefetching (#10931) * Prefetching: Add swagger docs for models already supporting prefetching * Fix Flake8 * Correct unit tests --- dojo/api_v2/views.py | 369 +++++++------------------------------------ 1 file changed, 57 insertions(+), 312 deletions(-) diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 05d1652106..b36924640b 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -172,6 +172,33 @@ logger = logging.getLogger(__name__) +def schema_with_prefetch() -> dict: + return { + "list": extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + 
description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + "retrieve": extend_schema( + parameters=[ + OpenApiParameter( + "prefetch", + OpenApiTypes.STR, + OpenApiParameter.QUERY, + required=False, + description="List of fields for which to prefetch model instances and add those to the response", + ), + ], + ), + } + + class DojoOpenApiJsonRenderer(OpenApiJsonRenderer2): def get_indent(self, accepted_media_type, renderer_context): if accepted_media_type and "indent" in accepted_media_type: @@ -211,30 +238,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoGroupViewSet( PrefetchDojoModelViewSet, ): @@ -252,30 +256,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoGroupMemberViewSet( PrefetchDojoModelViewSet, ): @@ -301,6 +282,7 @@ def partial_update(self, request, pk=None): # Authorization: superuser +@extend_schema_view(**schema_with_prefetch()) class GlobalRoleViewSet( PrefetchDojoModelViewSet, ): @@ -315,6 +297,7 @@ def get_queryset(self): # Authorization: object-based +# @extend_schema_view(**schema_with_prefetch()) class EndPointViewSet( PrefetchDojoModelViewSet, ): @@ -370,6 +353,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class EndpointStatusViewSet( PrefetchDojoModelViewSet, ): @@ -398,6 +382,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class EngagementViewSet( PrefetchDojoModelViewSet, ra_api.AcceptedRisksMixin, @@ -651,6 +636,7 @@ def download_file(self, request, file_id, pk=None): return generate_file_response(file_object) +@extend_schema_view(**schema_with_prefetch()) class RiskAcceptanceViewSet( PrefetchDojoModelViewSet, ): @@ -716,6 +702,7 @@ def download_proof(self, request, pk=None): # These are technologies in the UI and the API! 
# Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class AppAnalysisViewSet( PrefetchDojoModelViewSet, ): @@ -734,6 +721,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class CredentialsViewSet( PrefetchDojoModelViewSet, ): @@ -747,6 +735,7 @@ def get_queryset(self): # Authorization: configuration +@extend_schema_view(**schema_with_prefetch()) class CredentialsMappingViewSet( PrefetchDojoModelViewSet, ): @@ -1486,6 +1475,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class JiraIssuesViewSet( PrefetchDojoModelViewSet, ): @@ -1511,6 +1501,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class JiraProjectViewSet( PrefetchDojoModelViewSet, ): @@ -1573,6 +1564,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class ProductAPIScanConfigurationViewSet( PrefetchDojoModelViewSet, ): @@ -1599,30 +1591,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class DojoMetaViewSet( PrefetchDojoModelViewSet, ): @@ -1646,30 +1615,7 @@ def get_queryset(self): return get_authorized_dojo_meta(Permissions.Product_View) -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductViewSet( prefetch.PrefetchListMixin, prefetch.PrefetchRetrieveMixin, @@ -1745,30 +1691,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductMemberViewSet( PrefetchDojoModelViewSet, ): @@ -1796,30 +1719,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model 
instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductGroupViewSet( PrefetchDojoModelViewSet, ): @@ -1847,30 +1747,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeViewSet( PrefetchDojoModelViewSet, ): @@ -1955,30 +1832,7 @@ def generate_report(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeMemberViewSet( PrefetchDojoModelViewSet, ): @@ -2020,30 +1874,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class ProductTypeGroupViewSet( PrefetchDojoModelViewSet, ): @@ -2071,6 +1902,7 @@ def partial_update(self, request, pk=None): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class StubFindingsViewSet( PrefetchDojoModelViewSet, ): @@ -2109,6 +1941,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class TestsViewSet( PrefetchDojoModelViewSet, ra_api.AcceptedRisksMixin, @@ -2316,30 +2149,7 @@ def get_queryset(self): return Test_Type.objects.all().order_by("id") -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to 
prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class TestImportViewSet( PrefetchDojoModelViewSet, ): @@ -2398,6 +2208,7 @@ def get_queryset(self): # Authorization: configurations +@extend_schema_view(**schema_with_prefetch()) class ToolConfigurationsViewSet( PrefetchDojoModelViewSet, ): @@ -2418,6 +2229,7 @@ def get_queryset(self): # Authorization: object-based +@extend_schema_view(**schema_with_prefetch()) class ToolProductSettingsViewSet( PrefetchDojoModelViewSet, ): @@ -2502,30 +2314,7 @@ def destroy(self, request, *args, **kwargs): # Authorization: superuser -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class UserContactInfoViewSet( PrefetchDojoModelViewSet, ): @@ -2680,30 +2469,7 @@ def get_queryset(self): # Authorization: object-based -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class LanguageViewSet( PrefetchDojoModelViewSet, ): @@ -3147,30 +2913,7 @@ def get_queryset(self): # Authorization: superuser -@extend_schema_view( - list=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), - retrieve=extend_schema( - parameters=[ - OpenApiParameter( - "prefetch", - OpenApiTypes.STR, - OpenApiParameter.QUERY, - required=False, - description="List of fields for which to prefetch model instances and add those to the response", - ), - ], - ), -) +@extend_schema_view(**schema_with_prefetch()) class NotificationsViewSet( PrefetchDojoModelViewSet, ): @@ -3184,6 +2927,7 @@ def get_queryset(self): return Notifications.objects.all().order_by("id") +@extend_schema_view(**schema_with_prefetch()) class EngagementPresetsViewset( PrefetchDojoModelViewSet, ): @@ -3303,6 +3047,7 @@ def get_queryset(self): return Engagement_Survey.objects.all().order_by("id") +@extend_schema_view(**schema_with_prefetch()) class QuestionnaireAnsweredSurveyViewSet( prefetch.PrefetchListMixin, prefetch.PrefetchRetrieveMixin, From 09500cead339e12c650a40b726f81f13a8f62d7f Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:17:31 -0500 Subject: [PATCH 11/22] Semgrep Parser: Add new severities (#10936) --- dojo/tools/semgrep/parser.py | 11 ++- .../semgrep/high-medium-low-severities.json | 95 +++++++++++++++++++ unittests/tools/test_semgrep_parser.py | 6 ++ 3 files 
changed, 108 insertions(+), 4 deletions(-) create mode 100644 unittests/scans/semgrep/high-medium-low-severities.json diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index aa4f730750..e64615ec53 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -130,13 +130,16 @@ def get_findings(self, filename, test): return list(dupes.values()) def convert_severity(self, val): - if "CRITICAL" == val.upper(): + upper_value = val.upper() + if upper_value == "CRITICAL": return "Critical" - elif "WARNING" == val.upper(): + elif upper_value in ["WARNING", "MEDIUM"]: return "Medium" - elif "ERROR" == val.upper() or "HIGH" == val.upper(): + elif upper_value in ["ERROR", "HIGH"]: return "High" - elif "INFO" == val.upper(): + elif upper_value == "LOW": + return "Low" + elif upper_value == "INFO": return "Info" else: msg = f"Unknown value for severity: {val}" diff --git a/unittests/scans/semgrep/high-medium-low-severities.json b/unittests/scans/semgrep/high-medium-low-severities.json new file mode 100644 index 0000000000..c2fd9c8714 --- /dev/null +++ b/unittests/scans/semgrep/high-medium-low-severities.json @@ -0,0 +1,95 @@ + { + "errors": [], + "interfile_languages_used": [], + "paths": { + "scanned": [] + }, + "results": [ + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 89, + "line": 48, + "offset": 1772 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_0", + "is_ignored": false, + "lines": "", + "message": "Potential CDN usage detected. Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 48, + "offset": 1684 + } + }, + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 206, + "line": 49, + "offset": 1978 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_1", + "is_ignored": false, + "lines": "", + "message": "Potential CDN usage detected. Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 49, + "offset": 1773 + } + }, + { + "check_id": "rules.sast.dev.generic.internal.detect-cdn-usage-react-express", + "end": { + "col": 203, + "line": 50, + "offset": 2181 + }, + "extra": { + "engine_kind": "OSS", + "fingerprint": "d30b51e68d2d56fb34e5a87920208e0f18b71dbec62b2ad91d1b55e566c5796c64b1e161d7fd3c0f65834756474c0617c29b7c5bd76b76f14f2d3fc537a664b9_2", + "is_ignored": false, + "lines": "{% block javascripts %}{% endblock %}", + "message": "Potential CDN usage detected. 
Consider removing or replacing CDN references to comply with GDPR and also avoid supply chain risk", + "metadata": { + "category": "security", + "technology": "cdn" + }, + "metavars": {}, + "severity": "LOW", + "validation_state": "NO_VALIDATOR" + }, + "path": "/Users/user.example/git/company/full-codebase/company/lead-magnet/src/templates/base.html.twig", + "start": { + "col": 1, + "line": 50, + "offset": 1979 + } + } + ], + "skipped_rules": [], + "version": "1.84.1" +} \ No newline at end of file diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py index 6892b0b849..8729e4cc00 100644 --- a/unittests/tools/test_semgrep_parser.py +++ b/unittests/tools/test_semgrep_parser.py @@ -121,6 +121,12 @@ def test_parse_issue_8435(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) + def test_parse_low_medium_high_severity(self): + with open("unittests/scans/semgrep/high-medium-low-severities.json", encoding="utf-8") as testfile: + parser = SemgrepParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + def test_parse_sca_deployments_vulns(self): with open("unittests/scans/semgrep/sca-deployments-vulns.json", encoding="utf-8") as testfile: parser = SemgrepParser() From a69b67e2a292d5668894e5eb0fcf558ac0f8b831 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:18:44 -0500 Subject: [PATCH 12/22] New Parser: Qualys Hacker Guardian (#10937) * New Parser: Qualys Hacker Guardian * Restore unit tests * Fix ruff * Update docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- .../parsers/file/qualys_hacker_guardian.md | 9 +++ dojo/tools/qualys_hacker_guardian/__init__.py | 0 dojo/tools/qualys_hacker_guardian/parser.py | 77 +++++++++++++++++++ .../qualys_hacker_guardian/many_finding.csv | 5 ++ .../qualys_hacker_guardian/one_finding.csv | 3 + .../qualys_hacker_guardian/zero_finding.csv | 1 + .../test_qualys_hacker_guardian_parser.py | 46 +++++++++++ 7 files changed, 141 insertions(+) create mode 100644 docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md create mode 100644 dojo/tools/qualys_hacker_guardian/__init__.py create mode 100644 dojo/tools/qualys_hacker_guardian/parser.py create mode 100644 unittests/scans/qualys_hacker_guardian/many_finding.csv create mode 100644 unittests/scans/qualys_hacker_guardian/one_finding.csv create mode 100644 unittests/scans/qualys_hacker_guardian/zero_finding.csv create mode 100644 unittests/tools/test_qualys_hacker_guardian_parser.py diff --git a/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md b/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md new file mode 100644 index 0000000000..e938970a38 --- /dev/null +++ b/docs/content/en/integrations/parsers/file/qualys_hacker_guardian.md @@ -0,0 +1,9 @@ +--- +title: "Qualys Hacker Guardian Scan" +toc_hide: true +--- +Qualys Hacker Guardian CSV export + +### Sample Scan Data + +Sample Qualys Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/qualys_hacker_guardian). 
\ No newline at end of file diff --git a/dojo/tools/qualys_hacker_guardian/__init__.py b/dojo/tools/qualys_hacker_guardian/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dojo/tools/qualys_hacker_guardian/parser.py b/dojo/tools/qualys_hacker_guardian/parser.py new file mode 100644 index 0000000000..0baea41ac6 --- /dev/null +++ b/dojo/tools/qualys_hacker_guardian/parser.py @@ -0,0 +1,77 @@ +import csv +import io + +from dateutil import parser as date_parser + +from dojo.models import Endpoint, Finding + + +class QualysHackerGuardianParser: + """Parser for Qualys HackerGuardian""" + + # Severity mapping taken from + # https://qualysguard.qg2.apps.qualys.com/portal-help/en/malware/knowledgebase/severity_levels.htm + qualys_severity_lookup = { + "1": "Low", + "2": "Low", + "3": "Medium", + "4": "High", + "5": "High", + } + + def get_scan_types(self): + return ["Qualys Hacker Guardian Scan"] + + def get_label_for_scan_types(self, scan_type): + return "Qualys Hacker Guardian Scan" + + def get_description_for_scan_types(self, scan_type): + return "Qualys Hacker Guardian report file can be imported in CSV format." + + def get_endpoint(self, row): + host = row.get("HOSTNAME", row.get("IP")) + if (port := row.get("PORT")) is not None: + host += f":{port}" + if (protocol := row.get("PROTOCOL")) is not None: + host = f"{protocol}://{host}" + + return host + + def get_findings(self, filename, test): + if filename is None: + return () + content = filename.read() + if isinstance(content, bytes): + content = content.decode("utf-8") + reader = csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"') + dupes = {} + for row in reader: + endpoint = Endpoint.from_uri(self.get_endpoint(row)) + finding = Finding( + title=row.get("VULN TITLE"), + severity=self.qualys_severity_lookup[row.get("Q_SEVERITY", 1)], + description=( + f'**Category**: {row.get("CATEGORY", "Unknown")}\n' + f'**Threat**: {row.get("THREAT", "No threat detected")}\n' + f'**Result**: {row.get("RESULT", "No threat detected")}\n' + ), + date=date_parser.parse(row.get("LAST SCAN")), + impact=row.get("IMPACT"), + mitigation=row.get("SOLUTION"), + unique_id_from_tool=row.get("QID"), + dynamic_finding=True, + active=True, + nb_occurences=1, + ) + finding.unsaved_endpoints.append(endpoint) + + dupe_key = finding.unique_id_from_tool + if dupe_key in dupes: + finding = dupes[dupe_key] + if endpoint not in finding.unsaved_endpoints: + finding.unsaved_endpoints.append(endpoint) + finding.nb_occurences += 1 + else: + dupes[dupe_key] = finding + + return list(dupes.values()) diff --git a/unittests/scans/qualys_hacker_guardian/many_finding.csv b/unittests/scans/qualys_hacker_guardian/many_finding.csv new file mode 100644 index 0000000000..c2c6e210e7 --- /dev/null +++ b/unittests/scans/qualys_hacker_guardian/many_finding.csv @@ -0,0 +1,5 @@ +"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","80","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. 
This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl url: https://help.example.co/. matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","443","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"44.207.58.177","jt.example.co","2024-09-15 09:00:18","11827","HTTP Security Header Not Detected","CONFIRMED","M","443","tcp","","Y","-","5.3","2","This QID reports the absence of the following HTTP headers according to CWE-693: Protection Mechanism Failure:
X-Content-Type-Options: This HTTP header will prevent the browser from interpreting files as a different MIME type to what is specified in the Content-Type HTTP header.
Strict-Transport-Security: The HTTP Strict-Transport-Security response header (HSTS) allows web servers to declare that web browsers (or other complying user agents) should only interact with it using secure HTTPS connections and never via the insecure HTTP protocol.

QID Detection Logic:
This unauthenticated QID looks for the presence of the following HTTP responses:
The Valid directives are as belows: X-Content-Type-Options: nosniff

Strict-Transport-Security: max-age=< [;includeSubDomains]

","Depending on the vulnerability being exploited an unauthenticated remote attacker could conduct cross-site scripting clickjacking or MIME-type sniffing attacks.","Note: To better debug the results of this QID it is requested that customers execute commands to simulate the following functionality: curl -lkL --verbose.

CWE-693: Protection Mechanism Failure mentions the following - The product does not use or incorrectly uses a protection mechanism that provides sufficient defense against directed attacks against the product. A "missing" protection mechanism occurs when the application does not define any mechanism against a certain class of attack. An "insufficient" protection mechanism might provide some defenses - for example against the most common attacks - but it does not protect against everything that is intended. Finally an "ignored" mechanism occurs when a mechanism is available and in active use within the product but the developer has not applied it in some code path.

Customers are advised to set proper X-Content-Type-Options and Strict-Transport-Security HTTP response headers.

Depending on their server software customers can set directives in their site configuration or Web.config files. Few examples are:

X-Content-Type-Options:
Apache: Header always set X-Content-Type-Options: nosniff

HTTP Strict-Transport-Security:
Apache: Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
Nginx: add_header Strict-Transport-Security max-age=31536000;

Note: Network devices that include a HTTP/HTTPS console for administrative/management purposes often do not include all/some of the security headers. This is a known issue and it is recommend to contact the vendor for a solution.

","4.7","CGI","X-Content-Type-Options HTTP Header missing on port 443. GET / HTTP/1.1 Host: jt.example.co Connection: Keep-Alive User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0 jt edge server ver v1.42.0-1-g7a7022e4 / 2022-06-09T21:08:14.000000Z jt edge server ver v1.42.0-1-g7a7022e4 / 2022-06-09T21:08:14.000000Z. Configure jt (/configurator) Strict-Transport-Security HTTP Header missing on port 443. HTTP/1.1 200 OK Date: Sun 15 Sep 2024 09:12:26 GMT Content-Type: text/html; charset=utf-8 Content-Length: 274 Connection: keep-alive Server: nginx/1.21.6","-","" +"44.220.118.158","data.example.co","2024-09-16 04:00:30","150004","Predictable Resource Location Via Forced Browsing","CONFIRMED","M","80","tcp","","Y","-","5.3","2","A file directory or directory listing was discovered on the Web server. These resources are confirmed to be present based on our logic. Some of the content on these files might have sensitive information.

NOTE: Links found in 150004 are found by forced crawling so will not automatically be added to 150009 Links Crawled or the application site map. If links found in 150004 need to be tested they must be added as Explicit URI so they are included in scope and then will be reported in 150009. Once the link is added to be in scope (i.e. Explicit URI) this same link will no longer be reported for 150004.","The contents of this file or directory may disclose sensitive information.","It is advised to review the contents of the disclosed files. If the contents contain sensitive information please verify that access to this file or directory is permitted. If necessary remove it or apply access controls to it.","4.7","Web Application","url: https://data.example.co/wp-content/uploads/2023/01/image.png Payload: https://data.example.co/feed/image/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/01/image.png. It was redirected from: https://data.example.co/feed/image/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK url: https://data.example.co/wp-content/uploads/2023/08/download.svg Payload: https://data.example.co/feed/download/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/08/download.svg. It was redirected from: https://data.example.co/feed/download/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK url: https://data.example.co/test-flow-shopify-bw/ Payload: https://data.example.co:443/test/ comment: Found this Vulnerability for redirect link: https://data.example.co/test-flow-shopify-bw/. It was redirected from: https://data.example.co:443/test/. Original URL is: https://data.example.co:443/. matched: HTTP/1.1 200 OK url: https://data.example.co/wp-content/uploads/2023/08/users.svg Payload: https://data.example.co/feed/users/ comment: Found this Vulnerability for redirect link: https://data.example.co/wp-content/uploads/2023/08/users.svg. It was redirected from: https://data.example.co/feed/users/. Original URL is: https://data.example.co/feed/ matched: HTTP/1.1 200 OK","-","" diff --git a/unittests/scans/qualys_hacker_guardian/one_finding.csv b/unittests/scans/qualys_hacker_guardian/one_finding.csv new file mode 100644 index 0000000000..eaae1d7560 --- /dev/null +++ b/unittests/scans/qualys_hacker_guardian/one_finding.csv @@ -0,0 +1,3 @@ +"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","80","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. 
This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl url: https://help.example.co/. matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" +"18.238.109.17","help.example.co","2024-09-16 04:00:30","150059","Reference to Windows file path is present in HTML","POTENTIAL","M","443","tcp","","Y","-","5.3","1","Windows specific file path was detected in the response.","The response may be an error response that disclosed a local file path. This may potentially be a sensitive information.","The content should be reviewed to determine whether it could be masked or removed.","4.7","Web Application","url: https://help.example.co/ matched: .toLowerCase().split(\ -\ ) c=b.join(\ _\ );return c}} {key:\ fetchQuery\ value:function i(a){var b=this c=this.props d=c.org e=c.domain f=this.getTransformedNavigatorLang() g=f?\ &lang=\ +f:\ \ h=\ https://\ +d.name+\ .api.\ +e+\ /p/v1/kb/deflection/search?term=\ +encodeURIComponent(a)+g;return fetch(h).then(function(a){return a.json()}).then(function(a){var c=a.data;if(c){var d=c.slice(0 5);b.setState({articl","-","" diff --git a/unittests/scans/qualys_hacker_guardian/zero_finding.csv b/unittests/scans/qualys_hacker_guardian/zero_finding.csv new file mode 100644 index 0000000000..8171af3c60 --- /dev/null +++ b/unittests/scans/qualys_hacker_guardian/zero_finding.csv @@ -0,0 +1 @@ +"IP","HOSTNAME","LAST SCAN","QID","VULN TITLE","TYPE","SEVERITY","PORT","PROTOCOL","OPERATING SYSTEM","IS_PCI","FALSE POSITIVE STATUS","CVSS_BASE","Q_SEVERITY","THREAT","IMPACT","SOLUTION","CVSS_TEMPORAL","CATEGORY","RESULT","BUGTRAQID","CVEID" diff --git a/unittests/tools/test_qualys_hacker_guardian_parser.py b/unittests/tools/test_qualys_hacker_guardian_parser.py new file mode 100644 index 0000000000..00ccb64499 --- /dev/null +++ b/unittests/tools/test_qualys_hacker_guardian_parser.py @@ -0,0 +1,46 @@ +from os import path + +from dojo.models import Test +from dojo.tools.qualys_hacker_guardian.parser import QualysHackerGuardianParser +from unittests.dojo_test_case import DojoTestCase + + +class TestQualysHackerGuardianParser(DojoTestCase): + + def test_qualys_hacker_guardian_parser_with_no_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/zero_finding.csv"), encoding="utf-8") as testfile: + parser = QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + def test_qualys_hacker_guardian_parser_with_one_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/one_finding.csv"), encoding="utf-8") as testfile: + parser = 
QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(1, len(findings)) + finding = findings[0] + self.assertEqual("Low", finding.severity) + self.assertEqual("Reference to Windows file path is present in HTML", finding.title) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 2) + + def test_qualys_hacker_guardian_parser_with_many_findings(self): + with open(path.join(path.dirname(__file__), "../scans/qualys_hacker_guardian/many_finding.csv"), encoding="utf-8") as testfile: + parser = QualysHackerGuardianParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(3, len(findings)) + finding = findings[0] + self.assertEqual("Low", finding.severity) + self.assertEqual("Reference to Windows file path is present in HTML", finding.title) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 2) + finding = findings[1] + self.assertEqual("HTTP Security Header Not Detected", finding.title) + self.assertEqual("Low", finding.severity) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 1) + finding = findings[2] + self.assertEqual("Predictable Resource Location Via Forced Browsing", finding.title) + self.assertEqual("Low", finding.severity) + self.assertIsNotNone(finding.description) + self.assertEqual(len(finding.unsaved_endpoints), 1) From cdee30b8a5b31c4f634ad2d97df275ef0d256109 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:19:46 -0500 Subject: [PATCH 13/22] User: Make email required at all times, password required for new users (#10938) * User: Make email required at all times, password required for new users * fix tests * update tests --- dojo/api_v2/serializers.py | 9 +++++---- dojo/forms.py | 4 +++- tests/user_test.py | 3 +++ unittests/test_apiv2_notifications.py | 1 + unittests/test_apiv2_user.py | 14 ++++++++------ unittests/test_rest_framework.py | 13 ++++++++++++- 6 files changed, 32 insertions(+), 12 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index a0d4298b74..10c07b3f3d 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -429,6 +429,7 @@ class Meta: class UserSerializer(serializers.ModelSerializer): date_joined = serializers.DateTimeField(read_only=True) last_login = serializers.DateTimeField(read_only=True, allow_null=True) + email = serializers.EmailField(required=True) password = serializers.CharField( write_only=True, style={"input_type": "password"}, @@ -549,12 +550,12 @@ def validate(self, data): msg = "Only superusers are allowed to add or edit superusers." 
raise ValidationError(msg) - if ( - self.context["request"].method in ["PATCH", "PUT"] - and "password" in data - ): + if self.context["request"].method in ["PATCH", "PUT"] and "password" in data: msg = "Update of password though API is not allowed" raise ValidationError(msg) + if self.context["request"].method == "POST" and "password" not in data: + msg = "Passwords must be supplied for new users" + raise ValidationError(msg) else: return super().validate(data) diff --git a/dojo/forms.py b/dojo/forms.py index dde58a38b6..fd5c55a7b6 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -2168,8 +2168,9 @@ def clean(self): class AddDojoUserForm(forms.ModelForm): + email = forms.EmailField(required=True) password = forms.CharField(widget=forms.PasswordInput, - required=False, + required=True, validators=[validate_password], help_text="") @@ -2186,6 +2187,7 @@ def __init__(self, *args, **kwargs): class EditDojoUserForm(forms.ModelForm): + email = forms.EmailField(required=True) class Meta: model = Dojo_User diff --git a/tests/user_test.py b/tests/user_test.py index dcaa9c845f..607b8a7b4e 100644 --- a/tests/user_test.py +++ b/tests/user_test.py @@ -59,6 +59,9 @@ def test_create_user_with_writer_global_role(self): # username driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("userWriter") + # password + driver.find_element(By.ID, "id_password").clear() + driver.find_element(By.ID, "id_password").send_keys("Def3ctD0jo&") # First Name driver.find_element(By.ID, "id_first_name").clear() driver.find_element(By.ID, "id_first_name").send_keys("Writer") diff --git a/unittests/test_apiv2_notifications.py b/unittests/test_apiv2_notifications.py index 7149454ebd..a31b859a76 100644 --- a/unittests/test_apiv2_notifications.py +++ b/unittests/test_apiv2_notifications.py @@ -33,6 +33,7 @@ def create_test_user(self): password = "testTEST1234!@#$" r = self.client.post(reverse("user-list"), { "username": "api-user-notification", + "email": "admin@dojo.com", "password": password, }, format="json") return r.json()["id"] diff --git a/unittests/test_apiv2_user.py b/unittests/test_apiv2_user.py index 88f91bfb5f..9b9fe02618 100644 --- a/unittests/test_apiv2_user.py +++ b/unittests/test_apiv2_user.py @@ -26,16 +26,11 @@ def test_user_list(self): self.assertNotIn(item, user, r.content[:1000]) def test_user_add(self): - # simple user without password - r = self.client.post(reverse("user-list"), { - "username": "api-user-1", - }, format="json") - self.assertEqual(r.status_code, 201, r.content[:1000]) - # user with good password password = "testTEST1234!@#$" r = self.client.post(reverse("user-list"), { "username": "api-user-2", + "email": "admin@dojo.com", "password": password, }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) @@ -50,6 +45,7 @@ def test_user_add(self): # user with weak password r = self.client.post(reverse("user-list"), { "username": "api-user-3", + "email": "admin@dojo.com", "password": "weakPassword", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) @@ -59,6 +55,8 @@ def test_user_change_password(self): # some user r = self.client.post(reverse("user-list"), { "username": "api-user-4", + "email": "admin@dojo.com", + "password": "testTEST1234!@#$", }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) user_id = r.json()["id"] @@ -66,16 +64,19 @@ def test_user_change_password(self): r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", "first_name": 
"first", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "last_name": "last", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", + "email": "admin@dojo.com", "password": "testTEST1234!@#$", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) @@ -83,6 +84,7 @@ def test_user_change_password(self): r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "password": "testTEST1234!@#$", + "email": "admin@dojo.com", }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8")) diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index aa9318ba8f..9fe9b1cc5f 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -1699,8 +1699,19 @@ def __init__(self, *args, **kwargs): self.deleted_objects = 25 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) + def test_create(self): + payload = self.payload.copy() | { + "password": "testTEST1234!@#$", + } + length = self.endpoint_model.objects.count() + response = self.client.post(self.url, payload) + self.assertEqual(201, response.status_code, response.content[:1000]) + self.assertEqual(self.endpoint_model.objects.count(), length + 1) + def test_create_user_with_non_configuration_permissions(self): - payload = self.payload.copy() + payload = self.payload.copy() | { + "password": "testTEST1234!@#$", + } payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" response = self.client.post(self.url, payload) self.assertEqual(response.status_code, 400) From ad7939d43ab93f3b9177b0e278b5aed4ca332208 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 20 Sep 2024 21:02:18 -0500 Subject: [PATCH 14/22] Hacker One Parser: Add support for Bug Bounty Program reports (#10939) * Restructure parser for extendability * Support for bug bounty reports * Correct ruff --- dojo/settings/.settings.dist.py.sha256sum | 2 +- dojo/settings/settings.dist.py | 1 + dojo/tools/h1/parser.py | 245 ++++++++++++++---- unittests/scans/h1/bug_bounty_many.csv | 5 + unittests/scans/h1/bug_bounty_many.json | 116 +++++++++ unittests/scans/h1/bug_bounty_one.csv | 2 + unittests/scans/h1/bug_bounty_one.json | 32 +++ unittests/scans/h1/bug_bounty_zero.csv | 1 + unittests/scans/h1/bug_bounty_zero.json | 3 + ...ta_many.json => vuln_disclosure_many.json} | 0 ...data_one.json => vuln_disclosure_one.json} | 0 ...a_empty.json => vuln_disclosure_zero.json} | 0 unittests/tools/test_h1_parser.py | 160 +++++++++++- 13 files changed, 513 insertions(+), 54 deletions(-) create mode 100644 unittests/scans/h1/bug_bounty_many.csv create mode 100644 unittests/scans/h1/bug_bounty_many.json create mode 100644 unittests/scans/h1/bug_bounty_one.csv create mode 100644 unittests/scans/h1/bug_bounty_one.json create mode 100644 unittests/scans/h1/bug_bounty_zero.csv create mode 100644 unittests/scans/h1/bug_bounty_zero.json rename unittests/scans/h1/{data_many.json => vuln_disclosure_many.json} (100%) rename unittests/scans/h1/{data_one.json => vuln_disclosure_one.json} (100%) rename unittests/scans/h1/{data_empty.json 
=> vuln_disclosure_zero.json} (100%) diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index 5dfa946a6c..f8adf9d7d4 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -b330f7dbd92c2df5a2a0632befc9775bef4a1c62b90375aa511957ebcd0ea82a +f7e63afa0003d1992f8247f9a7a830847bd7498fa1e2d46d6ea04e3006bb9ee2 diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index d96733ca8f..348596ef75 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1280,6 +1280,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": ["title", "endpoints", "severity"], "ThreatComposer Scan": ["title", "description"], "Invicti Scan": ["title", "description", "severity"], + "HackerOne Cases": ["title", "severity"], "KrakenD Audit Scan": ["description", "mitigation", "severity"], } diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index 457e01c06f..e182af6b76 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -1,44 +1,36 @@ +import csv import hashlib +import io import json +from contextlib import suppress from datetime import datetime +from typing import ClassVar -from dojo.models import Finding +from dateutil import parser as date_parser +from django.core.files.uploadedfile import TemporaryUploadedFile + +from dojo.models import Finding, Test __author__ = "Kirill Gotsman" -class H1Parser: +class HackerOneVulnerabilityDisclosureProgram: """ - A class that can be used to parse the Get All Reports JSON export from HackerOne API. + Vulnerability Disclosure Program HackerOne reports """ - def get_scan_types(self): - return ["HackerOne Cases"] - - def get_label_for_scan_types(self, scan_type): - return scan_type - - def get_description_for_scan_types(self, scan_type): - return "Import HackerOne cases findings in JSON format." 
- - def get_findings(self, file, test): + def get_vulnerability_disclosure_json_findings(self, tree, test): """ Converts a HackerOne reports to a DefectDojo finding """ - - # Load the contents of the JSON file into a dictionary - data = file.read() - try: - tree = json.loads(str(data, "utf-8")) - except Exception: - tree = json.loads(data) # Convert JSON report to DefectDojo format dupes = {} for content in tree["data"]: # Get all relevant data date = content["attributes"]["created_at"] date = datetime.strftime( - datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d", + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), + "%Y-%m-%d", ) # Build the title of the Dojo finding title = "#" + content["id"] + " " + content["attributes"]["title"] @@ -47,21 +39,15 @@ def get_findings(self, file, test): # References try: - issue_tracker_id = content["attributes"][ - "issue_tracker_reference_id" - ] - issue_tracker_url = content["attributes"][ - "issue_tracker_reference_url" - ] + issue_tracker_id = content["attributes"]["issue_tracker_reference_id"] + issue_tracker_url = content["attributes"]["issue_tracker_reference_url"] references = f"[{issue_tracker_id}]({issue_tracker_url})\n" except Exception: references = "" # Build the severity of the Dojo finding try: - severity = content["relationships"]["severity"]["data"][ - "attributes" - ]["rating"].capitalize() + severity = content["relationships"]["severity"]["data"]["attributes"]["rating"].capitalize() if severity not in ["Low", "Medium", "High", "Critical"]: severity = "Info" except Exception: @@ -81,9 +67,7 @@ def get_findings(self, file, test): # Set CWE of the Dojo finding try: cwe = int( - content["relationships"]["weakness"]["data"]["attributes"][ - "external_id" - ][4:], + content["relationships"]["weakness"]["data"]["attributes"]["external_id"][4:], ) except Exception: cwe = 0 @@ -121,11 +105,10 @@ def get_findings(self, file, test): def build_description(self, content): date = content["attributes"]["created_at"] date = datetime.strftime( - datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d", + datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), + "%Y-%m-%d", ) - reporter = content["relationships"]["reporter"]["data"]["attributes"][ - "username" - ] + reporter = content["relationships"]["reporter"]["data"]["attributes"]["username"] triaged_date = content["attributes"]["triaged_at"] # Build the description of the Dojo finding @@ -142,9 +125,7 @@ def build_description(self, content): # Try to grab CVSS try: - cvss = content["relationships"]["severity"]["data"]["attributes"][ - "score" - ] + cvss = content["relationships"]["severity"]["data"]["attributes"]["score"] description += f"CVSS: {cvss}\n" except Exception: pass @@ -156,14 +137,186 @@ def build_description(self, content): # Try to grab weakness if it's there try: - weakness_title = content["relationships"]["weakness"]["data"][ - "attributes" - ]["name"] - weakness_desc = content["relationships"]["weakness"]["data"][ - "attributes" - ]["description"] + weakness_title = content["relationships"]["weakness"]["data"]["attributes"]["name"] + weakness_desc = content["relationships"]["weakness"]["data"]["attributes"]["description"] description += f"\n##Weakness: {weakness_title}\n{weakness_desc}" except Exception: pass return description + + +class HackerOneBugBountyProgram: + """Bug Bounty Program HackerOne reports.""" + + fields_to_label: ClassVar[dict[str, str]] = { + "id": "ID", + "weakness": "Weakness Category", + "substate": "Substate", + "reporter": "Reporter", + "assigned": 
"Assigned To", + "public": "Public", + "triageted_at": "Triaged On", + "closed_at": "Closed On", + "awarded_at": "Awarded On", + "bounty": "Bounty Price", + "bonus": "Bonus", + "first_response_at": "First Response On", + "source": "Source", + "reference": "Reference", + "reference_url": "Reference URL", + "structured_scope": "Structured Scope", + "structured_scope_reference": "Structured Scope Reference", + "original_report_id": "Original Report ID", + "collaborating_users": "Collaboration Users", + "duplicate_report_ids": "Duplicate Report IDs", + } + + def get_bug_bounty_program_json_findings(self, dict_list: dict, test: Test) -> list[Finding]: + return self.parse_findings(dict_list, test) + + def get_bug_bounty_program_csv_findings(self, dict_list: dict, test: Test) -> list[Finding]: + return self.parse_findings(dict_list, test) + + def parse_findings(self, dict_list: list[dict], test: Test) -> list[Finding]: + """Return a list of findings generated by the submitted report.""" + findings = [] + for entry in dict_list: + status_dict = self.determine_status(entry) + finding = Finding( + title=entry.get("title"), + severity=self.convert_severity(entry), + description=self.parse_description(entry), + date=date_parser.parse(entry.get("reported_at")), + dynamic_finding=True, + test=test, + **status_dict, + ) + # Add vulnerability IDs if they are present + if (cve_str := entry.get("cve_ids")) is not None and len(cve_str) > 0: + finding.unsaved_vulnerability_ids = [cve_str] + # Add the finding the the list + findings.append(finding) + return findings + + def determine_status(self, row) -> dict: + """Generate a dict of status meta to fully represent that state of the finding + + Possible states currently supported are open and closed. In the event that neither + of those options are present, the open status will be the default, and returned + """ + default_status = { + "active": True, + } + # Open status -> active = True + # Closed status -> is_mitigated = True + timestamp + if (status := row.get("state")) is not None: + if status == "open": + return default_status + if status == "closed": + return { + "is_mitigated": True, + "active": False, + "mitigated": date_parser.parse(row.get("closed_at")), + } + return default_status + + def convert_severity(self, entry: dict) -> str: + """Convert the severity from the parser from the string value, or CVSS score.""" + # Try to use the string severity first + if (severity := entry.get("severity_rating")) is not None: + if severity in ["critical", "high", "medium", "low"]: + return severity.capitalize() + # Fall back to "severity_score" which I assume is CVSS Score + if (severity_score := entry.get("severity_score")) is not None: + with suppress(ValueError): + severity_score = float(severity_score) + if severity_score >= 9.0: + return "Critical" + if severity_score >= 7.0: + return "High" + if severity_score >= 4.0: + return "Medium" + if severity_score > 0.0: + return "Low" + # Default to Info in all cases (assuming we reach this) + return "Info" + + def parse_description(self, entry: dict) -> str: + """Build the description from the mapping set in the fields_to_label var.""" + # Iterate over the items and build the string + description = "" + for field, label in self.fields_to_label.items(): + if (value := entry.get(field)) is not None and len(value) > 0: + description += f"**{label}**: {value}\n" + return description + + +class H1Parser( + HackerOneVulnerabilityDisclosureProgram, + HackerOneBugBountyProgram, +): + """ + A class that can be used to parse 
the Get All Reports JSON export from HackerOne API. + """ + + def get_scan_types(self): + return ["HackerOne Cases"] + + def get_label_for_scan_types(self, scan_type): + return scan_type + + def get_description_for_scan_types(self, scan_type): + return "Import HackerOne cases findings in JSON format." + + def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]: + """Return the list of findings generated from the uploaded report.""" + # first determine which format to pase + file_name = file.name + if str(file_name).endswith(".json"): + return self.determine_json_format(file, test) + elif str(file_name).endswith(".csv"): + return self.determine_csv_format(file, test) + else: + msg = "Filename extension not recognized. Use .json or .csv" + raise ValueError(msg) + + def get_json_tree(self, file: TemporaryUploadedFile) -> dict: + """Extract the CSV file into a iterable that represents a dict.""" + data = file.read() + try: + tree = json.loads(str(data, "utf-8")) + except Exception: + tree = json.loads(data) + return tree + + def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]: + """Evaluate the format of the JSON report that was uploaded to determine which parser to use.""" + tree = self.get_json_tree(file) + # Check for some root elements + if "findings" in tree: + return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test) + if "data" in tree: + return self.get_vulnerability_disclosure_json_findings(tree, test) + else: + msg = "This JSON format is not supported" + raise ValueError(msg) + + def get_csv_reader(self, file: TemporaryUploadedFile) -> csv.DictReader: + """Extract the CSV file into a iterable that represents a dict.""" + if file is None: + return () + content = file.read() + if isinstance(content, bytes): + content = content.decode("utf-8") + return csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"') + + def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[Finding]: + """Evaluate the format of the CSV report that was uploaded to determine which parser to use.""" + reader = self.get_csv_reader(file) + # Check for some root elements + if "bounty" in reader.fieldnames: + return self.get_bug_bounty_program_csv_findings(reader, test) + else: + msg = "This CSV format is not supported" + raise ValueError(msg) diff --git a/unittests/scans/h1/bug_bounty_many.csv b/unittests/scans/h1/bug_bounty_many.csv new file mode 100644 index 0000000000..a6bc207d64 --- /dev/null +++ b/unittests/scans/h1/bug_bounty_many.csv @@ -0,0 +1,5 @@ +id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids +2501687,Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration,medium,,open,triaged,Information Disclosure,2024-05-12 04:05:27 UTC,2024-05-14 22:14:16 UTC,2024-08-28 19:35:16 UTC,,2024-08-28 19:40:24 UTC,Group example.co Team,reporter,,400.0,,no,,,1489537348,,,"",, +2710467,Acceso no autorizado a soporte premium sin pagar,critical,9.1,open,new,,2024-09-10 15:38:20 UTC,,,,,,reporter,,,,no,,,example.co,,,"",, +2682608,XSS - stg.pse.mock.example.co,none,0.0,closed,duplicate,,2024-08-25 07:27:18 UTC,2024-08-27 18:19:23 UTC,,2024-08-27 18:19:23 UTC,,,reporter,,,,no,,,,,2311675,"",, +2616856,example.co/File 
creation via HTTP method PUT,critical,,closed,duplicate,,2024-07-22 17:54:36 UTC,2024-07-22 20:57:56 UTC,,2024-07-22 20:57:56 UTC,,,reporter,,,,no,,,example.co,,2597854,CVE-2017-12615,, diff --git a/unittests/scans/h1/bug_bounty_many.json b/unittests/scans/h1/bug_bounty_many.json new file mode 100644 index 0000000000..ba3b7e3eb1 --- /dev/null +++ b/unittests/scans/h1/bug_bounty_many.json @@ -0,0 +1,116 @@ +{ + "findings": [ + { + "id": "2501687", + "title": "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + "severity_rating": "medium", + "severity_score": "", + "state": "open", + "substate": "triaged", + "weakness": "Information Disclosure", + "reported_at": "2024-05-12 04:05:27 UTC", + "first_response_at": "2024-05-14 22:14:16 UTC", + "triaged_at": "2024-08-28 19:35:16 UTC", + "closed_at": "", + "awarded_at": "2024-08-28 19:40:24 UTC", + "assigned": "Group example.co Team", + "reporter": "reporter", + "source": "", + "bounty": "400.0", + "bonus": "", + "public": "no", + "reference": "", + "reference_url": "", + "structured_scope": "1489537348", + "structured_scope_reference": "", + "original_report_id": "", + "cve_ids": "", + "collaborating_users": "", + "duplicate_report_ids": "" + }, + { + "id": "2710467", + "title": "Acceso no autorizado a soporte premium sin pagar", + "severity_rating": "critical", + "severity_score": "9.1", + "state": "open", + "substate": "new", + "weakness": "", + "reported_at": "2024-09-10 15:38:20 UTC", + "first_response_at": "", + "triaged_at": "", + "closed_at": "", + "awarded_at": "", + "assigned": "", + "reporter": "reporter", + "source": "", + "bounty": "", + "bonus": "", + "public": "no", + "reference": "", + "reference_url": "", + "structured_scope": "example.co", + "structured_scope_reference": "", + "original_report_id": "", + "cve_ids": "", + "collaborating_users": "", + "duplicate_report_ids": "" + }, + { + "id": "2682608", + "title": "XSS - stg.pse.mock.example.co", + "severity_rating": "none", + "severity_score": "0.0", + "state": "closed", + "substate": "duplicate", + "weakness": "", + "reported_at": "2024-08-25 07:27:18 UTC", + "first_response_at": "2024-08-27 18:19:23 UTC", + "triaged_at": "", + "closed_at": "2024-08-27 18:19:23 UTC", + "awarded_at": "", + "assigned": "", + "reporter": "reporter", + "source": "", + "bounty": "", + "bonus": "", + "public": "no", + "reference": "", + "reference_url": "", + "structured_scope": "", + "structured_scope_reference": "", + "original_report_id": "2311675", + "cve_ids": "", + "collaborating_users": "", + "duplicate_report_ids": "" + }, + { + "id": "2616856", + "title": "example.co/File creation via HTTP method PUT", + "severity_rating": "critical", + "severity_score": "", + "state": "closed", + "substate": "duplicate", + "weakness": "", + "reported_at": "2024-07-22 17:54:36 UTC", + "first_response_at": "2024-07-22 20:57:56 UTC", + "triaged_at": "", + "closed_at": "2024-07-22 20:57:56 UTC", + "awarded_at": "", + "assigned": "", + "reporter": "reporter", + "source": "", + "bounty": "", + "bonus": "", + "public": "no", + "reference": "", + "reference_url": "", + "structured_scope": "example.co", + "structured_scope_reference": "", + "original_report_id": "2597854", + "cve_ids": "CVE-2017-12615", + "collaborating_users": "", + "duplicate_report_ids": "" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/h1/bug_bounty_one.csv b/unittests/scans/h1/bug_bounty_one.csv new file mode 100644 index 0000000000..7b13f4fdc0 --- /dev/null +++ 
b/unittests/scans/h1/bug_bounty_one.csv @@ -0,0 +1,2 @@ +id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids +2501687,Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration,medium,,open,triaged,Information Disclosure,2024-05-12 04:05:27 UTC,2024-05-14 22:14:16 UTC,2024-08-28 19:35:16 UTC,,2024-08-28 19:40:24 UTC,Group example.co Team,reporter,,400.0,,no,,,1489537348,,,"",, diff --git a/unittests/scans/h1/bug_bounty_one.json b/unittests/scans/h1/bug_bounty_one.json new file mode 100644 index 0000000000..e70e6932ea --- /dev/null +++ b/unittests/scans/h1/bug_bounty_one.json @@ -0,0 +1,32 @@ +{ + "findings": [ + { + "id": "2501687", + "title": "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + "severity_rating": "medium", + "severity_score": "", + "state": "open", + "substate": "triaged", + "weakness": "Information Disclosure", + "reported_at": "2024-05-12 04:05:27 UTC", + "first_response_at": "2024-05-14 22:14:16 UTC", + "triaged_at": "2024-08-28 19:35:16 UTC", + "closed_at": "", + "awarded_at": "2024-08-28 19:40:24 UTC", + "assigned": "Group example.co Team", + "reporter": "reporter", + "source": "", + "bounty": "400.0", + "bonus": "", + "public": "no", + "reference": "", + "reference_url": "", + "structured_scope": "1489537348", + "structured_scope_reference": "", + "original_report_id": "", + "cve_ids": "", + "collaborating_users": "", + "duplicate_report_ids": "" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/h1/bug_bounty_zero.csv b/unittests/scans/h1/bug_bounty_zero.csv new file mode 100644 index 0000000000..2d388b1293 --- /dev/null +++ b/unittests/scans/h1/bug_bounty_zero.csv @@ -0,0 +1 @@ +id,title,severity_rating,severity_score,state,substate,weakness,reported_at,first_response_at,triaged_at,closed_at,awarded_at,assigned,reporter,source,bounty,bonus,public,reference,reference_url,structured_scope,structured_scope_reference,original_report_id,cve_ids,collaborating_users,duplicate_report_ids diff --git a/unittests/scans/h1/bug_bounty_zero.json b/unittests/scans/h1/bug_bounty_zero.json new file mode 100644 index 0000000000..b8046d0105 --- /dev/null +++ b/unittests/scans/h1/bug_bounty_zero.json @@ -0,0 +1,3 @@ +{ + "findings": [] +} \ No newline at end of file diff --git a/unittests/scans/h1/data_many.json b/unittests/scans/h1/vuln_disclosure_many.json similarity index 100% rename from unittests/scans/h1/data_many.json rename to unittests/scans/h1/vuln_disclosure_many.json diff --git a/unittests/scans/h1/data_one.json b/unittests/scans/h1/vuln_disclosure_one.json similarity index 100% rename from unittests/scans/h1/data_one.json rename to unittests/scans/h1/vuln_disclosure_one.json diff --git a/unittests/scans/h1/data_empty.json b/unittests/scans/h1/vuln_disclosure_zero.json similarity index 100% rename from unittests/scans/h1/data_empty.json rename to unittests/scans/h1/vuln_disclosure_zero.json diff --git a/unittests/tools/test_h1_parser.py b/unittests/tools/test_h1_parser.py index 4b4e602020..685220ff03 100644 --- a/unittests/tools/test_h1_parser.py +++ b/unittests/tools/test_h1_parser.py @@ -1,24 +1,170 @@ +from dateutil import parser as date_parser + from dojo.models import Test from dojo.tools.h1.parser import H1Parser 
from unittests.dojo_test_case import DojoTestCase -class TestHackerOneParser(DojoTestCase): +class HackerOneVulnerabilityDisclosureProgramTests(DojoTestCase): + def test_parse_file_with_multiple_vuln_has_multiple_finding(self): + with open("unittests/scans/h1/vuln_disclosure_many.json", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(2, len(findings)) + + def test_parse_file_with_one_vuln_has_one_finding(self): + with open("unittests/scans/h1/vuln_disclosure_one.json", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(1, len(findings)) def test_parse_file_with_no_vuln_has_no_finding(self): - with open("unittests/scans/h1/data_empty.json", encoding="utf-8") as testfile: + with open("unittests/scans/h1/vuln_disclosure_zero.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) - def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/h1/data_one.json", encoding="utf-8") as testfile: + +class HackerOneBugBountyProgramTests(DojoTestCase): + def test_bug_bounty_hacker_one_many_findings_json(self): + with open("unittests/scans/h1/bug_bounty_many.json", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(4, len(findings)) + with self.subTest(): + finding = findings[0] + self.assertEqual( + "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + finding.title, + ) + self.assertEqual("Medium", finding.severity) + self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Assigned To**: Group example.co Team", finding.description) + self.assertIn("**Weakness Category**: Information Disclosure", finding.description) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[1] + self.assertEqual("Acceso no autorizado a soporte premium sin pagar", finding.title) + self.assertEqual("Critical", finding.severity) + self.assertEqual(date_parser.parse("2024-09-10 15:38:20 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[2] + self.assertEqual("XSS - stg.pse.mock.example.co", finding.title) + self.assertEqual("Info", finding.severity) + self.assertEqual(date_parser.parse("2024-08-25 07:27:18 UTC"), finding.date) + self.assertEqual(date_parser.parse("2024-08-27 18:19:23 UTC"), finding.mitigated) + self.assertFalse(finding.active) + self.assertTrue(finding.is_mitigated) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[3] + self.assertEqual("example.co/File creation via HTTP method PUT", finding.title) + self.assertEqual("Critical", finding.severity) + self.assertEqual(date_parser.parse("2024-07-22 17:54:36 UTC"), finding.date) + self.assertEqual(date_parser.parse("2024-07-22 20:57:56 UTC"), finding.mitigated) + self.assertFalse(finding.active) + self.assertTrue(finding.is_mitigated) + self.assertIn("**Reporter**: reporter", finding.description) + self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids) + + def test_bug_bounty_hacker_one_one_findings_json(self): + with open("unittests/scans/h1/bug_bounty_one.json", encoding="utf-8") as 
testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) + with self.subTest(): + finding = findings[0] + self.assertEqual( + "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + finding.title, + ) + self.assertEqual("Medium", finding.severity) + self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Assigned To**: Group example.co Team", finding.description) + self.assertIn("**Weakness Category**: Information Disclosure", finding.description) + self.assertIn("**Reporter**: reporter", finding.description) - def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/h1/data_many.json", encoding="utf-8") as testfile: + def test_bug_bounty_hacker_one_zero_findings_json(self): + with open("unittests/scans/h1/bug_bounty_zero.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) - self.assertEqual(2, len(findings)) + self.assertEqual(0, len(findings)) + + def test_bug_bounty_hacker_one_many_findings_csv(self): + with open("unittests/scans/h1/bug_bounty_many.csv", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(4, len(findings)) + with self.subTest(): + finding = findings[0] + self.assertEqual( + "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + finding.title, + ) + self.assertEqual("Medium", finding.severity) + self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Assigned To**: Group example.co Team", finding.description) + self.assertIn("**Weakness Category**: Information Disclosure", finding.description) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[1] + self.assertEqual("Acceso no autorizado a soporte premium sin pagar", finding.title) + self.assertEqual("Critical", finding.severity) + self.assertEqual(date_parser.parse("2024-09-10 15:38:20 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[2] + self.assertEqual("XSS - stg.pse.mock.example.co", finding.title) + self.assertEqual("Info", finding.severity) + self.assertEqual(date_parser.parse("2024-08-25 07:27:18 UTC"), finding.date) + self.assertEqual(date_parser.parse("2024-08-27 18:19:23 UTC"), finding.mitigated) + self.assertFalse(finding.active) + self.assertTrue(finding.is_mitigated) + self.assertIn("**Reporter**: reporter", finding.description) + with self.subTest(): + finding = findings[3] + self.assertEqual("example.co/File creation via HTTP method PUT", finding.title) + self.assertEqual("Critical", finding.severity) + self.assertEqual(date_parser.parse("2024-07-22 17:54:36 UTC"), finding.date) + self.assertEqual(date_parser.parse("2024-07-22 20:57:56 UTC"), finding.mitigated) + self.assertFalse(finding.active) + self.assertTrue(finding.is_mitigated) + self.assertIn("**Reporter**: reporter", finding.description) + self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids) + + def test_bug_bounty_hacker_one_one_findings_csv(self): + with open("unittests/scans/h1/bug_bounty_one.csv", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + 
self.assertEqual(1, len(findings)) + with self.subTest(): + finding = findings[0] + self.assertEqual( + "Sensitive Account Balance Information Exposure via example's DaviPlata Payment Link Integration", + finding.title, + ) + self.assertEqual("Medium", finding.severity) + self.assertEqual(date_parser.parse("2024-05-12 04:05:27 UTC"), finding.date) + self.assertTrue(finding.active) + self.assertIn("**Assigned To**: Group example.co Team", finding.description) + self.assertIn("**Weakness Category**: Information Disclosure", finding.description) + self.assertIn("**Reporter**: reporter", finding.description) + + def test_bug_bounty_hacker_one_zero_findings_csv(self): + with open("unittests/scans/h1/bug_bounty_zero.csv", encoding="utf-8") as testfile: + parser = H1Parser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + +class TestHackerOneParser( + HackerOneVulnerabilityDisclosureProgramTests, + HackerOneBugBountyProgramTests, +): + """Combined unit test runner.""" From ec19ae885cd7d1171854a32deda11a93bea25d10 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Sat, 21 Sep 2024 11:53:24 -0500 Subject: [PATCH 15/22] Risk Exceptions: Add/Remove notes when finding is added/removed from risk exception (#10934) * Risk Exceptions: Add/Remove notes when finding is added/removed from risk exception * Fix Flake8 * Correct tests * Add user ID to finding note * use jira user --- dojo/api_v2/serializers.py | 8 +++-- dojo/api_v2/views.py | 2 +- dojo/engagement/views.py | 6 ++-- dojo/finding/views.py | 16 +++++----- dojo/jira_link/helper.py | 8 ++--- dojo/risk_acceptance/helper.py | 56 ++++++++++++++++++++++++++++++---- 6 files changed, 71 insertions(+), 25 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 10c07b3f3d..78ea12e7ad 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -1468,7 +1468,8 @@ class RiskAcceptanceSerializer(serializers.ModelSerializer): def create(self, validated_data): instance = super().create(validated_data) - add_findings_to_risk_acceptance(instance, instance.accepted_findings.all()) + user = getattr(self.context.get("request", None), "user", None) + add_findings_to_risk_acceptance(user, instance, instance.accepted_findings.all()) return instance def update(self, instance, validated_data): @@ -1482,11 +1483,12 @@ def update(self, instance, validated_data): findings_to_remove = Finding.objects.filter(id__in=[x.id for x in findings_to_remove]) # Make the update in the database instance = super().update(instance, validated_data) + user = getattr(self.context.get("request", None), "user", None) # Add the new findings - add_findings_to_risk_acceptance(instance, findings_to_add) + add_findings_to_risk_acceptance(user, instance, findings_to_add) # Remove the ones that were not present in the payload for finding in findings_to_remove: - remove_finding_from_risk_acceptance(instance, finding) + remove_finding_from_risk_acceptance(user, instance, finding) return instance @extend_schema_field(serializers.CharField()) diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index b36924640b..76521f5e00 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -654,7 +654,7 @@ def destroy(self, request, pk=None): instance = self.get_object() # Remove any findings on the risk acceptance for finding in instance.accepted_findings.all(): - remove_finding_from_risk_acceptance(instance, finding) + remove_finding_from_risk_acceptance(request.user, 
instance, finding) # return the response of the object being deleted return super().destroy(request, pk=pk) diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 777a5f7a11..ff86435d0c 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -1250,7 +1250,7 @@ def add_risk_acceptance(request, eid, fid=None): findings = form.cleaned_data["accepted_findings"] - risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings) + risk_acceptance = ra_helper.add_findings_to_risk_acceptance(request.user, risk_acceptance, findings) messages.add_message( request, @@ -1360,7 +1360,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): finding = get_object_or_404( Finding, pk=request.POST["remove_finding_id"]) - ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding) + ra_helper.remove_finding_from_risk_acceptance(request.user, risk_acceptance, finding) messages.add_message( request, @@ -1391,7 +1391,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): if not errors: findings = add_findings_form.cleaned_data["accepted_findings"] - ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings) + ra_helper.add_findings_to_risk_acceptance(request.user, risk_acceptance, findings) messages.add_message( request, diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 4b37ebc8a9..c6ca73fcad 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -991,10 +991,10 @@ def process_finding_form(self, request: HttpRequest, finding: Finding, context: # Handle risk exception related things if "risk_accepted" in context["form"].cleaned_data and context["form"]["risk_accepted"].value(): if new_finding.test.engagement.product.enable_simple_risk_acceptance: - ra_helper.simple_risk_accept(new_finding, perform_save=False) + ra_helper.simple_risk_accept(request.user, new_finding, perform_save=False) else: if new_finding.risk_accepted: - ra_helper.risk_unaccept(new_finding, perform_save=False) + ra_helper.risk_unaccept(request.user, new_finding, perform_save=False) # Save and add new endpoints finding_helper.add_endpoints(new_finding, context["form"]) # Remove unrelated endpoints @@ -1270,7 +1270,7 @@ def close_finding(request, fid): status.last_modified = timezone.now() status.save() # Clear the risk acceptance, if present - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(request.user, finding) # Manage the jira status changes push_to_jira = False @@ -1446,7 +1446,7 @@ def reopen_finding(request, fid): status.last_modified = timezone.now() status.save() # Clear the risk acceptance, if present - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(request.user, finding) # Manage the jira status changes push_to_jira = False @@ -1626,7 +1626,7 @@ def simple_risk_accept(request, fid): if not finding.test.engagement.product.enable_simple_risk_acceptance: raise PermissionDenied - ra_helper.simple_risk_accept(finding) + ra_helper.simple_risk_accept(request.user, finding) messages.add_message( request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success", @@ -1640,7 +1640,7 @@ def simple_risk_accept(request, fid): @user_is_authorized(Finding, Permissions.Risk_Acceptance, "fid") def risk_unaccept(request, fid): finding = get_object_or_404(Finding, id=fid) - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(request.user, finding) messages.add_message( request, @@ -2851,9 +2851,9 @@ def finding_bulk_update_all(request, pid=None): ): skipped_risk_accept_count 
+= 1 else: - ra_helper.simple_risk_accept(finding) + ra_helper.simple_risk_accept(request.user, finding) elif form.cleaned_data["risk_unaccept"]: - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(request.user, finding) for prod in prods: calculate_grade(prod) diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index 3ccff3df81..d01f3bb334 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -1623,7 +1623,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign owner=finding.reporter, ) finding.test.engagement.risk_acceptance.add(ra) - ra_helper.add_findings_to_risk_acceptance(ra, [finding]) + ra_helper.add_findings_to_risk_acceptance(User.objects.get_or_create(username="JIRA")[0], ra, [finding]) status_changed = True elif jira_instance and resolution_name in jira_instance.false_positive_resolutions: if not finding.false_p: @@ -1633,7 +1633,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign finding.mitigated = None finding.is_mitigated = False finding.false_p = True - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding) status_changed = True else: # Mitigated by default as before @@ -1645,7 +1645,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign finding.mitigated_by, _created = User.objects.get_or_create(username="JIRA") finding.endpoints.clear() finding.false_p = False - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding) status_changed = True else: if not finding.active: @@ -1655,7 +1655,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign finding.mitigated = None finding.is_mitigated = False finding.false_p = False - ra_helper.risk_unaccept(finding) + ra_helper.risk_unaccept(User.objects.get_or_create(username="JIRA")[0], finding) status_changed = True # for findings in a group, there is no jira_issue attached to the finding diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index a1d628b33d..453fccb9f1 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -1,4 +1,5 @@ import logging +from contextlib import suppress from dateutil.relativedelta import relativedelta from django.core.exceptions import PermissionDenied @@ -8,7 +9,7 @@ import dojo.jira_link.helper as jira_helper from dojo.celery import app from dojo.jira_link.helper import escape_for_jira -from dojo.models import Finding, Risk_Acceptance, System_Settings +from dojo.models import Dojo_User, Finding, Notes, Risk_Acceptance, System_Settings from dojo.notifications.helper import create_notification from dojo.utils import get_full_url, get_system_setting @@ -102,7 +103,7 @@ def delete(eng, risk_acceptance): risk_acceptance.delete() -def remove_finding_from_risk_acceptance(risk_acceptance, finding): +def remove_finding_from_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, finding: Finding) -> None: logger.debug("removing finding %i from risk acceptance %i", finding.id, risk_acceptance.id) risk_acceptance.accepted_findings.remove(finding) finding.active = True @@ -112,9 +113,20 @@ def remove_finding_from_risk_acceptance(risk_acceptance, finding): finding.save(dedupe_option=False) # best effort jira integration, no status changes post_jira_comments(risk_acceptance, [finding], unaccepted_message_creator) + # Add a note to reflect that the finding was removed from 
the risk acceptance + if user is not None: + finding.notes.add(Notes.objects.create( + entry=( + f"{Dojo_User.generate_full_name(user)} ({user.id}) removed this finding from the risk acceptance: " + f'"{risk_acceptance.name}" ({get_view_risk_acceptance(risk_acceptance)})' + ), + author=user, + )) + return None -def add_findings_to_risk_acceptance(risk_acceptance, findings): + +def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, findings: list[Finding]) -> None: for finding in findings: if not finding.duplicate or finding.risk_accepted: finding.active = False @@ -123,11 +135,21 @@ def add_findings_to_risk_acceptance(risk_acceptance, findings): # Update any endpoint statuses on each of the findings update_endpoint_statuses(finding, accept_risk=True) risk_acceptance.accepted_findings.add(finding) + # Add a note to reflect that the finding was added to the risk acceptance + if user is not None: + finding.notes.add(Notes.objects.create( + entry=( + f"{Dojo_User.generate_full_name(user)} ({user.id}) added this finding to the risk acceptance: " + f'"{risk_acceptance.name}" ({get_view_risk_acceptance(risk_acceptance)})' + ), + author=user, + )) risk_acceptance.save() - # best effort jira integration, no status changes post_jira_comments(risk_acceptance, findings, accepted_message_creator) + return None + @app.task def expiration_handler(*args, **kwargs): @@ -174,6 +196,16 @@ def expiration_handler(*args, **kwargs): risk_acceptance.save() +def get_view_risk_acceptance(risk_acceptance: Risk_Acceptance) -> str: + """Return the fully qualified URL of the view risk acceptance page.""" + # Suppress the AttributeError for the rare case where a risk acceptance has no engagement + with suppress(AttributeError): + return get_full_url( + reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id)), + ) + return "" + + def expiration_message_creator(risk_acceptance, heads_up_days=0): return "Risk acceptance [({})|{}] with {} findings has expired".format( escape_for_jira(risk_acceptance.name), @@ -267,7 +299,7 @@ def prefetch_for_expiration(risk_acceptances): ) -def simple_risk_accept(finding, perform_save=True): +def simple_risk_accept(user: Dojo_User, finding: Finding, perform_save=True) -> None: if not finding.test.engagement.product.enable_simple_risk_acceptance: raise PermissionDenied @@ -282,9 +314,15 @@ def simple_risk_accept(finding, perform_save=True): # post_jira_comment might reload from database so see unaccepted finding. but the comment # only contains some text so that's ok post_jira_comment(finding, accepted_message_creator) + # Add a note to reflect that the finding was risk accepted + if user is not None: + finding.notes.add(Notes.objects.create( + entry=(f"{Dojo_User.generate_full_name(user)} ({user.id}) has risk accepted this finding"), + author=user, + )) -def risk_unaccept(finding, perform_save=True): +def risk_unaccept(user: Dojo_User, finding: Finding, perform_save=True) -> None: logger.debug("unaccepting finding %i:%s if it is currently risk accepted", finding.id, finding) if finding.risk_accepted: logger.debug("unaccepting finding %i:%s", finding.id, finding) @@ -302,6 +340,12 @@ def risk_unaccept(finding, perform_save=True): # post_jira_comment might reload from database so see unaccepted finding. 
but the comment # only contains some text so that's ok post_jira_comment(finding, unaccepted_message_creator) + # Add a note to reflect that the finding was removed from the risk acceptance + if user is not None: + finding.notes.add(Notes.objects.create( + entry=(f"{Dojo_User.generate_full_name(user)} ({user.id}) removed a risk exception from this finding"), + author=user, + )) def remove_from_any_risk_acceptance(finding): From 3218c641879265634f1614cfbd27b648a2358ea7 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Mon, 23 Sep 2024 11:39:37 -0500 Subject: [PATCH 16/22] Parsers: Specify lists rather than `dict.values()` (#10945) --- dojo/tools/blackduck/parser.py | 2 +- dojo/tools/blackduck_binary_analysis/parser.py | 2 +- dojo/tools/h1/parser.py | 2 +- dojo/tools/intsights/parser.py | 2 +- dojo/tools/mend/parser.py | 2 +- dojo/tools/qualys_webapp/parser.py | 2 +- dojo/tools/sslscan/parser.py | 2 +- dojo/tools/sslyze/parser_xml.py | 2 +- dojo/tools/whitehat_sentinel/parser.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py index a79e9db967..12f01a07d0 100644 --- a/dojo/tools/blackduck/parser.py +++ b/dojo/tools/blackduck/parser.py @@ -78,7 +78,7 @@ def ingest_findings(self, normalized_findings, test): dupes[dupe_key] = finding - return dupes.values() + return list(dupes.values()) def format_title(self, i): if i.channel_version_origin_id is not None: diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py index 7e545e6751..0f38773dd9 100644 --- a/dojo/tools/blackduck_binary_analysis/parser.py +++ b/dojo/tools/blackduck_binary_analysis/parser.py @@ -104,7 +104,7 @@ def ingest_findings(self, sorted_findings, test): findings[unique_finding_key] = finding - return findings.values() + return list(findings.values()) def format_title(self, i): title = f"{i.object_name}: {i.component} {i.version} Vulnerable" diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index e182af6b76..c386dec04d 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -100,7 +100,7 @@ def get_vulnerability_disclosure_json_findings(self, tree, test): ) finding.unsaved_endpoints = [] dupes[dupe_key] = finding - return dupes.values() + return list(dupes.values()) def build_description(self, content): date = content["attributes"]["created_at"] diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py index e49c61b852..abd05ee6ef 100644 --- a/dojo/tools/intsights/parser.py +++ b/dojo/tools/intsights/parser.py @@ -71,4 +71,4 @@ def get_findings(self, file, test): duplicates[dupe_key] = alert if dupe_key not in duplicates: duplicates[dupe_key] = True - return duplicates.values() + return list(duplicates.values()) diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py index 60ad893109..dee917ce2a 100644 --- a/dojo/tools/mend/parser.py +++ b/dojo/tools/mend/parser.py @@ -161,4 +161,4 @@ def create_finding_key(f: Finding) -> str: if dupe_key not in dupes: dupes[dupe_key] = finding - return dupes.values() + return list(dupes.values()) diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py index 59c0d2b855..840c9bc870 100644 --- a/dojo/tools/qualys_webapp/parser.py +++ b/dojo/tools/qualys_webapp/parser.py @@ -462,7 +462,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False): ).values(), ) - return items + return list(items) class 
QualysWebAppParser: diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py index 621ded3daf..9ac284c126 100644 --- a/dojo/tools/sslscan/parser.py +++ b/dojo/tools/sslscan/parser.py @@ -93,4 +93,4 @@ def get_findings(self, file, test): else: endpoint = Endpoint(host=host, port=port) finding.unsaved_endpoints.append(endpoint) - return dupes.values() + return list(dupes.values()) diff --git a/dojo/tools/sslyze/parser_xml.py b/dojo/tools/sslyze/parser_xml.py index 710b0c73b2..24fe3c9b3c 100644 --- a/dojo/tools/sslyze/parser_xml.py +++ b/dojo/tools/sslyze/parser_xml.py @@ -161,4 +161,4 @@ def get_findings(self, file, test): host=host, port=port, protocol=protocol, ), ) - return dupes.values() + return list(dupes.values()) diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index eeb97ee8f5..fe336bf27d 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -268,4 +268,4 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding( finding.unsaved_endpoints = endpoints dupes[dupe_key] = finding - return dupes.values() + return list(dupes.values()) From 57b228e9608c456bce7060ab6022010a397335cf Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 23 Sep 2024 17:02:51 +0000 Subject: [PATCH 17/22] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 49f5862eec..3299d3eb89 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.39.0-dev", + "version": "2.38.3", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index bac40506f9..996b03df38 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.38.2" +__version__ = "2.38.3" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 9bd09f45fa..1e34a710be 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.39.0-dev" +appVersion: "2.38.3" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.151-dev +version: 1.6.151 icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From eb5903825a004de040c9e66b686e583bb05b2027 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 23 Sep 2024 17:26:29 +0000 Subject: [PATCH 18/22] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 3299d3eb89..49f5862eec 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.38.3", + "version": "2.39.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 996b03df38..82fc124150 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.38.3" +__version__ = "2.39.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 1e34a710be..1052b31209 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.38.3" +appVersion: "2.39.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.151 +version: 1.6.152-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 2c591f2e9feccbc36abcd9c48e74efafbff4965f Mon Sep 17 00:00:00 2001 From: Ross Esposito Date: Mon, 23 Sep 2024 12:54:07 -0500 Subject: [PATCH 19/22] Fixing merge conflict --- dojo/tools/semgrep/parser.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index cd32872f37..b7472005da 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -140,12 +140,12 @@ def convert_severity(self, val): elif upper_value == "LOW": return "Low" elif upper_value == "INFO": - if "WARNING" == val.upper(): - return "Medium" - if "ERROR" == val.upper() or "HIGH" == val.upper(): - return "High" - if "INFO" == val.upper(): - return "Info" + if "WARNING" == val.upper(): + return "Medium" + if "ERROR" == val.upper() or "HIGH" == val.upper(): + return "High" + if "INFO" == val.upper(): + return "Info" msg = f"Unknown value for severity: {val}" raise ValueError(msg) From b0f0c3aebb67f52ab2afc2c8a900de6a1f037969 Mon Sep 17 00:00:00 2001 From: Ross Esposito Date: Mon, 23 Sep 2024 13:34:01 -0500 Subject: [PATCH 20/22] Fixing sha settings value --- dojo/settings/.settings.dist.py.sha256sum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index f8adf9d7d4..8a22d6140c 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -f7e63afa0003d1992f8247f9a7a830847bd7498fa1e2d46d6ea04e3006bb9ee2 +bf2078296b31ba8c8376fdd88bbf1d552d0fba8b6e465a8552ac2fa901aa7e60 From 38ed4c19d344f69b796c9fc8803264b67b1b8296 Mon Sep 17 00:00:00 2001 From: Ross Esposito Date: Mon, 23 Sep 2024 14:08:47 -0500 Subject: [PATCH 21/22] Fixing more lint errors --- dojo/api_v2/serializers.py | 4 ++-- dojo/metrics/utils.py | 3 +-- dojo/product_type/queries.py | 1 + dojo/risk_acceptance/helper.py | 4 ++-- dojo/tools/h1/parser.py | 20 ++++++++++---------- dojo/tools/semgrep/parser.py | 6 +++--- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 658f45df6a..371f9f4266 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -555,8 +555,8 @@ def validate(self, data): if self.context["request"].method == "POST" and "password" not in data: msg = "Passwords must be supplied for new users" raise ValidationError(msg) - else: - return super().validate(data) + + return super().validate(data) class UserContactInfoSerializer(serializers.ModelSerializer): diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index b68829da1b..d22b13beb4 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -37,8 +37,7 @@ def get_metrics_finding_filter_class() -> Type[Union[MetricsFindingFilter, MetricsFindingFilterWithoutObjectLookups]]: if 
get_system_setting("filter_string_matching", False): return MetricsFindingFilterWithoutObjectLookups - else: - return MetricsFindingFilter + return MetricsFindingFilter def finding_queries( diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py index 27c3b31d70..1d95ac8117 100644 --- a/dojo/product_type/queries.py +++ b/dojo/product_type/queries.py @@ -53,6 +53,7 @@ def get_authorized_global_members_for_product_type(product_type, permission): return Global_Role.objects.filter(group=None, role__isnull=False).order_by("user__first_name", "user__last_name").select_related("role", "user") return Global_Role.objects.none() + def get_authorized_groups_for_product_type(product_type, permission): user = get_current_user() diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index bc02b533f5..32dedf3f74 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -123,7 +123,7 @@ def remove_finding_from_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_A author=user, )) - return None + return def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Acceptance, findings: list[Finding]) -> None: @@ -148,7 +148,7 @@ def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Accep # best effort jira integration, no status changes post_jira_comments(risk_acceptance, findings, accepted_message_creator) - return None + returnß @app.task diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index c386dec04d..bdd60f4455 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -275,11 +275,11 @@ def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding] file_name = file.name if str(file_name).endswith(".json"): return self.determine_json_format(file, test) - elif str(file_name).endswith(".csv"): + if str(file_name).endswith(".csv"): return self.determine_csv_format(file, test) - else: - msg = "Filename extension not recognized. Use .json or .csv" - raise ValueError(msg) + + msg = "Filename extension not recognized. 
Use .json or .csv" + raise ValueError(msg) def get_json_tree(self, file: TemporaryUploadedFile) -> dict: """Parse the JSON file into a dict.""" data = file.read() @@ -298,9 +298,9 @@ def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test) if "data" in tree: return self.get_vulnerability_disclosure_json_findings(tree, test) - else: - msg = "This JSON format is not supported" - raise ValueError(msg) + + msg = "This JSON format is not supported" + raise ValueError(msg) def get_csv_reader(self, file: TemporaryUploadedFile) -> csv.DictReader: """Extract the CSV file into an iterable that represents a dict.""" if file is None: @@ -317,6 +317,6 @@ def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[ # Check for some root elements if "bounty" in reader.fieldnames: return self.get_bug_bounty_program_csv_findings(reader, test) - else: - msg = "This CSV format is not supported" - raise ValueError(msg) + + msg = "This CSV format is not supported" + raise ValueError(msg) diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index b7472005da..3cd37c638f 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -133,11 +133,11 @@ def convert_severity(self, val): upper_value = val.upper() if upper_value == "CRITICAL": return "Critical" - elif upper_value in ["WARNING", "MEDIUM"]: + if upper_value in ["WARNING", "MEDIUM"]: return "Medium" - elif upper_value in ["ERROR", "HIGH"]: + if upper_value in ["ERROR", "HIGH"]: return "High" - elif upper_value == "LOW": + if upper_value == "LOW": return "Low" elif upper_value == "INFO": if "WARNING" == val.upper(): return "Medium" if "ERROR" == val.upper() or "HIGH" == val.upper(): return "High" if "INFO" == val.upper(): return "Info" msg = f"Unknown value for severity: {val}" raise ValueError(msg) From 4c60a809e033652faefb1b1b4c814f001fb0b95a Mon Sep 17 00:00:00 2001 From: Ross Esposito Date: Mon, 23 Sep 2024 14:14:12 -0500 Subject: [PATCH 22/22] Fixing more lint errors pt 2 --- dojo/api_v2/serializers.py | 1 - dojo/risk_acceptance/helper.py | 2 +- dojo/tools/h1/parser.py | 3 --- dojo/tools/semgrep/parser.py | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 371f9f4266..5109bd068f 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -555,7 +555,6 @@ def validate(self, data): if self.context["request"].method == "POST" and "password" not in data: msg = "Passwords must be supplied for new users" raise ValidationError(msg) - return super().validate(data) diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index 32dedf3f74..1cd1b15cda 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -148,7 +148,7 @@ def add_findings_to_risk_acceptance(user: Dojo_User, risk_acceptance: Risk_Accep # best effort jira integration, no status changes post_jira_comments(risk_acceptance, findings, accepted_message_creator) - returnß + return @app.task diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index bdd60f4455..8aa3fc5ff2 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -275,7 +275,6 @@ def get_findings(self, file: TemporaryUploadedFile, test: Test) -> list[Finding] return self.determine_json_format(file, test) if str(file_name).endswith(".csv"): return self.determine_csv_format(file, test) - msg = "Filename extension not recognized. 
Use .json or .csv" raise ValueError(msg) @@ -298,7 +297,6 @@ def determine_json_format(self, file: TemporaryUploadedFile, test: Test) -> list return self.get_bug_bounty_program_json_findings(tree.get("findings", []), test) if "data" in tree: return self.get_vulnerability_disclosure_json_findings(tree, test) - msg = "This JSON format is not supported" raise ValueError(msg) @@ -317,6 +315,5 @@ def determine_csv_format(self, file: TemporaryUploadedFile, test: Test) -> list[ # Check for some root elements if "bounty" in reader.fieldnames: return self.get_bug_bounty_program_csv_findings(reader, test) - msg = "This CSV format is not supported" raise ValueError(msg) diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index 3cd37c638f..883fcc4f31 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -139,7 +139,7 @@ def convert_severity(self, val): return "High" if upper_value == "LOW": return "Low" - elif upper_value == "INFO": + if upper_value == "INFO": if "WARNING" == val.upper(): return "Medium" if "ERROR" == val.upper() or "HIGH" == val.upper():