diff --git a/.github/renovate.json b/.github/renovate.json index 7c9c6623cd..221c87442e 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -6,7 +6,11 @@ "dependencyDashboardApproval": false, "baseBranches": ["dev"], "rebaseWhen": "conflicted", - "ignorePaths": ["requirements.txt", "components/package.json", "components/package-lock.json", "dojo/components/yarn.lock", "dojo/components/package.json", "Dockerfile**"], + "ignorePaths": ["requirements.txt", "requirements-lint.txt", "components/package.json", "components/package-lock.json", "dojo/components/yarn.lock", "dojo/components/package.json", "Dockerfile**"], + "ignoreDeps": [ + "mysql", + "rabbitmq" + ], "packageRules": [{ "packagePatterns": ["*"], "commitMessageExtra": "from {{currentVersion}} to {{#if isMajor}}v{{{newMajor}}}{{else}}{{#if isSingleVersion}}v{{{toVersion}}}{{else}}{{{newValue}}}{{/if}}{{/if}}", diff --git a/.github/workflows/flake8.yml b/.github/workflows/flake8.yml deleted file mode 100644 index 188b39430f..0000000000 --- a/.github/workflows/flake8.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Flake8 -# pull requests: -# run on pull_request_target instead of just pull_request as we need write access to update the status check -on: - workflow_dispatch: - pull_request_target: - push: - -jobs: - flake8-your-pr: - runs-on: ubuntu-latest - steps: - - name: Checkout - if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target' - uses: actions/checkout@v4 - # by default the pull_requst_target event checks out the base branch, i.e. dev - # so we need to explicitly checkout the head of the PR - # we use fetch-depth 0 to make sure the full history is checked out and we can compare against - # the base commit (branch) of the PR - # more info https://github.community/t/github-actions-are-severely-limited-on-prs/18179/16 - # we checkout merge_commit here as this contains all new code from dev also. 
we don't need to compare against base_commit - with: - persist-credentials: false - fetch-depth: 0 - ref: refs/pull/${{ github.event.pull_request.number }}/merge - # repository: ${{github.event.pull_request.head.repo.full_name}} - - - name: Checkout - # for non PR runs we just checkout the default, which is a sha on a branch probably - if: github.event_name != 'pull_request' && github.event_name != 'pull_request_target' - uses: actions/checkout@v4 - - # - uses: tayfun/flake8-your-pr@master - - uses: DefectDojo/flake8-your-pr@master - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/release-1-create-pr.yml b/.github/workflows/release-1-create-pr.yml index 0b7bf2c92c..c93b1d0ee6 100644 --- a/.github/workflows/release-1-create-pr.yml +++ b/.github/workflows/release-1-create-pr.yml @@ -51,9 +51,9 @@ jobs: - name: Update version numbers in key files run: | - sed -ri "s/__version__ = '.*'/__version__ = '${{ github.event.inputs.release_number }}'/" dojo/__init__.py - sed -ri "s/\"version\": \".*\"/\"version\": \"${{ github.event.inputs.release_number }}\"/" components/package.json - sed -ri "s/appVersion: \".*\"/appVersion: \"${{ github.event.inputs.release_number }}\"/" helm/defectdojo/Chart.yaml + sed -ri 's/__version__ = ".*"/__version__ = "${{ github.event.inputs.release_number }}"/' dojo/__init__.py + sed -ri 's/"version": ".*"/"version": "${{ github.event.inputs.release_number }}"/' components/package.json + sed -ri 's/appVersion: ".*"/appVersion: "${{ github.event.inputs.release_number }}"/' helm/defectdojo/Chart.yaml if grep "\-dev" helm/defectdojo/Chart.yaml; then echo "x.y.z-dev found in Chart.yaml, probably releasing a new minor version" diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml index 2da1dc0dd9..b5c8828ee1 100644 --- a/.github/workflows/release-3-master-into-dev.yml +++ b/.github/workflows/release-3-master-into-dev.yml @@ -44,9 +44,9 @@ jobs: - name: Update version numbers in key files run: | - sed -ri "s/__version__ = '.*'/__version__ = '${{ github.event.inputs.release_number_dev }}'/" dojo/__init__.py - sed -ri "s/appVersion: \".*\"/appVersion: \"${{ github.event.inputs.release_number_dev }}\"/" helm/defectdojo/Chart.yaml - sed -ri "s/\"version\": \".*\"/\"version\": \"${{ github.event.inputs.release_number_dev }}\"/" components/package.json + sed -ri 's/__version__ = ".*"/__version__ = "${{ github.event.inputs.release_number_dev }}"/' dojo/__init__.py + sed -ri 's/"version": ".*"/"version": "${{ github.event.inputs.release_number_dev }}"/' components/package.json + sed -ri 's/appVersion: ".*"/appVersion: "${{ github.event.inputs.release_number_dev }}"/' helm/defectdojo/Chart.yaml CURRENT_CHART_VERSION=$(grep -oP 'version: (\K\S*)?' helm/defectdojo/Chart.yaml | head -1) sed -ri "0,/version/s/version: \S+/$(echo "version: $CURRENT_CHART_VERSION" | awk -F. -v OFS=. 
'NF==1{print ++$NF}; NF>1{$NF=sprintf("%0*d", length($NF), ($NF+1)); print}')-dev/" helm/defectdojo/Chart.yaml diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 132ffa89db..04799cdd00 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -2,31 +2,13 @@ name: Ruff Linter on: workflow_dispatch: - pull_request_target: push: - + pull_request: jobs: ruff-linting: runs-on: ubuntu-latest steps: - name: Checkout - if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target' - uses: actions/checkout@v4 - # by default the pull_requst_target event checks out the base branch, i.e. dev - # so we need to explicitly checkout the head of the PR - # we use fetch-depth 0 to make sure the full history is checked out and we can compare against - # the base commit (branch) of the PR - # more info https://github.community/t/github-actions-are-severely-limited-on-prs/18179/16 - # we checkout merge_commit here as this contains all new code from dev also. we don't need to compare against base_commit - with: - persist-credentials: false - fetch-depth: 0 - ref: refs/pull/${{ github.event.pull_request.number }}/merge - # repository: ${{github.event.pull_request.head.repo.full_name}} - - - name: Checkout - # for non PR runs we just checkout the default, which is a sha on a branch probably - if: github.event_name != 'pull_request' && github.event_name != 'pull_request_target' uses: actions/checkout@v4 - name: Install Ruff Linter diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine index 7d712ad626..ec2accad78 100644 --- a/Dockerfile.django-alpine +++ b/Dockerfile.django-alpine @@ -5,8 +5,8 @@ # Dockerfile.nginx to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.9-alpine3.20@sha256:df44c0c0761ddbd6388f4549cab42d24d64d257c2a960ad5b276bb7dab9639c7 as base -FROM base as build +FROM python:3.11.9-alpine3.20@sha256:df44c0c0761ddbd6388f4549cab42d24d64d257c2a960ad5b276bb7dab9639c7 AS base +FROM base AS build WORKDIR /app RUN \ apk update && \ @@ -16,13 +16,15 @@ RUN \ bind-tools \ mysql-client \ mariadb-dev \ - postgresql14-client \ + postgresql16-client \ xmlsec \ git \ util-linux \ curl-dev \ openssl \ libffi-dev \ + python3-dev \ + libpq-dev \ && \ rm -rf /var/cache/apk/* && \ true @@ -31,12 +33,12 @@ COPY requirements.txt ./ # https://github.com/unbit/uwsgi/issues/1318#issuecomment-542238096 RUN CPUCOUNT=1 pip3 wheel --wheel-dir=/tmp/wheels -r ./requirements.txt -FROM base as django-alpine +FROM base AS django-alpine WORKDIR /app ARG uid=1001 ARG gid=1337 ARG appuser=defectdojo -ENV appuser ${appuser} +ENV appuser=${appuser} RUN \ apk update && \ apk add --no-cache \ @@ -49,7 +51,7 @@ RUN \ xmlsec \ git \ util-linux \ - postgresql14-client \ + postgresql16-client \ curl-dev \ openssl \ # needed for integration-tests @@ -136,5 +138,5 @@ ENV \ DD_UWSGI_NUM_OF_THREADS="2" ENTRYPOINT ["/entrypoint-uwsgi.sh"] -FROM django-alpine as django-unittests -COPY unittests/ ./unittests/ \ No newline at end of file +FROM django-alpine AS django-unittests +COPY unittests/ ./unittests/ diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 8abba29b88..678077041c 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -5,8 +5,8 @@ # Dockerfile.nginx to use the caching mechanism of Docker. 
# Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e as base -FROM base as build +FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e AS base +FROM base AS build WORKDIR /app RUN \ apt-get -y update && \ @@ -32,12 +32,12 @@ COPY requirements.txt ./ # https://github.com/unbit/uwsgi/issues/1318#issuecomment-542238096 RUN CPUCOUNT=1 pip3 wheel --wheel-dir=/tmp/wheels -r ./requirements.txt -FROM base as django +FROM base AS django WORKDIR /app ARG uid=1001 ARG gid=1337 ARG appuser=defectdojo -ENV appuser ${appuser} +ENV appuser=${appuser} RUN \ apt-get -y update && \ # ugly fix to install postgresql-client without errors @@ -141,5 +141,5 @@ ENV \ DD_UWSGI_NUM_OF_THREADS="2" ENTRYPOINT ["/entrypoint-uwsgi.sh"] -FROM django as django-unittests +FROM django AS django-unittests COPY unittests/ ./unittests/ diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index c7f38d4fd0..0ff85f7c2a 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -1,8 +1,8 @@ # code: language=Dockerfile -FROM openapitools/openapi-generator-cli:v7.6.0@sha256:f86ca824293602b71b9b66683cc0011f8ff963858bd853621c554ff5cc7dd1d5 as openapitools -FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e as build +FROM openapitools/openapi-generator-cli:v7.7.0@sha256:99924315933d49e7b33a7d2074bb2b64fc8def8f74519939036e24eb48f00336 AS openapitools +FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e AS build WORKDIR /app RUN \ apt-get -y update && \ @@ -76,4 +76,4 @@ ENV \ DD_ADMIN_USER=admin \ DD_ADMIN_PASSWORD='' \ DD_BASE_URL="http://localhost:8080/" -CMD ["/entrypoint-integration-tests.sh"] \ No newline at end of file +CMD ["/entrypoint-integration-tests.sh"] diff --git a/Dockerfile.nginx-alpine b/Dockerfile.nginx-alpine index b9a55ac415..1b61391d2b 100644 --- a/Dockerfile.nginx-alpine +++ b/Dockerfile.nginx-alpine @@ -5,8 +5,8 @@ # Dockerfile.django-alpine to use the caching mechanism of Docker. 
# Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.9-alpine3.20@sha256:df44c0c0761ddbd6388f4549cab42d24d64d257c2a960ad5b276bb7dab9639c7 as base -FROM base as build +FROM python:3.11.9-alpine3.20@sha256:df44c0c0761ddbd6388f4549cab42d24d64d257c2a960ad5b276bb7dab9639c7 AS base +FROM base AS build WORKDIR /app RUN \ apk update && \ @@ -16,13 +16,15 @@ RUN \ bind-tools \ mysql-client \ mariadb-dev \ - postgresql14-client \ + postgresql16-client \ xmlsec \ git \ util-linux \ curl-dev \ openssl \ libffi-dev \ + python3-dev \ + libpq-dev \ && \ rm -rf /var/cache/apk/* && \ true @@ -34,7 +36,7 @@ RUN CPUCOUNT=1 pip3 wheel --wheel-dir=/tmp/wheels -r ./requirements.txt FROM build AS collectstatic # Node installation from https://github.com/nodejs/docker-node -ENV NODE_VERSION 20.11.0 +ENV NODE_VERSION=20.11.0 RUN addgroup -g 1000 node \ && adduser -u 1000 -G node -s /bin/sh -D node \ @@ -103,7 +105,7 @@ RUN addgroup -g 1000 node \ && node --version \ && npm --version -ENV YARN_VERSION 1.22.19 +ENV YARN_VERSION=1.22.19 RUN apk add --no-cache --virtual .build-deps-yarn curl gnupg tar \ && for key in \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index a1fd76d05f..ef63d2c307 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -5,8 +5,8 @@ # Dockerfile.django-debian to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e as base -FROM base as build +FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e AS base +FROM base AS build WORKDIR /app RUN \ apt-get -y update && \ diff --git a/docker-compose.override.unit_tests.yml b/docker-compose.override.unit_tests.yml index a3d15b9350..a492dbdaad 100644 --- a/docker-compose.override.unit_tests.yml +++ b/docker-compose.override.unit_tests.yml @@ -1,5 +1,4 @@ --- -version: '3.8' services: nginx: image: busybox:1.36.1-musl diff --git a/docker-compose.override.unit_tests_cicd.yml b/docker-compose.override.unit_tests_cicd.yml index cc677ac41e..10dd2e0b6b 100644 --- a/docker-compose.override.unit_tests_cicd.yml +++ b/docker-compose.override.unit_tests_cicd.yml @@ -15,6 +15,7 @@ services: environment: PYTHONWARNINGS: error # We are strict about Warnings during testing DD_DEBUG: 'True' + DD_LOG_LEVEL: 'ERROR' DD_TEST_DATABASE_NAME: ${DD_TEST_DATABASE_NAME} DD_DATABASE_NAME: ${DD_TEST_DATABASE_NAME} DD_DATABASE_ENGINE: ${DD_DATABASE_ENGINE} diff --git a/docker/entrypoint-unit-tests-devDocker.sh b/docker/entrypoint-unit-tests-devDocker.sh index c590974b1b..e37fd64381 100755 --- a/docker/entrypoint-unit-tests-devDocker.sh +++ b/docker/entrypoint-unit-tests-devDocker.sh @@ -53,7 +53,9 @@ EOF echo "Unit Tests" echo "------------------------------------------------------------" -python3 manage.py test unittests -v 3 --keepdb --no-input + +python3 manage.py test unittests -v 3 --keepdb --no-input --failfast --shuffle --parallel --exclude-tag="non-parallel" +python3 manage.py test unittests -v 3 --keepdb --no-input --failfast --shuffle --tag="non-parallel" # you can select a single file to "test" unit tests # python3 manage.py test unittests.tools.test_npm_audit_scan_parser.TestNpmAuditParser --keepdb -v 3 diff --git a/docker/entrypoint-unit-tests.sh b/docker/entrypoint-unit-tests.sh index 6c45ce489d..cabdaa102a 100755 --- a/docker/entrypoint-unit-tests.sh +++ b/docker/entrypoint-unit-tests.sh @@ -79,4 +79,6 @@ python3 manage.py migrate 
echo "Unit Tests" echo "------------------------------------------------------------" -python3 manage.py test unittests -v 3 --keepdb --no-input + +python3 manage.py test unittests -v 3 --keepdb --no-input --failfast --shuffle --parallel --exclude-tag="non-parallel" +python3 manage.py test unittests -v 3 --keepdb --no-input --failfast --shuffle --tag="non-parallel" diff --git a/docs/config.dev.toml b/docs/config.dev.toml index 23b9cf30c5..65fff4564b 100644 --- a/docs/config.dev.toml +++ b/docs/config.dev.toml @@ -85,7 +85,8 @@ weight = 1 # See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html style = "dracula" # Uncomment if you want your chosen highlight style used for code blocks without a specified language - guessSyntax = "true" + # Do not uncomment otherwise it breaks mermaid + # guessSyntax = "true" # Everything below this are Site Params @@ -198,3 +199,6 @@ enable = false url = "https://owasp.slack.com/archives/C014H3ZV9U6" icon = "fab fa-slack" desc = "Chat with other project developers" + +[params.mermaid] +enable = true diff --git a/docs/config.master.toml b/docs/config.master.toml index 5771eb8367..29c4e0a6ad 100644 --- a/docs/config.master.toml +++ b/docs/config.master.toml @@ -85,7 +85,8 @@ weight = 1 # See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html style = "dracula" # Uncomment if you want your chosen highlight style used for code blocks without a specified language - guessSyntax = "true" + # Do not uncomment otherwise it breaks mermaid + # guessSyntax = "true" # Everything below this are Site Params @@ -198,3 +199,6 @@ enable = false url = "https://owasp.slack.com/archives/C014H3ZV9U6" icon = "fab fa-slack" desc = "Chat with other project developers" + +[params.mermaid] +enable = true diff --git a/docs/content/en/getting_started/upgrading/2.36.md b/docs/content/en/getting_started/upgrading/2.36.md index 260c86960d..ceaa8c77d1 100644 --- a/docs/content/en/getting_started/upgrading/2.36.md +++ b/docs/content/en/getting_started/upgrading/2.36.md @@ -2,6 +2,15 @@ title: 'Upgrading to DefectDojo Version 2.36.x' toc_hide: true weight: -20240603 -description: No special instructions. +description: Breaking Change for HELM deployments with PostgreSQL --- -There are no special instructions for upgrading to 2.36.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.36.0) for the contents of the release. + +Previous HELM deployments (HELM chart `<=1.6.136`, DefectDojo `<=2.35.4`) used a pinned version of PostgreSQL in versions `11.x`. These are incompatible with Django in version `4.2` (used from DefectDojo version `3.36.0`; HELM chart `1.6.137`). Because of this, it is necessary to upgrade PostgreSQL to version `12.x` or higher. DefectDojo in version `3.36.1` (HELM chart `1.6.138`) uses this new version of PostgreSQL. + +Unfortunately, an upgrade of PostgreSQL is not enough because PostgreSQL does not support automatic migration of data structures in the filesystem. Because of this, migration is needed. There are different ways (many of them similar to migration between different database backends (e.g. from MySQL to PostgreSQL)). 
For further inspiration and the approach that fits your setup best, see: + +- https://github.com/DefectDojo/django-DefectDojo/discussions/9480 +- https://owasp.slack.com/archives/C2P5BA8MN/p1717610931766739?thread_ts=1717587117.831149&cid=C2P5BA8MN +- https://dev.to/jkostolansky/how-to-upgrade-postgresql-from-11-to-12-2la6 + +There are no other special instructions for upgrading to 2.36.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.36.0) for the contents of the release. diff --git a/docs/content/en/integrations/ldap-authentication.md b/docs/content/en/integrations/ldap-authentication.md index 2fcf895e12..1769704373 100644 --- a/docs/content/en/integrations/ldap-authentication.md +++ b/docs/content/en/integrations/ldap-authentication.md @@ -41,7 +41,7 @@ Please check for the latest version of these requirements at the time of impleme Otherwise add the following to requirements.txt: -``` +```python python-ldap==3.4.2 django-auth-ldap==4.1.0 ``` @@ -119,7 +119,7 @@ Read the docs for Django Authentication with LDAP here: https://django-auth-ldap In order to pass the variables to the settings.dist.py file via docker, it's a good idea to add these to the docker-compose file. You can do this by adding the following variables to the environment section for the uwsgi image: -``` +```yaml DD_LDAP_SERVER_URI: "${DD_LDAP_SERVER_URI:-ldap://ldap.example.com}" DD_LDAP_BIND_DN: "${DD_LDAP_BIND_DN:-}" DD_LDAP_BIND_PASSWORD: "${DD_LDAP_BIND_PASSWORD:-}" diff --git a/docs/content/en/integrations/parsers/file/fortify.md b/docs/content/en/integrations/parsers/file/fortify.md index 5c113c36cb..2897e39d7c 100644 --- a/docs/content/en/integrations/parsers/file/fortify.md +++ b/docs/content/en/integrations/parsers/file/fortify.md @@ -20,6 +20,6 @@ per category. 
To get all issues, copy the [DefaultReportDefinitionAllIssues.xml] Once this is complete, you can run the following command on your .fpr file to generate the required XML: -``` +```bash ./path/to/ReportGenerator -format xml -f /path/to/output.xml -source /path/to/downloaded/artifact.fpr -template DefaultReportDefinitionAllIssues.xml ``` \ No newline at end of file diff --git a/docs/content/en/integrations/parsers/file/veracode.md b/docs/content/en/integrations/parsers/file/veracode.md index 7723786041..431a7f54ca 100644 --- a/docs/content/en/integrations/parsers/file/veracode.md +++ b/docs/content/en/integrations/parsers/file/veracode.md @@ -14,7 +14,7 @@ Veracode reports can be ingested in either XML or JSON Format - Requires slight modification of the response returned from the API - Example of a request being: `url | jq "{findings}"` - Desired Format: - ``` + ```json { "findings": [ { @@ -28,7 +28,7 @@ Veracode reports can be ingested in either XML or JSON Format - This response can be saved directly to a file and uploaded - Not as ideal for crafting a refined report consisting of multiple requests - Desired Format: - ``` + ```json { "_embedded": { "findings": [ diff --git a/docs/content/en/integrations/social-authentication.md b/docs/content/en/integrations/social-authentication.md index a7cafe3806..ebf2a6b0c8 100644 --- a/docs/content/en/integrations/social-authentication.md +++ b/docs/content/en/integrations/social-authentication.md @@ -312,7 +312,7 @@ Edit the settings (see [Configuration]({{< ref "/getting_started/configuration" or, alternatively, for helm configuration, add this to the `extraConfig` section: -``` +```yaml DD_SESSION_COOKIE_SECURE: 'True' DD_CSRF_COOKIE_SECURE: 'True' DD_SECURE_SSL_REDIRECT: 'True' @@ -453,7 +453,7 @@ Some Identity Providers are able to send list of groups to which should user bel You can bypass the login form if you are only using SSO/Social authentication for logging in by enabling these two environment variables: -``` +```yaml DD_SOCIAL_LOGIN_AUTO_REDIRECT: "true" DD_SOCIAL_AUTH_SHOW_LOGIN_FORM: "false" ``` diff --git a/docs/content/en/usage/productgrading.md b/docs/content/en/usage/productgrading.md index 88cb88267f..3ead06d24f 100644 --- a/docs/content/en/usage/productgrading.md +++ b/docs/content/en/usage/productgrading.md @@ -27,7 +27,7 @@ Note that the following abbreviations were used: - med: amount of medium findings within the product - low: amount of low findings within the product -``` +```python health=100 if crit > 0: health = 40 diff --git a/dojo/__init__.py b/dojo/__init__.py index 707177ee3e..a31f5294e2 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = '2.37.0-dev' -__url__ = 'https://github.com/DefectDojo/django-DefectDojo' -__docs__ = 'https://documentation.defectdojo.com' +__version__ = "2.37.0-dev" +__url__ = "https://github.com/DefectDojo/django-DefectDojo" +__docs__ = "https://documentation.defectdojo.com" diff --git a/dojo/admin.py b/dojo/admin.py index 87823ff4d0..a2452ce1e5 100644 --- a/dojo/admin.py +++ b/dojo/admin.py @@ -85,8 +85,8 @@ class AnswerParentAdmin(PolymorphicParentModelAdmin): """ list_display = ( - 'answered_survey', - 'question', + "answered_survey", + "question", ) base_model = Answer diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py index 030a572a15..1a50f6ba7d 100644 --- a/dojo/api_v2/prefetch/schema.py +++ b/dojo/api_v2/prefetch/schema.py @@ -26,14 +26,14 @@ def get_serializer_ref_name(serializer): :return: Serializer's ``ref_name`` or ``None`` for inline serializer :rtype: str or None """ - serializer_meta = getattr(serializer, 'Meta', None) + serializer_meta = getattr(serializer, "Meta", None) serializer_name = type(serializer).__name__ - if hasattr(serializer_meta, 'ref_name'): + if hasattr(serializer_meta, "ref_name"): ref_name = serializer_meta.ref_name else: ref_name = serializer_name - if ref_name.endswith('Serializer'): - ref_name = ref_name[:-len('Serializer')] + if ref_name.endswith("Serializer"): + ref_name = ref_name[:-len("Serializer")] return ref_name diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 006edc63f8..bafdd319bb 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -200,7 +200,7 @@ def __init__(self, **kwargs): self.pretty_print = pretty_print def to_internal_value(self, data): - if isinstance(data, list) and data == [''] and self.allow_empty: + if isinstance(data, list) and data == [""] and self.allow_empty: return [] if isinstance(data, six.string_types): if not data: @@ -1100,7 +1100,7 @@ def validate(self, data): name = data.get("name") # Make sure this will not create a duplicate test type if Tool_Type.objects.filter(name=name).count() > 0: - msg = 'A Tool Type with the name already exists' + msg = "A Tool Type with the name already exists" raise serializers.ValidationError(msg) return data @@ -1512,12 +1512,12 @@ def get_engagement(self, obj): def validate(self, data): def validate_findings_have_same_engagement(finding_objects: List[Finding]): - engagements = finding_objects.values_list('test__engagement__id', flat=True).distinct().count() + engagements = finding_objects.values_list("test__engagement__id", flat=True).distinct().count() if engagements > 1: msg = "You are not permitted to add findings from multiple engagements" raise PermissionDenied(msg) - findings = data.get('accepted_findings', []) + findings = data.get("accepted_findings", []) findings_ids = [x.id for x in findings] finding_objects = Finding.objects.filter(id__in=findings_ids) authed_findings = get_authorized_findings(Permissions.Finding_Edit).filter(id__in=findings_ids) @@ -1526,7 +1526,7 @@ def validate_findings_have_same_engagement(finding_objects: List[Finding]): raise PermissionDenied(msg) if self.context["request"].method == "POST": validate_findings_have_same_engagement(finding_objects) - elif self.context['request'].method in ['PATCH', 'PUT']: + elif self.context["request"].method in ["PATCH", "PUT"]: existing_findings = Finding.objects.filter(risk_acceptance=self.instance.id) existing_and_new_findings = existing_findings | finding_objects 
validate_findings_have_same_engagement(existing_and_new_findings) @@ -1645,10 +1645,10 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer): age = serializers.IntegerField(read_only=True) sla_days_remaining = serializers.IntegerField(read_only=True) finding_meta = FindingMetaSerializer(read_only=True, many=True) - related_fields = serializers.SerializerMethodField() + related_fields = serializers.SerializerMethodField(allow_null=True) # for backwards compatibility - jira_creation = serializers.SerializerMethodField(read_only=True) - jira_change = serializers.SerializerMethodField(read_only=True) + jira_creation = serializers.SerializerMethodField(read_only=True, allow_null=True) + jira_change = serializers.SerializerMethodField(read_only=True, allow_null=True) display_status = serializers.SerializerMethodField() finding_groups = FindingGroupSerializer( source="finding_group_set", many=True, read_only=True, @@ -2024,12 +2024,12 @@ class Meta: ) def validate(self, data): - async_updating = getattr(self.instance, 'async_updating', None) + async_updating = getattr(self.instance, "async_updating", None) if async_updating: - new_sla_config = data.get('sla_configuration', None) - old_sla_config = getattr(self.instance, 'sla_configuration', None) + new_sla_config = data.get("sla_configuration", None) + old_sla_config = getattr(self.instance, "sla_configuration", None) if new_sla_config and old_sla_config and new_sla_config != old_sla_config: - msg = 'Finding SLA expiration dates are currently being recalculated. The SLA configuration for this product cannot be changed until the calculation is complete.' + msg = "Finding SLA expiration dates are currently being recalculated. The SLA configuration for this product cannot be changed until the calculation is complete." raise serializers.ValidationError(msg) return data @@ -3002,13 +3002,13 @@ class Meta: ) def validate(self, data): - async_updating = getattr(self.instance, 'async_updating', None) + async_updating = getattr(self.instance, "async_updating", None) if async_updating: - for field in ['critical', 'enforce_critical', 'high', 'enforce_high', 'medium', 'enforce_medium', 'low', 'enforce_low']: + for field in ["critical", "enforce_critical", "high", "enforce_high", "medium", "enforce_medium", "low", "enforce_low"]: old_days = getattr(self.instance, field, None) new_days = data.get(field, None) if old_days is not None and new_days is not None and (old_days != new_days): - msg = 'Finding SLA expiration dates are currently being calculated. The SLA days for this SLA configuration cannot be changed until the calculation is complete.' + msg = "Finding SLA expiration dates are currently being calculated. The SLA days for this SLA configuration cannot be changed until the calculation is complete." 
raise serializers.ValidationError(msg) return data diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index c0a6f14229..0f78a071eb 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -173,9 +173,9 @@ class DojoOpenApiJsonRenderer(OpenApiJsonRenderer2): def get_indent(self, accepted_media_type, renderer_context): - if accepted_media_type and 'indent' in accepted_media_type: + if accepted_media_type and "indent" in accepted_media_type: return super().get_indent(accepted_media_type, renderer_context) - return renderer_context.get('indent', None) + return renderer_context.get("indent", None) class DojoSpectacularAPIView(SpectacularAPIView): @@ -200,11 +200,14 @@ class PrefetchDojoModelViewSet( # Authorization: authenticated users class RoleViewSet(viewsets.ReadOnlyModelViewSet): serializer_class = serializers.RoleSerializer - queryset = Role.objects.all() + queryset = Role.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "name"] permission_classes = (IsAuthenticated,) + def get_queryset(self): + return Role.objects.all().order_by("id") + # Authorization: object-based @extend_schema_view( @@ -306,6 +309,9 @@ class GlobalRoleViewSet( filterset_fields = ["id", "user", "group", "role"] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Global_Role.objects.all().order_by("id") + # Authorization: object-based class EndPointViewSet( @@ -746,9 +752,11 @@ class CredentialsViewSet( serializer_class = serializers.CredentialSerializer queryset = Cred_User.objects.all() filter_backends = (DjangoFilterBackend,) - permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Cred_User.objects.all().order_by("id") + # Authorization: configuration class CredentialsMappingViewSet( @@ -773,11 +781,14 @@ class FindingTemplatesViewSet( DojoModelViewSet, ): serializer_class = serializers.FindingTemplateSerializer - queryset = Finding_Template.objects.all() + queryset = Finding_Template.objects.none() filter_backends = (DjangoFilterBackend,) filterset_class = ApiTemplateFindingFilter permission_classes = (permissions.UserHasConfigurationPermissionStaff,) + def get_queryset(self): + return Finding_Template.objects.all().order_by("id") + # Authorization: object-based @extend_schema_view( @@ -1490,11 +1501,14 @@ class JiraInstanceViewSet( DojoModelViewSet, ): serializer_class = serializers.JIRAInstanceSerializer - queryset = JIRA_Instance.objects.all() + queryset = JIRA_Instance.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "url"] permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) + def get_queryset(self): + return JIRA_Instance.objects.all().order_by("id") + # Authorization: object-based class JiraIssuesViewSet( @@ -1554,18 +1568,21 @@ class SonarqubeIssueViewSet( DojoModelViewSet, ): serializer_class = serializers.SonarqubeIssueSerializer - queryset = Sonarqube_Issue.objects.all() + queryset = Sonarqube_Issue.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "key", "status", "type"] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Sonarqube_Issue.objects.all().order_by("id") + # Authorization: superuser class SonarqubeIssueTransitionViewSet( DojoModelViewSet, ): serializer_class = serializers.SonarqubeIssueTransitionSerializer - queryset = Sonarqube_Issue_Transition.objects.all() + queryset = 
Sonarqube_Issue_Transition.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "id", @@ -1576,6 +1593,9 @@ class SonarqubeIssueTransitionViewSet( ] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Sonarqube_Issue_Transition.objects.all().order_by("id") + # Authorization: object-based class ProductAPIScanConfigurationViewSet( @@ -1646,9 +1666,6 @@ class DojoMetaViewSet( IsAuthenticated, permissions.UserHasDojoMetaPermission, ) - # swagger_schema = prefetch.get_prefetch_schema( - # ["metadata_list", "metadata_read"], serializers.MetaSerializer - # ).to_schema() def get_queryset(self): return get_authorized_dojo_meta(Permissions.Product_View) @@ -1688,10 +1705,8 @@ class ProductViewSet( dojo_mixins.DeletePreviewModelMixin, ): serializer_class = serializers.ProductSerializer - # TODO: prefetch queryset = Product.objects.none() filter_backends = (DjangoFilterBackend,) - filterset_class = ApiProductFilter permission_classes = ( IsAuthenticated, @@ -2111,10 +2126,13 @@ class DevelopmentEnvironmentViewSet( DojoModelViewSet, ): serializer_class = serializers.DevelopmentEnvironmentSerializer - queryset = Development_Environment.objects.all() + queryset = Development_Environment.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = (IsAuthenticated, DjangoModelPermissions) + def get_queryset(self): + return Development_Environment.objects.all().order_by("id") + # Authorization: object-based class TestsViewSet( @@ -2326,13 +2344,16 @@ class TestTypesViewSet( viewsets.ReadOnlyModelViewSet, ): serializer_class = serializers.TestTypeSerializer - queryset = Test_Type.objects.all() + queryset = Test_Type.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "name", ] permission_classes = (IsAuthenticated, DjangoModelPermissions) + def get_queryset(self): + return Test_Type.objects.all().order_by("id") + @extend_schema_view( list=extend_schema( @@ -2420,7 +2441,7 @@ class ToolConfigurationsViewSet( PrefetchDojoModelViewSet, ): serializer_class = serializers.ToolConfigurationSerializer - queryset = Tool_Configuration.objects.all() + queryset = Tool_Configuration.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "id", @@ -2431,6 +2452,9 @@ class ToolConfigurationsViewSet( ] permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) + def get_queryset(self): + return Tool_Configuration.objects.all().order_by("id") + # Authorization: object-based class ToolProductSettingsViewSet( @@ -2461,29 +2485,35 @@ class ToolTypesViewSet( DojoModelViewSet, ): serializer_class = serializers.ToolTypeSerializer - queryset = Tool_Type.objects.all() + queryset = Tool_Type.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "name", "description"] permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) + def get_queryset(self): + return Tool_Type.objects.all().order_by("id") + # Authorization: authenticated, configuration class RegulationsViewSet( DojoModelViewSet, ): serializer_class = serializers.RegulationSerializer - queryset = Regulation.objects.all() + queryset = Regulation.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "name", "description"] permission_classes = (IsAuthenticated, DjangoModelPermissions) + def get_queryset(self): + return Regulation.objects.all().order_by("id") + # Authorization: configuration class UsersViewSet( DojoModelViewSet, ): serializer_class = 
serializers.UserSerializer - queryset = User.objects.all() + queryset = User.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "id", @@ -2496,6 +2526,9 @@ class UsersViewSet( ] permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) + def get_queryset(self): + return User.objects.all().order_by("id") + def destroy(self, request, *args, **kwargs): instance = self.get_object() if request.user == instance: @@ -2536,11 +2569,14 @@ class UserContactInfoViewSet( PrefetchDojoModelViewSet, ): serializer_class = serializers.UserContactInfoSerializer - queryset = UserContactInfo.objects.all() + queryset = UserContactInfo.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = "__all__" permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return UserContactInfo.objects.all().order_by("id") + # Authorization: authenticated users class UserProfileView(GenericAPIView): @@ -2655,7 +2691,7 @@ class EndpointMetaImporterView( serializer_class = serializers.EndpointMetaImporterSerializer parser_classes = [MultiPartParser] - queryset = Product.objects.all() + queryset = Product.objects.none() permission_classes = ( IsAuthenticated, permissions.UserHasMetaImportPermission, @@ -2673,11 +2709,14 @@ class LanguageTypeViewSet( DojoModelViewSet, ): serializer_class = serializers.LanguageTypeSerializer - queryset = Language_Type.objects.all() + queryset = Language_Type.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "language", "color"] permission_classes = (permissions.UserHasConfigurationPermissionStaff,) + def get_queryset(self): + return Language_Type.objects.all().order_by("id") + # Authorization: object-based @extend_schema_view( @@ -2807,7 +2846,7 @@ class NoteTypeViewSet( DojoModelViewSet, ): serializer_class = serializers.NoteTypeSerializer - queryset = Note_Type.objects.all() + queryset = Note_Type.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "id", @@ -2819,6 +2858,9 @@ class NoteTypeViewSet( ] permission_classes = (permissions.UserHasConfigurationPermissionSuperuser,) + def get_queryset(self): + return Note_Type.objects.all().order_by("id") + # Authorization: superuser class NotesViewSet( @@ -2826,7 +2868,7 @@ class NotesViewSet( viewsets.ReadOnlyModelViewSet, ): serializer_class = serializers.NoteSerializer - queryset = Notes.objects.all() + queryset = Notes.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = [ "id", @@ -2840,6 +2882,9 @@ class NotesViewSet( ] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Notes.objects.all().order_by("id") + def report_generate(request, obj, options): user = Dojo_User.objects.get(id=request.user.id) @@ -3136,7 +3181,10 @@ class SystemSettingsViewSet( permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) serializer_class = serializers.SystemSettingsSerializer - queryset = System_Settings.objects.all() + queryset = System_Settings.objects.none() + + def get_queryset(self): + return System_Settings.objects.all().order_by("id") # Authorization: superuser @@ -3168,11 +3216,14 @@ class NotificationsViewSet( PrefetchDojoModelViewSet, ): serializer_class = serializers.NotificationsSerializer - queryset = Notifications.objects.all() + queryset = Notifications.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "user", "product", "template"] permission_classes = 
(permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Notifications.objects.all().order_by("id") + class EngagementPresetsViewset( PrefetchDojoModelViewSet, @@ -3190,102 +3241,108 @@ def get_queryset(self): return get_authorized_engagement_presets(Permissions.Product_View) -class EngagementCheckListViewset( - PrefetchDojoModelViewSet, -): - serializer_class = serializers.EngagementCheckListSerializer - queryset = Check_List.objects.none() - filter_backends = (DjangoFilterBackend,) - permission_classes = ( - IsAuthenticated, - permissions.UserHasEngagementPermission, - ) - - def get_queryset(self): - return get_authorized_engagement_checklists(Permissions.Product_View) - - class NetworkLocationsViewset( DojoModelViewSet, ): serializer_class = serializers.NetworkLocationsSerializer - queryset = Network_Locations.objects.all() + queryset = Network_Locations.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "location"] permission_classes = (IsAuthenticated, DjangoModelPermissions) + def get_queryset(self): + return Network_Locations.objects.all().order_by("id") + # Authorization: superuser class ConfigurationPermissionViewSet( viewsets.ReadOnlyModelViewSet, ): serializer_class = serializers.ConfigurationPermissionSerializer - queryset = Permission.objects.filter( - codename__in=get_configuration_permissions_codenames(), - ) + queryset = Permission.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = ["id", "name", "codename"] permission_classes = (permissions.IsSuperUser, DjangoModelPermissions) + def get_queryset(self): + return Permission.objects.filter( + codename__in=get_configuration_permissions_codenames(), + ).order_by("id") + class SLAConfigurationViewset( DojoModelViewSet, ): serializer_class = serializers.SLAConfigurationSerializer - queryset = SLA_Configuration.objects.all() + queryset = SLA_Configuration.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = (IsAuthenticated, DjangoModelPermissions) + def get_queryset(self): + return SLA_Configuration.objects.all().order_by("id") + class QuestionnaireQuestionViewSet( viewsets.ReadOnlyModelViewSet, dojo_mixins.QuestionSubClassFieldsMixin, ): serializer_class = serializers.QuestionnaireQuestionSerializer - queryset = Question.objects.all() + queryset = Question.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = ( permissions.UserHasEngagementPermission, DjangoModelPermissions, ) + def get_queryset(self): + return Question.objects.all().order_by("id") + class QuestionnaireAnswerViewSet( viewsets.ReadOnlyModelViewSet, dojo_mixins.AnswerSubClassFieldsMixin, ): serializer_class = serializers.QuestionnaireAnswerSerializer - queryset = Answer.objects.all() + queryset = Answer.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = ( permissions.UserHasEngagementPermission, DjangoModelPermissions, ) + def get_queryset(self): + return Answer.objects.all().order_by("id") + class QuestionnaireGeneralSurveyViewSet( viewsets.ReadOnlyModelViewSet, ): serializer_class = serializers.QuestionnaireGeneralSurveySerializer - queryset = General_Survey.objects.all() + queryset = General_Survey.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = ( permissions.UserHasEngagementPermission, DjangoModelPermissions, ) + def get_queryset(self): + return General_Survey.objects.all().order_by("id") + class QuestionnaireEngagementSurveyViewSet( viewsets.ReadOnlyModelViewSet, ): 
serializer_class = serializers.QuestionnaireEngagementSurveySerializer - queryset = Engagement_Survey.objects.all() + queryset = Engagement_Survey.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = ( permissions.UserHasEngagementPermission, DjangoModelPermissions, ) + def get_queryset(self): + return Engagement_Survey.objects.all().order_by("id") + class QuestionnaireAnsweredSurveyViewSet( prefetch.PrefetchListMixin, @@ -3293,20 +3350,26 @@ class QuestionnaireAnsweredSurveyViewSet( viewsets.ReadOnlyModelViewSet, ): serializer_class = serializers.QuestionnaireAnsweredSurveySerializer - queryset = Answered_Survey.objects.all() + queryset = Answered_Survey.objects.none() filter_backends = (DjangoFilterBackend,) permission_classes = ( permissions.UserHasEngagementPermission, DjangoModelPermissions, ) + def get_queryset(self): + return Answered_Survey.objects.all().order_by("id") + # Authorization: configuration class AnnouncementViewSet( DojoModelViewSet, ): serializer_class = serializers.AnnouncementSerializer - queryset = Announcement.objects.all() + queryset = Announcement.objects.none() filter_backends = (DjangoFilterBackend,) filterset_fields = "__all__" permission_classes = (permissions.UserHasConfigurationPermissionStaff,) + + def get_queryset(self): + return Announcement.objects.all().order_by("id") diff --git a/dojo/apps.py b/dojo/apps.py index 9b3f786408..2411e7e725 100644 --- a/dojo/apps.py +++ b/dojo/apps.py @@ -11,7 +11,7 @@ class DojoAppConfig(AppConfig): - name = 'dojo' + name = "dojo" verbose_name = "Defect Dojo" def ready(self): @@ -25,12 +25,12 @@ def ready(self): # charfields/textfields are the fields that watson indexes by default (but we have to repeat here if we add extra fields) # and watson likes to have tuples instead of lists - watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name')), store=('prod_type__name', )) + watson.register(self.get_model("Product"), fields=get_model_fields_with_extra(self.get_model("Product"), ("id", "prod_type__name")), store=("prod_type__name", )) - watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name')), store=('engagement__product__name', )) # test_type__name? + watson.register(self.get_model("Test"), fields=get_model_fields_with_extra(self.get_model("Test"), ("id", "engagement__product__name")), store=("engagement__product__name", )) # test_type__name? 
- watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key')), - store=('status', 'jira_issue__jira_key', 'test__engagement__product__name', 'severity', 'severity_display', 'latest_note')) + watson.register(self.get_model("Finding"), fields=get_model_fields_with_extra(self.get_model("Finding"), ("id", "url", "unique_id_from_tool", "test__engagement__product__name", "jira_issue__jira_key")), + store=("status", "jira_issue__jira_key", "test__engagement__product__name", "severity", "severity_display", "latest_note")) # some thoughts on Finding fields that are not indexed yet: # CWE can't be indexed as it is an integer @@ -58,16 +58,16 @@ def ready(self): # help_text="Source line number of the attack vector") # sast_source_file_path = models.CharField(null=True, blank=True, max_length=4000, help_text="Source filepath of the attack vector") - watson.register(self.get_model('Finding_Template')) - watson.register(self.get_model('Endpoint'), store=('product__name', )) # add product name also? - watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name')), store=('product__name', )) - watson.register(self.get_model('App_Analysis')) - watson.register(self.get_model('Vulnerability_Id'), store=('finding__test__engagement__product__name', )) + watson.register(self.get_model("Finding_Template")) + watson.register(self.get_model("Endpoint"), store=("product__name", )) # add product name also? + watson.register(self.get_model("Engagement"), fields=get_model_fields_with_extra(self.get_model("Engagement"), ("id", "product__name")), store=("product__name", )) + watson.register(self.get_model("App_Analysis")) + watson.register(self.get_model("Vulnerability_Id"), store=("finding__test__engagement__product__name", )) # YourModel = self.get_model("YourModel") # watson.register(YourModel) - register_check(check_configuration_deduplication, 'dojo') + register_check(check_configuration_deduplication, "dojo") # Load any signals here that will be ready for runtime # Importing the signals file is good enough if using the receiver decorator diff --git a/dojo/celery.py b/dojo/celery.py index b2c742a792..e9ec0417d4 100644 --- a/dojo/celery.py +++ b/dojo/celery.py @@ -8,20 +8,20 @@ logger = logging.getLogger(__name__) # set the default Django settings module for the 'celery' program. -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dojo.settings.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dojo.settings.settings") -app = Celery('dojo') +app = Celery("dojo") # Using a string here means the worker will not have to # pickle the object when using Windows. 
-app.config_from_object('django.conf:settings', namespace='CELERY') +app.config_from_object("django.conf:settings", namespace="CELERY") app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) @app.task(bind=True) def debug_task(self): - print(f'Request: {self.request!r}') + print(f"Request: {self.request!r}") @setup_logging.connect diff --git a/dojo/components/views.py b/dojo/components/views.py index 45cf09727f..93a78787e5 100644 --- a/dojo/components/views.py +++ b/dojo/components/views.py @@ -25,7 +25,7 @@ def components(request): .order_by("component_name") .annotate( component_version=StringAgg( - "component_version", delimiter=separator, distinct=True, default=Value(''), + "component_version", delimiter=separator, distinct=True, default=Value(""), ), ) ) diff --git a/dojo/cred/queries.py b/dojo/cred/queries.py index d86c432fc6..4dd14385a0 100644 --- a/dojo/cred/queries.py +++ b/dojo/cred/queries.py @@ -12,7 +12,7 @@ def get_authorized_cred_mappings(permission, queryset=None): return Cred_Mapping.objects.none() if queryset is None: - cred_mappings = Cred_Mapping.objects.all() + cred_mappings = Cred_Mapping.objects.all().order_by("id") else: cred_mappings = queryset @@ -24,19 +24,19 @@ def get_authorized_cred_mappings(permission, queryset=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) cred_mappings = cred_mappings.annotate( diff --git a/dojo/cred/urls.py b/dojo/cred/urls.py index 65a3d4300f..05f2bfe132 100644 --- a/dojo/cred/urls.py +++ b/dojo/cred/urls.py @@ -3,26 +3,26 @@ from . 
import views urlpatterns = [ - re_path(r'^cred/add', views.new_cred, name='add_cred'), - re_path(r'^cred/(?P<ttid>\d+)/view$', views.view_cred_details, name='view_cred_details'), - re_path(r'^cred/(?P<ttid>\d+)/edit$', views.edit_cred, name='edit_cred'), - re_path(r'^cred/(?P<ttid>\d+)/delete$', views.delete_cred, name='delete_cred'), - re_path(r'^cred$', views.cred, name='cred'), - re_path(r'^product/(?P<pid>\d+)/cred/add$', views.new_cred_product, name='new_cred_product'), - re_path(r'^product/(?P<pid>\d+)/cred/all$', views.all_cred_product, name='all_cred_product'), - re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/edit$', views.edit_cred_product, name='edit_cred_product'), - re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_product, name='view_cred_product'), - re_path(r'^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_product, name='delete_cred_product'), - re_path(r'^engagement/(?P<eid>\d+)/cred/add$', views.new_cred_product_engagement, name='new_cred_product_engagement'), - re_path(r'^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_product_engagement, - name='view_cred_product_engagement'), - re_path(r'^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_engagement, - name='delete_cred_engagement'), - re_path(r'^test/(?P<tid>\d+)/cred/add$', views.new_cred_engagement_test, name='new_cred_engagement_test'), - re_path(r'^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_engagement_test, - name='view_cred_engagement_test'), - re_path(r'^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_test, name='delete_cred_test'), - re_path(r'^finding/(?P<fid>\d+)/cred/add$', views.new_cred_finding, name='new_cred_finding'), - re_path(r'^finding/(?P<fid>\d+)/cred/(?P<ttid>\d+)/view$', views.view_cred_finding, name='view_cred_finding'), - re_path(r'^finding/(?P<fid>\d+)/cred/(?P<ttid>\d+)/delete$', views.delete_cred_finding, name='delete_cred_finding'), + re_path(r"^cred/add", views.new_cred, name="add_cred"), + re_path(r"^cred/(?P<ttid>\d+)/view$", views.view_cred_details, name="view_cred_details"), + re_path(r"^cred/(?P<ttid>\d+)/edit$", views.edit_cred, name="edit_cred"), + re_path(r"^cred/(?P<ttid>\d+)/delete$", views.delete_cred, name="delete_cred"), + re_path(r"^cred$", views.cred, name="cred"), + re_path(r"^product/(?P<pid>\d+)/cred/add$", views.new_cred_product, name="new_cred_product"), + re_path(r"^product/(?P<pid>\d+)/cred/all$", views.all_cred_product, name="all_cred_product"), + re_path(r"^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/edit$", views.edit_cred_product, name="edit_cred_product"), + re_path(r"^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/view$", views.view_cred_product, name="view_cred_product"), + re_path(r"^product/(?P<pid>\d+)/cred/(?P<ttid>\d+)/delete$", views.delete_cred_product, name="delete_cred_product"), + re_path(r"^engagement/(?P<eid>\d+)/cred/add$", views.new_cred_product_engagement, name="new_cred_product_engagement"), + re_path(r"^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/view$", views.view_cred_product_engagement, + name="view_cred_product_engagement"), + re_path(r"^engagement/(?P<eid>\d+)/cred/(?P<ttid>\d+)/delete$", views.delete_cred_engagement, + name="delete_cred_engagement"), + re_path(r"^test/(?P<tid>\d+)/cred/add$", views.new_cred_engagement_test, name="new_cred_engagement_test"), + re_path(r"^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/view$", views.view_cred_engagement_test, + name="view_cred_engagement_test"), + re_path(r"^test/(?P<tid>\d+)/cred/(?P<ttid>\d+)/delete$", views.delete_cred_test, name="delete_cred_test"), + re_path(r"^finding/(?P<fid>\d+)/cred/add$", views.new_cred_finding, name="new_cred_finding"), + re_path(r"^finding/(?P<fid>\d+)/cred/(?P<ttid>\d+)/view$", views.view_cred_finding, name="view_cred_finding"), + 
re_path(r"^finding/(?P\d+)/cred/(?P\d+)/delete$", views.delete_cred_finding, name="delete_cred_finding"), ] diff --git a/dojo/cred/views.py b/dojo/cred/views.py index 53136b4994..09e1cd34e4 100644 --- a/dojo/cred/views.py +++ b/dojo/cred/views.py @@ -18,53 +18,53 @@ @user_is_configuration_authorized(Permissions.Credential_Add) def new_cred(request): - if request.method == 'POST': + if request.method == "POST": tform = CredUserForm(request.POST) if tform.is_valid(): form_copy = tform.save(commit=False) form_copy.password = dojo_crypto_encrypt( - tform.cleaned_data['password']) + tform.cleaned_data["password"]) form_copy.save() messages.add_message( request, messages.SUCCESS, - 'Credential Successfully Created.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('cred')) + "Credential Successfully Created.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("cred")) else: tform = CredUserForm() add_breadcrumb( title="New Credential", top_level=False, request=request) - return render(request, 'dojo/new_cred.html', {'tform': tform}) + return render(request, "dojo/new_cred.html", {"tform": tform}) -@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def all_cred_product(request, pid): prod = get_object_or_404(Product, id=pid) - creds = Cred_Mapping.objects.filter(product=prod).order_by('cred_id__name') + creds = Cred_Mapping.objects.filter(product=prod).order_by("cred_id__name") product_tab = Product_Tab(prod, title="Credentials", tab="settings") - return render(request, 'dojo/view_cred_prod.html', {'product_tab': product_tab, 'creds': creds, 'prod': prod}) + return render(request, "dojo/view_cred_prod.html", {"product_tab": product_tab, "creds": creds, "prod": prod}) -@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid') +@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid") def edit_cred(request, ttid): tool_config = Cred_User.objects.get(pk=ttid) - if request.method == 'POST': + if request.method == "POST": tform = CredUserForm(request.POST, request.FILES, instance=tool_config) if tform.is_valid(): form_copy = tform.save(commit=False) form_copy.password = dojo_crypto_encrypt( - tform.cleaned_data['password']) + tform.cleaned_data["password"]) # handle_uploaded_selenium(request.FILES['selenium_script'], tool_config) form_copy.save() messages.add_message( request, messages.SUCCESS, - 'Credential Successfully Updated.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('cred')) + "Credential Successfully Updated.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("cred")) else: tool_config.password = prepare_for_view(tool_config.password) @@ -74,20 +74,20 @@ def edit_cred(request, ttid): top_level=False, request=request) - return render(request, 'dojo/edit_cred.html', { - 'tform': tform, + return render(request, "dojo/edit_cred.html", { + "tform": tform, }) -@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid') +@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid") def view_cred_details(request, ttid): cred = Cred_User.objects.get(pk=ttid) notes = cred.notes.all() - cred_products = Cred_Mapping.objects.select_related('product').filter( - product_id__isnull=False, cred_id=ttid).order_by('product__name') + cred_products = Cred_Mapping.objects.select_related("product").filter( + product_id__isnull=False, cred_id=ttid).order_by("product__name") cred_products = 
get_authorized_cred_mappings(Permissions.Product_View, cred_products) - if request.method == 'POST': + if request.method == "POST": form = NoteForm(request.POST) if form.is_valid(): @@ -101,38 +101,38 @@ def view_cred_details(request, ttid): messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") else: form = NoteForm() add_breadcrumb(title="View", top_level=False, request=request) - return render(request, 'dojo/view_cred_details.html', { - 'cred': cred, - 'form': form, - 'notes': notes, - 'cred_products': cred_products, + return render(request, "dojo/view_cred_details.html", { + "cred": cred, + "form": form, + "notes": notes, + "cred_products": cred_products, }) @user_is_configuration_authorized(Permissions.Credential_View) def cred(request): - confs = Cred_User.objects.all().order_by('name', 'environment', 'username') + confs = Cred_User.objects.all().order_by("name", "environment", "username") add_breadcrumb(title="Credential Manager", top_level=True, request=request) - return render(request, 'dojo/view_cred.html', { - 'confs': confs, + return render(request, "dojo/view_cred.html", { + "confs": confs, }) -@user_is_authorized(Product, Permissions.Product_View, 'pid') -@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid') +@user_is_authorized(Product, Permissions.Product_View, "pid") +@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid") def view_cred_product(request, pid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) notes = cred.cred_id.notes.all() - if request.method == 'POST': + if request.method == "POST": form = NoteForm(request.POST) if form.is_valid(): @@ -145,8 +145,8 @@ def view_cred_product(request, pid, ttid): messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") else: form = NoteForm() @@ -154,43 +154,43 @@ def view_cred_product(request, pid, ttid): title="Credential Manager", top_level=False, request=request) cred_type = "Product" view_link = reverse( - 'view_cred_product', args=( + "view_cred_product", args=( cred.product.id, cred.id, )) edit_link = reverse( - 'edit_cred_product', args=( + "edit_cred_product", args=( cred.product.id, cred.id, )) delete_link = reverse( - 'delete_cred_product', args=( + "delete_cred_product", args=( cred.product.id, cred.id, )) return render( - request, 'dojo/view_cred_all_details.html', { - 'cred': cred, - 'form': form, - 'notes': notes, - 'cred_type': cred_type, - 'edit_link': edit_link, - 'delete_link': delete_link, - 'view_link': view_link, + request, "dojo/view_cred_all_details.html", { + "cred": cred, + "form": form, + "notes": notes, + "cred_type": cred_type, + "edit_link": edit_link, + "delete_link": delete_link, + "view_link": view_link, }) -@user_is_authorized(Product, Permissions.Engagement_View, 'eid') -@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid') +@user_is_authorized(Product, Permissions.Engagement_View, "eid") +@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid") def view_cred_product_engagement(request, eid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) cred_product = Cred_Mapping.objects.filter( cred_id=cred.cred_id.id, 
product=cred.engagement.product.id).first() notes = cred.cred_id.notes.all() - if request.method == 'POST': + if request.method == "POST": form = NoteForm(request.POST) if form.is_valid(): @@ -203,8 +203,8 @@ def view_cred_product_engagement(request, eid, ttid): messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") else: form = NoteForm() @@ -213,35 +213,35 @@ def view_cred_product_engagement(request, eid, ttid): cred_type = "Engagement" edit_link = "" delete_link = reverse( - 'delete_cred_engagement', args=( + "delete_cred_engagement", args=( eid, cred.id, )) return render( - request, 'dojo/view_cred_all_details.html', { - 'cred': cred, - 'form': form, - 'notes': notes, - 'cred_type': cred_type, - 'edit_link': edit_link, - 'delete_link': delete_link, - 'cred_product': cred_product, + request, "dojo/view_cred_all_details.html", { + "cred": cred, + "form": form, + "notes": notes, + "cred_type": cred_type, + "edit_link": edit_link, + "delete_link": delete_link, + "cred_product": cred_product, }) -@user_is_authorized(Product, Permissions.Test_View, 'tid') -@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid') +@user_is_authorized(Product, Permissions.Test_View, "tid") +@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid") def view_cred_engagement_test(request, tid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) cred_product = Cred_Mapping.objects.filter( cred_id=cred.cred_id.id, product=cred.test.engagement.product.id).first() notes = cred.cred_id.notes.all() - if request.method == 'POST': + if request.method == "POST": form = NoteForm(request.POST) if form.is_valid(): @@ -254,8 +254,8 @@ def view_cred_engagement_test(request, tid, ttid): messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") else: form = NoteForm() @@ -264,35 +264,35 @@ def view_cred_engagement_test(request, tid, ttid): cred_type = "Test" edit_link = None delete_link = reverse( - 'delete_cred_test', args=( + "delete_cred_test", args=( tid, cred.id, )) return render( - request, 'dojo/view_cred_all_details.html', { - 'cred': cred, - 'form': form, - 'notes': notes, - 'cred_type': cred_type, - 'edit_link': edit_link, - 'delete_link': delete_link, - 'cred_product': cred_product, + request, "dojo/view_cred_all_details.html", { + "cred": cred, + "form": form, + "notes": notes, + "cred_type": cred_type, + "edit_link": edit_link, + "delete_link": delete_link, + "cred_product": cred_product, }) -@user_is_authorized(Product, Permissions.Finding_View, 'fid') -@user_is_authorized(Cred_User, Permissions.Credential_View, 'ttid') +@user_is_authorized(Product, Permissions.Finding_View, "fid") +@user_is_authorized(Cred_User, Permissions.Credential_View, "ttid") def view_cred_finding(request, fid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) cred_product = Cred_Mapping.objects.filter( cred_id=cred.cred_id.id, product=cred.finding.test.engagement.product.id).first() notes = cred.cred_id.notes.all() - if request.method == 'POST': + if request.method == "POST": form = NoteForm(request.POST) if form.is_valid(): @@ -305,8 +305,8 @@ def view_cred_finding(request, fid, ttid): 
messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") else: form = NoteForm() @@ -315,139 +315,139 @@ def view_cred_finding(request, fid, ttid): cred_type = "Finding" edit_link = None delete_link = reverse( - 'delete_cred_finding', args=( + "delete_cred_finding", args=( fid, cred.id, )) return render( - request, 'dojo/view_cred_all_details.html', { - 'cred': cred, - 'form': form, - 'notes': notes, - 'cred_type': cred_type, - 'edit_link': edit_link, - 'delete_link': delete_link, - 'cred_product': cred_product, + request, "dojo/view_cred_all_details.html", { + "cred": cred, + "form": form, + "notes": notes, + "cred_type": cred_type, + "edit_link": edit_link, + "delete_link": delete_link, + "cred_product": cred_product, }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') -@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") +@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid") def edit_cred_product(request, pid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) prod = get_object_or_404(Product, pk=pid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingFormProd(request.POST, instance=cred) if tform.is_valid(): tform.save() messages.add_message( request, messages.SUCCESS, - 'Credential Successfully Updated.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('all_cred_product', args=(pid, ))) + "Credential Successfully Updated.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("all_cred_product", args=(pid, ))) else: tform = CredMappingFormProd(instance=cred) product_tab = Product_Tab(prod, title="Edit Product Credential", tab="settings") - return render(request, 'dojo/edit_cred_all.html', { - 'tform': tform, - 'product_tab': product_tab, - 'cred_type': "Product", + return render(request, "dojo/edit_cred_all.html", { + "tform": tform, + "product_tab": product_tab, + "cred_type": "Product", }) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') -@user_is_authorized(Cred_User, Permissions.Credential_Edit, 'ttid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") +@user_is_authorized(Cred_User, Permissions.Credential_Edit, "ttid") def edit_cred_product_engagement(request, eid, ttid): cred = get_object_or_404( - Cred_Mapping.objects.select_related('cred_id'), id=ttid) + Cred_Mapping.objects.select_related("cred_id"), id=ttid) eng = get_object_or_404(Engagement, pk=eid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingForm(request.POST, instance=cred) if tform.is_valid(): tform.save() messages.add_message( request, messages.SUCCESS, - 'Credential Successfully Updated.', - extra_tags='alert-success') + "Credential Successfully Updated.", + extra_tags="alert-success") return HttpResponseRedirect( - reverse('view_engagement', args=(eid, ))) + reverse("view_engagement", args=(eid, ))) else: tform = CredMappingFormProd(instance=cred) tform.fields["cred_id"].queryset = Cred_Mapping.objects.filter( - product=eng.product).order_by('cred_id') + product=eng.product).order_by("cred_id") add_breadcrumb( title="Edit Credential Configuration", top_level=False, request=request) - return render(request, 'dojo/edit_cred_all.html', { - 'tform': tform, - 'cred_type': 
"Engagement", + return render(request, "dojo/edit_cred_all.html", { + "tform": tform, + "cred_type": "Engagement", }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def new_cred_product(request, pid): prod = get_object_or_404(Product, pk=pid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingFormProd(request.POST) if tform.is_valid(): # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( - cred_id=tform.cleaned_data['cred_id'].id, product=pid).first() + cred_id=tform.cleaned_data["cred_id"].id, product=pid).first() message = "Credential already associated." - status_tag = 'alert-danger' + status_tag = "alert-danger" if cred_user is None: prod = Product.objects.get(id=pid) new_f = tform.save(commit=False) new_f.product = prod new_f.save() - message = 'Credential Successfully Updated.' - status_tag = 'alert-success' + message = "Credential Successfully Updated." + status_tag = "alert-success" messages.add_message( request, messages.SUCCESS, message, extra_tags=status_tag) - return HttpResponseRedirect(reverse('all_cred_product', args=(pid, ))) + return HttpResponseRedirect(reverse("all_cred_product", args=(pid, ))) else: tform = CredMappingFormProd() product_tab = Product_Tab(prod, title="Add Credential Configuration", tab="settings") - return render(request, 'dojo/new_cred_product.html', { - 'tform': tform, - 'pid': pid, - 'product_tab': product_tab, + return render(request, "dojo/new_cred_product.html", { + "tform": tform, + "pid": pid, + "product_tab": product_tab, }) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def new_cred_product_engagement(request, eid): eng = get_object_or_404(Engagement, pk=eid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingForm(request.POST) tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - product=eng.product).order_by('cred_id') - if tform.is_valid() and tform.cleaned_data['cred_user']: + product=eng.product).order_by("cred_id") + if tform.is_valid() and tform.cleaned_data["cred_user"]: # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( - pk=tform.cleaned_data['cred_user'].id, - product=eng.product.id).order_by('cred_id').first() + pk=tform.cleaned_data["cred_user"].id, + product=eng.product.id).order_by("cred_id").first() # search for cred_user and engagement id cred_lookup = Cred_Mapping.objects.filter( cred_id=cred_user.cred_id, engagement=eng.id) message = "Credential already associated." - status_tag = 'alert-danger' + status_tag = "alert-danger" if not cred_user: message = "Credential must first be associated with this product." @@ -457,48 +457,48 @@ def new_cred_product_engagement(request, eid): new_f.engagement = eng new_f.cred_id = cred_user.cred_id new_f.save() - message = 'Credential Successfully Updated.' - status_tag = 'alert-success' + message = "Credential Successfully Updated." 
+ status_tag = "alert-success" messages.add_message( request, messages.SUCCESS, message, extra_tags=status_tag) return HttpResponseRedirect( - reverse('view_engagement', args=(eid, ))) + reverse("view_engagement", args=(eid, ))) else: tform = CredMappingForm() tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - product=eng.product).order_by('cred_id') + product=eng.product).order_by("cred_id") add_breadcrumb( title="Add Credential Configuration", top_level=False, request=request) return render( - request, 'dojo/new_cred_mapping.html', { - 'tform': tform, - 'eid': eid, - 'formlink': reverse('new_cred_product_engagement', args=(eid, )), + request, "dojo/new_cred_mapping.html", { + "tform": tform, + "eid": eid, + "formlink": reverse("new_cred_product_engagement", args=(eid, )), }) -@user_is_authorized(Test, Permissions.Test_Edit, 'tid') +@user_is_authorized(Test, Permissions.Test_Edit, "tid") def new_cred_engagement_test(request, tid): test = get_object_or_404(Test, pk=tid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingForm(request.POST) tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=test.engagement).order_by('cred_id') - if tform.is_valid() and tform.cleaned_data['cred_user']: + engagement=test.engagement).order_by("cred_id") + if tform.is_valid() and tform.cleaned_data["cred_user"]: # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( - pk=tform.cleaned_data['cred_user'].id, + pk=tform.cleaned_data["cred_user"].id, engagement=test.engagement.id).first() # search for cred_user and test id cred_lookup = Cred_Mapping.objects.filter( cred_id=cred_user.cred_id, test=test.id) message = "Credential already associated." - status_tag = 'alert-danger' + status_tag = "alert-danger" if not cred_user: message = "Credential must first be associated with this product." @@ -508,48 +508,48 @@ def new_cred_engagement_test(request, tid): new_f.test = test new_f.cred_id = cred_user.cred_id new_f.save() - message = 'Credential Successfully Updated.' - status_tag = 'alert-success' + message = "Credential Successfully Updated." 
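
# Note that every branch above flashes messages.SUCCESS even when status_tag is
# "alert-danger", so the Bootstrap tag and the Django message level can
# disagree. A hedged sketch of keeping the two in sync (an illustrative helper,
# not something this patch introduces):

from django.contrib import messages

TAG_TO_LEVEL = {
    "alert-success": messages.SUCCESS,
    "alert-warning": messages.WARNING,
    "alert-danger": messages.ERROR,
}

def flash(request, message, status_tag):
    # Fall back to INFO rather than mislabeling an unknown tag as a success.
    messages.add_message(request, TAG_TO_LEVEL.get(status_tag, messages.INFO),
                         message, extra_tags=status_tag)
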
+ status_tag = "alert-success" messages.add_message( request, messages.SUCCESS, message, extra_tags=status_tag) - return HttpResponseRedirect(reverse('view_test', args=(tid, ))) + return HttpResponseRedirect(reverse("view_test", args=(tid, ))) else: tform = CredMappingForm() tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=test.engagement).order_by('cred_id') + engagement=test.engagement).order_by("cred_id") add_breadcrumb( title="Add Credential Configuration", top_level=False, request=request) return render( - request, 'dojo/new_cred_mapping.html', { - 'tform': tform, - 'eid': tid, - 'formlink': reverse('new_cred_engagement_test', args=(tid, )), + request, "dojo/new_cred_mapping.html", { + "tform": tform, + "eid": tid, + "formlink": reverse("new_cred_engagement_test", args=(tid, )), }) -@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid') +@user_is_authorized(Finding, Permissions.Finding_Edit, "fid") def new_cred_finding(request, fid): finding = get_object_or_404(Finding, pk=fid) - if request.method == 'POST': + if request.method == "POST": tform = CredMappingForm(request.POST) tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=finding.test.engagement).order_by('cred_id') + engagement=finding.test.engagement).order_by("cred_id") - if tform.is_valid() and tform.cleaned_data['cred_user']: + if tform.is_valid() and tform.cleaned_data["cred_user"]: # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( - pk=tform.cleaned_data['cred_user'].id, + pk=tform.cleaned_data["cred_user"].id, engagement=finding.test.engagement.id).first() # search for cred_user and test id cred_lookup = Cred_Mapping.objects.filter( cred_id=cred_user.cred_id, finding=finding.id) message = "Credential already associated." - status_tag = 'alert-danger' + status_tag = "alert-danger" if not cred_user: message = "Credential must first be associated with this product." @@ -559,36 +559,36 @@ def new_cred_finding(request, fid): new_f.finding = finding new_f.cred_id = cred_user.cred_id new_f.save() - message = 'Credential Successfully Updated.' - status_tag = 'alert-success' + message = "Credential Successfully Updated." 
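
# new_cred_finding above runs behind two stacked @user_is_authorized
# decorators: one resolves the Finding behind "fid", the other the Cred_User
# behind "ttid", and each checks its own permission before the view body runs.
# An illustrative reconstruction of that shape (a sketch of the calling
# convention only; it is not DefectDojo's actual decorator, and has_perm
# stands in for the project's role lookup):

from functools import wraps

from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404

def user_is_authorized_sketch(model, permission, lookup):
    def decorator(view):
        @wraps(view)
        def wrapped(request, *args, **kwargs):
            obj = get_object_or_404(model, pk=kwargs[lookup])
            if not request.user.has_perm(permission, obj):
                raise PermissionDenied
            return view(request, *args, **kwargs)
        return wrapped
    return decorator
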
+ status_tag = "alert-success" messages.add_message( request, messages.SUCCESS, message, extra_tags=status_tag) - return HttpResponseRedirect(reverse('view_finding', args=(fid, ))) + return HttpResponseRedirect(reverse("view_finding", args=(fid, ))) else: tform = CredMappingForm() tform.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=finding.test.engagement).order_by('cred_id') + engagement=finding.test.engagement).order_by("cred_id") add_breadcrumb( title="Add Credential Configuration", top_level=False, request=request) return render( - request, 'dojo/new_cred_mapping.html', { - 'tform': tform, - 'eid': fid, - 'formlink': reverse('new_cred_finding', args=(fid, )), + request, "dojo/new_cred_mapping.html", { + "tform": tform, + "eid": fid, + "formlink": reverse("new_cred_finding", args=(fid, )), }) -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_controller(request, destination_url, id, ttid): cred = None try: cred = Cred_Mapping.objects.get(pk=ttid) except: pass - if request.method == 'POST': + if request.method == "POST": tform = CredMappingForm(request.POST, instance=cred) message = "" status_tag = "" @@ -633,7 +633,7 @@ def delete_cred_controller(request, destination_url, id, ttid): else: cred.delete() else: - status_tag = 'alert-danger' + status_tag = "alert-danger" messages.add_message( request, messages.SUCCESS, message, extra_tags=status_tag) @@ -661,36 +661,36 @@ def delete_cred_controller(request, destination_url, id, ttid): finding = get_object_or_404(Finding, id=id) product = finding.test.engagement.product product_tab = Product_Tab(product, title="Delete Credential Mapping", tab="settings") - return render(request, 'dojo/delete_cred_all.html', { - 'tform': tform, - 'product_tab': product_tab, + return render(request, "dojo/delete_cred_all.html", { + "tform": tform, + "product_tab": product_tab, }) -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred(request, ttid): return delete_cred_controller(request, "cred", 0, ttid) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") +@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_product(request, pid, ttid): return delete_cred_controller(request, "all_cred_product", pid, ttid) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") +@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_engagement(request, eid, ttid): return delete_cred_controller(request, "view_engagement", eid, ttid) -@user_is_authorized(Test, Permissions.Test_Edit, 'tid') -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Test, Permissions.Test_Edit, "tid") +@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_test(request, tid, ttid): return delete_cred_controller(request, "view_test", tid, ttid) -@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid') -@user_is_authorized(Cred_User, Permissions.Credential_Delete, 'ttid') +@user_is_authorized(Finding, Permissions.Finding_Edit, "fid") 
+@user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_finding(request, fid, ttid): return delete_cred_controller(request, "view_finding", fid, ttid) diff --git a/dojo/decorators.py b/dojo/decorators.py index c919a2995b..e48ba31c56 100644 --- a/dojo/decorators.py +++ b/dojo/decorators.py @@ -16,19 +16,19 @@ def we_want_async(*args, func=None, **kwargs): from dojo.models import Dojo_User from dojo.utils import get_current_user - sync = kwargs.get('sync', False) + sync = kwargs.get("sync", False) if sync: - logger.debug('dojo_async_task %s: running task in the foreground as sync=True has been found as kwarg', func) + logger.debug("dojo_async_task %s: running task in the foreground as sync=True has been found as kwarg", func) return False - user = kwargs.get('async_user', get_current_user()) - logger.debug('user: %s', user) + user = kwargs.get("async_user", get_current_user()) + logger.debug("user: %s", user) if Dojo_User.wants_block_execution(user): - logger.debug('dojo_async_task %s: running task in the foreground as block_execution is set to True for %s', func, user) + logger.debug("dojo_async_task %s: running task in the foreground as block_execution is set to True for %s", func, user) return False - logger.debug('dojo_async_task %s: no current user, running task in the background', func) + logger.debug("dojo_async_task %s: no current user, running task in the background", func) return True @@ -39,7 +39,7 @@ def dojo_async_task(func): def __wrapper__(*args, **kwargs): from dojo.utils import get_current_user user = get_current_user() - kwargs['async_user'] = user + kwargs["async_user"] = user countdown = kwargs.pop("countdown", 0) if we_want_async(*args, func=func, **kwargs): return func.apply_async(args=args, kwargs=kwargs, countdown=countdown) @@ -66,7 +66,7 @@ def __wrapper__(*args, **kwargs): if model_or_id: if isinstance(model_or_id, models.Model) and we_want_async(*args, func=func, **kwargs): - logger.debug('converting model_or_id to id: %s', model_or_id) + logger.debug("converting model_or_id to id: %s", model_or_id) id = model_or_id.id args = list(args) args[parameter] = id @@ -96,25 +96,25 @@ def __wrapper__(*args, **kwargs): if not settings.CELERY_PASS_MODEL_BY_ID: return func(*args, **kwargs) - logger.debug('args:' + str(args)) - logger.debug('kwargs:' + str(kwargs)) + logger.debug("args:" + str(args)) + logger.debug("kwargs:" + str(kwargs)) - logger.debug('checking if we need to convert id to model: %s for parameter: %s', model.__name__, parameter) + logger.debug("checking if we need to convert id to model: %s for parameter: %s", model.__name__, parameter) model_or_id = get_parameter_froms_args_kwargs(args, kwargs, parameter) if model_or_id: if not isinstance(model_or_id, models.Model) and we_want_async(*args, func=func, **kwargs): - logger.debug('instantiating model_or_id: %s for model: %s', model_or_id, model) + logger.debug("instantiating model_or_id: %s for model: %s", model_or_id, model) try: instance = model.objects.get(id=model_or_id) except model.DoesNotExist: - logger.debug('error instantiating model_or_id: %s for model: %s: DoesNotExist', model_or_id, model) + logger.debug("error instantiating model_or_id: %s for model: %s: DoesNotExist", model_or_id, model) instance = None args = list(args) args[parameter] = instance else: - logger.debug('model_or_id already a model instance %s for model: %s', model_or_id, model) + logger.debug("model_or_id already a model instance %s for model: %s", model_or_id, model) return func(*args, **kwargs) @@ 
-133,16 +133,16 @@ def get_parameter_froms_args_kwargs(args, kwargs, parameter): # Lookup value came as a positional argument args = list(args) if parameter >= len(args): - raise ValueError('parameter index invalid: ' + str(parameter)) + raise ValueError("parameter index invalid: " + str(parameter)) model_or_id = args[parameter] else: # Lookup value was passed as keyword argument model_or_id = kwargs.get(parameter, None) - logger.debug('model_or_id: %s', model_or_id) + logger.debug("model_or_id: %s", model_or_id) if not model_or_id: - logger.error('unable to get parameter: ' + parameter) + logger.error("unable to get parameter: " + parameter) return model_or_id @@ -155,7 +155,7 @@ def wrapper(self, *args, **kwargs): except Exception: print("exception occured at url:", self.driver.current_url) print("page source:", self.driver.page_source) - f = open("/tmp/selenium_page_source.html", "w", encoding='utf-8') + f = open("/tmp/selenium_page_source.html", "w", encoding="utf-8") f.writelines(self.driver.page_source) # time.sleep(30) raise @@ -163,21 +163,21 @@ def wrapper(self, *args, **kwargs): return wrapper -def dojo_ratelimit(key='ip', rate=None, method=UNSAFE, block=False): +def dojo_ratelimit(key="ip", rate=None, method=UNSAFE, block=False): def decorator(fn): @wraps(fn) def _wrapped(request, *args, **kw): - _block = getattr(settings, 'RATE_LIMITER_BLOCK', block) - _rate = getattr(settings, 'RATE_LIMITER_RATE', rate) - _lockout = getattr(settings, 'RATE_LIMITER_ACCOUNT_LOCKOUT', False) - old_limited = getattr(request, 'limited', False) + _block = getattr(settings, "RATE_LIMITER_BLOCK", block) + _rate = getattr(settings, "RATE_LIMITER_RATE", rate) + _lockout = getattr(settings, "RATE_LIMITER_ACCOUNT_LOCKOUT", False) + old_limited = getattr(request, "limited", False) ratelimited = is_ratelimited(request=request, fn=fn, key=key, rate=_rate, method=method, increment=True) request.limited = ratelimited or old_limited if ratelimited and _block: if _lockout: - username = request.POST.get('username', None) + username = request.POST.get("username", None) if username: dojo_user = Dojo_User.objects.filter(username=username).first() if dojo_user: diff --git a/dojo/development_environment/urls.py b/dojo/development_environment/urls.py index a61b507325..1c1c60393d 100644 --- a/dojo/development_environment/urls.py +++ b/dojo/development_environment/urls.py @@ -4,9 +4,9 @@ urlpatterns = [ # dev envs - re_path(r'^dev_env$', views.dev_env, name='dev_env'), - re_path(r'^dev_env/add$', views.add_dev_env, - name='add_dev_env'), - re_path(r'^dev_env/(?P\d+)/edit$', - views.edit_dev_env, name='edit_dev_env'), + re_path(r"^dev_env$", views.dev_env, name="dev_env"), + re_path(r"^dev_env/add$", views.add_dev_env, + name="add_dev_env"), + re_path(r"^dev_env/(?P\d+)/edit$", + views.edit_dev_env, name="edit_dev_env"), ] diff --git a/dojo/development_environment/views.py b/dojo/development_environment/views.py index 1bf998fadf..4a0b3b20df 100644 --- a/dojo/development_environment/views.py +++ b/dojo/development_environment/views.py @@ -20,59 +20,59 @@ @login_required def dev_env(request): - initial_queryset = Development_Environment.objects.all().order_by('name') + initial_queryset = Development_Environment.objects.all().order_by("name") name_words = [de.name for de in initial_queryset] devs = DevelopmentEnvironmentFilter(request.GET, queryset=initial_queryset) dev_page = get_page_items(request, devs.qs, 25) add_breadcrumb(title="Environment List", top_level=True, request=request) - return render(request, 
'dojo/dev_env.html', { - 'name': 'Environment', - 'metric': False, - 'user': request.user, - 'devs': dev_page, - 'dts': devs, - 'name_words': name_words}) + return render(request, "dojo/dev_env.html", { + "name": "Environment", + "metric": False, + "user": request.user, + "devs": dev_page, + "dts": devs, + "name_words": name_words}) -@user_is_configuration_authorized('dojo.add_development_environment') +@user_is_configuration_authorized("dojo.add_development_environment") def add_dev_env(request): form = Development_EnvironmentForm() - if request.method == 'POST': + if request.method == "POST": form = Development_EnvironmentForm(request.POST) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - 'Environment added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('dev_env')) + "Environment added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("dev_env")) add_breadcrumb(title="Add Environment", top_level=False, request=request) - return render(request, 'dojo/new_dev_env.html', { - 'name': 'Add Environment', - 'metric': False, - 'user': request.user, - 'form': form, + return render(request, "dojo/new_dev_env.html", { + "name": "Add Environment", + "metric": False, + "user": request.user, + "form": form, }) -@user_is_configuration_authorized('dojo.change_development_environment') +@user_is_configuration_authorized("dojo.change_development_environment") def edit_dev_env(request, deid): de = get_object_or_404(Development_Environment, pk=deid) form1 = Development_EnvironmentForm(instance=de) form2 = Delete_Dev_EnvironmentForm(instance=de) - if request.method == 'POST' and request.POST.get('edit_dev_env'): + if request.method == "POST" and request.POST.get("edit_dev_env"): form1 = Development_EnvironmentForm(request.POST, instance=de) if form1.is_valid(): de = form1.save() messages.add_message( request, messages.SUCCESS, - 'Environment updated successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('dev_env')) - if request.method == 'POST' and request.POST.get('delete_dev_env'): - user_has_configuration_permission_or_403(request.user, 'dojo.delete_development_environment') + "Environment updated successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("dev_env")) + if request.method == "POST" and request.POST.get("delete_dev_env"): + user_has_configuration_permission_or_403(request.user, "dojo.delete_development_environment") form2 = Delete_Dev_EnvironmentForm(request.POST, instance=de) if form2.is_valid(): try: @@ -80,19 +80,19 @@ def edit_dev_env(request, deid): messages.add_message( request, messages.SUCCESS, - 'Environment deleted successfully.', - extra_tags='alert-success') + "Environment deleted successfully.", + extra_tags="alert-success") except RestrictedError as err: messages.add_message(request, messages.WARNING, - f'Environment cannot be deleted: {err}', - extra_tags='alert-warning') - return HttpResponseRedirect(reverse('dev_env')) + f"Environment cannot be deleted: {err}", + extra_tags="alert-warning") + return HttpResponseRedirect(reverse("dev_env")) add_breadcrumb(title="Edit Environment", top_level=False, request=request) - return render(request, 'dojo/edit_dev_env.html', { - 'name': 'Edit Environment', - 'metric': False, - 'user': request.user, - 'form1': form1, - 'de': de}) + return render(request, "dojo/edit_dev_env.html", { + "name": "Edit Environment", + "metric": False, + "user": request.user, + "form1": form1, + "de": 
de}) diff --git a/dojo/endpoint/queries.py b/dojo/endpoint/queries.py index e9facac14f..581feefc13 100644 --- a/dojo/endpoint/queries.py +++ b/dojo/endpoint/queries.py @@ -21,7 +21,7 @@ def get_authorized_endpoints(permission, queryset=None, user=None): return Endpoint.objects.none() if queryset is None: - endpoints = Endpoint.objects.all() + endpoints = Endpoint.objects.all().order_by("id") else: endpoints = queryset @@ -33,19 +33,19 @@ def get_authorized_endpoints(permission, queryset=None, user=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) endpoints = endpoints.annotate( @@ -69,7 +69,7 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None): return Endpoint_Status.objects.none() if queryset is None: - endpoint_status = Endpoint_Status.objects.all() + endpoint_status = Endpoint_Status.objects.all().order_by("id") else: endpoint_status = queryset @@ -81,19 +81,19 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('endpoint__product__prod_type_id'), + product_type=OuterRef("endpoint__product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('endpoint__product_id'), + product=OuterRef("endpoint__product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('endpoint__product__prod_type_id'), + product_type=OuterRef("endpoint__product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('endpoint__product_id'), + product=OuterRef("endpoint__product_id"), group__users=user, role__in=roles) endpoint_status = endpoint_status.annotate( diff --git a/dojo/endpoint/signals.py b/dojo/endpoint/signals.py index 4c18d03d91..f96510df78 100644 --- a/dojo/endpoint/signals.py +++ b/dojo/endpoint/signals.py @@ -16,15 +16,15 @@ def endpoint_post_delete(sender, instance, using, origin, **kwargs): if settings.ENABLE_AUDITLOG: le = LogEntry.objects.get( action=LogEntry.Action.DELETE, - content_type=ContentType.objects.get(app_label='dojo', model='endpoint'), + content_type=ContentType.objects.get(app_label="dojo", model="endpoint"), object_id=instance.id, ) description = _('The endpoint "%(name)s" was deleted by %(user)s') % { - 'name': str(instance), 'user': le.actor} + "name": str(instance), "user": le.actor} else: - description = _('The endpoint "%(name)s" was deleted') % {'name': str(instance)} - create_notification(event='endpoint_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing - title=_('Deletion of %(name)s') % {'name': 
str(instance)}, + description = _('The endpoint "%(name)s" was deleted') % {"name": str(instance)} + create_notification(event="endpoint_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing + title=_("Deletion of %(name)s") % {"name": str(instance)}, description=description, - url=reverse('endpoint'), + url=reverse("endpoint"), icon="exclamation-triangle") diff --git a/dojo/endpoint/urls.py b/dojo/endpoint/urls.py index 56afa2411b..290f32961a 100644 --- a/dojo/endpoint/urls.py +++ b/dojo/endpoint/urls.py @@ -4,38 +4,38 @@ urlpatterns = [ # endpoints - re_path(r'^endpoint$', views.all_endpoints, - name='endpoint'), - re_path(r'^endpoint/host$', views.all_endpoint_hosts, - name='endpoint_host'), - re_path(r'^endpoint/vulnerable$', views.vulnerable_endpoints, - name='vulnerable_endpoints'), - re_path(r'^endpoint/host/vulnerable$', views.vulnerable_endpoint_hosts, - name='vulnerable_endpoint_hosts'), - re_path(r'^endpoint/(?P\d+)$', views.view_endpoint, - name='view_endpoint'), - re_path(r'^endpoint/host/(?P\d+)$', views.view_endpoint_host, - name='view_endpoint_host'), - re_path(r'^endpoint/(?P\d+)/edit$', views.edit_endpoint, - name='edit_endpoint'), - re_path(r'^endpoints/(?P\d+)/add$', views.add_endpoint, - name='add_endpoint'), - re_path(r'^endpoint/(?P\d+)/delete$', views.delete_endpoint, - name='delete_endpoint'), - re_path(r'^endpoints/add$', views.add_product_endpoint, - name='add_product_endpoint'), - re_path(r'^endpoint/(?P\d+)/add_meta_data$', views.add_meta_data, - name='add_endpoint_meta_data'), - re_path(r'^endpoint/(?P\d+)/edit_meta_data$', views.edit_meta_data, - name='edit_endpoint_meta_data'), - re_path(r'^endpoint/bulk$', views.endpoint_bulk_update_all, - name='endpoints_bulk_all'), - re_path(r'^product/(?P\d+)/endpoint/bulk_product$', views.endpoint_bulk_update_all, - name='endpoints_bulk_update_all_product'), - re_path(r'^endpoint/(?P\d+)/bulk_status$', views.endpoint_status_bulk_update, - name='endpoints_status_bulk'), - re_path(r'^endpoint/migrate$', views.migrate_endpoints_view, - name='endpoint_migrate'), - re_path(r'^endpoint/(?P\d+)/import_endpoint_meta$', views.import_endpoint_meta, - name='import_endpoint_meta'), + re_path(r"^endpoint$", views.all_endpoints, + name="endpoint"), + re_path(r"^endpoint/host$", views.all_endpoint_hosts, + name="endpoint_host"), + re_path(r"^endpoint/vulnerable$", views.vulnerable_endpoints, + name="vulnerable_endpoints"), + re_path(r"^endpoint/host/vulnerable$", views.vulnerable_endpoint_hosts, + name="vulnerable_endpoint_hosts"), + re_path(r"^endpoint/(?P\d+)$", views.view_endpoint, + name="view_endpoint"), + re_path(r"^endpoint/host/(?P\d+)$", views.view_endpoint_host, + name="view_endpoint_host"), + re_path(r"^endpoint/(?P\d+)/edit$", views.edit_endpoint, + name="edit_endpoint"), + re_path(r"^endpoints/(?P\d+)/add$", views.add_endpoint, + name="add_endpoint"), + re_path(r"^endpoint/(?P\d+)/delete$", views.delete_endpoint, + name="delete_endpoint"), + re_path(r"^endpoints/add$", views.add_product_endpoint, + name="add_product_endpoint"), + re_path(r"^endpoint/(?P\d+)/add_meta_data$", views.add_meta_data, + name="add_endpoint_meta_data"), + re_path(r"^endpoint/(?P\d+)/edit_meta_data$", views.edit_meta_data, + name="edit_endpoint_meta_data"), + re_path(r"^endpoint/bulk$", views.endpoint_bulk_update_all, + name="endpoints_bulk_all"), + re_path(r"^product/(?P\d+)/endpoint/bulk_product$", views.endpoint_bulk_update_all, + name="endpoints_bulk_update_all_product"), + 
re_path(r"^endpoint/(?P\d+)/bulk_status$", views.endpoint_status_bulk_update, + name="endpoints_status_bulk"), + re_path(r"^endpoint/migrate$", views.migrate_endpoints_view, + name="endpoint_migrate"), + re_path(r"^endpoint/(?P\d+)/import_endpoint_meta$", views.import_endpoint_meta, + name="import_endpoint_meta"), ] diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py index 9b7733c553..0715e8b43d 100644 --- a/dojo/endpoint/utils.py +++ b/dojo/endpoint/utils.py @@ -20,53 +20,53 @@ def endpoint_filter(**kwargs): qs = Endpoint.objects.all() - if kwargs.get('protocol'): - qs = qs.filter(protocol__iexact=kwargs['protocol']) + if kwargs.get("protocol"): + qs = qs.filter(protocol__iexact=kwargs["protocol"]) else: qs = qs.filter(protocol__isnull=True) - if kwargs.get('userinfo'): - qs = qs.filter(userinfo__exact=kwargs['userinfo']) + if kwargs.get("userinfo"): + qs = qs.filter(userinfo__exact=kwargs["userinfo"]) else: qs = qs.filter(userinfo__isnull=True) - if kwargs.get('host'): - qs = qs.filter(host__iexact=kwargs['host']) + if kwargs.get("host"): + qs = qs.filter(host__iexact=kwargs["host"]) else: qs = qs.filter(host__isnull=True) - if kwargs.get('port'): - if (kwargs.get('protocol')) and \ - (kwargs['protocol'].lower() in SCHEME_PORT_MAP) and \ - (SCHEME_PORT_MAP[kwargs['protocol'].lower()] == kwargs['port']): - qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs['protocol'].lower()])) + if kwargs.get("port"): + if (kwargs.get("protocol")) and \ + (kwargs["protocol"].lower() in SCHEME_PORT_MAP) and \ + (SCHEME_PORT_MAP[kwargs["protocol"].lower()] == kwargs["port"]): + qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs["protocol"].lower()])) else: - qs = qs.filter(port__exact=kwargs['port']) + qs = qs.filter(port__exact=kwargs["port"]) else: - if (kwargs.get('protocol')) and (kwargs['protocol'].lower() in SCHEME_PORT_MAP): - qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs['protocol'].lower()])) + if (kwargs.get("protocol")) and (kwargs["protocol"].lower() in SCHEME_PORT_MAP): + qs = qs.filter(Q(port__isnull=True) | Q(port__exact=SCHEME_PORT_MAP[kwargs["protocol"].lower()])) else: qs = qs.filter(port__isnull=True) - if kwargs.get('path'): - qs = qs.filter(path__exact=kwargs['path']) + if kwargs.get("path"): + qs = qs.filter(path__exact=kwargs["path"]) else: qs = qs.filter(path__isnull=True) - if kwargs.get('query'): - qs = qs.filter(query__exact=kwargs['query']) + if kwargs.get("query"): + qs = qs.filter(query__exact=kwargs["query"]) else: qs = qs.filter(query__isnull=True) - if kwargs.get('fragment'): - qs = qs.filter(fragment__exact=kwargs['fragment']) + if kwargs.get("fragment"): + qs = qs.filter(fragment__exact=kwargs["fragment"]) else: qs = qs.filter(fragment__isnull=True) - if kwargs.get('product'): - qs = qs.filter(product__exact=kwargs['product']) - elif kwargs.get('product_id'): - qs = qs.filter(product_id__exact=kwargs['product_id']) + if kwargs.get("product"): + qs = qs.filter(product__exact=kwargs["product"]) + elif kwargs.get("product_id"): + qs = qs.filter(product_id__exact=kwargs["product_id"]) else: qs = qs.filter(product__isnull=True) @@ -92,38 +92,38 @@ def endpoint_get_or_create(**kwargs): def clean_hosts_run(apps, change): def err_log(message, html_log, endpoint_html_log, endpoint): - error_suffix = 'It is not possible to migrate it. Delete or edit this endpoint.' - html_log.append({**endpoint_html_log, 'message': message}) - logger.error(f'Endpoint (id={endpoint.pk}) {message}. 
{error_suffix}') + error_suffix = "It is not possible to migrate it. Delete or edit this endpoint." + html_log.append({**endpoint_html_log, "message": message}) + logger.error(f"Endpoint (id={endpoint.pk}) {message}. {error_suffix}") broken_endpoints.add(endpoint.pk) html_log = [] broken_endpoints = set() - Endpoint_model = apps.get_model('dojo', 'Endpoint') - Endpoint_Status_model = apps.get_model('dojo', 'Endpoint_Status') - Product_model = apps.get_model('dojo', 'Product') - for endpoint in Endpoint_model.objects.order_by('id'): + Endpoint_model = apps.get_model("dojo", "Endpoint") + Endpoint_Status_model = apps.get_model("dojo", "Endpoint_Status") + Product_model = apps.get_model("dojo", "Product") + for endpoint in Endpoint_model.objects.order_by("id"): endpoint_html_log = { - 'view': reverse('view_endpoint', args=[endpoint.pk]), - 'edit': reverse('edit_endpoint', args=[endpoint.pk]), - 'delete': reverse('delete_endpoint', args=[endpoint.pk]), + "view": reverse("view_endpoint", args=[endpoint.pk]), + "edit": reverse("edit_endpoint", args=[endpoint.pk]), + "delete": reverse("delete_endpoint", args=[endpoint.pk]), } if endpoint.host: - if not re.match(r'^[A-Za-z][A-Za-z0-9\.\-\+]+$', endpoint.host): # is old host valid FQDN? + if not re.match(r"^[A-Za-z][A-Za-z0-9\.\-\+]+$", endpoint.host): # is old host valid FQDN? try: validate_ipv46_address(endpoint.host) # is old host valid IPv4/6? except ValidationError: try: - if '://' in endpoint.host: # is the old host full uri? + if "://" in endpoint.host: # is the old host full uri? parts = Endpoint.from_uri(endpoint.host) # can raise exception if the old host is not valid URL else: - parts = Endpoint.from_uri('//' + endpoint.host) + parts = Endpoint.from_uri("//" + endpoint.host) # can raise exception if there is no way to parse the old host if parts.protocol: if endpoint.protocol and (endpoint.protocol != parts.protocol): - message = f'has defined protocol ({endpoint.protocol}) and it is not the same as protocol in host ' \ - f'({parts.protocol})' + message = f"has defined protocol ({endpoint.protocol}) and it is not the same as protocol in host " \ + f"({parts.protocol})" err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -143,20 +143,20 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if parts.port: try: if (endpoint.port is not None) and (int(endpoint.port) != parts.port): - message = f'has defined port number ({endpoint.port}) and it is not the same as port number in ' \ - f'host ({parts.port})' + message = f"has defined port number ({endpoint.port}) and it is not the same as port number in " \ + f"host ({parts.port})" err_log(message, html_log, endpoint_html_log, endpoint) else: if change: endpoint.port = parts.port except ValueError: - message = f'uses non-numeric port: {endpoint.port}' + message = f"uses non-numeric port: {endpoint.port}" err_log(message, html_log, endpoint_html_log, endpoint) if parts.path: if endpoint.path and (endpoint.path != parts.path): - message = f'has defined path ({endpoint.path}) and it is not the same as path in host ' \ - f'({parts.path})' + message = f"has defined path ({endpoint.path}) and it is not the same as path in host " \ + f"({parts.path})" err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -164,8 +164,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if parts.query: if endpoint.query and (endpoint.query != parts.query): - message = f'has defined query ({endpoint.query}) and it is not the same as query in host ' \ - 
f'({parts.query})' + message = f"has defined query ({endpoint.query}) and it is not the same as query in host " \ + f"({parts.query})" err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -173,8 +173,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if parts.fragment: if endpoint.fragment and (endpoint.fragment != parts.fragment): - message = f'has defined fragment ({endpoint.fragment}) and it is not the same as fragment in host ' \ - f'({parts.fragment})' + message = f"has defined fragment ({endpoint.fragment}) and it is not the same as fragment in host " \ + f"({parts.fragment})" err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -196,13 +196,13 @@ def err_log(message, html_log, endpoint_html_log, endpoint): err_log(ve, html_log, endpoint_html_log, endpoint) if not endpoint.product: - err_log('Missing product', html_log, endpoint_html_log, endpoint) + err_log("Missing product", html_log, endpoint_html_log, endpoint) if broken_endpoints: - logger.error(f'It is not possible to migrate database because there is/are {len(broken_endpoints)} broken endpoint(s). ' - 'Please check logs.') + logger.error(f"It is not possible to migrate database because there is/are {len(broken_endpoints)} broken endpoint(s). " + "Please check logs.") else: - logger.info('There is not broken endpoint.') + logger.info("There is not broken endpoint.") to_be_deleted = set() for product in Product_model.objects.all().distinct(): @@ -218,7 +218,7 @@ def err_log(message, html_log, endpoint_html_log, endpoint): query=endpoint.query, fragment=endpoint.fragment, product_id=product.pk if product else None, - ).order_by('id') + ).order_by("id") if ep.count() > 1: ep_ids = [x.id for x in ep] @@ -234,13 +234,13 @@ def err_log(message, html_log, endpoint_html_log, endpoint): .update(endpoint=ep_ids[0]) epss = Endpoint_Status_model.objects\ .filter(endpoint=ep_ids[0])\ - .values('finding')\ - .annotate(total=Count('id'))\ + .values("finding")\ + .annotate(total=Count("id"))\ .filter(total__gt=1) for eps in epss: esm = Endpoint_Status_model.objects\ - .filter(finding=eps['finding'])\ - .order_by('-last_modified') + .filter(finding=eps["finding"])\ + .order_by("-last_modified") message = "Endpoint Statuses {} will be replaced by '{}'".format( [f"last_modified: {x.last_modified} (id={x.pk})" for x in esm[1:]], f"last_modified: {esm[0].last_modified} (id={esm[0].pk})") @@ -266,12 +266,12 @@ def validate_endpoints_to_add(endpoints_to_add): endpoints = endpoints_to_add.split() for endpoint in endpoints: try: - if '://' in endpoint: # is it full uri? + if "://" in endpoint: # is it full uri? 
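
# The "//" fallback below is what makes bare "host[:port]/path" strings parse
# as network locations at all: RFC 3986 parsers treat a scheme-less string as
# one long path. A stdlib-only illustration of the behavior Endpoint.from_uri
# leans on (independent of DefectDojo; results shown are for Python 3):

from urllib.parse import urlparse

urlparse("foo.bar/path")         # path='foo.bar/path' -- no netloc recovered
urlparse("//foo.bar/path")       # netloc='foo.bar', path='/path'
urlparse("//foo.bar:8080").port  # 8080
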
endpoint_ins = Endpoint.from_uri(endpoint) # from_uri validate URI format + split to components else: # from_uri parse any '//localhost', '//127.0.0.1:80', '//foo.bar/path' correctly # format doesn't follow RFC 3986 but users use it - endpoint_ins = Endpoint.from_uri('//' + endpoint) + endpoint_ins = Endpoint.from_uri("//" + endpoint) endpoint_ins.clean() endpoint_list.append([ endpoint_ins.protocol, @@ -307,32 +307,32 @@ def save_endpoints_to_add(endpoint_list, product): return processed_endpoints -def endpoint_meta_import(file, product, create_endpoints, create_tags, create_meta, origin='UI', request=None): +def endpoint_meta_import(file, product, create_endpoints, create_tags, create_meta, origin="UI", request=None): content = file.read() - sig = content.decode('utf-8-sig') + sig = content.decode("utf-8-sig") content = sig.encode("utf-8") if isinstance(content, bytes): - content = content.decode('utf-8') + content = content.decode("utf-8") reader = csv.DictReader(io.StringIO(content)) - if 'hostname' not in reader.fieldnames: - if origin == 'UI': + if "hostname" not in reader.fieldnames: + if origin == "UI": messages.add_message( request, messages.ERROR, 'The column "hostname" must be present to map host to Endpoint.', - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('import_endpoint_meta', args=(product.id, ))) - elif origin == 'API': + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("import_endpoint_meta", args=(product.id, ))) + elif origin == "API": msg = 'The column "hostname" must be present to map host to Endpoint.' raise ValidationError(msg) - keys = [key for key in reader.fieldnames if key != 'hostname'] + keys = [key for key in reader.fieldnames if key != "hostname"] for row in reader: meta = [] endpoint = None - host = row.get('hostname', None) + host = row.get("hostname", None) if not host: continue @@ -363,18 +363,18 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me # found existing. 
Update it existing_tags.remove(tag) break - existing_tags += [item[0] + ':' + item[1]] + existing_tags += [item[0] + ":" + item[1]] # if tags are not supposed to be added, this value remain unchanged endpoint.tags = existing_tags endpoint.save() def remove_broken_endpoint_statuses(apps): - Endpoint_Status = apps.get_model('dojo', 'endpoint_status') + Endpoint_Status = apps.get_model("dojo", "endpoint_status") broken_eps = Endpoint_Status.objects.filter(Q(endpoint=None) | Q(finding=None)) if broken_eps.count() == 0: - logger.info('There is no broken endpoint_status') + logger.info("There is no broken endpoint_status") else: - logger.warning('We identified %s broken endpoint_statuses', broken_eps.count()) + logger.warning("We identified %s broken endpoint_statuses", broken_eps.count()) deleted = broken_eps.delete() - logger.warning('We removed: %s', deleted) + logger.warning("We removed: %s", deleted) diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py index b6f838d793..c392f37c19 100644 --- a/dojo/endpoint/views.py +++ b/dojo/endpoint/views.py @@ -47,7 +47,7 @@ def process_endpoints_view(request, host_view=False, vulnerable=False): else: endpoints = Endpoint.objects.all() - endpoints = endpoints.prefetch_related('product', 'product__tags', 'tags').distinct() + endpoints = endpoints.prefetch_related("product", "product__tags", "tags").distinct() endpoints = get_authorized_endpoints(Permissions.Endpoint_View, endpoints, request.user) filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = EndpointFilterWithoutObjectLookups if filter_string_matching else EndpointFilter @@ -72,16 +72,16 @@ def process_endpoints_view(request, host_view=False, vulnerable=False): add_breadcrumb(title=view_name, top_level=not len(request.GET), request=request) product_tab = None - if 'product' in request.GET: - p = request.GET.getlist('product', []) + if "product" in request.GET: + p = request.GET.getlist("product", []) if len(p) == 1: product = get_object_or_404(Product, id=p[0]) user_has_permission_or_403(request.user, product, Permissions.Product_View) product_tab = Product_Tab(product, view_name, tab="endpoints") return render( - request, 'dojo/endpoints.html', { - 'product_tab': product_tab, + request, "dojo/endpoints.html", { + "product_tab": product_tab, "endpoints": paged_endpoints, "filtered": endpoints, "name": view_name, @@ -128,7 +128,7 @@ def process_endpoint_view(request, eid, host_view=False): active_findings = endpoint.host_active_findings() else: endpoints = None - endpoint_metadata = dict(endpoint.endpoint_meta.values_list('name', 'value')) + endpoint_metadata = dict(endpoint.endpoint_meta.values_list("name", "value")) all_findings = endpoint.findings.all() active_findings = endpoint.active_findings() @@ -147,7 +147,7 @@ def process_endpoint_view(request, eid, host_view=False): closed_findings = Finding.objects.none() monthly_counts = get_period_counts(all_findings, closed_findings, None, months_between, start_date, - relative_delta='months') + relative_delta="months") paged_findings = get_page_items(request, active_findings, 25) vulnerable = active_findings.count() != 0 @@ -156,41 +156,41 @@ def process_endpoint_view(request, eid, host_view=False): return render(request, "dojo/view_endpoint.html", {"endpoint": endpoint, - 'product_tab': product_tab, + "product_tab": product_tab, "endpoints": endpoints, "findings": paged_findings, - 'all_findings': all_findings, - 'opened_per_month': monthly_counts['opened_per_period'], - 'endpoint_metadata': 
endpoint_metadata, - 'vulnerable': vulnerable, - 'host_view': host_view, + "all_findings": all_findings, + "opened_per_month": monthly_counts["opened_per_period"], + "endpoint_metadata": endpoint_metadata, + "vulnerable": vulnerable, + "host_view": host_view, }) -@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid") def view_endpoint(request, eid): return process_endpoint_view(request, eid, host_view=False) -@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid") def view_endpoint_host(request, eid): return process_endpoint_view(request, eid, host_view=True) -@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid") def edit_endpoint(request, eid): endpoint = get_object_or_404(Endpoint, id=eid) - if request.method == 'POST': + if request.method == "POST": form = EditEndpointForm(request.POST, instance=endpoint) if form.is_valid(): - logger.debug('saving endpoint') + logger.debug("saving endpoint") endpoint = form.save() messages.add_message(request, messages.SUCCESS, - 'Endpoint updated successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_endpoint', args=(endpoint.id,))) + "Endpoint updated successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_endpoint", args=(endpoint.id,))) else: add_breadcrumb(parent=endpoint, title="Edit", top_level=False, request=request) form = EditEndpointForm(instance=endpoint) @@ -200,28 +200,28 @@ def edit_endpoint(request, eid): return render(request, "dojo/edit_endpoint.html", {"endpoint": endpoint, - 'product_tab': product_tab, + "product_tab": product_tab, "form": form, }) -@user_is_authorized(Endpoint, Permissions.Endpoint_Delete, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_Delete, "eid") def delete_endpoint(request, eid): endpoint = get_object_or_404(Endpoint, pk=eid) product = endpoint.product form = DeleteEndpointForm(instance=endpoint) - if request.method == 'POST': - if 'id' in request.POST and str(endpoint.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(endpoint.id) == request.POST["id"]: form = DeleteEndpointForm(request.POST, instance=endpoint) if form.is_valid(): product = endpoint.product endpoint.delete() messages.add_message(request, messages.SUCCESS, - 'Endpoint and relationships removed.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(product.id,))) + "Endpoint and relationships removed.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(product.id,))) collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([endpoint]) @@ -229,135 +229,135 @@ def delete_endpoint(request, eid): product_tab = Product_Tab(endpoint.product, "Delete Endpoint", tab="endpoints") - return render(request, 'dojo/delete_endpoint.html', - {'endpoint': endpoint, - 'product_tab': product_tab, - 'form': form, - 'rels': rels, + return render(request, "dojo/delete_endpoint.html", + {"endpoint": endpoint, + "product_tab": product_tab, + "form": form, + "rels": rels, }) -@user_is_authorized(Product, Permissions.Endpoint_Add, 'pid') +@user_is_authorized(Product, Permissions.Endpoint_Add, "pid") def add_endpoint(request, pid): product = get_object_or_404(Product, id=pid) - template = 'dojo/add_endpoint.html' + template = 
"dojo/add_endpoint.html" form = AddEndpointForm(product=product) - if request.method == 'POST': + if request.method == "POST": form = AddEndpointForm(request.POST, product=product) if form.is_valid(): endpoints = form.save() - tags = request.POST.get('tags') + tags = request.POST.get("tags") for e in endpoints: e.tags = tags e.save() messages.add_message(request, messages.SUCCESS, - 'Endpoint added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('endpoint') + "?product=" + pid) + "Endpoint added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("endpoint") + "?product=" + pid) product_tab = Product_Tab(product, "Add Endpoint", tab="endpoints") return render(request, template, { - 'product_tab': product_tab, - 'name': 'Add Endpoint', - 'form': form}) + "product_tab": product_tab, + "name": "Add Endpoint", + "form": form}) def add_product_endpoint(request): form = AddEndpointForm() - if request.method == 'POST': + if request.method == "POST": form = AddEndpointForm(request.POST) if form.is_valid(): user_has_permission_or_403(request.user, form.product, Permissions.Endpoint_Add) endpoints = form.save() - tags = request.POST.get('tags') + tags = request.POST.get("tags") for e in endpoints: e.tags = tags e.save() messages.add_message(request, messages.SUCCESS, - 'Endpoint added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('endpoint') + f"?product={form.product.id}") + "Endpoint added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("endpoint") + f"?product={form.product.id}") add_breadcrumb(title="Add Endpoint", top_level=False, request=request) return render(request, - 'dojo/add_endpoint.html', - {'name': 'Add Endpoint', - 'form': form, + "dojo/add_endpoint.html", + {"name": "Add Endpoint", + "form": form, }) -@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, "eid") def add_meta_data(request, eid): endpoint = Endpoint.objects.get(id=eid) - if request.method == 'POST': + if request.method == "POST": form = DojoMetaDataForm(request.POST, instance=DojoMeta(endpoint=endpoint)) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - 'Metadata added successfully.', - extra_tags='alert-success') - if 'add_another' in request.POST: - return HttpResponseRedirect(reverse('add_endpoint_meta_data', args=(eid,))) + "Metadata added successfully.", + extra_tags="alert-success") + if "add_another" in request.POST: + return HttpResponseRedirect(reverse("add_endpoint_meta_data", args=(eid,))) else: - return HttpResponseRedirect(reverse('view_endpoint', args=(eid,))) + return HttpResponseRedirect(reverse("view_endpoint", args=(eid,))) else: form = DojoMetaDataForm() add_breadcrumb(parent=endpoint, title="Add Metadata", top_level=False, request=request) product_tab = Product_Tab(endpoint.product, "Add Metadata", tab="endpoints") return render(request, - 'dojo/add_endpoint_meta_data.html', - {'form': form, - 'product_tab': product_tab, - 'endpoint': endpoint, + "dojo/add_endpoint_meta_data.html", + {"form": form, + "product_tab": product_tab, + "endpoint": endpoint, }) -@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, 'eid') +@user_is_authorized(Endpoint, Permissions.Endpoint_Edit, "eid") def edit_meta_data(request, eid): endpoint = Endpoint.objects.get(id=eid) - if request.method == 'POST': + if request.method == "POST": for key, value in request.POST.items(): - if 
key.startswith('cfv_'): - cfv_id = int(key.split('_')[1]) + if key.startswith("cfv_"): + cfv_id = int(key.split("_")[1]) cfv = get_object_or_404(DojoMeta, id=cfv_id) value = value.strip() if value: cfv.value = value cfv.save() - if key.startswith('delete_'): - cfv_id = int(key.split('_')[2]) + if key.startswith("delete_"): + cfv_id = int(key.split("_")[2]) cfv = get_object_or_404(DojoMeta, id=cfv_id) cfv.delete() messages.add_message(request, messages.SUCCESS, - 'Metadata edited successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_endpoint', args=(eid,))) + "Metadata edited successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_endpoint", args=(eid,))) product_tab = Product_Tab(endpoint.product, "Edit Metadata", tab="endpoints") return render(request, - 'dojo/edit_endpoint_meta_data.html', - {'endpoint': endpoint, - 'product_tab': product_tab, + "dojo/edit_endpoint_meta_data.html", + {"endpoint": endpoint, + "product_tab": product_tab, }) # bulk mitigate and delete are combined, so we can't have the nice user_is_authorized decorator def endpoint_bulk_update_all(request, pid=None): if request.method == "POST": - endpoints_to_update = request.POST.getlist('endpoints_to_update') + endpoints_to_update = request.POST.getlist("endpoints_to_update") endpoints = Endpoint.objects.filter(id__in=endpoints_to_update).order_by("endpoint_meta__product__id") total_endpoint_count = endpoints.count() - if request.POST.get('delete_bulk_endpoints') and endpoints_to_update: + if request.POST.get("delete_bulk_endpoints") and endpoints_to_update: if pid is not None: product = get_object_or_404(Product, id=pid) @@ -374,13 +374,13 @@ def endpoint_bulk_update_all(request, pid=None): calculate_grade(prod) if skipped_endpoint_count > 0: - add_error_message_to_response(f'Skipped deletion of {skipped_endpoint_count} endpoints because you are not authorized.') + add_error_message_to_response(f"Skipped deletion of {skipped_endpoint_count} endpoints because you are not authorized.") if deleted_endpoint_count > 0: messages.add_message(request, messages.SUCCESS, - f'Bulk delete of {deleted_endpoint_count} endpoints was successful.', - extra_tags='alert-success') + f"Bulk delete of {deleted_endpoint_count} endpoints was successful.", + extra_tags="alert-success") else: if endpoints_to_update: @@ -394,7 +394,7 @@ def endpoint_bulk_update_all(request, pid=None): updated_endpoint_count = endpoints.count() if skipped_endpoint_count > 0: - add_error_message_to_response(f'Skipped mitigation of {skipped_endpoint_count} endpoints because you are not authorized.') + add_error_message_to_response(f"Skipped mitigation of {skipped_endpoint_count} endpoints because you are not authorized.") eps_count = Endpoint_Status.objects.filter(endpoint__in=endpoints).update( mitigated=True, @@ -406,22 +406,22 @@ def endpoint_bulk_update_all(request, pid=None): if updated_endpoint_count > 0: messages.add_message(request, messages.SUCCESS, - f'Bulk mitigation of {updated_endpoint_count} endpoints ({eps_count} endpoint statuses) was successful.', - extra_tags='alert-success') + f"Bulk mitigation of {updated_endpoint_count} endpoints ({eps_count} endpoint statuses) was successful.", + extra_tags="alert-success") else: messages.add_message(request, messages.ERROR, - 'Unable to process bulk update. Required fields were not selected.', - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('endpoint', args=())) + "Unable to process bulk update. 
Required fields were not selected.", + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("endpoint", args=())) -@user_is_authorized(Finding, Permissions.Finding_Edit, 'fid') +@user_is_authorized(Finding, Permissions.Finding_Edit, "fid") def endpoint_status_bulk_update(request, fid): if request.method == "POST": post = request.POST - endpoints_to_update = post.getlist('endpoints_to_update') - status_list = ['active', 'false_positive', 'mitigated', 'out_of_scope', 'risk_accepted'] + endpoints_to_update = post.getlist("endpoints_to_update") + status_list = ["active", "false_positive", "mitigated", "out_of_scope", "risk_accepted"] enable = [item for item in status_list if item in list(post.keys())] if endpoints_to_update and len(enable) > 0: @@ -433,7 +433,7 @@ def endpoint_status_bulk_update(request, fid): for status in status_list: if status in enable: endpoint_status.__setattr__(status, True) - if status == 'mitigated': + if status == "mitigated": endpoint_status.mitigated_by = request.user endpoint_status.mitigated_time = timezone.now() else: @@ -442,70 +442,70 @@ def endpoint_status_bulk_update(request, fid): endpoint_status.save() messages.add_message(request, messages.SUCCESS, - 'Bulk edit of endpoints was successful. Check to make sure it is what you intended.', - extra_tags='alert-success') + "Bulk edit of endpoints was successful. Check to make sure it is what you intended.", + extra_tags="alert-success") else: messages.add_message(request, messages.ERROR, - 'Unable to process bulk update. Required fields were not selected.', - extra_tags='alert-danger') - return redirect(request, post['return_url']) + "Unable to process bulk update. Required fields were not selected.", + extra_tags="alert-danger") + return redirect(request, post["return_url"]) def prefetch_for_endpoints(endpoints): if isinstance(endpoints, QuerySet): - endpoints = endpoints.prefetch_related('product', 'tags', 'product__tags') - endpoints = endpoints.annotate(active_finding_count=Count('finding__id', filter=Q(finding__active=True))) + endpoints = endpoints.prefetch_related("product", "tags", "product__tags") + endpoints = endpoints.annotate(active_finding_count=Count("finding__id", filter=Q(finding__active=True))) else: - logger.debug('unable to prefetch because query was already executed') + logger.debug("unable to prefetch because query was already executed") return endpoints def migrate_endpoints_view(request): - view_name = 'Migrate endpoints' + view_name = "Migrate endpoints" - html_log = clean_hosts_run(apps=apps, change=(request.method == 'POST')) + html_log = clean_hosts_run(apps=apps, change=(request.method == "POST")) return render( - request, 'dojo/migrate_endpoints.html', { - 'product_tab': None, + request, "dojo/migrate_endpoints.html", { + "product_tab": None, "name": view_name, "html_log": html_log, }) -@user_is_authorized(Product, Permissions.Endpoint_Edit, 'pid') +@user_is_authorized(Product, Permissions.Endpoint_Edit, "pid") def import_endpoint_meta(request, pid): product = get_object_or_404(Product, id=pid) form = ImportEndpointMetaForm() - if request.method == 'POST': + if request.method == "POST": form = ImportEndpointMetaForm(request.POST, request.FILES) if form.is_valid(): - file = request.FILES.get('file', None) + file = request.FILES.get("file", None) # Make sure size is not too large if file and is_scan_file_too_large(file): messages.add_message( request, messages.ERROR, f"Report file is too large. 
Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB", - extra_tags='alert-danger') + extra_tags="alert-danger") - create_endpoints = form.cleaned_data['create_endpoints'] - create_tags = form.cleaned_data['create_tags'] - create_dojo_meta = form.cleaned_data['create_dojo_meta'] + create_endpoints = form.cleaned_data["create_endpoints"] + create_tags = form.cleaned_data["create_tags"] + create_dojo_meta = form.cleaned_data["create_dojo_meta"] try: - endpoint_meta_import(file, product, create_endpoints, create_tags, create_dojo_meta, origin='UI', request=request) + endpoint_meta_import(file, product, create_endpoints, create_tags, create_dojo_meta, origin="UI", request=request) except Exception as e: logger.exception(e) - add_error_message_to_response(f'An exception error occurred during the report import:{str(e)}') - return HttpResponseRedirect(reverse('endpoint') + "?product=" + pid) + add_error_message_to_response(f"An exception error occurred during the report import:{str(e)}") + return HttpResponseRedirect(reverse("endpoint") + "?product=" + pid) add_breadcrumb(title="Endpoint Meta Importer", top_level=False, request=request) product_tab = Product_Tab(product, title="Endpoint Meta Importer", tab="endpoints") - return render(request, 'dojo/endpoint_meta_importer.html', { - 'product_tab': product_tab, - 'form': form, + return render(request, "dojo/endpoint_meta_importer.html", { + "product_tab": product_tab, + "form": form, }) diff --git a/dojo/engagement/queries.py b/dojo/engagement/queries.py index d5a7f44593..9d8e9b6ae4 100644 --- a/dojo/engagement/queries.py +++ b/dojo/engagement/queries.py @@ -12,33 +12,33 @@ def get_authorized_engagements(permission): return Engagement.objects.none() if user.is_superuser: - return Engagement.objects.all() + return Engagement.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Engagement.objects.all() + return Engagement.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) engagements = Engagement.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)) + product__authorized_group=Exists(authorized_product_groups)).order_by("id") engagements = engagements.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) diff --git a/dojo/engagement/services.py b/dojo/engagement/services.py index 0331e87c5b..f11963867a 100644 --- a/dojo/engagement/services.py +++ b/dojo/engagement/services.py @@ -12,7 +12,7 @@ def close_engagement(eng): eng.active = False - eng.status = 'Completed' + 
eng.status = "Completed" eng.save() if jira_helper.get_jira_project(eng): @@ -21,7 +21,7 @@ def close_engagement(eng): def reopen_engagement(eng): eng.active = True - eng.status = 'In Progress' + eng.status = "In Progress" eng.save() diff --git a/dojo/engagement/signals.py b/dojo/engagement/signals.py index 7a8e3352ba..3c5266fda0 100644 --- a/dojo/engagement/signals.py +++ b/dojo/engagement/signals.py @@ -13,9 +13,9 @@ @receiver(post_save, sender=Engagement) def engagement_post_save(sender, instance, created, **kwargs): if created: - title = _('Engagement created for "%(product)s": %(name)s') % {'product': instance.product, 'name': instance.name} - create_notification(event='engagement_added', title=title, engagement=instance, product=instance.product, - url=reverse('view_engagement', args=(instance.id,))) + title = _('Engagement created for "%(product)s": %(name)s') % {"product": instance.product, "name": instance.name} + create_notification(event="engagement_added", title=title, engagement=instance, product=instance.product, + url=reverse("view_engagement", args=(instance.id,))) @receiver(pre_save, sender=Engagement) @@ -23,16 +23,16 @@ def engagement_pre_save(sender, instance, **kwargs): old = sender.objects.filter(pk=instance.pk).first() if old and instance.status != old.status: if instance.status in ["Cancelled", "Completed"]: - create_notification(event='engagement_closed', - title=_('Closure of %s') % instance.name, + create_notification(event="engagement_closed", + title=_("Closure of %s") % instance.name, description=_('The engagement "%s" was closed') % (instance.name), - engagement=instance, url=reverse('engagement_all_findings', args=(instance.id, ))) + engagement=instance, url=reverse("engagement_all_findings", args=(instance.id, ))) elif instance.status in ["In Progress"] and old.status not in ["Not Started"]: - create_notification(event='engagement_reopened', - title=_('Reopening of %s') % instance.name, + create_notification(event="engagement_reopened", + title=_("Reopening of %s") % instance.name, engagement=instance, description=_('The engagement "%s" was reopened') % (instance.name), - url=reverse('view_engagement', args=(instance.id, ))) + url=reverse("view_engagement", args=(instance.id, ))) @receiver(post_delete, sender=Engagement) @@ -41,17 +41,17 @@ def engagement_post_delete(sender, instance, using, origin, **kwargs): if settings.ENABLE_AUDITLOG: le = LogEntry.objects.get( action=LogEntry.Action.DELETE, - content_type=ContentType.objects.get(app_label='dojo', model='engagement'), + content_type=ContentType.objects.get(app_label="dojo", model="engagement"), object_id=instance.id, ) description = _('The engagement "%(name)s" was deleted by %(user)s') % { - 'name': instance.name, 'user': le.actor} + "name": instance.name, "user": le.actor} else: - description = _('The engagement "%(name)s" was deleted') % {'name': instance.name} - create_notification(event='engagement_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing - title=_('Deletion of %(name)s') % {'name': instance.name}, + description = _('The engagement "%(name)s" was deleted') % {"name": instance.name} + create_notification(event="engagement_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing + title=_("Deletion of %(name)s") % {"name": instance.name}, description=description, product=instance.product, - url=reverse('view_product', args=(instance.product.id, )), + 
url=reverse("view_product", args=(instance.product.id, )), recipients=[instance.lead], icon="exclamation-triangle") diff --git a/dojo/engagement/urls.py b/dojo/engagement/urls.py index df0a7f5af2..c70bb56a95 100644 --- a/dojo/engagement/urls.py +++ b/dojo/engagement/urls.py @@ -4,56 +4,56 @@ urlpatterns = [ # engagements and calendar - re_path(r'^calendar$', views.engagement_calendar, name='calendar'), - re_path(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'), - re_path(r'^engagement$', views.engagements, {'view': 'active'}, name='engagement'), - re_path(r'^engagements_all$', views.engagements_all, name='engagements_all'), - re_path(r'^engagement/all$', views.engagements, {'view': 'all'}, name='all_engagements'), - re_path(r'^engagement/active$', views.engagements, {'view': 'active'}, name='active_engagements'), - re_path(r'^engagement/(?P\d+)$', views.ViewEngagement.as_view(), - name='view_engagement'), - re_path(r'^engagement/(?P\d+)/ics$', views.engagement_ics, - name='engagement_ics'), - re_path(r'^engagement/(?P\d+)/edit$', views.edit_engagement, - name='edit_engagement'), - re_path(r'^engagement/(?P\d+)/delete$', views.delete_engagement, - name='delete_engagement'), - re_path(r'^engagement/(?P\d+)/copy$', views.copy_engagement, - name='copy_engagement'), - re_path(r'^engagement/(?P\d+)/add_tests$', views.add_tests, - name='add_tests'), + re_path(r"^calendar$", views.engagement_calendar, name="calendar"), + re_path(r"^calendar/engagements$", views.engagement_calendar, name="engagement_calendar"), + re_path(r"^engagement$", views.engagements, {"view": "active"}, name="engagement"), + re_path(r"^engagements_all$", views.engagements_all, name="engagements_all"), + re_path(r"^engagement/all$", views.engagements, {"view": "all"}, name="all_engagements"), + re_path(r"^engagement/active$", views.engagements, {"view": "active"}, name="active_engagements"), + re_path(r"^engagement/(?P\d+)$", views.ViewEngagement.as_view(), + name="view_engagement"), + re_path(r"^engagement/(?P\d+)/ics$", views.engagement_ics, + name="engagement_ics"), + re_path(r"^engagement/(?P\d+)/edit$", views.edit_engagement, + name="edit_engagement"), + re_path(r"^engagement/(?P\d+)/delete$", views.delete_engagement, + name="delete_engagement"), + re_path(r"^engagement/(?P\d+)/copy$", views.copy_engagement, + name="copy_engagement"), + re_path(r"^engagement/(?P\d+)/add_tests$", views.add_tests, + name="add_tests"), re_path( - r'^engagement/(?P\d+)/import_scan_results$', + r"^engagement/(?P\d+)/import_scan_results$", views.ImportScanResultsView.as_view(), - name='import_scan_results'), - re_path(r'^engagement/(?P\d+)/close$', views.close_eng, - name='close_engagement'), - re_path(r'^engagement/(?P\d+)/reopen$', views.reopen_eng, - name='reopen_engagement'), - re_path(r'^engagement/(?P\d+)/complete_checklist$', - views.complete_checklist, name='complete_checklist'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/add$', - views.add_risk_acceptance, name='add_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/add/(?P\d+)$', - views.add_risk_acceptance, name='add_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)$', - views.view_risk_acceptance, name='view_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)/edit$', - views.edit_risk_acceptance, name='edit_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)/expire$', - views.expire_risk_acceptance, name='expire_risk_acceptance'), - 
re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)/reinstate$', - views.reinstate_risk_acceptance, name='reinstate_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)/delete$', - views.delete_risk_acceptance, name='delete_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/risk_acceptance/(?P\d+)/download$', - views.download_risk_acceptance, name='download_risk_acceptance'), - re_path(r'^engagement/(?P\d+)/threatmodel$', views.view_threatmodel, - name='view_threatmodel'), - re_path(r'^engagement/(?P\d+)/threatmodel/upload$', - views.upload_threatmodel, name='upload_threatmodel'), - re_path(r'^engagement/csv_export$', - views.csv_export, name='engagement_csv_export'), - re_path(r'^engagement/excel_export$', - views.excel_export, name='engagement_excel_export'), + name="import_scan_results"), + re_path(r"^engagement/(?P\d+)/close$", views.close_eng, + name="close_engagement"), + re_path(r"^engagement/(?P\d+)/reopen$", views.reopen_eng, + name="reopen_engagement"), + re_path(r"^engagement/(?P\d+)/complete_checklist$", + views.complete_checklist, name="complete_checklist"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/add$", + views.add_risk_acceptance, name="add_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/add/(?P\d+)$", + views.add_risk_acceptance, name="add_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)$", + views.view_risk_acceptance, name="view_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/edit$", + views.edit_risk_acceptance, name="edit_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/expire$", + views.expire_risk_acceptance, name="expire_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/reinstate$", + views.reinstate_risk_acceptance, name="reinstate_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/delete$", + views.delete_risk_acceptance, name="delete_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/risk_acceptance/(?P\d+)/download$", + views.download_risk_acceptance, name="download_risk_acceptance"), + re_path(r"^engagement/(?P\d+)/threatmodel$", views.view_threatmodel, + name="view_threatmodel"), + re_path(r"^engagement/(?P\d+)/threatmodel/upload$", + views.upload_threatmodel, name="upload_threatmodel"), + re_path(r"^engagement/csv_export$", + views.csv_export, name="engagement_csv_export"), + re_path(r"^engagement/excel_export$", + views.excel_export, name="engagement_excel_export"), ] diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index f28a0863fb..a6e47589d0 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -113,52 +113,52 @@ @vary_on_cookie def engagement_calendar(request): - if not get_system_setting('enable_calendar'): + if not get_system_setting("enable_calendar"): raise Resolver404 - if 'lead' not in request.GET or '0' in request.GET.getlist('lead'): + if "lead" not in request.GET or "0" in request.GET.getlist("lead"): engagements = get_authorized_engagements(Permissions.Engagement_View) else: filters = [] - leads = request.GET.getlist('lead', '') - if '-1' in request.GET.getlist('lead'): - leads.remove('-1') + leads = request.GET.getlist("lead", "") + if "-1" in request.GET.getlist("lead"): + leads.remove("-1") filters.append(Q(lead__isnull=True)) filters.append(Q(lead__in=leads)) engagements = get_authorized_engagements(Permissions.Engagement_View).filter(reduce(operator.or_, filters)) - engagements = engagements.select_related('lead') - engagements = 
engagements.prefetch_related('product') + engagements = engagements.select_related("lead") + engagements = engagements.prefetch_related("product") add_breadcrumb( title="Engagement Calendar", top_level=True, request=request) return render( - request, 'dojo/calendar.html', { - 'caltype': 'engagements', - 'leads': request.GET.getlist('lead', ''), - 'engagements': engagements, - 'users': get_authorized_users(Permissions.Engagement_View), + request, "dojo/calendar.html", { + "caltype": "engagements", + "leads": request.GET.getlist("lead", ""), + "engagements": engagements, + "users": get_authorized_users(Permissions.Engagement_View), }) def get_filtered_engagements(request, view): - if view not in ['all', 'active']: - msg = f'View {view} is not allowed' + if view not in ["all", "active"]: + msg = f"View {view} is not allowed" raise ValidationError(msg) - engagements = get_authorized_engagements(Permissions.Engagement_View).order_by('-target_start') + engagements = get_authorized_engagements(Permissions.Engagement_View).order_by("-target_start") - if view == 'active': + if view == "active": engagements = engagements.filter(active=True) - engagements = engagements.select_related('product', 'product__prod_type') \ - .prefetch_related('lead', 'tags', 'product__tags') + engagements = engagements.select_related("product", "product__prod_type") \ + .prefetch_related("lead", "tags", "product__tags") if System_Settings.objects.get().enable_jira: engagements = engagements.prefetch_related( - 'jira_project__jira_instance', - 'product__jira_project_set__jira_instance', + "jira_project__jira_instance", + "product__jira_project_set__jira_instance", ) filter_string_matching = get_system_setting("filter_string_matching", False) @@ -172,13 +172,13 @@ def get_test_counts(engagements): # Get the test counts per engagement. As a separate query, this is much # faster than annotating the above `engagements` query. 
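# (illustrative note on the query below) values("engagement") +
# annotate(test_count=Count("engagement")) compiles to a single GROUP BY, so the
# database returns one row per engagement, e.g. {"engagement": 7, "test_count": 3}
# (hypothetical values), which the dict comprehension then keys by engagement id.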
engagement_test_counts = { - test['engagement']: test['test_count'] + test["engagement"]: test["test_count"] for test in Test.objects.filter( engagement__in=engagements, ).values( - 'engagement', + "engagement", ).annotate( - test_count=Count('engagement'), + test_count=Count("engagement"), ) } return engagement_test_counts @@ -187,13 +187,13 @@ def get_test_counts(engagements): def engagements(request, view): if not view: - view = 'active' + view = "active" filtered_engagements = get_filtered_engagements(request, view) engs = get_page_items(request, filtered_engagements.qs, 25) - product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list('name', flat=True)) - engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct()) + product_name_words = sorted(get_authorized_products(Permissions.Product_View).values_list("name", flat=True)) + engagement_name_words = sorted(get_authorized_engagements(Permissions.Engagement_View).values_list("name", flat=True).distinct()) add_breadcrumb( title=f"{view.capitalize()} Engagements", @@ -201,13 +201,13 @@ def engagements(request, view): request=request) return render( - request, 'dojo/engagement.html', { - 'engagements': engs, - 'engagement_test_counts': get_test_counts(filtered_engagements.qs), - 'filter_form': filtered_engagements.form, - 'product_name_words': product_name_words, - 'engagement_name_words': engagement_name_words, - 'view': view.capitalize(), + request, "dojo/engagement.html", { + "engagements": engs, + "engagement_test_counts": get_test_counts(filtered_engagements.qs), + "filter_form": filtered_engagements.form, + "product_name_words": product_name_words, + "engagement_name_words": engagement_name_words, + "view": view.capitalize(), }) @@ -217,23 +217,23 @@ def engagements_all(request): products_with_engagements = products_with_engagements.filter(~Q(engagement=None)).distinct() # count using prefetch instead of just using 'engagement__set_test_test` to avoid loading all test in memory just to count them - filter_string_matching = get_system_setting('filter_string_matching', False) + filter_string_matching = get_system_setting("filter_string_matching", False) products_filter_class = ProductEngagementsFilterWithoutObjectLookups if filter_string_matching else ProductEngagementsFilter - engagement_query = Engagement.objects.annotate(test_count=Count('test__id')) + engagement_query = Engagement.objects.annotate(test_count=Count("test__id")) filter_qs = products_with_engagements.prefetch_related( - Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs), + Prefetch("engagement_set", queryset=products_filter_class(request.GET, engagement_query).qs), ) filter_qs = filter_qs.prefetch_related( - 'engagement_set__tags', - 'prod_type', - 'engagement_set__lead', - 'tags', + "engagement_set__tags", + "prod_type", + "engagement_set__lead", + "tags", ) if System_Settings.objects.get().enable_jira: filter_qs = filter_qs.prefetch_related( - 'engagement_set__jira_project__jira_instance', - 'jira_project_set__jira_instance', + "engagement_set__jira_project__jira_instance", + "jira_project_set__jira_instance", ) filter_class = EngagementFilterWithoutObjectLookups if filter_string_matching else EngagementFilter filtered = filter_class( @@ -243,8 +243,8 @@ def engagements_all(request): prods = get_page_items(request, filtered.qs, 25) prods.paginator.count = sum(len(prod.engagement_set.all()) for prod in prods) - name_words = 
products_with_engagements.values_list('name', flat=True) - eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list('name', flat=True).distinct() + name_words = products_with_engagements.values_list("name", flat=True) + eng_words = get_authorized_engagements(Permissions.Engagement_View).values_list("name", flat=True).distinct() add_breadcrumb( title="All Engagements", @@ -252,15 +252,15 @@ def engagements_all(request): request=request) return render( - request, 'dojo/engagements_all.html', { - 'products': prods, - 'filter_form': filtered.form, - 'name_words': sorted(set(name_words)), - 'eng_words': sorted(set(eng_words)), + request, "dojo/engagements_all.html", { + "products": prods, + "filter_form": filtered.form, + "name_words": sorted(set(name_words)), + "eng_words": sorted(set(eng_words)), }) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def edit_engagement(request, eid): engagement = Engagement.objects.get(pk=eid) is_ci_cd = engagement.engagement_type == "CI/CD" @@ -268,14 +268,14 @@ def edit_engagement(request, eid): jira_epic_form = None jira_project = None - if request.method == 'POST': + if request.method == "POST": form = EngForm(request.POST, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user) jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False) if form.is_valid(): # first save engagement details - new_status = form.cleaned_data.get('status') - engagement.product = form.cleaned_data.get('product') + new_status = form.cleaned_data.get("status") + engagement.product = form.cleaned_data.get("product") engagement = form.save(commit=False) if (new_status == "Cancelled" or new_status == "Completed"): engagement.active = False @@ -287,80 +287,80 @@ def edit_engagement(request, eid): messages.add_message( request, messages.SUCCESS, - 'Engagement updated successfully.', - extra_tags='alert-success') + "Engagement updated successfully.", + extra_tags="alert-success") - success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target='engagement', engagement=engagement, product=engagement.product) + success, jira_project_form = jira_helper.process_jira_project_form(request, instance=jira_project, target="engagement", engagement=engagement, product=engagement.product) error = not success success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement) error = error or not success if not error: - if '_Add Tests' in request.POST: + if "_Add Tests" in request.POST: return HttpResponseRedirect( - reverse('add_tests', args=(engagement.id, ))) + reverse("add_tests", args=(engagement.id, ))) else: return HttpResponseRedirect( - reverse('view_engagement', args=(engagement.id, ))) + reverse("view_engagement", args=(engagement.id, ))) else: logger.debug(form.errors) else: - form = EngForm(initial={'product': engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user) + form = EngForm(initial={"product": engagement.product}, instance=engagement, cicd=is_ci_cd, product=engagement.product, user=request.user) jira_epic_form = None - if get_system_setting('enable_jira'): + if get_system_setting("enable_jira"): jira_project = jira_helper.get_jira_project(engagement, use_inheritance=False) - jira_project_form = JIRAProjectForm(instance=jira_project, target='engagement', product=engagement.product) - logger.debug('showing 
jira-epic-form') + jira_project_form = JIRAProjectForm(instance=jira_project, target="engagement", product=engagement.product) + logger.debug("showing jira-epic-form") jira_epic_form = JIRAEngagementForm(instance=engagement) if is_ci_cd: - title = 'Edit CI/CD Engagement' + title = "Edit CI/CD Engagement" else: - title = 'Edit Interactive Engagement' + title = "Edit Interactive Engagement" product_tab = Product_Tab(engagement.product, title=title, tab="engagements") product_tab.setEngagement(engagement) - return render(request, 'dojo/new_eng.html', { - 'product_tab': product_tab, - 'title': title, - 'form': form, - 'edit': True, - 'jira_epic_form': jira_epic_form, - 'jira_project_form': jira_project_form, - 'engagement': engagement, + return render(request, "dojo/new_eng.html", { + "product_tab": product_tab, + "title": title, + "form": form, + "edit": True, + "jira_epic_form": jira_epic_form, + "jira_project_form": jira_project_form, + "engagement": engagement, }) -@user_is_authorized(Engagement, Permissions.Engagement_Delete, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Delete, "eid") def delete_engagement(request, eid): engagement = get_object_or_404(Engagement, pk=eid) product = engagement.product form = DeleteEngagementForm(instance=engagement) - if request.method == 'POST': - if 'id' in request.POST and str(engagement.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(engagement.id) == request.POST["id"]: form = DeleteEngagementForm(request.POST, instance=engagement) if form.is_valid(): product = engagement.product if get_setting("ASYNC_OBJECT_DELETE"): async_del = async_delete() async_del.delete(engagement) - message = 'Engagement and relationships will be removed in the background.' + message = "Engagement and relationships will be removed in the background." else: - message = 'Engagement and relationships removed.' + message = "Engagement and relationships removed." 
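# Synchronous path: with ASYNC_OBJECT_DELETE disabled, the engagement and its
# related objects are deleted inline (per the models' cascade rules) before the
# redirect below.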
engagement.delete() messages.add_message( request, messages.SUCCESS, message, - extra_tags='alert-success') + extra_tags="alert-success") return HttpResponseRedirect(reverse("view_engagements", args=(product.id, ))) - rels = ['Previewing the relationships has been disabled.', ''] - display_preview = get_setting('DELETE_PREVIEW') + rels = ["Previewing the relationships has been disabled.", ""] + display_preview = get_setting("DELETE_PREVIEW") if display_preview: collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([engagement]) @@ -368,21 +368,21 @@ def delete_engagement(request, eid): product_tab = Product_Tab(product, title="Delete Engagement", tab="engagements") product_tab.setEngagement(engagement) - return render(request, 'dojo/delete_engagement.html', { - 'product_tab': product_tab, - 'engagement': engagement, - 'form': form, - 'rels': rels, + return render(request, "dojo/delete_engagement.html", { + "product_tab": product_tab, + "engagement": engagement, + "form": form, + "rels": rels, }) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def copy_engagement(request, eid): engagement = get_object_or_404(Engagement, id=eid) product = engagement.product form = DoneForm() - if request.method == 'POST': + if request.method == "POST": form = DoneForm(request.POST) if form.is_valid(): engagement_copy = engagement.copy() @@ -390,13 +390,13 @@ def copy_engagement(request, eid): messages.add_message( request, messages.SUCCESS, - 'Engagement Copied successfully.', - extra_tags='alert-success') - create_notification(event='engagement_copied', # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces - title=_('Copying of %s') % engagement.name, + "Engagement Copied successfully.", + extra_tags="alert-success") + create_notification(event="engagement_copied", # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces + title=_("Copying of %s") % engagement.name, description=f'The engagement "{engagement.name}" was copied by {request.user}', product=product, - url=request.build_absolute_uri(reverse('view_engagement', args=(engagement_copy.id, ))), + url=request.build_absolute_uri(reverse("view_engagement", args=(engagement_copy.id, ))), recipients=[engagement.lead], icon="exclamation-triangle") return redirect_to_return_url_or_else(request, reverse("view_engagements", args=(product.id, ))) @@ -404,26 +404,26 @@ def copy_engagement(request, eid): messages.add_message( request, messages.ERROR, - 'Unable to copy engagement, please try again.', - extra_tags='alert-danger') + "Unable to copy engagement, please try again.", + extra_tags="alert-danger") product_tab = Product_Tab(product, title="Copy Engagement", tab="engagements") - return render(request, 'dojo/copy_object.html', { - 'source': engagement, - 'source_label': 'Engagement', - 'destination_label': 'Product', - 'product_tab': product_tab, - 'form': form, + return render(request, "dojo/copy_object.html", { + "source": engagement, + "source_label": "Engagement", + "destination_label": "Product", + "product_tab": product_tab, + "form": form, }) class ViewEngagement(View): def get_template(self): - return 'dojo/view_eng.html' + return "dojo/view_eng.html" def get_risks_accepted(self, eng): - risks_accepted = 
eng.risk_acceptance.all().select_related('owner').annotate(accepted_findings_count=Count('accepted_findings__id')) + risks_accepted = eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id")) return risks_accepted def get_filtered_tests( @@ -438,7 +438,7 @@ def get_filtered_tests( def get(self, request, eid, *args, **kwargs): eng = get_object_or_404(Engagement, id=eid) - tests = eng.test_set.all().order_by('test_type__name', '-updated') + tests = eng.test_set.all().order_by("test_type__name", "-updated") default_page_num = 10 tests_filter = self.get_filtered_tests(request, tests, eng) paged_tests = get_page_items(request, tests_filter.qs, default_page_num) @@ -471,9 +471,9 @@ def get(self, request, eid, *args, **kwargs): form = NoteForm() creds = Cred_Mapping.objects.filter( - product=eng.product).select_related('cred_id').order_by('cred_id') + product=eng.product).select_related("cred_id").order_by("cred_id") cred_eng = Cred_Mapping.objects.filter( - engagement=eng.id).select_related('cred_id').order_by('cred_id') + engagement=eng.id).select_related("cred_id").order_by("cred_id") add_breadcrumb(parent=eng, top_level=False, request=request) @@ -484,28 +484,28 @@ def get(self, request, eid, *args, **kwargs): product_tab.setEngagement(eng) return render( request, self.get_template(), { - 'eng': eng, - 'product_tab': product_tab, - 'system_settings': system_settings, - 'tests': paged_tests, - 'filter': tests_filter, - 'check': check, - 'threat': eng.tmodel_path, - 'form': form, - 'notes': notes, - 'files': files, - 'risks_accepted': risks_accepted, - 'jissue': jissue, - 'jira_project': jira_project, - 'creds': creds, - 'cred_eng': cred_eng, - 'network': network, - 'preset_test_type': preset_test_type, + "eng": eng, + "product_tab": product_tab, + "system_settings": system_settings, + "tests": paged_tests, + "filter": tests_filter, + "check": check, + "threat": eng.tmodel_path, + "form": form, + "notes": notes, + "files": files, + "risks_accepted": risks_accepted, + "jissue": jissue, + "jira_project": jira_project, + "creds": creds, + "cred_eng": cred_eng, + "network": network, + "preset_test_type": preset_test_type, }) def post(self, request, eid, *args, **kwargs): eng = get_object_or_404(Engagement, id=eid) - tests = eng.test_set.all().order_by('test_type__name', '-updated') + tests = eng.test_set.all().order_by("test_type__name", "-updated") default_page_num = 10 @@ -537,7 +537,7 @@ def post(self, request, eid, *args, **kwargs): form = DoneForm() files = eng.files.all() user_has_permission_or_403(request.user, eng, Permissions.Note_Add) - eng.progress = 'check_list' + eng.progress = "check_list" eng.save() if note_type_activation: @@ -557,12 +557,12 @@ def post(self, request, eid, *args, **kwargs): title = f"Engagement: {eng.name} on {eng.product.name}" messages.add_message(request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") creds = Cred_Mapping.objects.filter( - product=eng.product).select_related('cred_id').order_by('cred_id') + product=eng.product).select_related("cred_id").order_by("cred_id") cred_eng = Cred_Mapping.objects.filter( - engagement=eng.id).select_related('cred_id').order_by('cred_id') + engagement=eng.id).select_related("cred_id").order_by("cred_id") add_breadcrumb(parent=eng, top_level=False, request=request) @@ -573,23 +573,23 @@ def post(self, request, eid, *args, **kwargs): product_tab.setEngagement(eng) return render( 
request, self.get_template(), { - 'eng': eng, - 'product_tab': product_tab, - 'system_settings': system_settings, - 'tests': paged_tests, - 'filter': tests_filter, - 'check': check, - 'threat': eng.tmodel_path, - 'form': form, - 'notes': notes, - 'files': files, - 'risks_accepted': risks_accepted, - 'jissue': jissue, - 'jira_project': jira_project, - 'creds': creds, - 'cred_eng': cred_eng, - 'network': network, - 'preset_test_type': preset_test_type, + "eng": eng, + "product_tab": product_tab, + "system_settings": system_settings, + "tests": paged_tests, + "filter": tests_filter, + "check": check, + "threat": eng.tmodel_path, + "form": form, + "notes": notes, + "files": files, + "risks_accepted": risks_accepted, + "jissue": jissue, + "jira_project": jira_project, + "creds": creds, + "cred_eng": cred_eng, + "network": network, + "preset_test_type": preset_test_type, }) @@ -598,40 +598,40 @@ def prefetch_for_view_tests(tests): if isinstance(tests, QuerySet): # old code can arrive here with prods being a list because the query was already executed - prefetched = prefetched.select_related('lead') - prefetched = prefetched.prefetch_related('tags', 'test_type', 'notes') - prefetched = prefetched.annotate(count_findings_test_all=Count('finding__id', distinct=True)) - prefetched = prefetched.annotate(count_findings_test_active=Count('finding__id', filter=Q(finding__active=True), distinct=True)) - prefetched = prefetched.annotate(count_findings_test_active_verified=Count('finding__id', filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True)) - prefetched = prefetched.annotate(count_findings_test_mitigated=Count('finding__id', filter=Q(finding__is_mitigated=True), distinct=True)) - prefetched = prefetched.annotate(count_findings_test_dups=Count('finding__id', filter=Q(finding__duplicate=True), distinct=True)) - prefetched = prefetched.annotate(total_reimport_count=Count('test_import__id', filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True)) + prefetched = prefetched.select_related("lead") + prefetched = prefetched.prefetch_related("tags", "test_type", "notes") + prefetched = prefetched.annotate(count_findings_test_all=Count("finding__id", distinct=True)) + prefetched = prefetched.annotate(count_findings_test_active=Count("finding__id", filter=Q(finding__active=True), distinct=True)) + prefetched = prefetched.annotate(count_findings_test_active_verified=Count("finding__id", filter=Q(finding__active=True) & Q(finding__verified=True), distinct=True)) + prefetched = prefetched.annotate(count_findings_test_mitigated=Count("finding__id", filter=Q(finding__is_mitigated=True), distinct=True)) + prefetched = prefetched.annotate(count_findings_test_dups=Count("finding__id", filter=Q(finding__duplicate=True), distinct=True)) + prefetched = prefetched.annotate(total_reimport_count=Count("test_import__id", filter=Q(test_import__type=Test_Import.REIMPORT_TYPE), distinct=True)) else: - logger.warning('unable to prefetch because query was already executed') + logger.warning("unable to prefetch because query was already executed") return prefetched -@user_is_authorized(Engagement, Permissions.Test_Add, 'eid') +@user_is_authorized(Engagement, Permissions.Test_Add, "eid") def add_tests(request, eid): eng = Engagement.objects.get(id=eid) cred_form = CredMappingForm() cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=eng).order_by('cred_id') + engagement=eng).order_by("cred_id") - if request.method == 'POST': + if request.method == "POST": form = 
TestForm(request.POST, engagement=eng) cred_form = CredMappingForm(request.POST) cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=eng).order_by('cred_id') + engagement=eng).order_by("cred_id") if form.is_valid(): new_test = form.save(commit=False) # set default scan_type as it's used in reimport new_test.scan_type = new_test.test_type.name new_test.engagement = eng try: - new_test.lead = User.objects.get(id=form['lead'].value()) + new_test.lead = User.objects.get(id=form["lead"].value()) except: new_test.lead = None @@ -644,10 +644,10 @@ def add_tests(request, eid): # Save the credential to the test if cred_form.is_valid(): - if cred_form.cleaned_data['cred_user']: + if cred_form.cleaned_data["cred_user"]: # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( - pk=cred_form.cleaned_data['cred_user'].id, + pk=cred_form.cleaned_data["cred_user"].id, engagement=eid).first() new_f = cred_form.save(commit=False) @@ -658,35 +658,35 @@ def add_tests(request, eid): messages.add_message( request, messages.SUCCESS, - 'Test added successfully.', - extra_tags='alert-success') + "Test added successfully.", + extra_tags="alert-success") notifications_helper.notify_test_created(new_test) - if '_Add Another Test' in request.POST: + if "_Add Another Test" in request.POST: return HttpResponseRedirect( - reverse('add_tests', args=(eng.id, ))) - elif '_Add Findings' in request.POST: + reverse("add_tests", args=(eng.id, ))) + elif "_Add Findings" in request.POST: return HttpResponseRedirect( - reverse('add_findings', args=(new_test.id, ))) - elif '_Finished' in request.POST: + reverse("add_findings", args=(new_test.id, ))) + elif "_Finished" in request.POST: return HttpResponseRedirect( - reverse('view_engagement', args=(eng.id, ))) + reverse("view_engagement", args=(eng.id, ))) else: form = TestForm(engagement=eng) - form.initial['target_start'] = eng.target_start - form.initial['target_end'] = eng.target_end - form.initial['lead'] = request.user + form.initial["target_start"] = eng.target_start + form.initial["target_end"] = eng.target_end + form.initial["lead"] = request.user add_breadcrumb( parent=eng, title="Add Tests", top_level=False, request=request) product_tab = Product_Tab(eng.product, title="Add Tests", tab="engagements") product_tab.setEngagement(eng) - return render(request, 'dojo/add_tests.html', { - 'product_tab': product_tab, - 'form': form, - 'cred_form': cred_form, - 'eid': eid, - 'eng': eng, + return render(request, "dojo/add_tests.html", { + "product_tab": product_tab, + "form": form, + "cred_form": cred_form, + "eid": eid, + "eng": eng, }) @@ -727,7 +727,7 @@ def get_engagement_or_product( product = get_object_or_404(Product, id=product_id) engagement_or_product = product else: - msg = 'Either Engagement or Product has to be provided' + msg = "Either Engagement or Product has to be provided" raise Exception(msg) # Ensure the supplied user has access to import to the engagement or product user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result) @@ -768,7 +768,7 @@ def get_credential_form( initial={ "cred_user_queryset": Cred_Mapping.objects.filter( engagement=engagement, - ).order_by('cred_id'), + ).order_by("cred_id"), }, ) @@ -790,12 +790,12 @@ def get_jira_form( jira_form = JIRAImportScanForm( request.POST, push_all=push_all_jira_issues, - prefix='jiraform', + prefix="jiraform", ) else: jira_form = 
JIRAImportScanForm( push_all=push_all_jira_issues, - prefix='jiraform', + prefix="jiraform", ) return jira_form, push_all_jira_issues @@ -905,7 +905,7 @@ def create_engagement( target_start=timezone.now().date(), target_end=timezone.now().date(), product=context.get("product"), - status='In Progress', + status="In Progress", version=context.get("version"), branch_tag=context.get("branch_tag"), build_id=context.get("build_id"), @@ -974,23 +974,23 @@ def process_form( self.create_engagement(context) # close_old_findings_product_scope is a modifier of close_old_findings. # If it is selected, close_old_findings should also be selected. - if close_old_findings_product_scope := form.cleaned_data.get('close_old_findings_product_scope', None): + if close_old_findings_product_scope := form.cleaned_data.get("close_old_findings_product_scope", None): context["close_old_findings_product_scope"] = close_old_findings_product_scope context["close_old_findings"] = True # Save newly added endpoints added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, context.get("engagement").product) - endpoints_from_form = list(form.cleaned_data['endpoints']) + endpoints_from_form = list(form.cleaned_data["endpoints"]) context["endpoints_to_add"] = endpoints_from_form + added_endpoints # Override the form values of active and verified - if activeChoice := form.cleaned_data.get('active', None): - if activeChoice == 'force_to_true': + if activeChoice := form.cleaned_data.get("active", None): + if activeChoice == "force_to_true": context["active"] = True - elif activeChoice == 'force_to_false': + elif activeChoice == "force_to_false": context["active"] = False - if verifiedChoice := form.cleaned_data.get('verified', None): - if verifiedChoice == 'force_to_true': + if verifiedChoice := form.cleaned_data.get("verified", None): + if verifiedChoice == "force_to_true": context["verified"] = True - elif verifiedChoice == 'force_to_false': + elif verifiedChoice == "force_to_false": context["verified"] = False return None @@ -1019,7 +1019,7 @@ def process_credentials_form( """ Process the credentials form by creating """ - if cred_user := form.cleaned_data['cred_user']: + if cred_user := form.cleaned_data["cred_user"]: # Select the credential mapping object from the selected list and only allow if the credential is associated with the product cred_user = Cred_Mapping.objects.filter( pk=cred_user.id, @@ -1111,27 +1111,27 @@ def post( return self.success_redirect(context) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def close_eng(request, eid): eng = Engagement.objects.get(id=eid) close_engagement(eng) messages.add_message( request, messages.SUCCESS, - 'Engagement closed successfully.', - extra_tags='alert-success') + "Engagement closed successfully.", + extra_tags="alert-success") return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, ))) -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def reopen_eng(request, eid): eng = Engagement.objects.get(id=eid) reopen_engagement(eng) messages.add_message( request, messages.SUCCESS, - 'Engagement reopened successfully.', - extra_tags='alert-success') + "Engagement reopened successfully.", + extra_tags="alert-success") return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, ))) @@ -1142,7 +1142,7 @@ def reopen_eng(request, eid): """ 
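# A sketch of the user_is_authorized decorator these hunks re-quote, for readers
# following the permission checks (an illustration of the conventional
# lookup-then-check shape; the real implementation in dojo.authorization may
# differ in details such as the lookup field):
import functools

from django.shortcuts import get_object_or_404

from dojo.authorization.authorization import user_has_permission_or_403


def user_is_authorized(model, permission, arg_name):
    def decorator(view):
        @functools.wraps(view)
        def wrapped(request, *args, **kwargs):
            # resolve the object from the URL kwarg named by arg_name, then
            # enforce the permission before running the wrapped view
            obj = get_object_or_404(model, pk=kwargs[arg_name])
            user_has_permission_or_403(request.user, obj, permission)
            return view(request, *args, **kwargs)
        return wrapped
    return decorator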
-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def complete_checklist(request, eid): eng = get_object_or_404(Engagement, id=eid) try: @@ -1155,7 +1155,7 @@ def complete_checklist(request, eid): title="Complete checklist", top_level=False, request=request) - if request.method == 'POST': + if request.method == "POST": tests = Test.objects.filter(engagement=eng) findings = Finding.objects.filter(test__in=tests).all() form = CheckForm(request.POST, instance=checklist, findings=findings) @@ -1173,10 +1173,10 @@ def complete_checklist(request, eid): messages.add_message( request, messages.SUCCESS, - 'Checklist saved.', - extra_tags='alert-success') + "Checklist saved.", + extra_tags="alert-success") return HttpResponseRedirect( - reverse('view_engagement', args=(eid, ))) + reverse("view_engagement", args=(eid, ))) else: tests = Test.objects.filter(engagement=eng) findings = Finding.objects.filter(test__in=tests).all() @@ -1184,15 +1184,15 @@ def complete_checklist(request, eid): product_tab = Product_Tab(eng.product, title="Checklist", tab="engagements") product_tab.setEngagement(eng) - return render(request, 'dojo/checklist.html', { - 'form': form, - 'product_tab': product_tab, - 'eid': eng.id, - 'findings': findings, + return render(request, "dojo/checklist.html", { + "form": form, + "product_tab": product_tab, + "eid": eng.id, + "findings": findings, }) -@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid') +@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid") def add_risk_acceptance(request, eid, fid=None): eng = get_object_or_404(Engagement, id=eid) finding = None @@ -1202,19 +1202,19 @@ def add_risk_acceptance(request, eid, fid=None): if not eng.product.enable_full_risk_acceptance: raise PermissionDenied - if request.method == 'POST': + if request.method == "POST": form = RiskAcceptanceForm(request.POST, request.FILES) if form.is_valid(): # first capture notes param as it cannot be saved directly as m2m notes = None - if form.cleaned_data['notes']: + if form.cleaned_data["notes"]: notes = Notes( - entry=form.cleaned_data['notes'], + entry=form.cleaned_data["notes"], author=request.user, date=timezone.now()) notes.save() - del form.cleaned_data['notes'] + del form.cleaned_data["notes"] try: # we sometimes see a weird exception here, but are unable to reproduce. 
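# The "weird exception" comment above guards a form.save() that falls outside
# this excerpt; a minimal sketch of the defensive pattern it suggests (assumed
# shape, not the verbatim source):
try:
    risk_acceptance = form.save()
except Exception:
    logger.exception("failed saving risk acceptance")  # hypothetical message
    raise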
@@ -1232,42 +1232,42 @@ def add_risk_acceptance(request, eid, fid=None): eng.risk_acceptance.add(risk_acceptance) - findings = form.cleaned_data['accepted_findings'] + findings = form.cleaned_data["accepted_findings"] risk_acceptance = ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings) messages.add_message( request, messages.SUCCESS, - 'Risk acceptance saved.', - extra_tags='alert-success') + "Risk acceptance saved.", + extra_tags="alert-success") - return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(eid, ))) + return redirect_to_return_url_or_else(request, reverse("view_engagement", args=(eid, ))) else: - risk_acceptance_title_suggestion = f'Accept: {finding}' - form = RiskAcceptanceForm(initial={'owner': request.user, 'name': risk_acceptance_title_suggestion}) + risk_acceptance_title_suggestion = f"Accept: {finding}" + form = RiskAcceptanceForm(initial={"owner": request.user, "name": risk_acceptance_title_suggestion}) - finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by('title') + finding_choices = Finding.objects.filter(duplicate=False, test__engagement=eng).filter(NOT_ACCEPTED_FINDINGS_QUERY).order_by("title") - form.fields['accepted_findings'].queryset = finding_choices + form.fields["accepted_findings"].queryset = finding_choices if fid: - form.fields['accepted_findings'].initial = {fid} + form.fields["accepted_findings"].initial = {fid} product_tab = Product_Tab(eng.product, title="Risk Acceptance", tab="engagements") product_tab.setEngagement(eng) - return render(request, 'dojo/add_risk_acceptance.html', { - 'eng': eng, - 'product_tab': product_tab, - 'form': form, + return render(request, "dojo/add_risk_acceptance.html", { + "eng": eng, + "product_tab": product_tab, + "form": form, }) -@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_View, "eid") def view_risk_acceptance(request, eid, raid): return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=False) -@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid') +@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid") def edit_risk_acceptance(request, eid, raid): return view_edit_risk_acceptance(request, eid=eid, raid=raid, edit_mode=True) @@ -1283,13 +1283,13 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): risk_acceptance_form = None errors = False - if request.method == 'POST': + if request.method == "POST": # deleting before instantiating the form otherwise django messes up and we end up with an empty path value if len(request.FILES) > 0: - logger.debug('new proof uploaded') + logger.debug("new proof uploaded") risk_acceptance.path.delete() - if 'decision' in request.POST: + if "decision" in request.POST: old_expiration_date = risk_acceptance.expiration_date risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance) errors = errors or not risk_acceptance_form.is_valid() @@ -1305,10 +1305,10 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): messages.add_message( request, messages.SUCCESS, - 'Risk Acceptance saved successfully.', - extra_tags='alert-success') + "Risk Acceptance saved successfully.", + extra_tags="alert-success") - if 'entry' in request.POST: + if "entry" in request.POST: note_form = NoteForm(request.POST) errors = errors or not note_form.is_valid() if not errors: @@ -1320,39 +1320,39 @@ def 
view_edit_risk_acceptance(request, eid, raid, edit_mode=False): messages.add_message( request, messages.SUCCESS, - 'Note added successfully.', - extra_tags='alert-success') + "Note added successfully.", + extra_tags="alert-success") - if 'delete_note' in request.POST: - note = get_object_or_404(Notes, pk=request.POST['delete_note_id']) + if "delete_note" in request.POST: + note = get_object_or_404(Notes, pk=request.POST["delete_note_id"]) if note.author.username == request.user.username: risk_acceptance.notes.remove(note) note.delete() messages.add_message( request, messages.SUCCESS, - 'Note deleted successfully.', - extra_tags='alert-success') + "Note deleted successfully.", + extra_tags="alert-success") else: messages.add_message( request, messages.ERROR, "Since you are not the note's author, it was not deleted.", - extra_tags='alert-danger') + extra_tags="alert-danger") - if 'remove_finding' in request.POST: + if "remove_finding" in request.POST: finding = get_object_or_404( - Finding, pk=request.POST['remove_finding_id']) + Finding, pk=request.POST["remove_finding_id"]) ra_helper.remove_finding_from_risk_acceptance(risk_acceptance, finding) messages.add_message( request, messages.SUCCESS, - 'Finding removed successfully from risk acceptance.', - extra_tags='alert-success') + "Finding removed successfully from risk acceptance.", + extra_tags="alert-success") - if 'replace_file' in request.POST: + if "replace_file" in request.POST: replace_form = ReplaceRiskAcceptanceProofForm( request.POST, request.FILES, instance=risk_acceptance) @@ -1363,17 +1363,17 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): messages.add_message( request, messages.SUCCESS, - 'New Proof uploaded successfully.', - extra_tags='alert-success') + "New Proof uploaded successfully.", + extra_tags="alert-success") else: logger.error(replace_form.errors) - if 'add_findings' in request.POST: + if "add_findings" in request.POST: add_findings_form = AddFindingsRiskAcceptanceForm( request.POST, request.FILES, instance=risk_acceptance) errors = errors or not add_findings_form.is_valid() if not errors: - findings = add_findings_form.cleaned_data['accepted_findings'] + findings = add_findings_form.cleaned_data["accepted_findings"] ra_helper.add_findings_to_risk_acceptance(risk_acceptance, findings) @@ -1381,12 +1381,12 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): request, messages.SUCCESS, f"Finding{'s' if len(findings) > 1 else ''} added successfully.", - extra_tags='alert-success') + extra_tags="alert-success") if not errors: - logger.debug('redirecting to return_url') + logger.debug("redirecting to return_url") return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid))) else: - logger.error('errors found') + logger.error("errors found") else: if edit_mode: @@ -1396,12 +1396,12 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): replace_form = ReplaceRiskAcceptanceProofForm(instance=risk_acceptance) add_findings_form = AddFindingsRiskAcceptanceForm(instance=risk_acceptance) - accepted_findings = risk_acceptance.accepted_findings.order_by('numerical_severity') + accepted_findings = risk_acceptance.accepted_findings.order_by("numerical_severity") fpage = get_page_items(request, accepted_findings, 15) unaccepted_findings = Finding.objects.filter(test__in=eng.test_set.all(), risk_accepted=False) \ .exclude(id__in=accepted_findings).order_by("title") - add_fpage = get_page_items(request, unaccepted_findings, 25, 'apage') + 
@@ -1414,26 +1414,26 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
     product_tab = Product_Tab(eng.product, title="Risk Acceptance", tab="engagements")
     product_tab.setEngagement(eng)
     return render(
-        request, 'dojo/view_risk_acceptance.html', {
-            'risk_acceptance': risk_acceptance,
-            'engagement': eng,
-            'product_tab': product_tab,
-            'accepted_findings': fpage,
-            'notes': risk_acceptance.notes.all(),
-            'eng': eng,
-            'edit_mode': edit_mode,
-            'risk_acceptance_form': risk_acceptance_form,
-            'note_form': note_form,
-            'replace_form': replace_form,
-            'add_findings_form': add_findings_form,
+        request, "dojo/view_risk_acceptance.html", {
+            "risk_acceptance": risk_acceptance,
+            "engagement": eng,
+            "product_tab": product_tab,
+            "accepted_findings": fpage,
+            "notes": risk_acceptance.notes.all(),
+            "eng": eng,
+            "edit_mode": edit_mode,
+            "risk_acceptance_form": risk_acceptance_form,
+            "note_form": note_form,
+            "replace_form": replace_form,
+            "add_findings_form": add_findings_form,
             # 'show_add_findings_form': len(unaccepted_findings),
-            'request': request,
-            'add_findings': add_fpage,
-            'return_url': get_return_url(request),
+            "request": request,
+            "add_findings": add_fpage,
+            "return_url": get_return_url(request),
         })

-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
 def expire_risk_acceptance(request, eid, raid):
     risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
     # Validate the engagement ID exists before moving forward
@@ -1444,7 +1444,7 @@ def expire_risk_acceptance(request, eid, raid):
     return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))

-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
 def reinstate_risk_acceptance(request, eid, raid):
     risk_acceptance = get_object_or_404(prefetch_for_expiration(Risk_Acceptance.objects.all()), pk=raid)
     eng = get_object_or_404(Engagement, pk=eid)
@@ -1457,7 +1457,7 @@ def reinstate_risk_acceptance(request, eid, raid):
     return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))

-@user_is_authorized(Engagement, Permissions.Risk_Acceptance, 'eid')
+@user_is_authorized(Engagement, Permissions.Risk_Acceptance, "eid")
 def delete_risk_acceptance(request, eid, raid):
     risk_acceptance = get_object_or_404(Risk_Acceptance, pk=raid)
     eng = get_object_or_404(Engagement, pk=eid)
@@ -1467,12 +1467,12 @@ def delete_risk_acceptance(request, eid, raid):
     messages.add_message(
         request,
         messages.SUCCESS,
-        'Risk acceptance deleted successfully.',
-        extra_tags='alert-success')
+        "Risk acceptance deleted successfully.",
+        extra_tags="alert-success")
     return HttpResponseRedirect(reverse("view_engagement", args=(eng.id, )))

-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
 def download_risk_acceptance(request, eid, raid):
     import mimetypes
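Every view in this hunk is gated by user_is_authorized, which resolves the object named by the id keyword (here "eid") and enforces the requested permission before the view body runs. A simplified sketch of that decorator shape; DefectDojo's real implementation lives in dojo.authorization and uses its own permission engine rather than has_perm:

    import functools

    from django.core.exceptions import PermissionDenied
    from django.shortcuts import get_object_or_404

    def object_permission_required(model, permission, pk_kwarg):
        def decorator(view):
            @functools.wraps(view)
            def wrapper(request, *args, **kwargs):
                obj = get_object_or_404(model, pk=kwargs[pk_kwarg])
                if not request.user.has_perm(permission, obj):  # object-level check
                    raise PermissionDenied
                return view(request, *args, **kwargs)
            return wrapper
        return decorator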
filename="{risk_acceptance.filename()}"' + open(settings.MEDIA_ROOT + "/" + risk_acceptance.path.name, mode="rb"))) + response["Content-Disposition"] = f'attachment; filename="{risk_acceptance.filename()}"' mimetype, _encoding = mimetypes.guess_type(risk_acceptance.path.name) - response['Content-Type'] = mimetype + response["Content-Type"] = mimetype return response @@ -1497,7 +1497,7 @@ def download_risk_acceptance(request, eid, raid): """ -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def upload_threatmodel(request, eid): eng = Engagement.objects.get(id=eid) add_breadcrumb( @@ -1506,38 +1506,38 @@ def upload_threatmodel(request, eid): top_level=False, request=request) - if request.method == 'POST': + if request.method == "POST": form = UploadThreatForm(request.POST, request.FILES) if form.is_valid(): - handle_uploaded_threat(request.FILES['file'], eng) - eng.progress = 'other' + handle_uploaded_threat(request.FILES["file"], eng) + eng.progress = "other" eng.threat_model = True eng.save() messages.add_message( request, messages.SUCCESS, - 'Threat model saved.', - extra_tags='alert-success') + "Threat model saved.", + extra_tags="alert-success") return HttpResponseRedirect( - reverse('view_engagement', args=(eid, ))) + reverse("view_engagement", args=(eid, ))) else: form = UploadThreatForm() product_tab = Product_Tab(eng.product, title="Upload Threat Model", tab="engagements") - return render(request, 'dojo/up_threat.html', { - 'form': form, - 'product_tab': product_tab, - 'eng': eng, + return render(request, "dojo/up_threat.html", { + "form": form, + "product_tab": product_tab, + "eng": eng, }) -@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_View, "eid") def view_threatmodel(request, eid): eng = get_object_or_404(Engagement, pk=eid) - response = FileResponse(open(eng.tmodel_path, 'rb')) + response = FileResponse(open(eng.tmodel_path, "rb")) return response -@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_View, "eid") def engagement_ics(request, eid): eng = get_object_or_404(Engagement, id=eid) start_date = datetime.combine(eng.target_start, datetime.min.time()) @@ -1555,8 +1555,8 @@ def engagement_ics(request, eid): ) output = cal.serialize() response = HttpResponse(content=output) - response['Content-Type'] = 'text/calendar' - response['Content-Disposition'] = f'attachment; filename={eng.name}.ics' + response["Content-Type"] = "text/calendar" + response["Content-Disposition"] = f"attachment; filename={eng.name}.ics" return response @@ -1569,26 +1569,26 @@ def get_list_index(list, index): def get_engagements(request): - url = request.META.get('QUERY_STRING') + url = request.META.get("QUERY_STRING") if not url: - msg = 'Please use the export button when exporting engagements' + msg = "Please use the export button when exporting engagements" raise ValidationError(msg) else: - if url.startswith('url='): + if url.startswith("url="): url = url[4:] - path_items = list(filter(None, re.split(r'/|\?', url))) + path_items = list(filter(None, re.split(r"/|\?", url))) - if not path_items or path_items[0] != 'engagement': - msg = 'URL is not an engagement view' + if not path_items or path_items[0] != "engagement": + msg = "URL is not an engagement view" raise ValidationError(msg) view = query = None - if get_list_index(path_items, 1) in ['active', 'all']: + if 
@@ -1569,26 +1569,26 @@ def get_list_index(list, index):
 def get_engagements(request):
-    url = request.META.get('QUERY_STRING')
+    url = request.META.get("QUERY_STRING")
     if not url:
-        msg = 'Please use the export button when exporting engagements'
+        msg = "Please use the export button when exporting engagements"
         raise ValidationError(msg)
     else:
-        if url.startswith('url='):
+        if url.startswith("url="):
             url = url[4:]
-    path_items = list(filter(None, re.split(r'/|\?', url)))
+    path_items = list(filter(None, re.split(r"/|\?", url)))
-    if not path_items or path_items[0] != 'engagement':
-        msg = 'URL is not an engagement view'
+    if not path_items or path_items[0] != "engagement":
+        msg = "URL is not an engagement view"
         raise ValidationError(msg)
     view = query = None
-    if get_list_index(path_items, 1) in ['active', 'all']:
+    if get_list_index(path_items, 1) in ["active", "all"]:
         view = get_list_index(path_items, 1)
         query = get_list_index(path_items, 2)
     else:
-        view = 'active'
+        view = "active"
         query = get_list_index(path_items, 1)
     request.GET = QueryDict(query)
@@ -1599,19 +1599,19 @@ def get_engagements(request):
 def get_excludes():
-    return ['is_ci_cd', 'jira_issue', 'jira_project', 'objects', 'unaccepted_open_findings']
+    return ["is_ci_cd", "jira_issue", "jira_project", "objects", "unaccepted_open_findings"]

 def get_foreign_keys():
-    return ['build_server', 'lead', 'orchestration_engine', 'preset', 'product',
-            'report_type', 'requester', 'source_code_management_server']
+    return ["build_server", "lead", "orchestration_engine", "preset", "product",
+            "report_type", "requester", "source_code_management_server"]

 def csv_export(request):
     engagements, test_counts = get_engagements(request)
-    response = HttpResponse(content_type='text/csv')
-    response['Content-Disposition'] = 'attachment; filename=engagements.csv'
+    response = HttpResponse(content_type="text/csv")
+    response["Content-Disposition"] = "attachment; filename=engagements.csv"
     writer = csv.writer(response)
@@ -1620,9 +1620,9 @@ def csv_export(request):
         if first_row:
             fields = []
             for key in dir(engagement):
-                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
                     fields.append(key)
-            fields.append('tests')
+            fields.append("tests")
             writer.writerow(fields)
@@ -1630,12 +1630,12 @@ def csv_export(request):
         if not first_row:
             fields = []
             for key in dir(engagement):
-                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
                     value = engagement.__dict__.get(key)
                     if key in get_foreign_keys() and getattr(engagement, key):
                         value = str(getattr(engagement, key))
                     if value and isinstance(value, str):
-                        value = value.replace('\n', ' NEWLINE ').replace('\r', '')
+                        value = value.replace("\n", " NEWLINE ").replace("\r", "")
                     fields.append(value)
             fields.append(test_counts.get(engagement.id, 0))
@@ -1650,7 +1650,7 @@ def excel_export(request):
     workbook = Workbook()
     workbook.iso_dates = True
     worksheet = workbook.active
-    worksheet.title = 'Engagements'
+    worksheet.title = "Engagements"
     font_bold = Font(bold=True)
@@ -1659,17 +1659,17 @@ def excel_export(request):
         if row_num == 1:
             col_num = 1
             for key in dir(engagement):
-                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
                     cell = worksheet.cell(row=row_num, column=col_num, value=key)
                     cell.font = font_bold
                     col_num += 1
-            cell = worksheet.cell(row=row_num, column=col_num, value='tests')
+            cell = worksheet.cell(row=row_num, column=col_num, value="tests")
             cell.font = font_bold
             row_num = 2
         if row_num > 1:
             col_num = 1
             for key in dir(engagement):
-                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith('_'):
+                if key not in get_excludes() and not callable(getattr(engagement, key)) and not key.startswith("_"):
                     value = engagement.__dict__.get(key)
                     if key in get_foreign_keys() and getattr(engagement, key):
                         value = str(getattr(engagement, key))
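Both exporters derive their columns by reflecting over the engagement instance: any attribute from dir() that is not excluded, not callable, and not underscore-prefixed becomes a column, with foreign keys stringified and newlines flattened so CSV rows stay on one line. The core loop in isolation (function name is illustrative):

    def export_row(instance, excludes=(), foreign_keys=()):
        fields = []
        for key in dir(instance):
            if key in excludes or key.startswith("_") or callable(getattr(instance, key)):
                continue
            value = instance.__dict__.get(key)
            if key in foreign_keys and getattr(instance, key):
                value = str(getattr(instance, key))  # render related objects by name
            if value and isinstance(value, str):
                value = value.replace("\n", " NEWLINE ").replace("\r", "")
            fields.append(value)
        return fields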
@@ -1687,7 +1687,7 @@ def excel_export(request):
     response = HttpResponse(
         content=stream,
-        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
+        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
     )
-    response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
+    response["Content-Disposition"] = "attachment; filename=engagements.xlsx"
     return response
diff --git a/dojo/filters.py b/dojo/filters.py
index 6d124d67f0..6c3da70e2b 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -94,30 +94,30 @@
 logger = logging.getLogger(__name__)

-local_tz = pytz.timezone(get_system_setting('time_zone'))
+local_tz = pytz.timezone(get_system_setting("time_zone"))

-BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'))
+BOOLEAN_CHOICES = (("false", "No"), ("true", "Yes"))
 EARLIEST_FINDING = None

 def custom_filter(queryset, name, value):
-    values = value.split(',')
-    filter = (f'{name}__in')
+    values = value.split(",")
+    filter = (f"{name}__in")
     return queryset.filter(Q(**{filter: values}))

 def custom_vulnerability_id_filter(queryset, name, value):
-    values = value.split(',')
+    values = value.split(",")
     ids = Vulnerability_Id.objects \
         .filter(vulnerability_id__in=values) \
-        .values_list('finding_id', flat=True)
+        .values_list("finding_id", flat=True)
     return queryset.filter(id__in=ids)

 def vulnerability_id_filter(queryset, name, value):
     ids = Vulnerability_Id.objects \
         .filter(vulnerability_id=value) \
-        .values_list('finding_id', flat=True)
+        .values_list("finding_id", flat=True)
     return queryset.filter(id__in=ids)
@@ -163,19 +163,19 @@ def under_review(self, qs, name):
         return qs.filter(UNDER_REVIEW_QUERY)

     options = {
-        None: (_('Any'), any),
-        0: (_('Open'), open),
-        1: (_('Verified'), verified),
-        2: (_('Out Of Scope'), out_of_scope),
-        3: (_('False Positive'), false_positive),
-        4: (_('Inactive'), inactive),
-        5: (_('Risk Accepted'), risk_accepted),
-        6: (_('Closed'), closed),
-        7: (_('Under Review'), under_review),
+        None: (_("Any"), any),
+        0: (_("Open"), open),
+        1: (_("Verified"), verified),
+        2: (_("Out Of Scope"), out_of_scope),
+        3: (_("False Positive"), false_positive),
+        4: (_("Inactive"), inactive),
+        5: (_("Risk Accepted"), risk_accepted),
+        6: (_("Closed"), closed),
+        7: (_("Under Review"), under_review),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -217,13 +217,13 @@ def sla_violated(self, qs, name):
         )

     options = {
-        None: (_('Any'), any),
-        0: (_('False'), sla_satisfied),
-        1: (_('True'), sla_violated),
+        None: (_("Any"), any),
+        0: (_("False"), sla_satisfied),
+        1: (_("True"), sla_violated),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -252,13 +252,13 @@ def sla_violated(self, qs, name):
         return qs

     options = {
-        None: (_('Any'), any),
-        0: (_('False'), sla_satisifed),
-        1: (_('True'), sla_violated),
+        None: (_("Any"), any),
+        0: (_("False"), sla_satisifed),
+        1: (_("True"), sla_violated),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -275,7 +275,7 @@ def get_earliest_finding(queryset=None):
         queryset = Finding.objects.all()
     try:
-        EARLIEST_FINDING = queryset.earliest('date')
+        EARLIEST_FINDING = queryset.earliest("date")
     except (Finding.DoesNotExist, Endpoint_Status.DoesNotExist):
         EARLIEST_FINDING = None
     return EARLIEST_FINDING
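The status and SLA filters above share one pattern: a class-level options dict maps each choice key to a (label, callable) pair, and __init__ exposes the labels as the widget's choices. A condensed sketch, assuming django-filter; the filter() body is inferred from the pattern, since it is not shown in these hunks:

    from django_filters import ChoiceFilter

    class OpenClosedFilter(ChoiceFilter):
        options = {
            None: ("Any", lambda qs, name: qs.all()),
            0: ("Open", lambda qs, name: qs.filter(**{name: True})),
            1: ("Closed", lambda qs, name: qs.filter(**{name: False})),
        }

        def __init__(self, *args, **kwargs):
            kwargs["choices"] = [(key, value[0]) for key, value in self.options.items()]
            super().__init__(*args, **kwargs)

        def filter(self, qs, value):
            try:
                value = int(value)
            except (TypeError, ValueError):
                value = None
            return self.options[value][1](qs, self.field_name)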
@@ -284,7 +284,7 @@ def cwe_options(queryset):
     cwe = {}
     cwe = dict([cwe, cwe]
-               for cwe in queryset.order_by().values_list('cwe', flat=True).distinct()
+               for cwe in queryset.order_by().values_list("cwe", flat=True).distinct()
                if isinstance(cwe, int) and cwe is not None and cwe > 0)
     cwe = collections.OrderedDict(sorted(cwe.items()))
     return list(cwe.items())
@@ -294,10 +294,10 @@ class DojoFilter(FilterSet):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

-        for field in ['tags', 'test__tags', 'test__engagement__tags', 'test__engagement__product__tags',
-                      'not_tags', 'not_test__tags', 'not_test__engagement__tags', 'not_test__engagement__product__tags']:
+        for field in ["tags", "test__tags", "test__engagement__tags", "test__engagement__product__tags",
+                      "not_tags", "not_test__tags", "not_test__engagement__tags", "not_test__engagement__product__tags"]:
             if field in self.form.fields:
-                tags_filter = self.filters['tags']
+                tags_filter = self.filters["tags"]
                 model = tags_filter.model

                 self.form.fields[field] = model._meta.get_field("tags").formfield()
@@ -305,34 +305,34 @@ def __init__(self, *args, **kwargs):
                 # and form.js would then apply select2 multiple times, resulting in duplicated fields
                 # the initialization now happens in filter_js_snippet.html
                 self.form.fields[field].widget.tag_options = \
-                    self.form.fields[field].widget.tag_options + tagulous.models.options.TagOptions(autocomplete_settings={'width': '200px', 'defer': True})
+                    self.form.fields[field].widget.tag_options + tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True})
                 tagged_model, exclude = get_tags_model_from_field_name(field)
                 if tagged_model:  # only if not the normal tags field
                     self.form.fields[field].label = get_tags_label_from_model(tagged_model)
-                    self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by('name')
+                    self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by("name")
                 if exclude:
-                    self.form.fields[field].label = 'Not ' + self.form.fields[field].label
+                    self.form.fields[field].label = "Not " + self.form.fields[field].label

 def get_tags_model_from_field_name(field):
     exclude = False
-    if field.startswith('not_'):
-        field = field.replace('not_', '')
+    if field.startswith("not_"):
+        field = field.replace("not_", "")
         exclude = True
     try:
-        parts = field.split('__')
+        parts = field.split("__")
         model_name = parts[-2]
-        return apps.get_model(f'dojo.{model_name}', require_ready=True), exclude
+        return apps.get_model(f"dojo.{model_name}", require_ready=True), exclude
     except Exception:
         return None, exclude

 def get_tags_label_from_model(model):
     if model:
-        return f'Tags ({model.__name__.title()})'
+        return f"Tags ({model.__name__.title()})"
     else:
-        return 'Tags (Unknown)'
+        return "Tags (Unknown)"

 def get_finding_filterset_fields(metrics=False, similar=False, filter_string_matching=False):
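get_tags_model_from_field_name walks a double-underscore filter path backwards: the path segment before the trailing tags names the tagged model, which is then resolved through Django's app registry. Two worked examples of the lookup it performs (the registry lookup is case-insensitive):

    from django.apps import apps

    # "test__engagement__tags"              -> parts[-2] == "engagement" -> (Engagement, exclude=False)
    # "not_test__engagement__product__tags" -> parts[-2] == "product"    -> (Product, exclude=True)
    model = apps.get_model("dojo.engagement", require_ready=True)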
@@ -340,106 +340,106 @@ def get_finding_filterset_fields(metrics=False, similar=False, filter_string_matching=False):
     if similar:
         fields.extend([
-            'id',
-            'hash_code',
+            "id",
+            "hash_code",
         ])

-    fields.extend(['title', 'component_name', 'component_version'])
+    fields.extend(["title", "component_name", "component_version"])

     if metrics:
         fields.extend([
-            'start_date',
-            'end_date',
+            "start_date",
+            "end_date",
         ])

     fields.extend([
-        'date',
-        'cwe',
-        'severity',
-        'last_reviewed',
-        'last_status_update',
-        'mitigated',
-        'reporter',
-        'reviewers',
+        "date",
+        "cwe",
+        "severity",
+        "last_reviewed",
+        "last_status_update",
+        "mitigated",
+        "reporter",
+        "reviewers",
     ])

     if filter_string_matching:
         fields.extend([
-            'reporter',
-            'reviewers',
-            'test__engagement__product__prod_type__name',
-            'test__engagement__product__name',
-            'test__engagement__name',
-            'test__title',
+            "reporter",
+            "reviewers",
+            "test__engagement__product__prod_type__name",
+            "test__engagement__product__name",
+            "test__engagement__name",
+            "test__title",
         ])
     else:
         fields.extend([
-            'reporter',
-            'reviewers',
-            'test__engagement__product__prod_type',
-            'test__engagement__product',
-            'test__engagement',
-            'test',
+            "reporter",
+            "reviewers",
+            "test__engagement__product__prod_type",
+            "test__engagement__product",
+            "test__engagement",
+            "test",
         ])

     fields.extend([
-        'test__test_type',
-        'test__engagement__version',
-        'test__version',
-        'endpoints',
-        'status',
-        'active',
-        'verified',
-        'duplicate',
-        'is_mitigated',
-        'out_of_scope',
-        'false_p',
-        'has_component',
-        'has_notes',
-        'file_path',
-        'unique_id_from_tool',
-        'vuln_id_from_tool',
-        'service',
-        'epss_score',
-        'epss_score_range',
-        'epss_percentile',
-        'epss_percentile_range',
+        "test__test_type",
+        "test__engagement__version",
+        "test__version",
+        "endpoints",
+        "status",
+        "active",
+        "verified",
+        "duplicate",
+        "is_mitigated",
+        "out_of_scope",
+        "false_p",
+        "has_component",
+        "has_notes",
+        "file_path",
+        "unique_id_from_tool",
+        "vuln_id_from_tool",
+        "service",
+        "epss_score",
+        "epss_score_range",
+        "epss_percentile",
+        "epss_percentile_range",
     ])

     if similar:
         fields.extend([
-            'id',
+            "id",
         ])

     fields.extend([
-        'param',
-        'payload',
-        'risk_acceptance',
+        "param",
+        "payload",
+        "risk_acceptance",
    ])

-    if get_system_setting('enable_jira'):
+    if get_system_setting("enable_jira"):
         fields.extend([
-            'has_jira_issue',
-            'jira_creation',
-            'jira_change',
-            'jira_issue__jira_key',
+            "has_jira_issue",
+            "jira_creation",
+            "jira_change",
+            "jira_issue__jira_key",
         ])

     if is_finding_groups_enabled():
         if filter_string_matching:
             fields.extend([
-                'has_finding_group',
-                'finding_group__name',
+                "has_finding_group",
+                "finding_group__name",
             ])
         else:
             fields.extend([
-                'has_finding_group',
-                'finding_group',
+                "has_finding_group",
+                "finding_group",
             ])

-        if get_system_setting('enable_jira'):
+        if get_system_setting("enable_jira"):
             fields.extend([
-                'has_jira_group_issue',
+                "has_jira_group_issue",
             ])

     return fields
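The field list assembled by get_finding_filterset_fields feeds a FilterSet's Meta, so JIRA- and finding-group-specific filters only appear when those features are enabled at class-definition time. Roughly how such a list is consumed (a sketch; the concrete FilterSet wiring lives elsewhere in this module):

    from django_filters import FilterSet

    from dojo.models import Finding

    class FindingFilterSketch(FilterSet):
        class Meta:
            model = Finding
            # computed once, when the class is defined
            fields = get_finding_filterset_fields(metrics=False, similar=False)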
@@ -605,39 +605,39 @@ def __init__(self, *args, **kwargs):
 class DateRangeFilter(ChoiceFilter):
     options = {
-        None: (_('Any date'), lambda qs, name: qs.all()),
-        1: (_('Today'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
-            f'{name}__month': now().month,
-            f'{name}__day': now().day,
+        None: (_("Any date"), lambda qs, name: qs.all()),
+        1: (_("Today"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
+            f"{name}__month": now().month,
+            f"{name}__day": now().day,
         })),
-        2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=7)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        2: (_("Past 7 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=7)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        3: (_('Past 30 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=30)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        3: (_("Past 30 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=30)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        4: (_('Past 90 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=90)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        4: (_("Past 90 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=90)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        5: (_('Current month'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
-            f'{name}__month': now().month,
+        5: (_("Current month"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
+            f"{name}__month": now().month,
         })),
-        6: (_('Current year'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
+        6: (_("Current year"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
         })),
-        7: (_('Past year'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=365)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        7: (_("Past year"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=365)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
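Each DateRangeFilter option pairs a label with a lambda that builds its lookup kwargs from the filtered field's name, and _truncate snaps datetimes to midnight so "Past 7 days" covers whole days up to and including today. One option spelled out on its own, with a local _truncate standing in for the module's helper:

    from datetime import timedelta

    from django.utils.timezone import now

    def _truncate(dt):
        return dt.replace(hour=0, minute=0, second=0, microsecond=0)

    def past_seven_days(qs, name):
        return qs.filter(**{
            f"{name}__gte": _truncate(now() - timedelta(days=7)),
            f"{name}__lt": _truncate(now() + timedelta(days=1)),
        })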
@@ -651,55 +651,55 @@ def filter(self, qs, value):
 class DateRangeOmniFilter(ChoiceFilter):
     options = {
-        None: (_('Any date'), lambda qs, name: qs.all()),
-        1: (_('Today'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
-            f'{name}__month': now().month,
-            f'{name}__day': now().day,
+        None: (_("Any date"), lambda qs, name: qs.all()),
+        1: (_("Today"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
+            f"{name}__month": now().month,
+            f"{name}__day": now().day,
         })),
-        2: (_('Next 7 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() + timedelta(days=1)),
-            f'{name}__lt': _truncate(now() + timedelta(days=7)),
+        2: (_("Next 7 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() + timedelta(days=1)),
+            f"{name}__lt": _truncate(now() + timedelta(days=7)),
         })),
-        3: (_('Next 30 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() + timedelta(days=1)),
-            f'{name}__lt': _truncate(now() + timedelta(days=30)),
+        3: (_("Next 30 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() + timedelta(days=1)),
+            f"{name}__lt": _truncate(now() + timedelta(days=30)),
         })),
-        4: (_('Next 90 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() + timedelta(days=1)),
-            f'{name}__lt': _truncate(now() + timedelta(days=90)),
+        4: (_("Next 90 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() + timedelta(days=1)),
+            f"{name}__lt": _truncate(now() + timedelta(days=90)),
         })),
-        5: (_('Past 7 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=7)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        5: (_("Past 7 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=7)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        6: (_('Past 30 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=30)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        6: (_("Past 30 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=30)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        7: (_('Past 90 days'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=90)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        7: (_("Past 90 days"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=90)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        8: (_('Current month'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
-            f'{name}__month': now().month,
+        8: (_("Current month"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
+            f"{name}__month": now().month,
         })),
-        9: (_('Past year'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() - timedelta(days=365)),
-            f'{name}__lt': _truncate(now() + timedelta(days=1)),
+        9: (_("Past year"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() - timedelta(days=365)),
+            f"{name}__lt": _truncate(now() + timedelta(days=1)),
         })),
-        10: (_('Current year'), lambda qs, name: qs.filter(**{
-            f'{name}__year': now().year,
+        10: (_("Current year"), lambda qs, name: qs.filter(**{
+            f"{name}__year": now().year,
         })),
-        11: (_('Next year'), lambda qs, name: qs.filter(**{
-            f'{name}__gte': _truncate(now() + timedelta(days=1)),
-            f'{name}__lt': _truncate(now() + timedelta(days=365)),
+        11: (_("Next year"), lambda qs, name: qs.filter(**{
+            f"{name}__gte": _truncate(now() + timedelta(days=1)),
+            f"{name}__lt": _truncate(now() + timedelta(days=365)),
         })),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -713,17 +713,17 @@ def filter(self, qs, value):
 class ReportBooleanFilter(ChoiceFilter):
     options = {
-        None: (_('Either'), lambda qs, name: qs.all()),
-        1: (_('Yes'), lambda qs, name: qs.filter(**{
-            f'{name}': True,
+        None: (_("Either"), lambda qs, name: qs.all()),
+        1: (_("Yes"), lambda qs, name: qs.filter(**{
+            f"{name}": True,
         })),
-        2: (_('No'), lambda qs, name: qs.filter(**{
-            f'{name}': False,
+        2: (_("No"), lambda qs, name: qs.filter(**{
+            f"{name}": False,
         })),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -751,14 +751,14 @@ def was_accepted(self, qs, name):
         return qs.filter(WAS_ACCEPTED_FINDINGS_QUERY)

     options = {
-        None: (_('Either'), any),
-        1: (_('Yes'), accepted),
-        2: (_('No'), not_accepted),
-        3: (_('Expired'), was_accepted),
+        None: (_("Either"), any),
+        1: (_("Yes"), accepted),
+        2: (_("No"), not_accepted),
+        3: (_("Expired"), was_accepted),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -786,8 +786,8 @@ def current_month(self, qs, name):
             datetime(now().year, now().month, 1, 0, 0, 0))
         self.end_date = now()
         return qs.filter(**{
-            f'{name}__year': self.start_date.year,
-            f'{name}__month': self.start_date.month,
+            f"{name}__year": self.start_date.year,
+            f"{name}__month": self.start_date.month,
         })

     def current_year(self, qs, name):
@@ -795,15 +795,15 @@ def current_year(self, qs, name):
             datetime(now().year, 1, 1, 0, 0, 0))
         self.end_date = now()
         return qs.filter(**{
-            f'{name}__year': now().year,
+            f"{name}__year": now().year,
         })

     def past_x_days(self, qs, name, days):
         self.start_date = _truncate(now() - timedelta(days=days))
         self.end_date = _truncate(now() + timedelta(days=1))
         return qs.filter(**{
-            f'{name}__gte': self.start_date,
-            f'{name}__lt': self.end_date,
+            f"{name}__gte": self.start_date,
+            f"{name}__lt": self.end_date,
         })

     def past_seven_days(self, qs, name):
@@ -822,18 +822,18 @@ def past_year(self, qs, name):
         return self.past_x_days(qs, name, 365)

     options = {
-        None: (_('Past 30 days'), past_thirty_days),
-        1: (_('Past 7 days'), past_seven_days),
-        2: (_('Past 90 days'), past_ninety_days),
-        3: (_('Current month'), current_month),
-        4: (_('Current year'), current_year),
-        5: (_('Past 6 Months'), past_six_months),
-        6: (_('Past year'), past_year),
-        7: (_('Any date'), any),
+        None: (_("Past 30 days"), past_thirty_days),
+        1: (_("Past 7 days"), past_seven_days),
+        2: (_("Past 90 days"), past_ninety_days),
+        3: (_("Current month"), current_month),
+        4: (_("Current year"), current_year),
+        5: (_("Past 6 Months"), past_six_months),
+        6: (_("Past year"), past_year),
+        7: (_("Any date"), any),
     }

     def __init__(self, *args, **kwargs):
-        kwargs['choices'] = [
+        kwargs["choices"] = [
             (key, value[0]) for key, value in six.iteritems(self.options)]
         super().__init__(*args, **kwargs)
@@ -855,23 +855,23 @@ def filter(self, qs, value):
 class ProductComponentFilter(DojoFilter):
-    component_name = CharFilter(lookup_expr='icontains', label="Module Name")
-    component_version = CharFilter(lookup_expr='icontains', label="Module Version")
+    component_name = CharFilter(lookup_expr="icontains", label="Module Name")
+    component_version = CharFilter(lookup_expr="icontains", label="Module Version")

     o = OrderingFilter(
         fields=(
-            ('component_name', 'component_name'),
-            ('component_version', 'component_version'),
-            ('active', 'active'),
-            ('duplicate', 'duplicate'),
-            ('total', 'total'),
+            ("component_name", "component_name"),
+            ("component_version", "component_version"),
+            ("active", "active"),
+            ("duplicate", "duplicate"),
+            ("total", "total"),
         ),
         field_labels={
-            'component_name': 'Component Name',
-            'component_version': 'Component Version',
-            'active': 'Active',
-            'duplicate': 'Duplicate',
-            'total': 'Total',
+            "component_name": "Component Name",
+            "component_version": "Component Version",
+            "active": "Active",
+            "duplicate": "Duplicate",
+            "total": "Total",
         },
     )
@@ -910,9 +910,9 @@ class ComponentFilter(ProductComponentFilter):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.form.fields[
-            'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+            "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
         self.form.fields[
-            'test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View)
+            "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View)

 class EngagementDirectFilterHelper(FilterSet):
@@ -1049,26 +1049,26 @@ class EngagementFilter(EngagementFilterHelper, DojoFilter):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.form.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
-        self.form.fields['engagement__lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \
+        self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
+        self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \
             .filter(engagement__lead__isnull=False).distinct()

     class Meta:
         model = Product
-        fields = ['name', 'prod_type']
+        fields = ["name", "prod_type"]
"prod_type"] class ProductEngagementsFilter(DojoFilter): - engagement__name = CharFilter(field_name='name', lookup_expr='icontains', label='Engagement name contains') - engagement__lead = ModelChoiceFilter(field_name='lead', queryset=Dojo_User.objects.none(), label="Lead") - engagement__version = CharFilter(field_name='version', lookup_expr='icontains', label='Engagement version') - engagement__test__version = CharFilter(field_name='test__version', lookup_expr='icontains', label='Test version') - engagement__status = MultipleChoiceFilter(field_name='status', choices=ENGAGEMENT_STATUS_CHOICES, + engagement__name = CharFilter(field_name="name", lookup_expr="icontains", label="Engagement name contains") + engagement__lead = ModelChoiceFilter(field_name="lead", queryset=Dojo_User.objects.none(), label="Lead") + engagement__version = CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version") + engagement__test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") + engagement__status = MultipleChoiceFilter(field_name="status", choices=ENGAGEMENT_STATUS_CHOICES, label="Status") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.form.fields['engagement__lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \ + self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ .filter(engagement__lead__isnull=False).distinct() class Meta: @@ -1108,36 +1108,36 @@ class EngagementFilterWithoutObjectLookups(EngagementFilterHelper): class Meta: model = Product - fields = ['name'] + fields = ["name"] class ProductEngagementFilterHelper(FilterSet): - version = CharFilter(lookup_expr='icontains', label='Engagement version') - test__version = CharFilter(field_name='test__version', lookup_expr='icontains', label='Test version') - name = CharFilter(lookup_expr='icontains') + version = CharFilter(lookup_expr="icontains", label="Engagement version") + test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") + name = CharFilter(lookup_expr="icontains") status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status") target_start = DateRangeFilter() target_end = DateRangeFilter() - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), - ('version', 'version'), - ('target_start', 'target_start'), - ('target_end', 'target_end'), - ('status', 'status'), - ('lead', 'lead'), + ("name", "name"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("status", "status"), + ("lead", "lead"), ), field_labels={ - 'name': 'Engagement Name', + "name": "Engagement Name", }, ) class Meta: model = Product - fields = ['name'] + fields = ["name"] class ProductEngagementFilter(ProductEngagementFilterHelper, DojoFilter): @@ -1172,87 +1172,87 @@ class ProductEngagementFilterWithoutObjectLookups(ProductEngagementFilterHelper, class ApiEngagementFilter(DojoFilter): - product__prod_type = NumberInFilter(field_name='product__prod_type', 
@@ -1172,87 +1172,87 @@ class ProductEngagementFilterWithoutObjectLookups(ProductEngagementFilterHelper,

 class ApiEngagementFilter(DojoFilter):
-    product__prod_type = NumberInFilter(field_name='product__prod_type', lookup_expr='in')
-    tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
-    tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
-                             help_text='Comma separated list of exact tags')
-    product__tags = CharFieldInFilter(field_name='product__tags__name',
-                                      lookup_expr='in',
-                                      help_text='Comma separated list of exact tags present on product')
-
-    not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
-    not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
-                                 help_text='Comma separated list of exact tags not present on model', exclude='True')
-    not_product__tags = CharFieldInFilter(field_name='product__tags__name',
-                                          lookup_expr='in',
-                                          help_text='Comma separated list of exact tags not present on product',
-                                          exclude='True')
-    has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+    product__prod_type = NumberInFilter(field_name="product__prod_type", lookup_expr="in")
+    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+    tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+                             help_text="Comma separated list of exact tags")
+    product__tags = CharFieldInFilter(field_name="product__tags__name",
+                                      lookup_expr="in",
+                                      help_text="Comma separated list of exact tags present on product")
+
+    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+    not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+                                 help_text="Comma separated list of exact tags not present on model", exclude="True")
+    not_product__tags = CharFieldInFilter(field_name="product__tags__name",
+                                          lookup_expr="in",
+                                          help_text="Comma separated list of exact tags not present on product",
+                                          exclude="True")
+    has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")

     o = OrderingFilter(
         # tuple-mapping retains order
         fields=(
-            ('name', 'name'),
-            ('version', 'version'),
-            ('target_start', 'target_start'),
-            ('target_end', 'target_end'),
-            ('status', 'status'),
-            ('lead', 'lead'),
-            ('created', 'created'),
-            ('updated', 'updated'),
+            ("name", "name"),
+            ("version", "version"),
+            ("target_start", "target_start"),
+            ("target_end", "target_end"),
+            ("status", "status"),
+            ("lead", "lead"),
+            ("created", "created"),
+            ("updated", "updated"),
         ),
         field_labels={
-            'name': 'Engagement Name',
+            "name": "Engagement Name",
         },
     )

     class Meta:
         model = Engagement
-        fields = ['id', 'active', 'target_start',
-                  'target_end', 'requester', 'report_type',
-                  'updated', 'threat_model', 'api_test',
-                  'pen_test', 'status', 'product', 'name', 'version', 'tags']
+        fields = ["id", "active", "target_start",
+                  "target_end", "requester", "report_type",
+                  "updated", "threat_model", "api_test",
+                  "pen_test", "status", "product", "name", "version", "tags"]
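NumberInFilter and CharFieldInFilter accept a comma-separated query value and fan it out into an __in lookup. django-filter builds such filters by mixing BaseInFilter into a scalar filter, which matches how this module declares them:

    from django_filters import BaseInFilter, CharFilter, NumberFilter

    class NumberInFilter(BaseInFilter, NumberFilter):
        """?prod_type=1,2 becomes a prod_type__in=[1, 2] lookup."""

    class CharFieldInFilter(BaseInFilter, CharFilter):
        """?tags=red,blue becomes an __in lookup on the related tag name."""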
 class ProductFilterHelper(FilterSet):
-    name = CharFilter(lookup_expr='icontains', label="Product Name")
-    name_exact = CharFilter(field_name='name', lookup_expr='iexact', label="Exact Product Name")
+    name = CharFilter(lookup_expr="icontains", label="Product Name")
+    name_exact = CharFilter(field_name="name", lookup_expr="iexact", label="Exact Product Name")
     business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES, null_label="Empty")
     platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES, null_label="Empty")
     lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, null_label="Empty")
     origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES, null_label="Empty")
-    external_audience = BooleanFilter(field_name='external_audience')
-    internet_accessible = BooleanFilter(field_name='internet_accessible')
-    tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label="Tag contains")
-    not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True)
+    external_audience = BooleanFilter(field_name="external_audience")
+    internet_accessible = BooleanFilter(field_name="internet_accessible")
+    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag contains")
+    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
     outside_of_sla = ProductSLAFilter(label="Outside of SLA")
-    has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+    has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")

     o = OrderingFilter(
         # tuple-mapping retains order
         fields=(
-            ('name', 'name'),
-            ('name_exact', 'name_exact'),
-            ('prod_type__name', 'prod_type__name'),
-            ('business_criticality', 'business_criticality'),
-            ('platform', 'platform'),
-            ('lifecycle', 'lifecycle'),
-            ('origin', 'origin'),
-            ('external_audience', 'external_audience'),
-            ('internet_accessible', 'internet_accessible'),
-            ('findings_count', 'findings_count'),
+            ("name", "name"),
+            ("name_exact", "name_exact"),
+            ("prod_type__name", "prod_type__name"),
+            ("business_criticality", "business_criticality"),
+            ("platform", "platform"),
+            ("lifecycle", "lifecycle"),
+            ("origin", "origin"),
+            ("external_audience", "external_audience"),
+            ("internet_accessible", "internet_accessible"),
+            ("findings_count", "findings_count"),
         ),
         field_labels={
-            'name': 'Product Name',
-            'name_exact': 'Exact Product Name',
-            'prod_type__name': 'Product Type',
-            'business_criticality': 'Business Criticality',
-            'platform': 'Platform ',
-            'lifecycle': 'Lifecycle ',
-            'origin': 'Origin ',
-            'external_audience': 'External Audience ',
-            'internet_accessible': 'Internet Accessible ',
-            'findings_count': 'Findings Count ',
+            "name": "Product Name",
+            "name_exact": "Exact Product Name",
+            "prod_type__name": "Product Type",
+            "business_criticality": "Business Criticality",
+            "platform": "Platform ",
+            "lifecycle": "Lifecycle ",
+            "origin": "Origin ",
+            "external_audience": "External Audience ",
+            "internet_accessible": "Internet Accessible ",
+            "findings_count": "Findings Count ",
         },
     )
CharFilter(field_name="name", lookup_expr="iexact") + description = CharFilter(lookup_expr="icontains") + business_criticality = CharFilter(method=custom_filter, field_name="business_criticality") + platform = CharFilter(method=custom_filter, field_name="platform") + lifecycle = CharFilter(method=custom_filter, field_name="lifecycle") + origin = CharFilter(method=custom_filter, field_name="origin") # NumberInFilter - id = NumberInFilter(field_name='id', lookup_expr='in') - product_manager = NumberInFilter(field_name='product_manager', lookup_expr='in') - technical_contact = NumberInFilter(field_name='technical_contact', lookup_expr='in') - team_manager = NumberInFilter(field_name='team_manager', lookup_expr='in') - prod_type = NumberInFilter(field_name='prod_type', lookup_expr='in') - tid = NumberInFilter(field_name='tid', lookup_expr='in') - prod_numeric_grade = NumberInFilter(field_name='prod_numeric_grade', lookup_expr='in') - user_records = NumberInFilter(field_name='user_records', lookup_expr='in') - regulations = NumberInFilter(field_name='regulations', lookup_expr='in') - - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') - tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags') - - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') - not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on product', exclude='True') - has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags') + id = NumberInFilter(field_name="id", lookup_expr="in") + product_manager = NumberInFilter(field_name="product_manager", lookup_expr="in") + technical_contact = NumberInFilter(field_name="technical_contact", lookup_expr="in") + team_manager = NumberInFilter(field_name="team_manager", lookup_expr="in") + prod_type = NumberInFilter(field_name="prod_type", lookup_expr="in") + tid = NumberInFilter(field_name="tid", lookup_expr="in") + prod_numeric_grade = NumberInFilter(field_name="prod_numeric_grade", lookup_expr="in") + user_records = NumberInFilter(field_name="user_records", lookup_expr="in") + regulations = NumberInFilter(field_name="regulations", lookup_expr="in") + + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags") + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on product", exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(ProductSLAFilter()) # DateRangeFilter @@ -1353,173 +1353,173 @@ class ApiProductFilter(DojoFilter): o = OrderingFilter( # tuple-mapping retains order fields=( - ('id', 'id'), - ('tid', 'tid'), - ('name', 'name'), - ('created', 'created'), - ('prod_numeric_grade', 'prod_numeric_grade'), - ('business_criticality', 'business_criticality'), - ('platform', 'platform'), - ('lifecycle', 'lifecycle'), - ('origin', 'origin'), - ('revenue', 'revenue'), - ('external_audience', 'external_audience'), - 
@@ -1353,173 +1353,173 @@ class ApiProductFilter(DojoFilter):
     o = OrderingFilter(
         # tuple-mapping retains order
         fields=(
-            ('id', 'id'),
-            ('tid', 'tid'),
-            ('name', 'name'),
-            ('created', 'created'),
-            ('prod_numeric_grade', 'prod_numeric_grade'),
-            ('business_criticality', 'business_criticality'),
-            ('platform', 'platform'),
-            ('lifecycle', 'lifecycle'),
-            ('origin', 'origin'),
-            ('revenue', 'revenue'),
-            ('external_audience', 'external_audience'),
-            ('internet_accessible', 'internet_accessible'),
-            ('product_manager', 'product_manager'),
-            ('product_manager__first_name', 'product_manager__first_name'),
-            ('product_manager__last_name', 'product_manager__last_name'),
-            ('technical_contact', 'technical_contact'),
-            ('technical_contact__first_name', 'technical_contact__first_name'),
-            ('technical_contact__last_name', 'technical_contact__last_name'),
-            ('team_manager', 'team_manager'),
-            ('team_manager__first_name', 'team_manager__first_name'),
-            ('team_manager__last_name', 'team_manager__last_name'),
-            ('prod_type', 'prod_type'),
-            ('prod_type__name', 'prod_type__name'),
-            ('updated', 'updated'),
-            ('user_records', 'user_records'),
+            ("id", "id"),
+            ("tid", "tid"),
+            ("name", "name"),
+            ("created", "created"),
+            ("prod_numeric_grade", "prod_numeric_grade"),
+            ("business_criticality", "business_criticality"),
+            ("platform", "platform"),
+            ("lifecycle", "lifecycle"),
+            ("origin", "origin"),
+            ("revenue", "revenue"),
+            ("external_audience", "external_audience"),
+            ("internet_accessible", "internet_accessible"),
+            ("product_manager", "product_manager"),
+            ("product_manager__first_name", "product_manager__first_name"),
+            ("product_manager__last_name", "product_manager__last_name"),
+            ("technical_contact", "technical_contact"),
+            ("technical_contact__first_name", "technical_contact__first_name"),
+            ("technical_contact__last_name", "technical_contact__last_name"),
+            ("team_manager", "team_manager"),
+            ("team_manager__first_name", "team_manager__first_name"),
+            ("team_manager__last_name", "team_manager__last_name"),
+            ("prod_type", "prod_type"),
+            ("prod_type__name", "prod_type__name"),
+            ("updated", "updated"),
+            ("user_records", "user_records"),
         ),
     )

 class ApiFindingFilter(DojoFilter):
     # BooleanFilter
-    active = BooleanFilter(field_name='active')
-    duplicate = BooleanFilter(field_name='duplicate')
-    dynamic_finding = BooleanFilter(field_name='dynamic_finding')
-    false_p = BooleanFilter(field_name='false_p')
-    is_mitigated = BooleanFilter(field_name='is_mitigated')
-    out_of_scope = BooleanFilter(field_name='out_of_scope')
-    static_finding = BooleanFilter(field_name='static_finding')
-    under_defect_review = BooleanFilter(field_name='under_defect_review')
-    under_review = BooleanFilter(field_name='under_review')
-    verified = BooleanFilter(field_name='verified')
-    has_jira = BooleanFilter(field_name='jira_issue', lookup_expr='isnull', exclude=True)
+    active = BooleanFilter(field_name="active")
+    duplicate = BooleanFilter(field_name="duplicate")
+    dynamic_finding = BooleanFilter(field_name="dynamic_finding")
+    false_p = BooleanFilter(field_name="false_p")
+    is_mitigated = BooleanFilter(field_name="is_mitigated")
+    out_of_scope = BooleanFilter(field_name="out_of_scope")
+    static_finding = BooleanFilter(field_name="static_finding")
+    under_defect_review = BooleanFilter(field_name="under_defect_review")
+    under_review = BooleanFilter(field_name="under_review")
+    verified = BooleanFilter(field_name="verified")
+    has_jira = BooleanFilter(field_name="jira_issue", lookup_expr="isnull", exclude=True)
     # CharFilter
-    component_version = CharFilter(lookup_expr='icontains')
-    component_name = CharFilter(lookup_expr='icontains')
+    component_version = CharFilter(lookup_expr="icontains")
+    component_name = CharFilter(lookup_expr="icontains")
     vulnerability_id = CharFilter(method=custom_vulnerability_id_filter)
-    description = CharFilter(lookup_expr='icontains')
-    file_path = CharFilter(lookup_expr='icontains')
-    hash_code = CharFilter(lookup_expr='icontains')
-    impact = CharFilter(lookup_expr='icontains')
-    mitigation = CharFilter(lookup_expr='icontains')
-    numerical_severity = CharFilter(method=custom_filter, field_name='numerical_severity')
-    param = CharFilter(lookup_expr='icontains')
-    payload = CharFilter(lookup_expr='icontains')
-    references = CharFilter(lookup_expr='icontains')
-    severity = CharFilter(method=custom_filter, field_name='severity')
-    severity_justification = CharFilter(lookup_expr='icontains')
-    steps_to_reproduce = CharFilter(lookup_expr='icontains')
-    unique_id_from_tool = CharFilter(lookup_expr='icontains')
-    title = CharFilter(lookup_expr='icontains')
-    product_name = CharFilter(lookup_expr='engagement__product__name__iexact', field_name='test', label='exact product name')
-    product_name_contains = CharFilter(lookup_expr='engagement__product__name__icontains', field_name='test', label='exact product name')
-    product_lifecycle = CharFilter(method=custom_filter, lookup_expr='engagement__product__lifecycle',
-                                   field_name='test__engagement__product__lifecycle', label='Comma separated list of exact product lifecycles')
+    description = CharFilter(lookup_expr="icontains")
+    file_path = CharFilter(lookup_expr="icontains")
+    hash_code = CharFilter(lookup_expr="icontains")
+    impact = CharFilter(lookup_expr="icontains")
+    mitigation = CharFilter(lookup_expr="icontains")
+    numerical_severity = CharFilter(method=custom_filter, field_name="numerical_severity")
+    param = CharFilter(lookup_expr="icontains")
+    payload = CharFilter(lookup_expr="icontains")
+    references = CharFilter(lookup_expr="icontains")
+    severity = CharFilter(method=custom_filter, field_name="severity")
+    severity_justification = CharFilter(lookup_expr="icontains")
+    steps_to_reproduce = CharFilter(lookup_expr="icontains")
+    unique_id_from_tool = CharFilter(lookup_expr="icontains")
+    title = CharFilter(lookup_expr="icontains")
+    product_name = CharFilter(lookup_expr="engagement__product__name__iexact", field_name="test", label="exact product name")
+    product_name_contains = CharFilter(lookup_expr="engagement__product__name__icontains", field_name="test", label="exact product name")
+    product_lifecycle = CharFilter(method=custom_filter, lookup_expr="engagement__product__lifecycle",
+                                   field_name="test__engagement__product__lifecycle", label="Comma separated list of exact product lifecycles")
     # DateRangeFilter
     created = DateRangeFilter()
     date = DateRangeFilter()
-    on = DateFilter(field_name='date', lookup_expr='exact')
-    before = DateFilter(field_name='date', lookup_expr='lt')
-    after = DateFilter(field_name='date', lookup_expr='gt')
-    jira_creation = DateRangeFilter(field_name='jira_issue__jira_creation')
-    jira_change = DateRangeFilter(field_name='jira_issue__jira_change')
+    on = DateFilter(field_name="date", lookup_expr="exact")
+    before = DateFilter(field_name="date", lookup_expr="lt")
+    after = DateFilter(field_name="date", lookup_expr="gt")
+    jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation")
+    jira_change = DateRangeFilter(field_name="jira_issue__jira_change")
     last_reviewed = DateRangeFilter()
     mitigated = DateRangeFilter()
     # NumberInFilter
-    cwe = NumberInFilter(field_name='cwe', lookup_expr='in')
-    defect_review_requested_by = NumberInFilter(field_name='defect_review_requested_by', lookup_expr='in')
-    endpoints = NumberInFilter(field_name='endpoints', lookup_expr='in')
-    found_by = NumberInFilter(field_name='found_by', lookup_expr='in')
-    id = NumberInFilter(field_name='id', lookup_expr='in')
-    last_reviewed_by = NumberInFilter(field_name='last_reviewed_by', lookup_expr='in')
-    mitigated_by = NumberInFilter(field_name='mitigated_by', lookup_expr='in')
-    nb_occurences = NumberInFilter(field_name='nb_occurences', lookup_expr='in')
-    reporter = NumberInFilter(field_name='reporter', lookup_expr='in')
-    scanner_confidence = NumberInFilter(field_name='scanner_confidence', lookup_expr='in')
-    review_requested_by = NumberInFilter(field_name='review_requested_by', lookup_expr='in')
-    reviewers = NumberInFilter(field_name='reviewers', lookup_expr='in')
-    sast_source_line = NumberInFilter(field_name='sast_source_line', lookup_expr='in')
-    sonarqube_issue = NumberInFilter(field_name='sonarqube_issue', lookup_expr='in')
-    test__test_type = NumberInFilter(field_name='test__test_type', lookup_expr='in', label='Test Type')
-    test__engagement = NumberInFilter(field_name='test__engagement', lookup_expr='in')
-    test__engagement__product = NumberInFilter(field_name='test__engagement__product', lookup_expr='in')
-    test__engagement__product__prod_type = NumberInFilter(field_name='test__engagement__product__prod_type', lookup_expr='in')
-    finding_group = NumberInFilter(field_name='finding_group', lookup_expr='in')
+    cwe = NumberInFilter(field_name="cwe", lookup_expr="in")
+    defect_review_requested_by = NumberInFilter(field_name="defect_review_requested_by", lookup_expr="in")
+    endpoints = NumberInFilter(field_name="endpoints", lookup_expr="in")
+    found_by = NumberInFilter(field_name="found_by", lookup_expr="in")
+    id = NumberInFilter(field_name="id", lookup_expr="in")
+    last_reviewed_by = NumberInFilter(field_name="last_reviewed_by", lookup_expr="in")
+    mitigated_by = NumberInFilter(field_name="mitigated_by", lookup_expr="in")
+    nb_occurences = NumberInFilter(field_name="nb_occurences", lookup_expr="in")
+    reporter = NumberInFilter(field_name="reporter", lookup_expr="in")
+    scanner_confidence = NumberInFilter(field_name="scanner_confidence", lookup_expr="in")
+    review_requested_by = NumberInFilter(field_name="review_requested_by", lookup_expr="in")
+    reviewers = NumberInFilter(field_name="reviewers", lookup_expr="in")
+    sast_source_line = NumberInFilter(field_name="sast_source_line", lookup_expr="in")
+    sonarqube_issue = NumberInFilter(field_name="sonarqube_issue", lookup_expr="in")
+    test__test_type = NumberInFilter(field_name="test__test_type", lookup_expr="in", label="Test Type")
+    test__engagement = NumberInFilter(field_name="test__engagement", lookup_expr="in")
+    test__engagement__product = NumberInFilter(field_name="test__engagement__product", lookup_expr="in")
+    test__engagement__product__prod_type = NumberInFilter(field_name="test__engagement__product__prod_type", lookup_expr="in")
+    finding_group = NumberInFilter(field_name="finding_group", lookup_expr="in")

     # ReportRiskAcceptanceFilter
     risk_acceptance = extend_schema_field(OpenApiTypes.NUMBER)(ReportRiskAcceptanceFilter())
-    tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains')
-    tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
-                             help_text='Comma separated list of exact tags')
-    test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', help_text='Comma separated list of exact tags present on test')
-    test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in',
-                                               help_text='Comma separated list of exact tags present on engagement')
+    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
+    tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+                             help_text="Comma separated list of exact tags")
+    test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", help_text="Comma separated list of exact tags present on test")
+    test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", lookup_expr="in",
+                                               help_text="Comma separated list of exact tags present on engagement")
     test__engagement__product__tags = CharFieldInFilter(
-        field_name='test__engagement__product__tags__name',
-        lookup_expr='in',
-        help_text='Comma separated list of exact tags present on product')
-
-    not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True')
-    not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in',
-                                 help_text='Comma separated list of exact tags not present on model', exclude='True')
-    not_test__tags = CharFieldInFilter(field_name='test__tags__name', lookup_expr='in', exclude='True', help_text='Comma separated list of exact tags present on test')
-    not_test__engagement__tags = CharFieldInFilter(field_name='test__engagement__tags__name', lookup_expr='in',
-                                                   help_text='Comma separated list of exact tags not present on engagement',
-                                                   exclude='True')
+        field_name="test__engagement__product__tags__name",
+        lookup_expr="in",
+        help_text="Comma separated list of exact tags present on product")
+
+    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True")
+    not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
+                                 help_text="Comma separated list of exact tags not present on model", exclude="True")
+    not_test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", exclude="True", help_text="Comma separated list of exact tags present on test")
+    not_test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", lookup_expr="in",
+                                                   help_text="Comma separated list of exact tags not present on engagement",
+                                                   exclude="True")
     not_test__engagement__product__tags = CharFieldInFilter(
-        field_name='test__engagement__product__tags__name',
-        lookup_expr='in',
-        help_text='Comma separated list of exact tags not present on product',
-        exclude='True')
-    has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags')
+        field_name="test__engagement__product__tags__name",
+        lookup_expr="in",
+        help_text="Comma separated list of exact tags not present on product",
+        exclude="True")
+    has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
     outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(FindingSLAFilter())
('under_defect_review', 'under_defect_review'), - ('under_review', 'under_review'), - ('verified', 'verified'), + ("active", "active"), + ("component_name", "component_name"), + ("component_version", "component_version"), + ("created", "created"), + ("last_status_update", "last_status_update"), + ("last_reviewed", "last_reviewed"), + ("cwe", "cwe"), + ("date", "date"), + ("duplicate", "duplicate"), + ("dynamic_finding", "dynamic_finding"), + ("false_p", "false_p"), + ("found_by", "found_by"), + ("id", "id"), + ("is_mitigated", "is_mitigated"), + ("numerical_severity", "numerical_severity"), + ("out_of_scope", "out_of_scope"), + ("severity", "severity"), + ("reviewers", "reviewers"), + ("static_finding", "static_finding"), + ("test__engagement__product__name", "test__engagement__product__name"), + ("title", "title"), + ("under_defect_review", "under_defect_review"), + ("under_review", "under_review"), + ("verified", "verified"), ), ) class Meta: model = Finding - exclude = ['url', 'thread_id', 'notes', 'files', - 'line', 'cve'] + exclude = ["url", "thread_id", "notes", "files", + "line", "cve"] class PercentageFilter(NumberFilter): def __init__(self, *args, **kwargs): - kwargs['method'] = self.filter_percentage + kwargs["method"] = self.filter_percentage super().__init__(*args, **kwargs) def filter_percentage(self, queryset, name, value): - value = value / decimal.Decimal('100.0') + value = value / decimal.Decimal("100.0") # Provide some wiggle room for filtering since the UI rounds to two places (and because floats): # a user may enter 0.15, but we'll return everything in [0.0015, 0.0016). # To do this, add to our value 1^(whatever the exponent for our least significant digit place is), but ensure @@ -1535,15 +1535,15 @@ def filter_percentage(self, queryset, name, value): class PercentageRangeFilter(RangeFilter): def filter(self, qs, value): if value is not None: - start = value.start / decimal.Decimal('100.0') if value.start else None - stop = value.stop / decimal.Decimal('100.0') if value.stop else None + start = value.start / decimal.Decimal("100.0") if value.start else None + stop = value.stop / decimal.Decimal("100.0") if value.stop else None value = slice(start, stop) return super().filter(qs, value) class FindingFilterHelper(FilterSet): title = CharFilter(lookup_expr="icontains") - date = DateFromToRangeFilter(field_name='date', label="Date Discovered") + date = DateFromToRangeFilter(field_name="date", label="Date Discovered") on = DateFilter(field_name="date", lookup_expr="exact", label="On") before = DateFilter(field_name="date", lookup_expr="lt", label="Before") after = DateFilter(field_name="date", lookup_expr="gt", label="After") @@ -1560,7 +1560,7 @@ class FindingFilterHelper(FilterSet): file_path = CharFilter(lookup_expr="icontains") param = CharFilter(lookup_expr="icontains") payload = CharFilter(lookup_expr="icontains") - test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label='Test Type') + test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label="Test Type") endpoints__host = CharFilter(lookup_expr="icontains", label="Endpoint Host") service = CharFilter(lookup_expr="icontains") test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version") @@ -1583,10 +1583,10 @@ class FindingFilterHelper(FilterSet): if is_finding_groups_enabled(): has_finding_group = BooleanFilter( - field_name='finding_group', - lookup_expr='isnull', + field_name="finding_group", + lookup_expr="isnull", exclude=True, - 
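The wiggle-room comment in `filter_percentage` above is easiest to see with concrete numbers. A worked sketch, assuming the upper bound is computed from the `Decimal` exponent as the comment describes (the filter body itself is elided from this hunk):

import decimal

value = decimal.Decimal("0.15") / decimal.Decimal("100.0")  # user input 0.15 -> Decimal("0.0015")
exponent = value.as_tuple().exponent                        # -4: the least significant digit place
upper = value + decimal.Decimal(f"1E{exponent}")            # 0.0015 + 0.0001 -> Decimal("0.0016")

# The queryset is then presumably restricted to the half-open interval [value, upper):
# queryset.filter(**{f"{name}__gte": value, f"{name}__lt": upper})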
label='Is Grouped') + label="Is Grouped") if get_system_setting("enable_jira"): has_jira_issue = BooleanFilter( @@ -1630,28 +1630,28 @@ class FindingFilterHelper(FilterSet): o = OrderingFilter( # tuple-mapping retains order fields=( - ('numerical_severity', 'numerical_severity'), - ('date', 'date'), - ('mitigated', 'mitigated'), - ('risk_acceptance__created__date', - 'risk_acceptance__created__date'), - ('last_reviewed', 'last_reviewed'), - ('title', 'title'), - ('test__engagement__product__name', - 'test__engagement__product__name'), - ('service', 'service'), - ('epss_score', 'epss_score'), - ('epss_percentile', 'epss_percentile'), + ("numerical_severity", "numerical_severity"), + ("date", "date"), + ("mitigated", "mitigated"), + ("risk_acceptance__created__date", + "risk_acceptance__created__date"), + ("last_reviewed", "last_reviewed"), + ("title", "title"), + ("test__engagement__product__name", + "test__engagement__product__name"), + ("service", "service"), + ("epss_score", "epss_score"), + ("epss_percentile", "epss_percentile"), ), field_labels={ - 'numerical_severity': 'Severity', - 'date': 'Date', - 'risk_acceptance__created__date': 'Acceptance Date', - 'mitigated': 'Mitigated Date', - 'title': 'Finding Name', - 'test__engagement__product__name': 'Product Name', - 'epss_score': 'EPSS Score', - 'epss_percentile': 'EPSS Percentile', + "numerical_severity": "Severity", + "date": "Date", + "risk_acceptance__created__date": "Acceptance Date", + "mitigated": "Mitigated Date", + "title": "Finding Name", + "test__engagement__product__name": "Product Name", + "epss_score": "EPSS Score", + "epss_percentile": "EPSS Percentile", }, ) @@ -1659,11 +1659,11 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def set_date_fields(self, *args: list, **kwargs: dict): - date_input_widget = forms.DateInput(attrs={'class': 'datepicker', 'placeholder': 'YYYY-MM-DD'}, format="%Y-%m-%d") - self.form.fields['on'].widget = date_input_widget - self.form.fields['before'].widget = date_input_widget - self.form.fields['after'].widget = date_input_widget - self.form.fields['cwe'].choices = cwe_options(self.queryset) + date_input_widget = forms.DateInput(attrs={"class": "datepicker", "placeholder": "YYYY-MM-DD"}, format="%Y-%m-%d") + self.form.fields["on"].widget = date_input_widget + self.form.fields["before"].widget = date_input_widget + self.form.fields["after"].widget = date_input_widget + self.form.fields["cwe"].choices = cwe_options(self.queryset) class FindingFilterWithoutObjectLookups(FindingFilterHelper, FindingTagStringFilter): @@ -1746,34 +1746,34 @@ class Meta: model = Finding fields = get_finding_filterset_fields(filter_string_matching=True) - exclude = ['url', 'description', 'mitigation', 'impact', - 'endpoints', 'references', - 'thread_id', 'notes', 'scanner_confidence', - 'numerical_severity', 'line', 'duplicate_finding', - 'hash_code', 'reviewers', 'created', 'files', - 'sla_start_date', 'sla_expiration_date', 'cvssv3', - 'severity_justification', 'steps_to_reproduce'] + exclude = ["url", "description", "mitigation", "impact", + "endpoints", "references", + "thread_id", "notes", "scanner_confidence", + "numerical_severity", "line", "duplicate_finding", + "hash_code", "reviewers", "created", "files", + "sla_start_date", "sla_expiration_date", "cvssv3", + "severity_justification", "steps_to_reproduce"] def __init__(self, *args, **kwargs): self.user = None self.pid = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = 
kwargs.pop("user") - if 'pid' in kwargs: - self.pid = kwargs.pop('pid') + if "pid" in kwargs: + self.pid = kwargs.pop("pid") super().__init__(*args, **kwargs) # Set some date fields self.set_date_fields(*args, **kwargs) # Don't show the product filter on the product finding view if self.pid: - del self.form.fields['test__engagement__product__name'] - del self.form.fields['test__engagement__product__name_contains'] - del self.form.fields['test__engagement__product__prod_type__name'] - del self.form.fields['test__engagement__product__prod_type__name_contains'] + del self.form.fields["test__engagement__product__name"] + del self.form.fields["test__engagement__product__name_contains"] + del self.form.fields["test__engagement__product__prod_type__name"] + del self.form.fields["test__engagement__product__prod_type__name_contains"] else: - del self.form.fields['test__name'] - del self.form.fields['test__name_contains'] + del self.form.fields["test__name"] + del self.form.fields["test__name_contains"] class FindingFilter(FindingFilterHelper, FindingTagFilter): @@ -1784,7 +1784,7 @@ class FindingFilter(FindingFilterHelper, FindingTagFilter): label="Product Type") test__engagement__product__lifecycle = MultipleChoiceFilter( choices=Product.LIFECYCLE_CHOICES, - label='Product lifecycle') + label="Product lifecycle") test__engagement__product = ModelMultipleChoiceFilter( queryset=Product.objects.none(), label="Product") @@ -1804,22 +1804,22 @@ class Meta: model = Finding fields = get_finding_filterset_fields() - exclude = ['url', 'description', 'mitigation', 'impact', - 'endpoints', 'references', - 'thread_id', 'notes', 'scanner_confidence', - 'numerical_severity', 'line', 'duplicate_finding', - 'hash_code', 'reviewers', 'created', 'files', - 'sla_start_date', 'sla_expiration_date', 'cvssv3', - 'severity_justification', 'steps_to_reproduce'] + exclude = ["url", "description", "mitigation", "impact", + "endpoints", "references", + "thread_id", "notes", "scanner_confidence", + "numerical_severity", "line", "duplicate_finding", + "hash_code", "reviewers", "created", "files", + "sla_start_date", "sla_expiration_date", "cvssv3", + "severity_justification", "steps_to_reproduce"] def __init__(self, *args, **kwargs): self.user = None self.pid = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") - if 'pid' in kwargs: - self.pid = kwargs.pop('pid') + if "pid" in kwargs: + self.pid = kwargs.pop("pid") super().__init__(*args, **kwargs) # Set some date fields self.set_date_fields(*args, **kwargs) @@ -1828,25 +1828,25 @@ def __init__(self, *args, **kwargs): def set_related_object_fields(self, *args: list, **kwargs: dict): if self.pid is not None: - del self.form.fields['test__engagement__product'] - del self.form.fields['test__engagement__product__prod_type'] + del self.form.fields["test__engagement__product"] + del self.form.fields["test__engagement__product__prod_type"] # TODO add authorized check to be sure - self.form.fields['test__engagement'].queryset = Engagement.objects.filter( + self.form.fields["test__engagement"].queryset = Engagement.objects.filter( product_id=self.pid, ).all() - self.form.fields['test'].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related('test_type') + self.form.fields["test"].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related("test_type") else: self.form.fields[ - 'test__engagement__product__prod_type'].queryset = 
get_authorized_product_types(Permissions.Product_Type_View) - self.form.fields['test__engagement'].queryset = get_authorized_engagements(Permissions.Engagement_View) - del self.form.fields['test'] + "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) + del self.form.fields["test"] - if self.form.fields.get('test__engagement__product'): - self.form.fields['test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View) - if self.form.fields.get('finding_group', None): - self.form.fields['finding_group'].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View) - self.form.fields['reporter'].queryset = get_authorized_users(Permissions.Finding_View) - self.form.fields['reviewers'].queryset = self.form.fields['reporter'].queryset + if self.form.fields.get("test__engagement__product"): + self.form.fields["test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) + if self.form.fields.get("finding_group", None): + self.form.fields["finding_group"].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View) + self.form.fields["reporter"].queryset = get_authorized_users(Permissions.Finding_View) + self.form.fields["reviewers"].queryset = self.form.fields["reporter"].queryset class AcceptedFindingFilter(FindingFilter): @@ -1860,8 +1860,8 @@ class AcceptedFindingFilter(FindingFilter): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.form.fields['risk_acceptance__owner'].queryset = get_authorized_users(Permissions.Finding_View) - self.form.fields['risk_acceptance'].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance) + self.form.fields["risk_acceptance__owner"].queryset = get_authorized_users(Permissions.Finding_View) + self.form.fields["risk_acceptance"].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance) class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups): @@ -1890,7 +1890,7 @@ class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookup class SimilarFindingHelper(FilterSet): hash_code = MultipleChoiceFilter() - vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label='Vulnerability Ids') + vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label="Vulnerability Ids") def update_data(self, data: dict, *args: list, **kwargs: dict): # if filterset is bound, use initial values as defaults @@ -1900,20 +1900,20 @@ def update_data(self, data: dict, *args: list, **kwargs: dict): # get a mutable copy of the QueryDict data = data.copy() - data['vulnerability_ids'] = ','.join(self.finding.vulnerability_ids) - data['cwe'] = self.finding.cwe - data['file_path'] = self.finding.file_path - data['line'] = self.finding.line - data['unique_id_from_tool'] = self.finding.unique_id_from_tool - data['test__test_type'] = self.finding.test.test_type - data['test__engagement__product'] = self.finding.test.engagement.product - data['test__engagement__product__prod_type'] = self.finding.test.engagement.product.prod_type + data["vulnerability_ids"] = ",".join(self.finding.vulnerability_ids) + data["cwe"] = self.finding.cwe + data["file_path"] = self.finding.file_path + data["line"] = self.finding.line + data["unique_id_from_tool"] = self.finding.unique_id_from_tool + data["test__test_type"] = self.finding.test.test_type + 
data["test__engagement__product"] = self.finding.test.engagement.product + data["test__engagement__product__prod_type"] = self.finding.test.engagement.product.prod_type self.has_changed = False def set_hash_codes(self, *args: list, **kwargs: dict): if self.finding and self.finding.hash_code: - self.form.fields['hash_code'] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + '...')], required=False, initial=[]) + self.form.fields["hash_code"] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + "...")], required=False, initial=[]) def filter_queryset(self, *args: list, **kwargs: dict): queryset = super().filter_queryset(*args, **kwargs) @@ -1930,11 +1930,11 @@ class Meta(FindingFilter.Meta): def __init__(self, data=None, *args, **kwargs): self.user = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") self.finding = None - if 'finding' in kwargs: - self.finding = kwargs.pop('finding') + if "finding" in kwargs: + self.finding = kwargs.pop("finding") self.update_data(data, *args, **kwargs) super().__init__(data, *args, **kwargs) self.set_hash_codes(*args, **kwargs) @@ -1948,133 +1948,133 @@ class Meta(FindingFilterWithoutObjectLookups.Meta): def __init__(self, data=None, *args, **kwargs): self.user = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") self.finding = None - if 'finding' in kwargs: - self.finding = kwargs.pop('finding') + if "finding" in kwargs: + self.finding = kwargs.pop("finding") self.update_data(data, *args, **kwargs) super().__init__(data, *args, **kwargs) self.set_hash_codes(*args, **kwargs) class TemplateFindingFilter(DojoFilter): - title = CharFilter(lookup_expr='icontains') + title = CharFilter(lookup_expr="icontains") cwe = MultipleChoiceFilter(choices=[]) severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', - queryset=Finding.tags.tag_model.objects.all().order_by('name'), + field_name="tags__name", + to_field_name="name", + queryset=Finding.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") not_tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", exclude=True, - queryset=Finding.tags.tag_model.objects.all().order_by('name'), + queryset=Finding.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) o = OrderingFilter( # tuple-mapping retains order fields=( - ('cwe', 'cwe'), - ('title', 'title'), - ('numerical_severity', 'numerical_severity'), + ("cwe", "cwe"), + ("title", "title"), + ("numerical_severity", "numerical_severity"), ), field_labels={ - 'numerical_severity': 'Severity', + "numerical_severity": "Severity", }, ) class Meta: model = Finding_Template - exclude = ['description', 'mitigation', 'impact', - 'references', 
'numerical_severity'] + exclude = ["description", "mitigation", "impact", + "references", "numerical_severity"] not_test__tags = ModelMultipleChoiceFilter( - field_name='test__tags__name', - to_field_name='name', + field_name="test__tags__name", + to_field_name="name", exclude=True, - label='Test without tags', - queryset=Test.tags.tag_model.objects.all().order_by('name'), + label="Test without tags", + queryset=Test.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) not_test__engagement__tags = ModelMultipleChoiceFilter( - field_name='test__engagement__tags__name', - to_field_name='name', + field_name="test__engagement__tags__name", + to_field_name="name", exclude=True, - label='Engagement without tags', - queryset=Engagement.tags.tag_model.objects.all().order_by('name'), + label="Engagement without tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) not_test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name='test__engagement__product__tags__name', - to_field_name='name', + field_name="test__engagement__product__tags__name", + to_field_name="name", exclude=True, - label='Product without tags', - queryset=Product.tags.tag_model.objects.all().order_by('name'), + label="Product without tags", + queryset=Product.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.form.fields['cwe'].choices = cwe_options(self.queryset) + self.form.fields["cwe"].choices = cwe_options(self.queryset) class ApiTemplateFindingFilter(DojoFilter): - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') - tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags") - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') - not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on model', exclude='True') + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") o = OrderingFilter( # tuple-mapping retains order fields=( - ('title', 'title'), - ('cwe', 'cwe'), + ("title", "title"), + ("cwe", "cwe"), ), ) class Meta: model = Finding_Template - fields = ['id', 'title', 'cwe', 'severity', 'description', - 'mitigation'] + fields = ["id", "title", "cwe", "severity", "description", + "mitigation"] class MetricsFindingFilter(FindingFilter): - start_date = DateFilter(field_name='date', label='Start Date', lookup_expr=('gt')) - end_date = DateFilter(field_name='date', label='End Date', lookup_expr=('lt')) + start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt")) + end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt")) date = MetricsDateRangeFilter() - 
vulnerability_id = CharFilter(method=vulnerability_id_filter, label='Vulnerability Id') + vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") not_tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by('name'), + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) def __init__(self, *args, **kwargs): if args[0]: - if args[0].get('start_date', '') != '' or args[0].get('end_date', '') != '': + if args[0].get("start_date", "") != "" or args[0].get("end_date", "") != "": args[0]._mutable = True - args[0]['date'] = 8 + args[0]["date"] = 8 args[0]._mutable = False super().__init__(*args, **kwargs) @@ -2085,26 +2085,26 @@ class Meta(FindingFilter.Meta): class MetricsFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups): - start_date = DateFilter(field_name='date', label='Start Date', lookup_expr=('gt')) - end_date = DateFilter(field_name='date', label='End Date', lookup_expr=('lt')) + start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt")) + end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt")) date = MetricsDateRangeFilter() - vulnerability_id = CharFilter(method=vulnerability_id_filter, label='Vulnerability Id') + vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") not_tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by('name'), + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) def __init__(self, *args, **kwargs): if args[0]: - if args[0].get('start_date', '') != '' or args[0].get('end_date', '') != '': + if args[0].get("start_date", "") != "" or args[0].get("end_date", "") != "": args[0]._mutable = True - args[0]['date'] = 8 + args[0]["date"] = 8 args[0]._mutable = False super().__init__(*args, **kwargs) @@ -2134,60 +2134,60 @@ class MetricsEndpointFilter(MetricsEndpointFilterHelper): queryset=Engagement.objects.none(), label="Engagement") endpoint__tags = ModelMultipleChoiceFilter( - field_name='endpoint__tags__name', - to_field_name='name', - label='Endpoint tags', - queryset=Endpoint.tags.tag_model.objects.all().order_by('name')) + field_name="endpoint__tags__name", + to_field_name="name", + label="Endpoint tags", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) finding__tags = ModelMultipleChoiceFilter( - field_name='finding__tags__name', - to_field_name='name', - label='Finding tags', - queryset=Finding.tags.tag_model.objects.all().order_by('name')) + field_name="finding__tags__name", + to_field_name="name", + label="Finding tags", + 
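The `args[0]._mutable` dance in both metrics filters above works around Django's immutable `request.GET`: when an explicit start or end date is supplied, the filter forcibly selects choice 8 of `MetricsDateRangeFilter` (assumed here to be the custom start/end range). A minimal sketch of the same mutation:

from django.http import QueryDict

data = QueryDict("start_date=2024-01-01&end_date=2024-06-30")
if data.get("start_date", "") != "" or data.get("end_date", "") != "":
    data._mutable = True   # QueryDicts built from a request are immutable by default
    data["date"] = 8       # assumed to select the custom-range choice
    data._mutable = False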
queryset=Finding.tags.tag_model.objects.all().order_by("name")) finding__test__tags = ModelMultipleChoiceFilter( - field_name='finding__test__tags__name', - to_field_name='name', - label='Test tags', - queryset=Test.tags.tag_model.objects.all().order_by('name')) + field_name="finding__test__tags__name", + to_field_name="name", + label="Test tags", + queryset=Test.tags.tag_model.objects.all().order_by("name")) finding__test__engagement__tags = ModelMultipleChoiceFilter( - field_name='finding__test__engagement__tags__name', - to_field_name='name', - label='Engagement tags', - queryset=Engagement.tags.tag_model.objects.all().order_by('name')) + field_name="finding__test__engagement__tags__name", + to_field_name="name", + label="Engagement tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) finding__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name='finding__test__engagement__product__tags__name', - to_field_name='name', - label='Product tags', - queryset=Product.tags.tag_model.objects.all().order_by('name')) + field_name="finding__test__engagement__product__tags__name", + to_field_name="name", + label="Product tags", + queryset=Product.tags.tag_model.objects.all().order_by("name")) not_endpoint__tags = ModelMultipleChoiceFilter( - field_name='endpoint__tags__name', - to_field_name='name', + field_name="endpoint__tags__name", + to_field_name="name", exclude=True, - label='Endpoint without tags', - queryset=Endpoint.tags.tag_model.objects.all().order_by('name')) + label="Endpoint without tags", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) not_finding__tags = ModelMultipleChoiceFilter( - field_name='finding__tags__name', - to_field_name='name', + field_name="finding__tags__name", + to_field_name="name", exclude=True, - label='Finding without tags', - queryset=Finding.tags.tag_model.objects.all().order_by('name')) + label="Finding without tags", + queryset=Finding.tags.tag_model.objects.all().order_by("name")) not_finding__test__tags = ModelMultipleChoiceFilter( - field_name='finding__test__tags__name', - to_field_name='name', + field_name="finding__test__tags__name", + to_field_name="name", exclude=True, - label='Test without tags', - queryset=Test.tags.tag_model.objects.all().order_by('name')) + label="Test without tags", + queryset=Test.tags.tag_model.objects.all().order_by("name")) not_finding__test__engagement__tags = ModelMultipleChoiceFilter( - field_name='finding__test__engagement__tags__name', - to_field_name='name', + field_name="finding__test__engagement__tags__name", + to_field_name="name", exclude=True, - label='Engagement without tags', - queryset=Engagement.tags.tag_model.objects.all().order_by('name')) + label="Engagement without tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) not_finding__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name='finding__test__engagement__product__tags__name', - to_field_name='name', + field_name="finding__test__engagement__product__tags__name", + to_field_name="name", exclude=True, - label='Product without tags', - queryset=Product.tags.tag_model.objects.all().order_by('name')) + label="Product without tags", + queryset=Product.tags.tag_model.objects.all().order_by("name")) def __init__(self, *args, **kwargs): if args[0]: @@ -2370,21 +2370,21 @@ class Meta: class EndpointFilterHelper(FilterSet): - protocol = CharFilter(lookup_expr='icontains') - userinfo = CharFilter(lookup_expr='icontains') - host = 
CharFilter(lookup_expr='icontains') + protocol = CharFilter(lookup_expr="icontains") + userinfo = CharFilter(lookup_expr="icontains") + host = CharFilter(lookup_expr="icontains") port = NumberFilter() - path = CharFilter(lookup_expr='icontains') - query = CharFilter(lookup_expr='icontains') - fragment = CharFilter(lookup_expr='icontains') - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) - has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags') + path = CharFilter(lookup_expr="icontains") + query = CharFilter(lookup_expr="icontains") + fragment = CharFilter(lookup_expr="icontains") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") o = OrderingFilter( # tuple-mapping retains order fields=( - ('product', 'product'), - ('host', 'host'), + ("product", "product"), + ("host", "host"), ), ) @@ -2394,67 +2394,67 @@ class EndpointFilter(EndpointFilterHelper, DojoFilter): queryset=Product.objects.none(), label="Product") tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", label="Endpoint Tags", - queryset=Endpoint.tags.tag_model.objects.all().order_by('name')) + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) findings__tags = ModelMultipleChoiceFilter( - field_name='findings__tags__name', - to_field_name='name', + field_name="findings__tags__name", + to_field_name="name", label="Finding Tags", - queryset=Finding.tags.tag_model.objects.all().order_by('name')) + queryset=Finding.tags.tag_model.objects.all().order_by("name")) findings__test__tags = ModelMultipleChoiceFilter( - field_name='findings__test__tags__name', - to_field_name='name', + field_name="findings__test__tags__name", + to_field_name="name", label="Test Tags", - queryset=Test.tags.tag_model.objects.all().order_by('name')) + queryset=Test.tags.tag_model.objects.all().order_by("name")) findings__test__engagement__tags = ModelMultipleChoiceFilter( - field_name='findings__test__engagement__tags__name', - to_field_name='name', + field_name="findings__test__engagement__tags__name", + to_field_name="name", label="Engagement Tags", - queryset=Engagement.tags.tag_model.objects.all().order_by('name')) + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) findings__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name='findings__test__engagement__product__tags__name', - to_field_name='name', + field_name="findings__test__engagement__product__tags__name", + to_field_name="name", label="Product Tags", - queryset=Product.tags.tag_model.objects.all().order_by('name')) + queryset=Product.tags.tag_model.objects.all().order_by("name")) not_tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", label="Not Endpoint Tags", exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by('name')) + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) not_findings__tags = ModelMultipleChoiceFilter( - field_name='findings__tags__name', - to_field_name='name', + 
field_name="findings__tags__name", + to_field_name="name", label="Not Finding Tags", exclude=True, - queryset=Finding.tags.tag_model.objects.all().order_by('name')) + queryset=Finding.tags.tag_model.objects.all().order_by("name")) not_findings__test__tags = ModelMultipleChoiceFilter( - field_name='findings__test__tags__name', - to_field_name='name', + field_name="findings__test__tags__name", + to_field_name="name", label="Not Test Tags", exclude=True, - queryset=Test.tags.tag_model.objects.all().order_by('name')) + queryset=Test.tags.tag_model.objects.all().order_by("name")) not_findings__test__engagement__tags = ModelMultipleChoiceFilter( - field_name='findings__test__engagement__tags__name', - to_field_name='name', + field_name="findings__test__engagement__tags__name", + to_field_name="name", label="Not Engagement Tags", exclude=True, - queryset=Engagement.tags.tag_model.objects.all().order_by('name')) + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) not_findings__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name='findings__test__engagement__product__tags__name', - to_field_name='name', + field_name="findings__test__engagement__product__tags__name", + to_field_name="name", label="Not Product Tags", exclude=True, - queryset=Product.tags.tag_model.objects.all().order_by('name')) + queryset=Product.tags.tag_model.objects.all().order_by("name")) def __init__(self, *args, **kwargs): self.user = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") super().__init__(*args, **kwargs) - self.form.fields['product'].queryset = get_authorized_products(Permissions.Product_View) + self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View) @property def qs(self): @@ -2593,8 +2593,8 @@ class EndpointFilterWithoutObjectLookups(EndpointFilterHelper): def __init__(self, *args, **kwargs): self.user = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") super().__init__(*args, **kwargs) @property @@ -2608,67 +2608,67 @@ class Meta: class ApiEndpointFilter(DojoFilter): - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') - tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags") - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') - not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on model', exclude='True') - has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags') + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") o = OrderingFilter( # tuple-mapping retains order fields=( - ('host', 'host'), - ('product', 'product'), + ("host", "host"), + ("product", "product"), ), ) 
class Meta: model = Endpoint - fields = ['id', 'protocol', 'userinfo', 'host', 'port', 'path', 'query', 'fragment', 'product'] + fields = ["id", "protocol", "userinfo", "host", "port", "path", "query", "fragment", "product"] class ApiRiskAcceptanceFilter(DojoFilter): o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), + ("name", "name"), ), ) class Meta: model = Risk_Acceptance fields = [ - 'name', 'accepted_findings', 'recommendation', 'recommendation_details', - 'decision', 'decision_details', 'accepted_by', 'owner', 'expiration_date', - 'expiration_date_warned', 'expiration_date_handled', 'reactivate_expired', - 'restart_sla_expired', 'notes', + "name", "accepted_findings", "recommendation", "recommendation_details", + "decision", "decision_details", "accepted_by", "owner", "expiration_date", + "expiration_date_warned", "expiration_date_handled", "reactivate_expired", + "restart_sla_expired", "notes", ] class EngagementTestFilterHelper(FilterSet): - version = CharFilter(lookup_expr='icontains', label='Version') + version = CharFilter(lookup_expr="icontains", label="Version") if settings.TRACK_IMPORT_HISTORY: - test_import__version = CharFilter(field_name='test_import__version', lookup_expr='icontains', label='Reimported Version') + test_import__version = CharFilter(field_name="test_import__version", lookup_expr="icontains", label="Reimported Version") target_start = DateRangeFilter() target_end = DateRangeFilter() - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) - has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") o = OrderingFilter( # tuple-mapping retains order fields=( - ('title', 'title'), - ('version', 'version'), - ('target_start', 'target_start'), - ('target_end', 'target_end'), - ('lead', 'lead'), - ('api_scan_configuration', 'api_scan_configuration'), + ("title", "title"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("lead", "lead"), + ("api_scan_configuration", "api_scan_configuration"), ), field_labels={ - 'name': 'Test Name', + "name": "Test Name", }, ) @@ -2697,11 +2697,11 @@ class Meta: ] def __init__(self, *args, **kwargs): - self.engagement = kwargs.pop('engagement') + self.engagement = kwargs.pop("engagement") super(DojoFilter, self).__init__(*args, **kwargs) - self.form.fields['test_type'].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by('name') - self.form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct() - self.form.fields['lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \ + self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") + self.form.fields["api_scan_configuration"].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct() + self.form.fields["lead"].queryset = 
get_authorized_users(Permissions.Product_Type_View) \ .filter(test__lead__isnull=False).distinct() @@ -2757,123 +2757,123 @@ class Meta: ] def __init__(self, *args, **kwargs): - self.engagement = kwargs.pop('engagement') + self.engagement = kwargs.pop("engagement") super().__init__(*args, **kwargs) - self.form.fields['test_type'].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by('name') + self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") class ApiTestFilter(DojoFilter): - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') - tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags') - engagement__tags = CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags present on engagement') - engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name', - lookup_expr='in', - help_text='Comma separated list of exact tags present on product') - - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') - not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on model', exclude='True') - not_engagement__tags = CharFieldInFilter(field_name='engagement__tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on engagement', - exclude='True') - not_engagement__product__tags = CharFieldInFilter(field_name='engagement__product__tags__name', - lookup_expr='in', - help_text='Comma separated list of exact tags not present on product', - exclude='True') - has_tags = BooleanFilter(field_name='tags', lookup_expr='isnull', exclude=True, label='Has tags') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags") + engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags present on engagement") + engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags present on product") + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + not_engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on engagement", + exclude="True") + not_engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags not present on product", + exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") o = OrderingFilter( # tuple-mapping retains order fields=( - ('title', 'title'), - ('version', 'version'), - ('target_start', 'target_start'), - ('target_end', 'target_end'), - ('test_type', 'test_type'), - ('lead', 'lead'), - ('version', 
'version'), - ('branch_tag', 'branch_tag'), - ('build_id', 'build_id'), - ('commit_hash', 'commit_hash'), - ('api_scan_configuration', 'api_scan_configuration'), - ('engagement', 'engagement'), - ('created', 'created'), - ('updated', 'updated'), + ("title", "title"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("test_type", "test_type"), + ("lead", "lead"), + ("version", "version"), + ("branch_tag", "branch_tag"), + ("build_id", "build_id"), + ("commit_hash", "commit_hash"), + ("api_scan_configuration", "api_scan_configuration"), + ("engagement", "engagement"), + ("created", "created"), + ("updated", "updated"), ), field_labels={ - 'name': 'Test Name', + "name": "Test Name", }, ) class Meta: model = Test - fields = ['id', 'title', 'test_type', 'target_start', - 'target_end', 'notes', 'percent_complete', - 'actual_time', 'engagement', 'version', - 'branch_tag', 'build_id', 'commit_hash', - 'api_scan_configuration', 'scan_type'] + fields = ["id", "title", "test_type", "target_start", + "target_end", "notes", "percent_complete", + "actual_time", "engagement", "version", + "branch_tag", "build_id", "commit_hash", + "api_scan_configuration", "scan_type"] class ApiAppAnalysisFilter(DojoFilter): - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Tag name contains') - tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags") - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', help_text='Not Tag name contains', exclude='True') - not_tags = CharFieldInFilter(field_name='tags__name', lookup_expr='in', - help_text='Comma separated list of exact tags not present on model', exclude='True') + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") class Meta: model = App_Analysis - fields = ['product', 'name', 'user', 'version'] + fields = ["product", "name", "user", "version"] class ApiCredentialsFilter(DojoFilter): class Meta: model = Cred_Mapping - fields = '__all__' + fields = "__all__" class EndpointReportFilter(DojoFilter): - protocol = CharFilter(lookup_expr='icontains') - userinfo = CharFilter(lookup_expr='icontains') - host = CharFilter(lookup_expr='icontains') + protocol = CharFilter(lookup_expr="icontains") + userinfo = CharFilter(lookup_expr="icontains") + host = CharFilter(lookup_expr="icontains") port = NumberFilter() - path = CharFilter(lookup_expr='icontains') - query = CharFilter(lookup_expr='icontains') - fragment = CharFilter(lookup_expr='icontains') - finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label='Severity') - finding__mitigated = ReportBooleanFilter(label='Finding Mitigated') + path = CharFilter(lookup_expr="icontains") + query = CharFilter(lookup_expr="icontains") + fragment = CharFilter(lookup_expr="icontains") + finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity") + finding__mitigated = ReportBooleanFilter(label="Finding Mitigated") tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', - 
queryset=Endpoint.tags.tag_model.objects.all().order_by('name'), + field_name="tags__name", + to_field_name="name", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Tag name contains') + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") not_tags = ModelMultipleChoiceFilter( - field_name='tags__name', - to_field_name='name', + field_name="tags__name", + to_field_name="name", exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by('name'), + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), # label='tags', # doesn't work with tagulous, need to set in __init__ below ) - not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) class Meta: model = Endpoint - exclude = ['product'] + exclude = ["product"] class ReportFindingFilterHelper(FilterSet): - title = CharFilter(lookup_expr='icontains', label='Name') - date = DateFromToRangeFilter(field_name='date', label="Date Discovered") + title = CharFilter(lookup_expr="icontains", label="Name") + date = DateFromToRangeFilter(field_name="date", label="Date Discovered") severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) active = ReportBooleanFilter() is_mitigated = ReportBooleanFilter() @@ -2884,30 +2884,30 @@ class ReportFindingFilterHelper(FilterSet): duplicate = ReportBooleanFilter() out_of_scope = ReportBooleanFilter() outside_of_sla = FindingSLAFilter(label="Outside of SLA") - file_path = CharFilter(lookup_expr='icontains') + file_path = CharFilter(lookup_expr="icontains") class Meta: model = Finding # exclude sonarqube issue as by default it will show all without checking permissions - exclude = ['date', 'cwe', 'url', 'description', 'mitigation', 'impact', - 'references', 'sonarqube_issue', 'duplicate_finding', - 'thread_id', 'notes', 'inherited_tags', 'endpoints', - 'numerical_severity', 'reporter', 'last_reviewed', - 'jira_creation', 'jira_change', 'files'] + exclude = ["date", "cwe", "url", "description", "mitigation", "impact", + "references", "sonarqube_issue", "duplicate_finding", + "thread_id", "notes", "inherited_tags", "endpoints", + "numerical_severity", "reporter", "last_reviewed", + "jira_creation", "jira_change", "files"] def manage_kwargs(self, kwargs): self.prod_type = None self.product = None self.engagement = None self.test = None - if 'prod_type' in kwargs: - self.prod_type = kwargs.pop('prod_type') - if 'product' in kwargs: - self.product = kwargs.pop('product') - if 'engagement' in kwargs: - self.engagement = kwargs.pop('engagement') - if 'test' in kwargs: - self.test = kwargs.pop('test') + if "prod_type" in kwargs: + self.prod_type = kwargs.pop("prod_type") + if "product" in kwargs: + self.product = kwargs.pop("product") + if "engagement" in kwargs: + self.engagement = kwargs.pop("engagement") + if "test" in kwargs: + self.test = kwargs.pop("test") @property def qs(self): @@ -2931,36 +2931,36 @@ def __init__(self, *args, **kwargs): # duplicate_finding queryset needs to restricted in line with permissions # and inline with report scope to avoid a dropdown with 100K entries - duplicate_finding_query_set = self.form.fields['duplicate_finding'].queryset + duplicate_finding_query_set = 
self.form.fields["duplicate_finding"].queryset duplicate_finding_query_set = get_authorized_findings(Permissions.Finding_View, duplicate_finding_query_set) if self.test: duplicate_finding_query_set = duplicate_finding_query_set.filter(test=self.test) - del self.form.fields['test__tags'] - del self.form.fields['test__engagement__tags'] - del self.form.fields['test__engagement__product__tags'] + del self.form.fields["test__tags"] + del self.form.fields["test__engagement__tags"] + del self.form.fields["test__engagement__product__tags"] if self.engagement: duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement=self.engagement) - del self.form.fields['test__engagement__tags'] - del self.form.fields['test__engagement__product__tags'] + del self.form.fields["test__engagement__tags"] + del self.form.fields["test__engagement__product__tags"] elif self.product: duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product=self.product) - del self.form.fields['test__engagement__product'] - del self.form.fields['test__engagement__product__tags'] + del self.form.fields["test__engagement__product"] + del self.form.fields["test__engagement__product__tags"] elif self.prod_type: duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product__prod_type=self.prod_type) - del self.form.fields['test__engagement__product__prod_type'] + del self.form.fields["test__engagement__product__prod_type"] - self.form.fields['duplicate_finding'].queryset = duplicate_finding_query_set + self.form.fields["duplicate_finding"].queryset = duplicate_finding_query_set - if 'test__engagement__product__prod_type' in self.form.fields: + if "test__engagement__product__prod_type" in self.form.fields: self.form.fields[ - 'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View) - if 'test__engagement__product' in self.form.fields: + "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + if "test__engagement__product" in self.form.fields: self.form.fields[ - 'test__engagement__product'].queryset = get_authorized_products(Permissions.Product_View) - if 'test__engagement' in self.form.fields: - self.form.fields['test__engagement'].queryset = get_authorized_engagements(Permissions.Engagement_View) + "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) + if "test__engagement" in self.form.fields: + self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) class ReportFindingFilterWithoutObjectLookups(ReportFindingFilterHelper, FindingTagStringFilter): @@ -3120,62 +3120,62 @@ def __init__(self, *args, **kwargs): class UserFilter(DojoFilter): - first_name = CharFilter(lookup_expr='icontains') - last_name = CharFilter(lookup_expr='icontains') - username = CharFilter(lookup_expr='icontains') - email = CharFilter(lookup_expr='icontains') + first_name = CharFilter(lookup_expr="icontains") + last_name = CharFilter(lookup_expr="icontains") + username = CharFilter(lookup_expr="icontains") + email = CharFilter(lookup_expr="icontains") o = OrderingFilter( # tuple-mapping retains order fields=( - ('username', 'username'), - ('last_name', 'last_name'), - ('first_name', 'first_name'), - ('email', 'email'), - ('is_active', 'is_active'), - ('is_superuser', 'is_superuser'), - ('date_joined', 'date_joined'), - ('last_login', 'last_login'), + ("username", "username"), + ("last_name", 
"last_name"), + ("first_name", "first_name"), + ("email", "email"), + ("is_active", "is_active"), + ("is_superuser", "is_superuser"), + ("date_joined", "date_joined"), + ("last_login", "last_login"), ), field_labels={ - 'username': 'User Name', - 'is_active': 'Active', - 'is_superuser': 'Superuser', + "username": "User Name", + "is_active": "Active", + "is_superuser": "Superuser", }, ) class Meta: model = Dojo_User - fields = ['is_superuser', 'is_active', 'first_name', 'last_name', 'username', 'email'] + fields = ["is_superuser", "is_active", "first_name", "last_name", "username", "email"] class GroupFilter(DojoFilter): - name = CharFilter(lookup_expr='icontains') - description = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") + description = CharFilter(lookup_expr="icontains") class Meta: model = Dojo_Group - fields = ['name', 'description'] - exclude = ['users'] + fields = ["name", "description"] + exclude = ["users"] class TestImportFilter(DojoFilter): - version = CharFilter(field_name='version', lookup_expr='icontains') - version_exact = CharFilter(field_name='version', lookup_expr='iexact', label='Version Exact') - branch_tag = CharFilter(lookup_expr='icontains', label='Branch/Tag') - build_id = CharFilter(lookup_expr='icontains', label="Build ID") - commit_hash = CharFilter(lookup_expr='icontains', label="Commit hash") + version = CharFilter(field_name="version", lookup_expr="icontains") + version_exact = CharFilter(field_name="version", lookup_expr="iexact", label="Version Exact") + branch_tag = CharFilter(lookup_expr="icontains", label="Branch/Tag") + build_id = CharFilter(lookup_expr="icontains", label="Build ID") + commit_hash = CharFilter(lookup_expr="icontains", label="Commit hash") - findings_affected = BooleanFilter(field_name='findings_affected', lookup_expr='isnull', exclude=True, label='Findings affected') + findings_affected = BooleanFilter(field_name="findings_affected", lookup_expr="isnull", exclude=True, label="Findings affected") o = OrderingFilter( # tuple-mapping retains order fields=( - ('date', 'date'), - ('version', 'version'), - ('branch_tag', 'branch_tag'), - ('build_id', 'build_id'), - ('commit_hash', 'commit_hash'), + ("date", "date"), + ("version", "version"), + ("branch_tag", "branch_tag"), + ("build_id", "build_id"), + ("commit_hash", "commit_hash"), ), ) @@ -3190,7 +3190,7 @@ class TestImportFindingActionFilter(DojoFilter): o = OrderingFilter( # tuple-mapping retains order fields=( - ('action', 'action'), + ("action", "action"), ), ) @@ -3208,87 +3208,87 @@ class LogEntryFilter(DojoFilter): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.form.fields['actor'].queryset = get_authorized_users(Permissions.Product_View) + self.form.fields["actor"].queryset = get_authorized_users(Permissions.Product_View) class Meta: model = LogEntry - exclude = ['content_type', 'object_pk', 'object_id', 'object_repr', - 'changes', 'additional_data', 'remote_addr'] + exclude = ["content_type", "object_pk", "object_id", "object_repr", + "changes", "additional_data", "remote_addr"] filter_overrides = { JSONField: { - 'filter_class': CharFilter, - 'extra': lambda f: { - 'lookup_expr': 'icontains', + "filter_class": CharFilter, + "extra": lambda f: { + "lookup_expr": "icontains", }, }, } class ProductTypeFilter(DojoFilter): - name = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), + ("name", "name"), ), ) class 
Meta: model = Product_Type exclude = [] - include = ('name',) + include = ("name",) class TestTypeFilter(DojoFilter): - name = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), + ("name", "name"), ), ) class Meta: model = Test_Type exclude = [] - include = ('name',) + include = ("name",) class DevelopmentEnvironmentFilter(DojoFilter): - name = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), + ("name", "name"), ), ) class Meta: model = Development_Environment exclude = [] - include = ('name',) + include = ("name",) class NoteTypesFilter(DojoFilter): - name = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") o = OrderingFilter( # tuple-mapping retains order fields=( - ('name', 'name'), - ('description', 'description'), - ('is_single', 'is_single'), - ('is_mandatory', 'is_mandatory'), + ("name", "name"), + ("description", "description"), + ("is_single", "is_single"), + ("is_mandatory", "is_mandatory"), ), ) class Meta: model = Note_Type exclude = [] - include = ('name', 'is_single', 'description') + include = ("name", "is_single", "description") # ============================== # Defect Dojo Engaegment Surveys @@ -3296,13 +3296,13 @@ class Meta: class QuestionnaireFilter(FilterSet): - name = CharFilter(lookup_expr='icontains') - description = CharFilter(lookup_expr='icontains') + name = CharFilter(lookup_expr="icontains") + description = CharFilter(lookup_expr="icontains") active = BooleanFilter() class Meta: model = Engagement_Survey - exclude = ['questions'] + exclude = ["questions"] survey_set = FilterSet @@ -3318,13 +3318,13 @@ def choice_question(self, qs, name): return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion)) options = { - None: (_('Any'), any), - 1: (_('Text Question'), text_question), - 2: (_('Choice Question'), choice_question), + None: (_("Any"), any), + 1: (_("Text Question"), text_question), + 2: (_("Choice Question"), choice_question), } def __init__(self, *args, **kwargs): - kwargs['choices'] = [ + kwargs["choices"] = [ (key, value[0]) for key, value in six.iteritems(self.options)] super().__init__(*args, **kwargs) @@ -3338,11 +3338,11 @@ def filter(self, qs, value): with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning): class QuestionFilter(FilterSet): - text = CharFilter(lookup_expr='icontains') + text = CharFilter(lookup_expr="icontains") type = QuestionTypeFilter() class Meta: model = Question - exclude = ['polymorphic_ctype', 'created', 'modified', 'order'] + exclude = ["polymorphic_ctype", "created", "modified", "order"] question_set = FilterSet diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py index 571e640790..d3ed4e3b70 100644 --- a/dojo/finding/helper.py +++ b/dojo/finding/helper.py @@ -51,7 +51,7 @@ def pre_save_finding_status_change(sender, instance, changed_fields=None, **kwar # logger.debug('ignoring save of finding without id') # return - logger.debug('%i: changed status fields pre_save: %s', instance.id or 0, changed_fields) + logger.debug("%i: changed status fields pre_save: %s", instance.id or 0, changed_fields) for field, (old, new) in changed_fields.items(): logger.debug("%i: %s changed from %s to %s" % (instance.id or 0, field, old, new)) @@ -82,9 +82,9 @@ def pre_save_finding_status_change(sender, instance, changed_fields=None, 
**kwar def update_finding_status(new_state_finding, user, changed_fields=None): now = timezone.now() - logger.debug('changed fields: %s', changed_fields) + logger.debug("changed fields: %s", changed_fields) - is_new_finding = not changed_fields or (changed_fields and len(changed_fields) == 1 and 'id' in changed_fields) + is_new_finding = not changed_fields or (changed_fields and len(changed_fields) == 1 and "id" in changed_fields) # activated # reactivated @@ -94,11 +94,11 @@ def update_finding_status(new_state_finding, user, changed_fields=None): # marked as duplicate # marked as original - if is_new_finding or 'is_mitigated' in changed_fields: + if is_new_finding or "is_mitigated" in changed_fields: # finding is being mitigated if new_state_finding.is_mitigated: # when mitigating a finding, the meta fields can only be editted if allowed - logger.debug('finding being mitigated, set mitigated and mitigated_by fields') + logger.debug("finding being mitigated, set mitigated and mitigated_by fields") if can_edit_mitigated_data(user): # only set if it was not already set by user @@ -117,7 +117,7 @@ def update_finding_status(new_state_finding, user, changed_fields=None): new_state_finding.mitigated = new_state_finding.mitigated or now new_state_finding.mitigated_by = new_state_finding.mitigated_by or user - if is_new_finding or 'active' in changed_fields: + if is_new_finding or "active" in changed_fields: # finding is being (re)activated if new_state_finding.active: new_state_finding.false_p = False @@ -129,10 +129,10 @@ def update_finding_status(new_state_finding, user, changed_fields=None): # finding is being deactivated pass - if is_new_finding or 'verified' in changed_fields: + if is_new_finding or "verified" in changed_fields: pass - if is_new_finding or 'false_p' in changed_fields or 'out_of_scope' in changed_fields: + if is_new_finding or "false_p" in changed_fields or "out_of_scope" in changed_fields: # existing behaviour is that false_p or out_of_scope implies mitigated if new_state_finding.false_p or new_state_finding.out_of_scope: new_state_finding.mitigated = new_state_finding.mitigated or now @@ -154,12 +154,12 @@ def can_edit_mitigated_data(user): def create_finding_group(finds, finding_group_name): - logger.debug('creating finding_group_create') + logger.debug("creating finding_group_create") if not finds or len(finds) == 0: - msg = 'cannot create empty Finding Group' + msg = "cannot create empty Finding Group" raise ValueError(msg) - finding_group_name_dummy = 'bulk group ' + strftime("%a, %d %b %Y %X", timezone.now().timetuple()) + finding_group_name_dummy = "bulk group " + strftime("%a, %d %b %Y %X", timezone.now().timetuple()) finding_group = Finding_Group(test=finds[0].test) finding_group.creator = get_current_user() @@ -192,7 +192,7 @@ def add_to_finding_group(finding_group, finds): # Now update the JIRA to add the finding to the finding group if finding_group.has_jira_issue and jira_helper.get_jira_instance(finding_group).finding_jira_sync: - logger.debug('pushing to jira from finding.finding_bulk_update_all()') + logger.debug("pushing to jira from finding.finding_bulk_update_all()") jira_helper.push_to_jira(finding_group) added = len(available_findings) @@ -219,7 +219,7 @@ def remove_from_finding_group(finds): # Now update the JIRA to remove the finding from the finding group for group in affected_groups: if group.has_jira_issue and jira_helper.get_jira_instance(group).finding_jira_sync: - logger.debug('pushing to jira from finding.finding_bulk_update_all()') + 
logger.debug("pushing to jira from finding.finding_bulk_update_all()") jira_helper.push_to_jira(group) return affected_groups, removed, skipped @@ -230,36 +230,36 @@ def update_finding_group(finding, finding_group): if finding_group is not None: if finding_group != finding.finding_group: if finding.finding_group: - logger.debug('removing finding %d from finding_group %s', finding.id, finding.finding_group) + logger.debug("removing finding %d from finding_group %s", finding.id, finding.finding_group) finding.finding_group.findings.remove(finding) - logger.debug('adding finding %d to finding_group %s', finding.id, finding_group) + logger.debug("adding finding %d to finding_group %s", finding.id, finding_group) finding_group.findings.add(finding) else: if finding.finding_group: - logger.debug('removing finding %d from finding_group %s', finding.id, finding.finding_group) + logger.debug("removing finding %d from finding_group %s", finding.id, finding.finding_group) finding.finding_group.findings.remove(finding) def get_group_by_group_name(finding, finding_group_by_option): group_name = None - if finding_group_by_option == 'component_name': + if finding_group_by_option == "component_name": group_name = finding.component_name - elif finding_group_by_option == 'component_name+component_version': + elif finding_group_by_option == "component_name+component_version": if finding.component_name or finding.component_version: - group_name = '{}:{}'.format((finding.component_name if finding.component_name else 'None'), - (finding.component_version if finding.component_version else 'None')) - elif finding_group_by_option == 'file_path': + group_name = "{}:{}".format((finding.component_name if finding.component_name else "None"), + (finding.component_version if finding.component_version else "None")) + elif finding_group_by_option == "file_path": if finding.file_path: - group_name = f'Filepath {finding.file_path}' - elif finding_group_by_option == 'finding_title': + group_name = f"Filepath {finding.file_path}" + elif finding_group_by_option == "finding_title": group_name = finding.title else: msg = f"Invalid group_by option {finding_group_by_option}" raise ValueError(msg) if group_name: - return f'Findings in: {group_name}' + return f"Findings in: {group_name}" return group_name @@ -296,7 +296,7 @@ def group_findings_by(finds, finding_group_by_option): # Now update the JIRA to add the finding to the finding group for group in affected_groups: if group.has_jira_issue and jira_helper.get_jira_instance(group).finding_jira_sync: - logger.debug('pushing to jira from finding.finding_bulk_update_all()') + logger.debug("pushing to jira from finding.finding_bulk_update_all()") jira_helper.push_to_jira(group) return affected_groups, grouped, skipped, groups_created @@ -306,14 +306,14 @@ def add_findings_to_auto_group(name, findings, group_by, create_finding_groups_f if name is not None and findings is not None and len(findings) > 0: creator = get_current_user() if not creator: - creator = kwargs.get('async_user', None) + creator = kwargs.get("async_user", None) test = findings[0].test if create_finding_groups_for_all_findings or len(findings) > 1: # Only create a finding group if we have more than one finding for a given finding group, unless configured otherwise finding_group, created = Finding_Group.objects.get_or_create(test=test, creator=creator, name=name) if created: - logger.debug('Created Finding Group %d:%s for test %d:%s', finding_group.id, finding_group, test.id, test) + logger.debug("Created Finding 
Group %d:%s for test %d:%s", finding_group.id, finding_group, test.id, test) # See if we have old findings in the same test that were created without a finding group # that should be added to this new group old_findings = Finding.objects.filter(test=test) @@ -385,7 +385,7 @@ def post_process_finding_save(finding, dedupe_option=True, rules_option=True, pr # Adding a snippet here for push to JIRA so that it's in one place if push_to_jira: - logger.debug('pushing finding %s to jira from finding.save()', finding.pk) + logger.debug("pushing finding %s to jira from finding.save()", finding.pk) import dojo.jira_link.helper as jira_helper # current approach is that whenever a finding is in a group, the group will be pushed to JIRA @@ -399,7 +399,7 @@ def post_process_finding_save(finding, dedupe_option=True, rules_option=True, pr @receiver(pre_delete, sender=Finding) def finding_pre_delete(sender, instance, **kwargs): - logger.debug('finding pre_delete: %d', instance.id) + logger.debug("finding pre_delete: %d", instance.id) # this shouldn't be necessary as Django should remove any Many-To-Many entries automatically, might be a bug in Django? # https://code.djangoproject.com/ticket/154 @@ -407,7 +407,7 @@ def finding_pre_delete(sender, instance, **kwargs): def finding_delete(instance, **kwargs): - logger.debug('finding delete, instance: %s', instance.id) + logger.debug("finding delete, instance: %s", instance.id) # the idea is that the engagement/test pre delete already prepared all the duplicates inside # the test/engagement to no longer point to any original so they can be safely deleted. @@ -415,7 +415,7 @@ def finding_delete(instance, **kwargs): # a manual / single finding delete, or a bulke delete of findings # in which case we have to process all the duplicates # TODO: should we add the prepocessing also to the bulk edit form? - logger.debug('finding_delete: refresh from db: pk: %d', instance.pk) + logger.debug("finding_delete: refresh from db: pk: %d", instance.pk) try: instance.refresh_from_db() @@ -428,17 +428,17 @@ def finding_delete(instance, **kwargs): if duplicate_cluster: reconfigure_duplicate_cluster(instance, duplicate_cluster) else: - logger.debug('no duplicate cluster found for finding: %d, so no need to reconfigure', instance.id) + logger.debug("no duplicate cluster found for finding: %d, so no need to reconfigure", instance.id) # this shouldn't be necessary as Django should remove any Many-To-Many entries automatically, might be a bug in Django? 
# https://code.djangoproject.com/ticket/154 - logger.debug('finding delete: clearing found by') + logger.debug("finding delete: clearing found by") instance.found_by.clear() @receiver(post_delete, sender=Finding) def finding_post_delete(sender, instance, **kwargs): - logger.debug('finding post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance)) + logger.debug("finding post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance)) # calculate_grade(instance.test.engagement.product) @@ -448,7 +448,7 @@ def reset_duplicate_before_delete(dupe): def reset_duplicates_before_delete(qs): - mass_model_updater(Finding, qs, lambda f: reset_duplicate_before_delete(f), fields=['duplicate', 'duplicate_finding']) + mass_model_updater(Finding, qs, lambda f: reset_duplicate_before_delete(f), fields=["duplicate", "duplicate_finding"]) def set_new_original(finding, new_original): @@ -466,13 +466,13 @@ def reconfigure_duplicate_cluster(original, cluster_outside): return if settings.DUPLICATE_CLUSTER_CASCADE_DELETE: - cluster_outside.order_by('-id').delete() + cluster_outside.order_by("-id").delete() else: - logger.debug('reconfigure_duplicate_cluster: cluster_outside: %s', cluster_outside) + logger.debug("reconfigure_duplicate_cluster: cluster_outside: %s", cluster_outside) # set new original to first finding in cluster (ordered by id) - new_original = cluster_outside.order_by('id').first() + new_original = cluster_outside.order_by("id").first() if new_original: - logger.debug('changing original of duplicate cluster %d to: %s:%s', original.id, new_original.id, new_original.title) + logger.debug("changing original of duplicate cluster %d to: %s:%s", original.id, new_original.id, new_original.title) new_original.duplicate = False new_original.duplicate_finding = None @@ -488,13 +488,13 @@ def reconfigure_duplicate_cluster(original, cluster_outside): # find.duplicate_finding = new_original # find.save_no_options() - mass_model_updater(Finding, cluster_outside, lambda f: set_new_original(f, new_original), fields=['duplicate_finding']) + mass_model_updater(Finding, cluster_outside, lambda f: set_new_original(f, new_original), fields=["duplicate_finding"]) def prepare_duplicates_for_delete(test=None, engagement=None): - logger.debug('prepare duplicates for delete, test: %s, engagement: %s', test.id if test else None, engagement.id if engagement else None) + logger.debug("prepare duplicates for delete, test: %s, engagement: %s", test.id if test else None, engagement.id if engagement else None) if test is None and engagement is None: - logger.warning('nothing to prepare as test and engagement are None') + logger.warning("nothing to prepare as test and engagement are None") fix_loop_duplicates() @@ -509,7 +509,7 @@ def prepare_duplicates_for_delete(test=None, engagement=None): originals = originals.distinct() if len(originals) == 0: - logger.debug('no originals found, so no duplicates to prepare for deletion of original') + logger.debug("no originals found, so no duplicates to prepare for deletion of original") return # remove the link to the original from the duplicates inside the cluster so they can be safely deleted by the django framework @@ -518,7 +518,7 @@ def prepare_duplicates_for_delete(test=None, engagement=None): # logger.debug('originals: %s', [original.id for original in originals]) for original in originals: i += 1 - logger.debug('%d/%d: preparing duplicate cluster for deletion of original: %d', i, total, original.id) + logger.debug("%d/%d: preparing 
duplicate cluster for deletion of original: %d", i, total, original.id) cluster_inside = original.original_finding.all() if engagement: cluster_inside = cluster_inside.filter(test__engagement=engagement) @@ -540,29 +540,29 @@ def prepare_duplicates_for_delete(test=None, engagement=None): if len(cluster_outside) > 0: reconfigure_duplicate_cluster(original, cluster_outside) - logger.debug('done preparing duplicate cluster for deletion of original: %d', original.id) + logger.debug("done preparing duplicate cluster for deletion of original: %d", original.id) @receiver(pre_delete, sender=Test) def test_pre_delete(sender, instance, **kwargs): - logger.debug('test pre_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance)) + logger.debug("test pre_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance)) prepare_duplicates_for_delete(test=instance) @receiver(post_delete, sender=Test) def test_post_delete(sender, instance, **kwargs): - logger.debug('test post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance)) + logger.debug("test post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance)) @receiver(pre_delete, sender=Engagement) def engagement_pre_delete(sender, instance, **kwargs): - logger.debug('engagement pre_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance)) + logger.debug("engagement pre_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance)) prepare_duplicates_for_delete(engagement=instance) @receiver(post_delete, sender=Engagement) def engagement_post_delete(sender, instance, **kwargs): - logger.debug('engagement post_delete, sender: %s instance: %s', to_str_typed(sender), to_str_typed(instance)) + logger.debug("engagement post_delete, sender: %s instance: %s", to_str_typed(sender), to_str_typed(instance)) def fix_loop_duplicates(): @@ -574,7 +574,7 @@ def fix_loop_duplicates(): if loop_count > 0: deduplicationLogger.info("Identified %d Findings with Loops" % len(candidates)) - for find_id in candidates.values_list('id', flat=True): + for find_id in candidates.values_list("id", flat=True): removeLoop(find_id, 50) new_originals = Finding.objects.filter(duplicate_finding__isnull=True, duplicate=True) @@ -634,12 +634,12 @@ def add_endpoints(new_finding, form): for endpoint in added_endpoints: endpoint_ids.append(endpoint.id) - new_finding.endpoints.set(form.cleaned_data['endpoints'] | Endpoint.objects.filter(id__in=endpoint_ids)) + new_finding.endpoints.set(form.cleaned_data["endpoints"] | Endpoint.objects.filter(id__in=endpoint_ids)) for endpoint in new_finding.endpoints.all(): _eps, _created = Endpoint_Status.objects.get_or_create( finding=new_finding, - endpoint=endpoint, defaults={'date': form.cleaned_data['date'] or timezone.now()}) + endpoint=endpoint, defaults={"date": form.cleaned_data["date"] or timezone.now()}) def save_vulnerability_ids(finding, vulnerability_ids): diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py index cfc7e9ace9..7f213805a4 100644 --- a/dojo/finding/queries.py +++ b/dojo/finding/queries.py @@ -16,19 +16,19 @@ def get_authorized_groups(permission, user=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('test__engagement__product__prod_type_id'), + product_type=OuterRef("test__engagement__product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - 
product=OuterRef('test__engagement__product_id'), + product=OuterRef("test__engagement__product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('test__engagement__product__prod_type_id'), + product_type=OuterRef("test__engagement__product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('test__engagement__product_id'), + product=OuterRef("test__engagement__product_id"), group__users=user, role__in=roles) @@ -46,7 +46,7 @@ def get_authorized_findings(permission, queryset=None, user=None): if user is None: return Finding.objects.none() if queryset is None: - findings = Finding.objects.all() + findings = Finding.objects.all().order_by("id") else: findings = queryset @@ -84,10 +84,10 @@ def get_authorized_stub_findings(permission): return Stub_Finding.objects.none() if user.is_superuser: - return Stub_Finding.objects.all() + return Stub_Finding.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Stub_Finding.objects.all() + return Stub_Finding.objects.all().order_by("id") ( authorized_product_type_roles, @@ -100,7 +100,7 @@ def get_authorized_stub_findings(permission): test__engagement__product__prod_type__member=Exists(authorized_product_type_roles), test__engagement__product__member=Exists(authorized_product_roles), test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups), - test__engagement__product__authorized_group=Exists(authorized_product_groups)) + test__engagement__product__authorized_group=Exists(authorized_product_groups)).order_by("id") findings = findings.filter( Q(test__engagement__product__prod_type__member=True) | Q(test__engagement__product__member=True) @@ -131,19 +131,19 @@ def get_authorized_vulnerability_ids(permission, queryset=None, user=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('finding__test__engagement__product__prod_type_id'), + product_type=OuterRef("finding__test__engagement__product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('finding__test__engagement__product_id'), + product=OuterRef("finding__test__engagement__product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('finding__test__engagement__product__prod_type_id'), + product_type=OuterRef("finding__test__engagement__product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('finding__test__engagement__product_id'), + product=OuterRef("finding__test__engagement__product_id"), group__users=user, role__in=roles) vulnerability_ids = vulnerability_ids.annotate( diff --git a/dojo/finding/urls.py b/dojo/finding/urls.py index 5e20fd1b6f..3b59624029 100644 --- a/dojo/finding/urls.py +++ b/dojo/finding/urls.py @@ -5,188 +5,188 @@ urlpatterns = [ # CRUD operations re_path( - r'^finding/(?P\d+)$', + r"^finding/(?P\d+)$", views.ViewFinding.as_view(), - name='view_finding', + name="view_finding", ), re_path( - r'^finding/(?P\d+)/edit$', + r"^finding/(?P\d+)/edit$", views.EditFinding.as_view(), - name='edit_finding', + name="edit_finding", ), re_path( - r'^finding/(?P\d+)/delete$', + r"^finding/(?P\d+)/delete$", views.DeleteFinding.as_view(), - name='delete_finding', + 
name="delete_finding", ), # Listing operations re_path( - r'^finding$', + r"^finding$", views.ListFindings.as_view(), - name='all_findings', + name="all_findings", ), re_path( - r'^finding/open$', + r"^finding/open$", views.ListOpenFindings.as_view(), - name='open_findings', + name="open_findings", ), re_path( - r'^finding/verified$', + r"^finding/verified$", views.ListVerifiedFindings.as_view(), - name='verified_findings', + name="verified_findings", ), re_path( - r'^finding/closed$', + r"^finding/closed$", views.ListClosedFindings.as_view(), - name='closed_findings', + name="closed_findings", ), re_path( - r'^finding/accepted$', + r"^finding/accepted$", views.ListAcceptedFindings.as_view(), - name='accepted_findings', + name="accepted_findings", ), re_path( - r'^product/(?P\d+)/finding/open$', + r"^product/(?P\d+)/finding/open$", views.ListOpenFindings.as_view(), - name='product_open_findings', + name="product_open_findings", ), re_path( - r'^product/(?P\d+)/findings$', + r"^product/(?P\d+)/findings$", views.ListOpenFindings.as_view(), - name='view_product_findings_old', + name="view_product_findings_old", ), re_path( - r'^product/(?P\d+)/finding/verified$', + r"^product/(?P\d+)/finding/verified$", views.ListVerifiedFindings.as_view(), - name='product_verified_findings', + name="product_verified_findings", ), re_path( - r'^product/(?P\d+)/finding/out_of_scope$', + r"^product/(?P\d+)/finding/out_of_scope$", views.ListOutOfScopeFindings.as_view(), - name='product_out_of_scope_findings', + name="product_out_of_scope_findings", ), re_path( - r'^product/(?P\d+)/finding/inactive$', + r"^product/(?P\d+)/finding/inactive$", views.ListInactiveFindings.as_view(), - name='product_inactive_findings', + name="product_inactive_findings", ), re_path( - r'^product/(?P\d+)/finding/all$', + r"^product/(?P\d+)/finding/all$", views.ListFindings.as_view(), - name='product_all_findings', + name="product_all_findings", ), re_path( - r'^product/(?P\d+)/finding/closed$', + r"^product/(?P\d+)/finding/closed$", views.ListClosedFindings.as_view(), - name='product_closed_findings', + name="product_closed_findings", ), re_path( - r'^product/(?P\d+)/finding/false_positive$', + r"^product/(?P\d+)/finding/false_positive$", views.ListFalsePositiveFindings.as_view(), - name='product_false_positive_findings', + name="product_false_positive_findings", ), re_path( - r'^product/(?P\d+)/finding/accepted$', + r"^product/(?P\d+)/finding/accepted$", views.ListAcceptedFindings.as_view(), - name='product_accepted_findings', + name="product_accepted_findings", ), re_path( - r'^engagement/(?P\d+)/finding/open$', + r"^engagement/(?P\d+)/finding/open$", views.ListOpenFindings.as_view(), - name='engagement_open_findings', + name="engagement_open_findings", ), re_path( - r'^engagement/(?P\d+)/finding/closed$', + r"^engagement/(?P\d+)/finding/closed$", views.ListClosedFindings.as_view(), - name='engagement_closed_findings', + name="engagement_closed_findings", ), re_path( - r'^engagement/(?P\d+)/finding/verified$', + r"^engagement/(?P\d+)/finding/verified$", views.ListVerifiedFindings.as_view(), - name='engagement_verified_findings', + name="engagement_verified_findings", ), re_path( - r'^engagement/(?P\d+)/finding/accepted$', + r"^engagement/(?P\d+)/finding/accepted$", views.ListAcceptedFindings.as_view(), - name='engagement_accepted_findings', + name="engagement_accepted_findings", ), re_path( - r'^engagement/(?P\d+)/finding/all$', + r"^engagement/(?P\d+)/finding/all$", views.ListFindings.as_view(), - name='engagement_all_findings', + 
name="engagement_all_findings", ), # findings - re_path(r'^finding/bulk$', views.finding_bulk_update_all, - name='finding_bulk_update_all'), - re_path(r'^product/(?P\d+)/finding/bulk_product$', views.finding_bulk_update_all, - name='finding_bulk_update_all_product'), + re_path(r"^finding/bulk$", views.finding_bulk_update_all, + name="finding_bulk_update_all"), + re_path(r"^product/(?P\d+)/finding/bulk_product$", views.finding_bulk_update_all, + name="finding_bulk_update_all_product"), # re_path(r'^test/(?P\d+)/bulk', views.finding_bulk_update_all, # name='finding_bulk_update_all_test'), - re_path(r'^finding/(?P\d+)/touch$', - views.touch_finding, name='touch_finding'), - re_path(r'^finding/(?P\d+)/simple_risk_accept$', - views.simple_risk_accept, name='simple_risk_accept_finding'), - re_path(r'^finding/(?P\d+)/simple_risk_unaccept$', - views.risk_unaccept, name='risk_unaccept_finding'), - re_path(r'^finding/(?P\d+)/request_review$', - views.request_finding_review, name='request_finding_review'), - re_path(r'^finding/(?P\d+)/review$', - views.clear_finding_review, name='clear_finding_review'), - re_path(r'^finding/(?P\d+)/copy$', - views.copy_finding, name='copy_finding'), - re_path(r'^finding/(?P\d+)/apply_cwe$', - views.apply_template_cwe, name='apply_template_cwe'), - re_path(r'^finding/(?P\d+)/mktemplate$', views.mktemplate, - name='mktemplate'), - re_path(r'^finding/(?P\d+)/find_template_to_apply$', views.find_template_to_apply, - name='find_template_to_apply'), - re_path(r'^finding/(?P\d+)/(?P\d+)/choose_finding_template_options$', views.choose_finding_template_options, - name='choose_finding_template_options'), - re_path(r'^finding/(?P\d+)/(?P\d+)/apply_template_to_finding$', - views.apply_template_to_finding, name='apply_template_to_finding'), - re_path(r'^finding/(?P\d+)/close$', views.close_finding, - name='close_finding'), - re_path(r'^finding/(?P\d+)/defect_review$', - views.defect_finding_review, name='defect_finding_review'), - re_path(r'^finding/(?P\d+)/open$', views.reopen_finding, - name='reopen_finding'), - re_path(r'^finding/image/(?P[^/]+)$', views.download_finding_pic, - name='download_finding_pic'), - re_path(r'^finding/(?P\d+)/merge$', - views.merge_finding_product, name='merge_finding'), - re_path(r'^product/(?P\d+)/merge$', views.merge_finding_product, - name='merge_finding_product'), - re_path(r'^finding/(?P\d+)/duplicate/(?P\d+)$', - views.mark_finding_duplicate, name='mark_finding_duplicate'), - re_path(r'^finding/(?P\d+)/duplicate/reset$', - views.reset_finding_duplicate_status, name='reset_finding_duplicate_status'), - re_path(r'^finding/(?P\d+)/original/(?P\d+)$', - views.set_finding_as_original, name='set_finding_as_original'), - re_path(r'^finding/(?P\d+)/remediation_date$', views.remediation_date, - name='remediation_date'), + re_path(r"^finding/(?P\d+)/touch$", + views.touch_finding, name="touch_finding"), + re_path(r"^finding/(?P\d+)/simple_risk_accept$", + views.simple_risk_accept, name="simple_risk_accept_finding"), + re_path(r"^finding/(?P\d+)/simple_risk_unaccept$", + views.risk_unaccept, name="risk_unaccept_finding"), + re_path(r"^finding/(?P\d+)/request_review$", + views.request_finding_review, name="request_finding_review"), + re_path(r"^finding/(?P\d+)/review$", + views.clear_finding_review, name="clear_finding_review"), + re_path(r"^finding/(?P\d+)/copy$", + views.copy_finding, name="copy_finding"), + re_path(r"^finding/(?P\d+)/apply_cwe$", + views.apply_template_cwe, name="apply_template_cwe"), + re_path(r"^finding/(?P\d+)/mktemplate$", 
views.mktemplate, + name="mktemplate"), + re_path(r"^finding/(?P\d+)/find_template_to_apply$", views.find_template_to_apply, + name="find_template_to_apply"), + re_path(r"^finding/(?P\d+)/(?P\d+)/choose_finding_template_options$", views.choose_finding_template_options, + name="choose_finding_template_options"), + re_path(r"^finding/(?P\d+)/(?P\d+)/apply_template_to_finding$", + views.apply_template_to_finding, name="apply_template_to_finding"), + re_path(r"^finding/(?P\d+)/close$", views.close_finding, + name="close_finding"), + re_path(r"^finding/(?P\d+)/defect_review$", + views.defect_finding_review, name="defect_finding_review"), + re_path(r"^finding/(?P\d+)/open$", views.reopen_finding, + name="reopen_finding"), + re_path(r"^finding/image/(?P[^/]+)$", views.download_finding_pic, + name="download_finding_pic"), + re_path(r"^finding/(?P\d+)/merge$", + views.merge_finding_product, name="merge_finding"), + re_path(r"^product/(?P\d+)/merge$", views.merge_finding_product, + name="merge_finding_product"), + re_path(r"^finding/(?P\d+)/duplicate/(?P\d+)$", + views.mark_finding_duplicate, name="mark_finding_duplicate"), + re_path(r"^finding/(?P\d+)/duplicate/reset$", + views.reset_finding_duplicate_status, name="reset_finding_duplicate_status"), + re_path(r"^finding/(?P\d+)/original/(?P\d+)$", + views.set_finding_as_original, name="set_finding_as_original"), + re_path(r"^finding/(?P\d+)/remediation_date$", views.remediation_date, + name="remediation_date"), # stub findings - re_path(r'^stub_finding/(?P\d+)/add$', - views.add_stub_finding, name='add_stub_finding'), - re_path(r'^stub_finding/(?P\d+)/promote$', - views.promote_to_finding, name='promote_to_finding'), - re_path(r'^stub_finding/(?P\d+)/delete$', - views.delete_stub_finding, name='delete_stub_finding'), + re_path(r"^stub_finding/(?P\d+)/add$", + views.add_stub_finding, name="add_stub_finding"), + re_path(r"^stub_finding/(?P\d+)/promote$", + views.promote_to_finding, name="promote_to_finding"), + re_path(r"^stub_finding/(?P\d+)/delete$", + views.delete_stub_finding, name="delete_stub_finding"), # template findings - re_path(r'^template$', views.templates, - name='templates'), - re_path(r'^template/add$', views.add_template, - name='add_template'), - re_path(r'^template/(?P\d+)/edit$', - views.edit_template, name='edit_template'), - re_path(r'^template/(?P\d+)/delete$', - views.delete_template, name='delete_template'), - re_path(r'^template/export$', - views.export_templates_to_json, name='export_template'), + re_path(r"^template$", views.templates, + name="templates"), + re_path(r"^template/add$", views.add_template, + name="add_template"), + re_path(r"^template/(?P\d+)/edit$", + views.edit_template, name="edit_template"), + re_path(r"^template/(?P\d+)/delete$", + views.delete_template, name="delete_template"), + re_path(r"^template/export$", + views.export_templates_to_json, name="export_template"), - re_path(r'^finding/(?P\d+)/jira/unlink$', views.unlink_jira, name='finding_unlink_jira'), - re_path(r'^finding/(?P\d+)/jira/push$', views.push_to_jira, name='finding_push_to_jira'), + re_path(r"^finding/(?P\d+)/jira/unlink$", views.unlink_jira, name="finding_unlink_jira"), + re_path(r"^finding/(?P\d+)/jira/push$", views.push_to_jira, name="finding_push_to_jira"), # re_path(r'^finding/(?P\d+)/jira/push', views.finding_link_to_jira, name='finding_link_to_jira'), ] diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 8373022d72..66bcb21059 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -406,7 +406,7 @@ def 
add_breadcrumbs(self, request: HttpRequest, context: dict): # show custom breadcrumb if user has filtered by exactly 1 endpoint if "endpoints" in request.GET: endpoint_ids = request.GET.getlist("endpoints", []) - if len(endpoint_ids) == 1 and endpoint_ids[0] != '': + if len(endpoint_ids) == 1 and endpoint_ids[0] != "": endpoint_id = endpoint_ids[0] endpoint = get_object_or_404(Endpoint, id=endpoint_id) context["filter_name"] = "Vulnerable Endpoints" @@ -585,10 +585,10 @@ def get_test_import_data(self, request: HttpRequest, finding: Finding): test_import_finding_actions = test_import_finding_actions.filter(test_import__in=test_import_filter.qs) test_import_finding_action_filter = TestImportFindingActionFilter(request.GET, test_import_finding_actions) - paged_test_import_finding_actions = get_page_items_and_count(request, test_import_finding_action_filter.qs, 5, prefix='test_import_finding_actions') - paged_test_import_finding_actions.object_list = paged_test_import_finding_actions.object_list.prefetch_related('test_import') + paged_test_import_finding_actions = get_page_items_and_count(request, test_import_finding_action_filter.qs, 5, prefix="test_import_finding_actions") + paged_test_import_finding_actions.object_list = paged_test_import_finding_actions.object_list.prefetch_related("test_import") - latest_test_import_finding_action = finding.test_import_finding_action_set.order_by('-created').first + latest_test_import_finding_action = finding.test_import_finding_action_set.order_by("-created").first return { "test_import_filter": test_import_filter, @@ -942,14 +942,14 @@ def process_false_positive_history(self, finding: Finding): # fp history function because it will be called by the save function # If finding was a false positive and is being reactivated: retroactively reactivates all equal findings if finding.false_p and not finding.false_p and get_system_setting("retroactive_false_positive_history"): - logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', finding) + logger.debug("FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s", finding) existing_fp_findings = match_finding_to_existing_findings( finding, product=finding.test.engagement.product, ).filter(false_p=True) for fp in existing_fp_findings: - logger.debug('FALSE_POSITIVE_HISTORY: Reactivating false positive %i: %s', fp.id, fp) + logger.debug("FALSE_POSITIVE_HISTORY: Reactivating false positive %i: %s", fp.id, fp) fp.active = finding.active fp.verified = finding.verified fp.false_p = False @@ -2790,14 +2790,14 @@ def finding_bulk_update_all(request, pid=None): # If finding was a false positive and is being reactivated: retroactively reactivates all equal findings elif old_find.false_p and not find.false_p: if system_settings.retroactive_false_positive_history: - logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', find) + logger.debug("FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s", find) existing_fp_findings = match_finding_to_existing_findings( find, product=find.test.engagement.product, ).filter(false_p=True) for fp in existing_fp_findings: - logger.debug('FALSE_POSITIVE_HISTORY: Reactivating false positive %i: %s', fp.id, fp) + logger.debug("FALSE_POSITIVE_HISTORY: Reactivating false positive %i: %s", fp.id, fp) fp.active = find.active fp.verified = find.verified fp.false_p = False @@ -3386,9 +3386,8 @@ def push_to_jira(request, fid): ) return JsonResponse({"result": "OK"}) - except Exception as e: - logger.exception(e) - 
logger.error("Error pushing to JIRA: ", exc_info=True) + except Exception: + logger.exception("Error pushing to JIRA") messages.add_message( request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger", ) diff --git a/dojo/finding_group/queries.py b/dojo/finding_group/queries.py index 9bc4b95ffa..aae57f53c8 100644 --- a/dojo/finding_group/queries.py +++ b/dojo/finding_group/queries.py @@ -26,19 +26,19 @@ def get_authorized_finding_groups(permission, queryset=None, user=None): roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('test__engagement__product__prod_type_id'), + product_type=OuterRef("test__engagement__product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('test__engagement__product_id'), + product=OuterRef("test__engagement__product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('test__engagement__product__prod_type_id'), + product_type=OuterRef("test__engagement__product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('test__engagement__product_id'), + product=OuterRef("test__engagement__product_id"), group__users=user, role__in=roles) finding_groups = finding_groups.annotate( diff --git a/dojo/finding_group/signals.py b/dojo/finding_group/signals.py index e73927f13a..17ed6b1b9a 100644 --- a/dojo/finding_group/signals.py +++ b/dojo/finding_group/signals.py @@ -16,16 +16,16 @@ def finding_group_post_delete(sender, instance, using, origin, **kwargs): if settings.ENABLE_AUDITLOG: le = LogEntry.objects.get( action=LogEntry.Action.DELETE, - content_type=ContentType.objects.get(app_label='dojo', model='finding_group'), + content_type=ContentType.objects.get(app_label="dojo", model="finding_group"), object_id=instance.id, ) description = _('The finding group "%(name)s" was deleted by %(user)s') % { - 'name': instance.name, 'user': le.actor} + "name": instance.name, "user": le.actor} else: - description = _('The finding group "%(name)s" was deleted') % {'name': instance.name} - create_notification(event='finding_group_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing - title=_('Deletion of %(name)s') % {'name': instance.name}, + description = _('The finding group "%(name)s" was deleted') % {"name": instance.name} + create_notification(event="finding_group_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing + title=_("Deletion of %(name)s") % {"name": instance.name}, description=description, product=instance.test.engagement.product, - url=reverse('view_test', args=(instance.test.id, )), + url=reverse("view_test", args=(instance.test.id, )), icon="exclamation-triangle") diff --git a/dojo/finding_group/urls.py b/dojo/finding_group/urls.py index 56b9482676..5abb46ece5 100644 --- a/dojo/finding_group/urls.py +++ b/dojo/finding_group/urls.py @@ -4,8 +4,8 @@ urlpatterns = [ # finding group - re_path(r'^finding_group/(?P\d+)$', views.view_finding_group, name='view_finding_group'), - re_path(r'^finding_group/(?P\d+)/delete$', views.delete_finding_group, name='delete_finding_group'), - re_path(r'^finding_group/(?P\d+)/jira/push$', views.push_to_jira, name='finding_group_push_to_jira'), - 
re_path(r'^finding_group/(?P\d+)/jira/unlink$', views.unlink_jira, name='finding_group_unlink_jira'), + re_path(r"^finding_group/(?P\d+)$", views.view_finding_group, name="view_finding_group"), + re_path(r"^finding_group/(?P\d+)/delete$", views.delete_finding_group, name="delete_finding_group"), + re_path(r"^finding_group/(?P\d+)/jira/push$", views.push_to_jira, name="finding_group_push_to_jira"), + re_path(r"^finding_group/(?P\d+)/jira/unlink$", views.unlink_jira, name="finding_group_unlink_jira"), ] diff --git a/dojo/finding_group/views.py b/dojo/finding_group/views.py index b22c75d0e7..f6291e2e5c 100644 --- a/dojo/finding_group/views.py +++ b/dojo/finding_group/views.py @@ -21,7 +21,7 @@ logger = logging.getLogger(__name__) -@user_is_authorized(Finding_Group, Permissions.Finding_Group_View, 'fgid') +@user_is_authorized(Finding_Group, Permissions.Finding_Group_View, "fgid") def view_finding_group(request, fgid): finding_group = get_object_or_404(Finding_Group, pk=fgid) findings = finding_group.findings.all() @@ -51,11 +51,11 @@ def view_finding_group(request, fgid): github_config = GITHUB_PKey.objects.filter(product__engagement=eid).first() findings_filter = finding_filter_class(request.GET, findings, user=request.user, eid=eid) - title_words = get_words_for_field(Finding, 'title') - component_words = get_words_for_field(Finding, 'component_name') + title_words = get_words_for_field(Finding, "title") + component_words = get_words_for_field(Finding, "component_name") paged_findings = get_page_items(request, findings_filter.qs, 25) - paged_findings.object_list = prefetch_for_findings(paged_findings.object_list, 'all') + paged_findings.object_list = prefetch_for_findings(paged_findings.object_list, "all") bulk_edit_form = FindingBulkUpdateForm(request.GET) @@ -64,18 +64,18 @@ def view_finding_group(request, fgid): filter_name = finding_group.name - if request.method == 'POST': + if request.method == "POST": edit_finding_group_form = EditFindingGroupForm(request.POST, instance=finding_group) if edit_finding_group_form.is_valid(): - finding_group.name = edit_finding_group_form.cleaned_data.get('name', '') - push_to_jira = edit_finding_group_form.cleaned_data.get('push_to_jira') - jira_issue = edit_finding_group_form.cleaned_data.get('jira_issue') + finding_group.name = edit_finding_group_form.cleaned_data.get("name", "") + push_to_jira = edit_finding_group_form.cleaned_data.get("push_to_jira") + jira_issue = edit_finding_group_form.cleaned_data.get("jira_issue") if jira_issue: # See if the submitted issue was a issue key or the full URL jira_instance = jira_helper.get_jira_project(finding_group).jira_instance - if jira_issue.startswith(jira_instance.url + '/browse/'): - jira_issue = jira_issue[len(jira_instance.url + '/browse/'):] + if jira_issue.startswith(jira_instance.url + "/browse/"): + jira_issue = jira_issue[len(jira_instance.url + "/browse/"):] if finding_group.has_jira_issue and not jira_issue == jira_helper.get_jira_key(finding_group): jira_helper.unlink_jira(request, finding_group) @@ -86,60 +86,60 @@ def view_finding_group(request, fgid): jira_helper.push_to_jira(finding_group, sync=True) finding_group.save() - return HttpResponseRedirect(reverse('view_test', args=(finding_group.test.id,))) + return HttpResponseRedirect(reverse("view_test", args=(finding_group.test.id,))) add_breadcrumb(title=finding_group.name, top_level=not len(request.GET), request=request) - return render(request, 'dojo/view_finding_group.html', { - 'show_product_column': show_product_column, - 'product_tab': 
product_tab, - 'findings': paged_findings, - 'filtered': findings_filter, - 'title_words': title_words, - 'component_words': component_words, - 'custom_breadcrumb': custom_breadcrumb, - 'filter_name': filter_name, - 'jira_project': jira_project, - 'bulk_edit_form': bulk_edit_form, - 'edit_finding_group_form': edit_finding_group_form, + return render(request, "dojo/view_finding_group.html", { + "show_product_column": show_product_column, + "product_tab": product_tab, + "findings": paged_findings, + "filtered": findings_filter, + "title_words": title_words, + "component_words": component_words, + "custom_breadcrumb": custom_breadcrumb, + "filter_name": filter_name, + "jira_project": jira_project, + "bulk_edit_form": bulk_edit_form, + "edit_finding_group_form": edit_finding_group_form, }) -@user_is_authorized(Finding_Group, Permissions.Finding_Group_Delete, 'fgid') +@user_is_authorized(Finding_Group, Permissions.Finding_Group_Delete, "fgid") @require_POST def delete_finding_group(request, fgid): finding_group = get_object_or_404(Finding_Group, pk=fgid) form = DeleteFindingGroupForm(instance=finding_group) - if request.method == 'POST': - if 'id' in request.POST and str(finding_group.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(finding_group.id) == request.POST["id"]: form = DeleteFindingGroupForm(request.POST, instance=finding_group) if form.is_valid(): finding_group.delete() messages.add_message(request, messages.SUCCESS, - 'Finding Group and relationships removed.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_test', args=(finding_group.test.id,))) + "Finding Group and relationships removed.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_test", args=(finding_group.test.id,))) collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([finding_group]) rels = collector.nested() product_tab = Product_Tab(finding_group.test.engagement.product, title="Product", tab="settings") - return render(request, 'dojo/delete_finding_group.html', { - 'finding_group': finding_group, - 'form': form, - 'product_tab': product_tab, - 'rels': rels, + return render(request, "dojo/delete_finding_group.html", { + "finding_group": finding_group, + "form": form, + "product_tab": product_tab, + "rels": rels, }) -@user_is_authorized(Finding_Group, Permissions.Finding_Group_Edit, 'fgid') +@user_is_authorized(Finding_Group, Permissions.Finding_Group_Edit, "fgid") @require_POST def unlink_jira(request, fgid): - logger.debug('/finding_group/%s/jira/unlink', fgid) + logger.debug("/finding_group/%s/jira/unlink", fgid) group = get_object_or_404(Finding_Group, id=fgid) - logger.info('trying to unlink a linked jira issue from %d:%s', group.id, group.name) + logger.info("trying to unlink a linked jira issue from %d:%s", group.id, group.name) if group.has_jira_issue: try: jira_helper.unlink_jira(request, group) @@ -147,36 +147,36 @@ def unlink_jira(request, fgid): messages.add_message( request, messages.SUCCESS, - 'Link to JIRA issue succesfully deleted', - extra_tags='alert-success') + "Link to JIRA issue successfully deleted", + extra_tags="alert-success") - return JsonResponse({'result': 'OK'}) + return JsonResponse({"result": "OK"}) except Exception as e: logger.exception(e) messages.add_message( request, messages.ERROR, - 'Link to JIRA could not be deleted, see alerts for details', - extra_tags='alert-danger') + "Link to JIRA could not be deleted, see alerts for details", + extra_tags="alert-danger") return 
HttpResponse(status=500) else: messages.add_message( request, messages.ERROR, - 'Link to JIRA not found', - extra_tags='alert-danger') + "Link to JIRA not found", + extra_tags="alert-danger") return HttpResponse(status=400) -@user_is_authorized(Finding_Group, Permissions.Finding_Group_Edit, 'fgid') +@user_is_authorized(Finding_Group, Permissions.Finding_Group_Edit, "fgid") @require_POST def push_to_jira(request, fgid): - logger.debug('/finding_group/%s/jira/push', fgid) + logger.debug("/finding_group/%s/jira/push", fgid) group = get_object_or_404(Finding_Group, id=fgid) try: - logger.info('trying to push %d:%s to JIRA to create or update JIRA issue', group.id, group.name) - logger.debug('pushing to jira from group.push_to-jira()') + logger.info("trying to push %d:%s to JIRA to create or update JIRA issue", group.id, group.name) + logger.debug("pushing to jira from group.push_to_jira()") # it may look like success here, but the push_to_jira are swallowing exceptions # but cant't change too much now without having a test suite, so leave as is for now with the addition warning message to check alerts for background errors. @@ -184,22 +184,21 @@ def push_to_jira(request, fgid): messages.add_message( request, messages.SUCCESS, - message='Action queued to create or update linked JIRA issue, check alerts for background errors.', - extra_tags='alert-success') + message="Action queued to create or update linked JIRA issue, check alerts for background errors.", + extra_tags="alert-success") else: messages.add_message( request, messages.SUCCESS, - 'Push to JIRA failed, check alerts on the top right for errors', - extra_tags='alert-danger') + "Push to JIRA failed, check alerts on the top right for errors", + extra_tags="alert-danger") - return JsonResponse({'result': 'OK'}) - except Exception as e: - logger.exception(e) - logger.error('Error pushing to JIRA: ', exc_info=True) + return JsonResponse({"result": "OK"}) + except Exception: + logger.exception("Error pushing to JIRA") messages.add_message( request, messages.ERROR, - 'Error pushing to JIRA', - extra_tags='alert-danger') + "Error pushing to JIRA", + extra_tags="alert-danger") return HttpResponse(status=500) diff --git a/dojo/forms.py b/dojo/forms.py index 91c16eb3d5..2ab32f717c 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -112,21 +112,21 @@ logger = logging.getLogger(__name__) -RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$') +RE_DATE = re.compile(r"(\d{4})-(\d\d?)-(\d\d?)$") -FINDING_STATUS = (('verified', 'Verified'), - ('false_p', 'False Positive'), - ('duplicate', 'Duplicate'), - ('out_of_scope', 'Out of Scope')) +FINDING_STATUS = (("verified", "Verified"), + ("false_p", "False Positive"), + ("duplicate", "Duplicate"), + ("out_of_scope", "Out of Scope")) vulnerability_ids_field = forms.CharField(max_length=5000, required=False, label="Vulnerability Ids", help_text="Ids of vulnerabilities in security advisories associated with this finding. Can be Common Vulnerabilities and Exposures (CVE) or from other sources." 
"You may enter one vulnerability id per line.", - widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'})) + widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"})) -EFFORT_FOR_FIXING_INVALID_CHOICE = _('Select valid choice: Low,Medium,High') +EFFORT_FOR_FIXING_INVALID_CHOICE = _("Select valid choice: Low,Medium,High") class MultipleSelectWithPop(forms.SelectMultiple): @@ -146,9 +146,9 @@ class MonthYearWidget(Widget): django/trunk/django/forms/extras/widgets.py """ - none_value = (0, '---') - month_field = '%s_month' - year_field = '%s_year' + none_value = (0, "---") + month_field = "%s_month" + year_field = "%s_year" def __init__(self, attrs=None, years=None, required=True): # years is an optional list/tuple of years to use in the @@ -173,16 +173,16 @@ def render(self, name, value, attrs=None, renderer=None): output = [] - if 'id' in self.attrs: - id_ = self.attrs['id'] + if "id" in self.attrs: + id_ = self.attrs["id"] else: - id_ = f'id_{name}' + id_ = f"id_{name}" month_choices = list(MONTHS.items()) if not (self.required and value): month_choices.append(self.none_value) month_choices.sort() - local_attrs = self.build_attrs({'id': self.month_field % id_}) + local_attrs = self.build_attrs({"id": self.month_field % id_}) s = Select(choices=month_choices) select_html = s.render(self.month_field % name, month_val, local_attrs) @@ -191,15 +191,15 @@ def render(self, name, value, attrs=None, renderer=None): year_choices = [(i, i) for i in self.years] if not (self.required and value): year_choices.insert(0, self.none_value) - local_attrs['id'] = self.year_field % id_ + local_attrs["id"] = self.year_field % id_ s = Select(choices=year_choices) select_html = s.render(self.year_field % name, year_val, local_attrs) output.append(select_html) - return mark_safe('\n'.join(output)) + return mark_safe("\n".join(output)) def id_for_label(self, id_): - return f'{id_}_month' + return f"{id_}_month" id_for_label = classmethod(id_for_label) @@ -209,7 +209,7 @@ def value_from_datadict(self, data, files, name): if y == m == "0": return None if y and m: - return f'{y}-{m}-{1}' + return f"{y}-{m}-{1}" return data.get(name, None) @@ -219,7 +219,7 @@ class Product_TypeForm(forms.ModelForm): class Meta: model = Product_Type - fields = ['name', 'description', 'critical_product', 'key_product'] + fields = ["name", "description", "critical_product", "key_product"] class Delete_Product_TypeForm(forms.ModelForm): @@ -228,75 +228,75 @@ class Delete_Product_TypeForm(forms.ModelForm): class Meta: model = Product_Type - fields = ['id'] + fields = ["id"] class Edit_Product_Type_MemberForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product_type'].disabled = True - self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name') - self.fields['user'].disabled = True + self.fields["product_type"].disabled = True + self.fields["user"].queryset = Dojo_User.objects.order_by("first_name", "last_name") + self.fields["user"].disabled = True class Meta: model = Product_Type_Member - fields = ['product_type', 'user', 'role'] + fields = ["product_type", "user", "role"] class Add_Product_Type_MemberForm(forms.ModelForm): - users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users') + users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label="Users") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - current_members = 
Product_Type_Member.objects.filter(product_type=self.initial["product_type"]).values_list('user', flat=True)
-        self.fields['users'].queryset = Dojo_User.objects.exclude(
+        current_members = Product_Type_Member.objects.filter(product_type=self.initial["product_type"]).values_list("user", flat=True)
+        self.fields["users"].queryset = Dojo_User.objects.exclude(
             Q(is_superuser=True)
-            | Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name')
-        self.fields['product_type'].disabled = True
+            | Q(id__in=current_members)).exclude(is_active=False).order_by("first_name", "last_name")
+        self.fields["product_type"].disabled = True

     class Meta:
         model = Product_Type_Member
-        fields = ['product_type', 'users', 'role']
+        fields = ["product_type", "users", "role"]


 class Add_Product_Type_Member_UserForm(forms.ModelForm):
-    product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types')
+    product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label="Product Types")

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        current_members = Product_Type_Member.objects.filter(user=self.initial['user']).values_list('product_type', flat=True)
-        self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \
+        current_members = Product_Type_Member.objects.filter(user=self.initial["user"]).values_list("product_type", flat=True)
+        self.fields["product_types"].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \
             .exclude(id__in=current_members)
-        self.fields['user'].disabled = True
+        self.fields["user"].disabled = True

     class Meta:
         model = Product_Type_Member
-        fields = ['product_types', 'user', 'role']
+        fields = ["product_types", "user", "role"]


 class Delete_Product_Type_MemberForm(Edit_Product_Type_MemberForm):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['role'].disabled = True
+        self.fields["role"].disabled = True


 class Test_TypeForm(forms.ModelForm):
     class Meta:
         model = Test_Type
-        exclude = ['']
+        exclude = [""]


 class Development_EnvironmentForm(forms.ModelForm):
     class Meta:
         model = Development_Environment
-        fields = ['name']
+        fields = ["name"]


 class Delete_Dev_EnvironmentForm(forms.ModelForm):
     class Meta:
         model = Development_Environment
-        fields = ['id']
+        fields = ["id"]


 class ProductForm(forms.ModelForm):
@@ -304,34 +304,34 @@ class ProductForm(forms.ModelForm):
     description = forms.CharField(widget=forms.Textarea(attrs={}),
                                   required=True)

-    prod_type = forms.ModelChoiceField(label='Product Type',
+    prod_type = forms.ModelChoiceField(label="Product Type",
                                        queryset=Product_Type.objects.none(),
                                        required=True)

-    sla_configuration = forms.ModelChoiceField(label='SLA Configuration',
+    sla_configuration = forms.ModelChoiceField(label="SLA Configuration",
                                                queryset=SLA_Configuration.objects.all(),
                                                required=True,
-                                               initial='Default')
+                                               initial="Default")

-    product_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
-    technical_contact = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
-    team_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
+    product_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by("first_name", "last_name"), required=False)
+    technical_contact = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by("first_name", "last_name"), required=False)
+    team_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by("first_name", "last_name"), required=False)

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_Add_Product)
+        self.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_Add_Product)

         # if this product has findings being asynchronously updated, disable the sla config field
         if self.instance.async_updating:
-            self.fields['sla_configuration'].disabled = True
-            self.fields['sla_configuration'].widget.attrs['message'] = 'Finding SLA expiration dates are currently being recalculated. ' + \
-                                                                       'This field cannot be changed until the calculation is complete.'
+            self.fields["sla_configuration"].disabled = True
+            self.fields["sla_configuration"].widget.attrs["message"] = "Finding SLA expiration dates are currently being recalculated. " + \
+                                                                       "This field cannot be changed until the calculation is complete."

     class Meta:
         model = Product
-        fields = ['name', 'description', 'tags', 'product_manager', 'technical_contact', 'team_manager', 'prod_type', 'sla_configuration', 'regulations',
-                  'business_criticality', 'platform', 'lifecycle', 'origin', 'user_records', 'revenue', 'external_audience', 'enable_product_tag_inheritance',
-                  'internet_accessible', 'enable_simple_risk_acceptance', 'enable_full_risk_acceptance', 'disable_sla_breach_notifications']
+        fields = ["name", "description", "tags", "product_manager", "technical_contact", "team_manager", "prod_type", "sla_configuration", "regulations",
+                  "business_criticality", "platform", "lifecycle", "origin", "user_records", "revenue", "external_audience", "enable_product_tag_inheritance",
+                  "internet_accessible", "enable_simple_risk_acceptance", "enable_full_risk_acceptance", "disable_sla_breach_notifications"]


 class DeleteProductForm(forms.ModelForm):
@@ -340,32 +340,32 @@ class DeleteProductForm(forms.ModelForm):

     class Meta:
         model = Product
-        fields = ['id']
+        fields = ["id"]


 class EditFindingGroupForm(forms.ModelForm):
-    name = forms.CharField(max_length=255, required=True, label='Finding Group Name')
-    jira_issue = forms.CharField(max_length=255, required=False, label='Linked JIRA Issue',
-                                 help_text='Leave empty and check push to jira to create a new JIRA issue for this finding group.')
+    name = forms.CharField(max_length=255, required=True, label="Finding Group Name")
+    jira_issue = forms.CharField(max_length=255, required=False, label="Linked JIRA Issue",
+                                 help_text="Leave empty and check push to jira to create a new JIRA issue for this finding group.")

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         import dojo.jira_link.helper as jira_helper

-        self.fields['push_to_jira'] = forms.BooleanField()
-        self.fields['push_to_jira'].required = False
-        self.fields['push_to_jira'].help_text = "Checking this will overwrite content of your JIRA issue, or create one."
+        self.fields["push_to_jira"] = forms.BooleanField()
+        self.fields["push_to_jira"].required = False
+        self.fields["push_to_jira"].help_text = "Checking this will overwrite content of your JIRA issue, or create one."
-        self.fields['push_to_jira'].label = "Push to JIRA"
+        self.fields["push_to_jira"].label = "Push to JIRA"

-        if hasattr(self.instance, 'has_jira_issue') and self.instance.has_jira_issue:
+        if hasattr(self.instance, "has_jira_issue") and self.instance.has_jira_issue:
             jira_url = jira_helper.get_jira_url(self.instance)
-            self.fields['jira_issue'].initial = jira_url
-            self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
+            self.fields["jira_issue"].initial = jira_url
+            self.fields["push_to_jira"].widget.attrs["checked"] = "checked"

     class Meta:
         model = Finding_Group
-        fields = ['name']
+        fields = ["name"]


 class DeleteFindingGroupForm(forms.ModelForm):
@@ -374,57 +374,57 @@ class DeleteFindingGroupForm(forms.ModelForm):

     class Meta:
         model = Finding_Group
-        fields = ['id']
+        fields = ["id"]


 class Edit_Product_MemberForm(forms.ModelForm):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['product'].disabled = True
-        self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name')
-        self.fields['user'].disabled = True
+        self.fields["product"].disabled = True
+        self.fields["user"].queryset = Dojo_User.objects.order_by("first_name", "last_name")
+        self.fields["user"].disabled = True

     class Meta:
         model = Product_Member
-        fields = ['product', 'user', 'role']
+        fields = ["product", "user", "role"]


 class Add_Product_MemberForm(forms.ModelForm):
-    users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users')
+    users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label="Users")

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['product'].disabled = True
-        current_members = Product_Member.objects.filter(product=self.initial["product"]).values_list('user', flat=True)
-        self.fields['users'].queryset = Dojo_User.objects.exclude(
+        self.fields["product"].disabled = True
+        current_members = Product_Member.objects.filter(product=self.initial["product"]).values_list("user", flat=True)
+        self.fields["users"].queryset = Dojo_User.objects.exclude(
             Q(is_superuser=True)
-            | Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name')
+            | Q(id__in=current_members)).exclude(is_active=False).order_by("first_name", "last_name")

     class Meta:
         model = Product_Member
-        fields = ['product', 'users', 'role']
+        fields = ["product", "users", "role"]


 class Add_Product_Member_UserForm(forms.ModelForm):
-    products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products')
+    products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label="Products")

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        current_members = Product_Member.objects.filter(user=self.initial["user"]).values_list('product', flat=True)
-        self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \
+        current_members = Product_Member.objects.filter(user=self.initial["user"]).values_list("product", flat=True)
+        self.fields["products"].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \
             .exclude(id__in=current_members)
-        self.fields['user'].disabled = True
+        self.fields["user"].disabled = True

     class Meta:
         model = Product_Member
-        fields = ['products', 'user', 'role']
+        fields = ["products", "user", "role"]


 class Delete_Product_MemberForm(Edit_Product_MemberForm):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['role'].disabled = True
+        self.fields["role"].disabled = True


 class NoteTypeForm(forms.ModelForm):
@@ -433,30 +433,30 @@ class NoteTypeForm(forms.ModelForm):

     class Meta:
         model = Note_Type
-        fields = ['name', 'description', 'is_single', 'is_mandatory']
+        fields = ["name", "description", "is_single", "is_mandatory"]


 class EditNoteTypeForm(NoteTypeForm):

     def __init__(self, *args, **kwargs):
-        is_single = kwargs.pop('is_single')
+        is_single = kwargs.pop("is_single")
         super().__init__(*args, **kwargs)
         if is_single is False:
-            self.fields['is_single'].widget = forms.HiddenInput()
+            self.fields["is_single"].widget = forms.HiddenInput()


 class DisableOrEnableNoteTypeForm(NoteTypeForm):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['name'].disabled = True
-        self.fields['description'].disabled = True
-        self.fields['is_single'].disabled = True
-        self.fields['is_mandatory'].disabled = True
-        self.fields['is_active'].disabled = True
+        self.fields["name"].disabled = True
+        self.fields["description"].disabled = True
+        self.fields["is_single"].disabled = True
+        self.fields["is_mandatory"].disabled = True
+        self.fields["is_active"].disabled = True

     class Meta:
         model = Note_Type
-        fields = '__all__'
+        fields = "__all__"


 class DojoMetaDataForm(forms.ModelForm):
@@ -469,11 +469,11 @@ def full_clean(self):
             self.instance.validate_unique()
         except ValidationError:
             msg = "A metadata entry with the same name exists already for this object."
-            self.add_error('name', msg)
+            self.add_error("name", msg)

     class Meta:
         model = DojoMeta
-        fields = '__all__'
+        fields = "__all__"


 class ImportScanForm(forms.Form):
@@ -484,30 +484,30 @@ class ImportScanForm(forms.Form):
         required=False,
         label="Scan Completion Date",
         help_text="Scan completion date will be used on all findings.",
-        widget=forms.TextInput(attrs={'class': 'datepicker'}))
-    minimum_severity = forms.ChoiceField(help_text='Minimum severity level to be imported',
+        widget=forms.TextInput(attrs={"class": "datepicker"}))
+    minimum_severity = forms.ChoiceField(help_text="Minimum severity level to be imported",
                                          required=True,
                                          choices=SEVERITY_CHOICES)
     active = forms.ChoiceField(required=True, choices=active_verified_choices,
-                               help_text='Force findings to be active/inactive, or default to the original tool')
+                               help_text="Force findings to be active/inactive, or default to the original tool")
     verified = forms.ChoiceField(required=True, choices=active_verified_choices,
-                                 help_text='Force findings to be verified/not verified, or default to the original tool')
+                                 help_text="Force findings to be verified/not verified, or default to the original tool")

     # help_do_not_reactivate = 'Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs.'
     # do_not_reactivate = forms.BooleanField(help_text=help_do_not_reactivate, required=False)
     scan_type = forms.ChoiceField(required=True, choices=get_choices_sorted)
     environment = forms.ModelChoiceField(
-        queryset=Development_Environment.objects.all().order_by('name'))
-    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label='Systems / Endpoints')
+        queryset=Development_Environment.objects.all().order_by("name"))
+    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label="Systems / Endpoints")
     endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
                                        help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
                                                  "Each must be valid.",
-                                       widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
+                                       widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"}))
     version = forms.CharField(max_length=100, required=False, help_text="Version that was scanned.")
     branch_tag = forms.CharField(max_length=100, required=False, help_text="Branch or Tag that was scanned.")
     commit_hash = forms.CharField(max_length=100, required=False, help_text="Commit that was scanned.")
     build_id = forms.CharField(max_length=100, required=False, help_text="ID of the build that was scanned.")
-    api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label='API Scan Configuration')
+    api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label="API Scan Configuration")
     service = forms.CharField(max_length=200, required=False,
                               help_text="A service is a self-contained piece of functionality within a Product. "
                                         "This is an optional field which is used in deduplication and closing of old findings when set.")
@@ -549,7 +549,7 @@ class ImportScanForm(forms.Form):
     )

     if is_finding_groups_enabled():
-        group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option.')
+        group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text="Choose an option to automatically group new findings by the chosen option.")
         create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True)

     def __init__(self, *args, **kwargs):
@@ -557,19 +557,19 @@ def __init__(self, *args, **kwargs):
         endpoints = kwargs.pop("endpoints", None)
         api_scan_configuration = kwargs.pop("api_scan_configuration", None)
         super().__init__(*args, **kwargs)
-        self.fields['active'].initial = self.active_verified_choices[0]
-        self.fields['verified'].initial = self.active_verified_choices[0]
+        self.fields["active"].initial = self.active_verified_choices[0]
+        self.fields["verified"].initial = self.active_verified_choices[0]
         if environment:
-            self.fields['environment'].initial = environment
+            self.fields["environment"].initial = environment
         if endpoints:
-            self.fields['endpoints'].queryset = endpoints
+            self.fields["endpoints"].queryset = endpoints
         if api_scan_configuration:
-            self.fields['api_scan_configuration'].queryset = api_scan_configuration
+            self.fields["api_scan_configuration"].queryset = api_scan_configuration
         # couldn't find a cleaner way to add empty default
-        if 'group_by' in self.fields:
-            choices = self.fields['group_by'].choices
-            choices.insert(0, ('', '---------'))
-            self.fields['group_by'].choices = choices
+        if "group_by" in self.fields:
+            choices = self.fields["group_by"].choices
+            choices.insert(0, ("", "---------"))
+            self.fields["group_by"].choices = choices

         self.endpoints_to_add_list = []
@@ -585,12 +585,12 @@ def clean(self):
             msg = _(f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB")
             raise forms.ValidationError(msg)
         if tool_type:
-            api_scan_configuration = cleaned_data.get('api_scan_configuration')
+            api_scan_configuration = cleaned_data.get("api_scan_configuration")
             if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name:
-                msg = f'API scan configuration must be of tool type {tool_type}'
+                msg = f"API scan configuration must be of tool type {tool_type}"
                 raise forms.ValidationError(msg)

-        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
+        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
         if errors:
             raise forms.ValidationError(errors)
         else:
@@ -600,14 +600,14 @@ def clean(self):

     # date can only be today or in the past, not the future
     def clean_scan_date(self):
-        date = self.cleaned_data.get('scan_date', None)
+        date = self.cleaned_data.get("scan_date", None)
         if date and date.date() > datetime.today().date():
             msg = "The date cannot be in the future!"
             raise forms.ValidationError(msg)
         return date

     def get_scan_type(self):
-        TGT_scan = self.cleaned_data['scan_type']
+        TGT_scan = self.cleaned_data["scan_type"]
         return TGT_scan


 class ReImportScanForm(forms.Form):
@@ -619,18 +619,18 @@ class ReImportScanForm(forms.Form):
         required=False,
         label="Scan Completion Date",
         help_text="Scan completion date will be used on all findings.",
-        widget=forms.TextInput(attrs={'class': 'datepicker'}))
-    minimum_severity = forms.ChoiceField(help_text='Minimum severity level to be imported',
+        widget=forms.TextInput(attrs={"class": "datepicker"}))
+    minimum_severity = forms.ChoiceField(help_text="Minimum severity level to be imported",
                                          required=True,
                                          choices=SEVERITY_CHOICES[0:4])
     active = forms.ChoiceField(required=True, choices=active_verified_choices,
-                               help_text='Force findings to be active/inactive, or default to the original tool')
+                               help_text="Force findings to be active/inactive, or default to the original tool")
     verified = forms.ChoiceField(required=True, choices=active_verified_choices,
-                                 help_text='Force findings to be verified/not verified, or default to the original tool')
+                                 help_text="Force findings to be verified/not verified, or default to the original tool")

-    help_do_not_reactivate = 'Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs.'
+    help_do_not_reactivate = "Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs."
     do_not_reactivate = forms.BooleanField(help_text=help_do_not_reactivate, required=False)
-    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label='Systems / Endpoints')
+    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label="Systems / Endpoints")
     tags = TagField(required=False, help_text="Modify existing tags that help describe this scan. "
                                               "Choose from the list or add new tags. Press Enter key to add.")
     file = forms.FileField(widget=forms.widgets.FileInput(
@@ -644,7 +644,7 @@ class ReImportScanForm(forms.Form):
     branch_tag = forms.CharField(max_length=100, required=False, help_text="Branch or Tag that was scanned.")
     commit_hash = forms.CharField(max_length=100, required=False, help_text="Commit that was scanned.")
     build_id = forms.CharField(max_length=100, required=False, help_text="ID of the build that was scanned.")
-    api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label='API Scan Configuration')
+    api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label="API Scan Configuration")
     service = forms.CharField(max_length=200, required=False, help_text="A service is a self-contained piece of functionality within a Product. This is an optional field which is used in deduplication of findings when set.")
     source_code_management_uri = forms.URLField(max_length=600, required=False, help_text="Resource link to source code")
     apply_tags_to_findings = forms.BooleanField(
@@ -661,7 +661,7 @@ class ReImportScanForm(forms.Form):
     )

     if is_finding_groups_enabled():
-        group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option')
+        group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text="Choose an option to automatically group new findings by the chosen option")
         create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True)

     def __init__(self, *args, test=None, **kwargs):
@@ -669,12 +669,12 @@ def __init__(self, *args, test=None, **kwargs):
         api_scan_configuration = kwargs.pop("api_scan_configuration", None)
         api_scan_configuration_queryset = kwargs.pop("api_scan_configuration_queryset", None)
         super().__init__(*args, **kwargs)
-        self.fields['active'].initial = self.active_verified_choices[0]
-        self.fields['verified'].initial = self.active_verified_choices[0]
+        self.fields["active"].initial = self.active_verified_choices[0]
+        self.fields["verified"].initial = self.active_verified_choices[0]
         self.scan_type = None
         if test:
             self.scan_type = test.test_type.name
-            self.fields['tags'].initial = test.tags.all()
+            self.fields["tags"].initial = test.tags.all()
         if endpoints:
             self.fields["endpoints"].queryset = endpoints
         if api_scan_configuration:
@@ -682,10 +682,10 @@ def __init__(self, *args, test=None, **kwargs):
         if api_scan_configuration_queryset:
             self.fields["api_scan_configuration"].queryset = api_scan_configuration_queryset
         # couldn't find a cleaner way to add empty default
-        if 'group_by' in self.fields:
-            choices = self.fields['group_by'].choices
-            choices.insert(0, ('', '---------'))
-            self.fields['group_by'].choices = choices
+        if "group_by" in self.fields:
+            choices = self.fields["group_by"].choices
+            choices.insert(0, ("", "---------"))
+            self.fields["group_by"].choices = choices

     def clean(self):
         cleaned_data = super().clean()
@@ -698,16 +698,16 @@ def clean(self):
             raise forms.ValidationError(msg)
         tool_type = requires_tool_type(self.scan_type)
         if tool_type:
-            api_scan_configuration = cleaned_data.get('api_scan_configuration')
+            api_scan_configuration = cleaned_data.get("api_scan_configuration")
             if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name:
-                msg = f'API scan configuration must be of tool type {tool_type}'
+                msg = f"API scan configuration must be of tool type {tool_type}"
                 raise forms.ValidationError(msg)

         return cleaned_data

     # date can only be today or in the past, not the future
     def clean_scan_date(self):
-        date = self.cleaned_data.get('scan_date', None)
+        date = self.cleaned_data.get("scan_date", None)
         if date and date.date() > timezone.localtime(timezone.now()).date():
             msg = "The date cannot be in the future!"
             raise forms.ValidationError(msg)
@@ -750,7 +750,7 @@ class UploadThreatForm(forms.Form):


 class MergeFindings(forms.ModelForm):
-    FINDING_ACTION = (('', 'Select an Action'), ('inactive', 'Inactive'), ('delete', 'Delete'))
+    FINDING_ACTION = (("", "Select an Action"), ("inactive", "Inactive"), ("delete", "Delete"))

     append_description = forms.BooleanField(label="Append Description", initial=True, required=False,
                                             help_text="Description in all findings will be appended into the merged finding.")
@@ -777,23 +777,23 @@ class MergeFindings(forms.ModelForm):
                                          help_text="The action to take on the merged finding. Set the findings to inactive or delete the findings.")

     def __init__(self, *args, **kwargs):
-        _ = kwargs.pop('finding')
-        findings = kwargs.pop('findings')
+        _ = kwargs.pop("finding")
+        findings = kwargs.pop("findings")
         super().__init__(*args, **kwargs)

-        self.fields['finding_to_merge_into'] = forms.ModelChoiceField(
+        self.fields["finding_to_merge_into"] = forms.ModelChoiceField(
             queryset=findings, initial=0, required="False", label="Finding to Merge Into", help_text="Findings selected below will be merged into this finding.")

         # Exclude the finding to merge into from the findings to merge into
-        self.fields['findings_to_merge'] = forms.ModelMultipleChoiceField(
+        self.fields["findings_to_merge"] = forms.ModelMultipleChoiceField(
             queryset=findings, required=True, label="Findings to Merge",
-            widget=forms.widgets.SelectMultiple(attrs={'size': 10}),
-            help_text=('Select the findings to merge.'))
-        self.field_order = ['finding_to_merge_into', 'findings_to_merge', 'append_description', 'add_endpoints', 'append_reference']
+            widget=forms.widgets.SelectMultiple(attrs={"size": 10}),
+            help_text=("Select the findings to merge."))
+        self.field_order = ["finding_to_merge_into", "findings_to_merge", "append_description", "add_endpoints", "append_reference"]

     class Meta:
         model = Finding
-        fields = ['append_description', 'add_endpoints', 'append_reference']
+        fields = ["append_description", "add_endpoints", "append_reference"]


 class EditRiskAcceptanceForm(forms.ModelForm):
@@ -802,17 +802,17 @@ class EditRiskAcceptanceForm(forms.ModelForm):
     decision = forms.ChoiceField(choices=Risk_Acceptance.TREATMENT_CHOICES, initial=Risk_Acceptance.TREATMENT_ACCEPT, widget=forms.RadioSelect)
     path = forms.FileField(label="Proof", required=False, widget=forms.widgets.FileInput(attrs={"accept": ".jpg,.png,.pdf"}))
-    expiration_date = forms.DateTimeField(required=False, widget=forms.TextInput(attrs={'class': 'datepicker'}))
+    expiration_date = forms.DateTimeField(required=False, widget=forms.TextInput(attrs={"class": "datepicker"}))

     class Meta:
         model = Risk_Acceptance
-        exclude = ['accepted_findings', 'notes']
+        exclude = ["accepted_findings", "notes"]

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['path'].help_text = f'Existing proof uploaded: {self.instance.filename()}' if self.instance.filename() else 'None'
-        self.fields['expiration_date_warned'].disabled = True
-        self.fields['expiration_date_handled'].disabled = True
+        self.fields["path"].help_text = f"Existing proof uploaded: {self.instance.filename()}" if self.instance.filename() else "None"
+        self.fields["expiration_date_warned"].disabled = True
+        self.fields["expiration_date_handled"].disabled = True


 class RiskAcceptanceForm(EditRiskAcceptanceForm):
@@ -820,26 +820,26 @@ class RiskAcceptanceForm(EditRiskAcceptanceForm):
     # expiration_date = forms.DateTimeField(required=False, widget=forms.TextInput(attrs={'class': 'datepicker'}))
     accepted_findings = forms.ModelMultipleChoiceField(
         queryset=Finding.objects.none(), required=True,
-        widget=forms.widgets.SelectMultiple(attrs={'size': 10}),
-        help_text=('Active, verified findings listed, please select to add findings.'))
+        widget=forms.widgets.SelectMultiple(attrs={"size": 10}),
+        help_text=("Active, verified findings listed, please select to add findings."))
     notes = forms.CharField(required=False, max_length=2400,
                             widget=forms.Textarea,
-                            label='Notes')
+                            label="Notes")

     class Meta:
         model = Risk_Acceptance
-        fields = '__all__'
+        fields = "__all__"

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        expiration_delta_days = get_system_setting('risk_acceptance_form_default_days')
-        logger.debug('expiration_delta_days: %i', expiration_delta_days)
+        expiration_delta_days = get_system_setting("risk_acceptance_form_default_days")
+        logger.debug("expiration_delta_days: %i", expiration_delta_days)
         if expiration_delta_days > 0:
             expiration_date = timezone.now().date() + relativedelta(days=expiration_delta_days)
             # logger.debug('setting default expiration_date: %s', expiration_date)
-            self.fields['expiration_date'].initial = expiration_date
+            self.fields["expiration_date"].initial = expiration_date
         # self.fields['path'].help_text = 'Existing proof uploaded: %s' % self.instance.filename() if self.instance.filename() else 'None'
-        self.fields['accepted_findings'].queryset = get_authorized_findings(Permissions.Risk_Acceptance)
+        self.fields["accepted_findings"].queryset = get_authorized_findings(Permissions.Risk_Acceptance)


 class BaseManageFileFormSet(forms.BaseModelFormSet):
@@ -849,7 +849,7 @@ def clean(self):
             # Don't bother validating the formset unless each form is valid on its own
             return
         for form in self.forms:
-            file = form.cleaned_data.get('file', None)
+            file = form.cleaned_data.get("file", None)
             if file:
                 ext = os.path.splitext(file.name)[1]  # [0] returns path+filename
                 valid_extensions = settings.FILE_UPLOAD_TYPES
@@ -864,10 +864,10 @@ def clean(self):
                         "File uploads are prohibited due to the list of acceptable "
                         "file extensions being empty"
                     )
-                    form.add_error('file', msg)
+                    form.add_error("file", msg)


-ManageFileFormSet = modelformset_factory(FileUpload, extra=3, max_num=10, fields=['title', 'file'], can_delete=True, formset=BaseManageFileFormSet)
+ManageFileFormSet = modelformset_factory(FileUpload, extra=3, max_num=10, fields=["title", "file"], can_delete=True, formset=BaseManageFileFormSet)


 class ReplaceRiskAcceptanceProofForm(forms.ModelForm):
@@ -875,7 +875,7 @@ class ReplaceRiskAcceptanceProofForm(forms.ModelForm):

     class Meta:
         model = Risk_Acceptance
-        fields = ['path']
+        fields = ["path"]


 class AddFindingsRiskAcceptanceForm(forms.ModelForm):
@@ -884,20 +884,20 @@ class AddFindingsRiskAcceptanceForm(forms.ModelForm):
         queryset=Finding.objects.none(),
         required=True,
         label="",
-        widget=TableCheckboxWidget(attrs={'size': 25}),
+        widget=TableCheckboxWidget(attrs={"size": 25}),
     )

     class Meta:
         model = Risk_Acceptance
-        fields = ['accepted_findings']
+        fields = ["accepted_findings"]

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.fields['accepted_findings'].queryset = get_authorized_findings(Permissions.Risk_Acceptance)
+        self.fields["accepted_findings"].queryset = get_authorized_findings(Permissions.Risk_Acceptance)


 class CheckForm(forms.ModelForm):
-    options = (('Pass', 'Pass'), ('Fail', 'Fail'), ('N/A', 'N/A'))
+    options = (("Pass", "Pass"), ("Fail", "Fail"), ("N/A", "N/A"))
     session_management = forms.ChoiceField(choices=options)
     encryption_crypto = forms.ChoiceField(choices=options)
     configuration_management = forms.ChoiceField(choices=options)
@@ -908,24 +908,24 @@ class CheckForm(forms.ModelForm):
     other = forms.ChoiceField(choices=options)

     def __init__(self, *args, **kwargs):
-        findings = kwargs.pop('findings')
+        findings = kwargs.pop("findings")
         super().__init__(*args, **kwargs)
-        self.fields['session_issues'].queryset = findings
-        self.fields['crypto_issues'].queryset = findings
-        self.fields['config_issues'].queryset = findings
-        self.fields['auth_issues'].queryset = findings
-        self.fields['author_issues'].queryset = findings
-        self.fields['data_issues'].queryset = findings
-        self.fields['sensitive_issues'].queryset = findings
-        self.fields['other_issues'].queryset = findings
+        self.fields["session_issues"].queryset = findings
+        self.fields["crypto_issues"].queryset = findings
+        self.fields["config_issues"].queryset = findings
+        self.fields["auth_issues"].queryset = findings
+        self.fields["author_issues"].queryset = findings
+        self.fields["data_issues"].queryset = findings
+        self.fields["sensitive_issues"].queryset = findings
+        self.fields["other_issues"].queryset = findings

     class Meta:
         model = Check_List
-        fields = ['session_management', 'session_issues', 'encryption_crypto', 'crypto_issues',
-                  'configuration_management', 'config_issues', 'authentication', 'auth_issues',
-                  'authorization_and_access_control', 'author_issues',
-                  'data_input_sanitization_validation', 'data_issues',
-                  'sensitive_data', 'sensitive_issues', 'other', 'other_issues']
+        fields = ["session_management", "session_issues", "encryption_crypto", "crypto_issues",
+                  "configuration_management", "config_issues", "authentication", "auth_issues",
+                  "authorization_and_access_control", "author_issues",
+                  "data_input_sanitization_validation", "data_issues",
+                  "sensitive_data", "sensitive_issues", "other", "other_issues"]


 class EngForm(forms.ModelForm):
@@ -935,13 +935,13 @@ class EngForm(forms.ModelForm):
                            + "Without a name the target start date will be set.")
     description = forms.CharField(widget=forms.Textarea(attrs={}),
                                   required=False, help_text="Description of the engagement and details regarding the engagement.")
-    product = forms.ModelChoiceField(label='Product',
+    product = forms.ModelChoiceField(label="Product",
                                      queryset=Product.objects.none(),
                                      required=True)
     target_start = forms.DateField(widget=forms.TextInput(
-        attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+        attrs={"class": "datepicker", "autocomplete": "off"}))
     target_end = forms.DateField(widget=forms.TextInput(
-        attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+        attrs={"class": "datepicker", "autocomplete": "off"}))
     lead = forms.ModelChoiceField(
         queryset=None,
         required=True, label="Testing Lead")
@@ -950,38 +950,38 @@ def __init__(self, *args, **kwargs):
         cicd = False
         product = None
-        if 'cicd' in kwargs:
-            cicd = kwargs.pop('cicd')
+        if "cicd" in kwargs:
+            cicd = kwargs.pop("cicd")

-        if 'product' in kwargs:
-            product = kwargs.pop('product')
+        if "product" in kwargs:
+            product = kwargs.pop("product")

         self.user = None
-        if 'user' in kwargs:
-            self.user = kwargs.pop('user')
+        if "user" in kwargs:
+            self.user = kwargs.pop("user")

         super().__init__(*args, **kwargs)

         if product:
-            self.fields['preset'] = forms.ModelChoiceField(help_text="Settings and notes for performing this engagement.", required=False, queryset=Engagement_Presets.objects.filter(product=product))
-            self.fields['lead'].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View).filter(is_active=True)
+            self.fields["preset"] = forms.ModelChoiceField(help_text="Settings and notes for performing this engagement.", required=False, queryset=Engagement_Presets.objects.filter(product=product))
+            self.fields["lead"].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View).filter(is_active=True)
         else:
-            self.fields['lead'].queryset = get_authorized_users(Permissions.Engagement_View).filter(is_active=True)
+            self.fields["lead"].queryset = get_authorized_users(Permissions.Engagement_View).filter(is_active=True)

-        self.fields['product'].queryset = get_authorized_products(Permissions.Engagement_Add)
+        self.fields["product"].queryset = get_authorized_products(Permissions.Engagement_Add)

         # Don't show CICD fields on a interactive engagement
         if cicd is False:
-            del self.fields['build_id']
-            del self.fields['commit_hash']
-            del self.fields['branch_tag']
-            del self.fields['build_server']
-            del self.fields['source_code_management_server']
+            del self.fields["build_id"]
+            del self.fields["commit_hash"]
+            del self.fields["branch_tag"]
+            del self.fields["build_server"]
+            del self.fields["source_code_management_server"]
             # del self.fields['source_code_management_uri']
-            del self.fields['orchestration_engine']
+            del self.fields["orchestration_engine"]
         else:
-            del self.fields['test_strategy']
-            del self.fields['status']
+            del self.fields["test_strategy"]
+            del self.fields["status"]

     def is_valid(self):
         valid = super().is_valid()
@@ -989,17 +989,17 @@ def is_valid(self):

         # we're done now if not valid
         if not valid:
             return valid
-        if self.cleaned_data['target_start'] > self.cleaned_data['target_end']:
-            self.add_error('target_start', 'Your target start date exceeds your target end date')
-            self.add_error('target_end', 'Your target start date exceeds your target end date')
+        if self.cleaned_data["target_start"] > self.cleaned_data["target_end"]:
+            self.add_error("target_start", "Your target start date exceeds your target end date")
+            self.add_error("target_end", "Your target start date exceeds your target end date")
             return False
         return True

     class Meta:
         model = Engagement
-        exclude = ('first_contacted', 'real_start', 'engagement_type', 'inherited_tags',
-                   'real_end', 'requester', 'reason', 'updated', 'report_type',
-                   'product', 'threat_model', 'api_test', 'pen_test', 'check_list')
+        exclude = ("first_contacted", "real_start", "engagement_type", "inherited_tags",
+                   "real_end", "requester", "reason", "updated", "report_type",
+                   "product", "threat_model", "api_test", "pen_test", "check_list")


 class DeleteEngagementForm(forms.ModelForm):
@@ -1008,19 +1008,19 @@ class DeleteEngagementForm(forms.ModelForm):

     class Meta:
         model = Engagement
-        fields = ['id']
+        fields = ["id"]


 class TestForm(forms.ModelForm):
     title = forms.CharField(max_length=255, required=False)
-    description = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False)
-    test_type = forms.ModelChoiceField(queryset=Test_Type.objects.all().order_by('name'))
+    description = forms.CharField(widget=forms.Textarea(attrs={"rows": "3"}), required=False)
+    test_type = forms.ModelChoiceField(queryset=Test_Type.objects.all().order_by("name"))
     environment = forms.ModelChoiceField(
-        queryset=Development_Environment.objects.all().order_by('name'))
+        queryset=Development_Environment.objects.all().order_by("name"))
     target_start = forms.DateTimeField(widget=forms.TextInput(
-        attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+        attrs={"class": "datepicker", "autocomplete": "off"}))
     target_end = forms.DateTimeField(widget=forms.TextInput(
-        attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+        attrs={"class": "datepicker", "autocomplete": "off"}))
     lead = forms.ModelChoiceField(
         queryset=None,
@@ -1029,26 +1029,26 @@ def __init__(self, *args, **kwargs):
         obj = None

-        if 'engagement' in kwargs:
-            obj = kwargs.pop('engagement')
+        if "engagement" in kwargs:
+            obj = kwargs.pop("engagement")

-        if 'instance' in kwargs:
-            obj = kwargs.get('instance')
+        if "instance" in kwargs:
+            obj = kwargs.get("instance")

         super().__init__(*args, **kwargs)

         if obj:
             product = get_product(obj)
-            self.fields['lead'].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View).filter(is_active=True)
-            self.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product=product)
+            self.fields["lead"].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View).filter(is_active=True)
+            self.fields["api_scan_configuration"].queryset = Product_API_Scan_Configuration.objects.filter(product=product)
         else:
-            self.fields['lead'].queryset = get_authorized_users(Permissions.Test_View).filter(is_active=True)
+            self.fields["lead"].queryset = get_authorized_users(Permissions.Test_View).filter(is_active=True)

     class Meta:
         model = Test
-        fields = ['title', 'test_type', 'target_start', 'target_end', 'description',
-                  'environment', 'percent_complete', 'tags', 'lead', 'version', 'branch_tag', 'build_id', 'commit_hash',
-                  'api_scan_configuration']
+        fields = ["title", "test_type", "target_start", "target_end", "description",
+                  "environment", "percent_complete", "tags", "lead", "version", "branch_tag", "build_id", "commit_hash",
+                  "api_scan_configuration"]


 class DeleteTestForm(forms.ModelForm):
@@ -1057,89 +1057,89 @@ class DeleteTestForm(forms.ModelForm):

     class Meta:
         model = Test
-        fields = ['id']
+        fields = ["id"]


 class CopyTestForm(forms.Form):
     engagement = forms.ModelChoiceField(
         required=True,
         queryset=Engagement.objects.none(),
-        error_messages={'required': '*'})
+        error_messages={"required": "*"})

     def __init__(self, *args, **kwargs):
-        authorized_lists = kwargs.pop('engagements', None)
+        authorized_lists = kwargs.pop("engagements", None)
         super().__init__(*args, **kwargs)
-        self.fields['engagement'].queryset = authorized_lists
+        self.fields["engagement"].queryset = authorized_lists


 class AddFindingForm(forms.ModelForm):
     title = forms.CharField(max_length=1000)
     date = forms.DateField(required=True,
-                           widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+                           widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}))
     cwe = forms.IntegerField(required=False)
     vulnerability_ids = vulnerability_ids_field
-    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
+    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={"class": "cvsscalculator", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"}))
     description = forms.CharField(widget=forms.Textarea)
     severity = forms.ChoiceField(
         choices=SEVERITY_CHOICES,
         error_messages={
-            'required': 'Select valid choice: In Progress, On Hold, Completed',
-            'invalid_choice': EFFORT_FOR_FIXING_INVALID_CHOICE})
+            "required": "Select valid choice: In Progress, On Hold, Completed",
+            "invalid_choice": EFFORT_FOR_FIXING_INVALID_CHOICE})
     mitigation = forms.CharField(widget=forms.Textarea, required=False)
     impact = forms.CharField(widget=forms.Textarea, required=False)
     request = forms.CharField(widget=forms.Textarea, required=False)
     response = forms.CharField(widget=forms.Textarea, required=False)
-    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects.none(), required=False, label='Systems / Endpoints')
+    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects.none(), required=False, label="Systems / Endpoints")
     endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
                                        help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
                                                  "Each must be valid.",
-                                       widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
+                                       widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"}))
     references = forms.CharField(widget=forms.Textarea, required=False)
-    publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
-    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
+    publish_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
+    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
     planned_remediation_version = forms.CharField(max_length=99, required=False)
     effort_for_fixing = forms.ChoiceField(
         required=False,
         choices=EFFORT_FOR_FIXING_CHOICES,
         error_messages={
-            'invalid_choice': EFFORT_FOR_FIXING_INVALID_CHOICE})
+            "invalid_choice": EFFORT_FOR_FIXING_INVALID_CHOICE})

     # the only reliable way without hacking internal fields to get predicatble ordering is to make it explicit
-    field_order = ('title', 'date', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce',
-                   'severity_justification', 'endpoints', 'endpoints_to_add', 'references', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope',
-                   'risk_accepted', 'under_defect_review')
+    field_order = ("title", "date", "cwe", "vulnerability_ids", "severity", "cvssv3", "description", "mitigation", "impact", "request", "response", "steps_to_reproduce",
+                   "severity_justification", "endpoints", "endpoints_to_add", "references", "active", "verified", "false_p", "duplicate", "out_of_scope",
+                   "risk_accepted", "under_defect_review")

     def __init__(self, *args, **kwargs):
-        req_resp = kwargs.pop('req_resp')
+        req_resp = kwargs.pop("req_resp")

         product = None
-        if 'product' in kwargs:
-            product = kwargs.pop('product')
+        if "product" in kwargs:
+            product = kwargs.pop("product")

         super().__init__(*args, **kwargs)

         if product:
-            self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
+            self.fields["endpoints"].queryset = Endpoint.objects.filter(product=product)

         if req_resp:
-            self.fields['request'].initial = req_resp[0]
-            self.fields['response'].initial = req_resp[1]
+            self.fields["request"].initial = req_resp[0]
+            self.fields["response"].initial = req_resp[1]

         self.endpoints_to_add_list = []

     def clean(self):
         cleaned_data = super().clean()
-        if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']):
-            msg = 'Duplicate findings cannot be verified or active'
+        if ((cleaned_data["active"] or cleaned_data["verified"]) and cleaned_data["duplicate"]):
+            msg = "Duplicate findings cannot be verified or active"
             raise forms.ValidationError(msg)
-        if cleaned_data['false_p'] and cleaned_data['verified']:
-            msg = 'False positive findings cannot be verified.'
+        if cleaned_data["false_p"] and cleaned_data["verified"]:
+            msg = "False positive findings cannot be verified."
             raise forms.ValidationError(msg)
-        if cleaned_data['active'] and 'risk_accepted' in cleaned_data and cleaned_data['risk_accepted']:
-            msg = 'Active findings cannot be risk accepted.'
+        if cleaned_data["active"] and "risk_accepted" in cleaned_data and cleaned_data["risk_accepted"]:
+            msg = "Active findings cannot be risk accepted."
             raise forms.ValidationError(msg)

-        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
+        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
         if errors:
             raise forms.ValidationError(errors)
         else:
@@ -1149,75 +1149,75 @@ def clean(self):

     class Meta:
         model = Finding
-        exclude = ('reporter', 'url', 'numerical_severity', 'under_review', 'reviewers', 'cve', 'inherited_tags',
-                   'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'endpoints', 'sla_start_date')
+        exclude = ("reporter", "url", "numerical_severity", "under_review", "reviewers", "cve", "inherited_tags",
+                   "review_requested_by", "is_mitigated", "jira_creation", "jira_change", "endpoints", "sla_start_date")


 class AdHocFindingForm(forms.ModelForm):
     title = forms.CharField(max_length=1000)
     date = forms.DateField(required=True,
-                           widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+                           widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}))
     cwe = forms.IntegerField(required=False)
     vulnerability_ids = vulnerability_ids_field
-    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
+    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={"class": "cvsscalculator", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"}))
     description = forms.CharField(widget=forms.Textarea)
     severity = forms.ChoiceField(
         choices=SEVERITY_CHOICES,
         error_messages={
-            'required': 'Select valid choice: In Progress, On Hold, Completed',
-            'invalid_choice': EFFORT_FOR_FIXING_INVALID_CHOICE})
+            "required": "Select valid choice: In Progress, On Hold, Completed",
+            "invalid_choice": EFFORT_FOR_FIXING_INVALID_CHOICE})
     mitigation = forms.CharField(widget=forms.Textarea, required=False)
     impact = forms.CharField(widget=forms.Textarea, required=False)
     request = forms.CharField(widget=forms.Textarea, required=False)
     response = forms.CharField(widget=forms.Textarea, required=False)
-    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
+    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label="Systems / Endpoints")
     endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
                                        help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
                                                  "Each must be valid.",
-                                       widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
+                                       widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"}))
     references = forms.CharField(widget=forms.Textarea, required=False)
-    publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
-    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
+    publish_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
+    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
     planned_remediation_version = forms.CharField(max_length=99, required=False)
     effort_for_fixing = forms.ChoiceField(
         required=False,
        choices=EFFORT_FOR_FIXING_CHOICES,
         error_messages={
-            'invalid_choice': EFFORT_FOR_FIXING_INVALID_CHOICE})
+            "invalid_choice": EFFORT_FOR_FIXING_INVALID_CHOICE})

     # the only reliable way without hacking internal fields to get predicatble ordering is to make it explicit
-    field_order = ('title', 'date', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce',
-                   'severity_justification', 'endpoints', 'endpoints_to_add', 'references', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope',
-                   'risk_accepted', 'under_defect_review', 'sla_start_date', 'sla_expiration_date')
+    field_order = ("title", "date", "cwe", "vulnerability_ids", "severity", "cvssv3", "description", "mitigation", "impact", "request", "response", "steps_to_reproduce",
+                   "severity_justification", "endpoints", "endpoints_to_add", "references", "active", "verified", "false_p", "duplicate", "out_of_scope",
+                   "risk_accepted", "under_defect_review", "sla_start_date", "sla_expiration_date")

     def __init__(self, *args, **kwargs):
-        req_resp = kwargs.pop('req_resp')
+        req_resp = kwargs.pop("req_resp")

         product = None
-        if 'product' in kwargs:
-            product = kwargs.pop('product')
+        if "product" in kwargs:
+            product = kwargs.pop("product")

         super().__init__(*args, **kwargs)

         if product:
-            self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
+            self.fields["endpoints"].queryset = Endpoint.objects.filter(product=product)

         if req_resp:
-            self.fields['request'].initial = req_resp[0]
-            self.fields['response'].initial = req_resp[1]
+            self.fields["request"].initial = req_resp[0]
+            self.fields["response"].initial = req_resp[1]

         self.endpoints_to_add_list = []

     def clean(self):
         cleaned_data = super().clean()
-        if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']):
-            msg = 'Duplicate findings cannot be verified or active'
+        if ((cleaned_data["active"] or cleaned_data["verified"]) and cleaned_data["duplicate"]):
+            msg = "Duplicate findings cannot be verified or active"
             raise forms.ValidationError(msg)
-        if cleaned_data['false_p'] and cleaned_data['verified']:
-            msg = 'False positive findings cannot be verified.'
+        if cleaned_data["false_p"] and cleaned_data["verified"]:
+            msg = "False positive findings cannot be verified."
             raise forms.ValidationError(msg)

-        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
+        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
         if errors:
             raise forms.ValidationError(errors)
         else:
@@ -1227,55 +1227,55 @@ def clean(self):

     class Meta:
         model = Finding
-        exclude = ('reporter', 'url', 'numerical_severity', 'under_review', 'reviewers', 'cve', 'inherited_tags',
-                   'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'endpoints', 'sla_start_date',
-                   'sla_expiration_date')
+        exclude = ("reporter", "url", "numerical_severity", "under_review", "reviewers", "cve", "inherited_tags",
+                   "review_requested_by", "is_mitigated", "jira_creation", "jira_change", "endpoints", "sla_start_date",
+                   "sla_expiration_date")


 class PromoteFindingForm(forms.ModelForm):
     title = forms.CharField(max_length=1000)
     date = forms.DateField(required=True,
-                           widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+                           widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}))
     cwe = forms.IntegerField(required=False)
     vulnerability_ids = vulnerability_ids_field
-    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
+    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={"class": "cvsscalculator", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"}))
     description = forms.CharField(widget=forms.Textarea)
     severity = forms.ChoiceField(
         choices=SEVERITY_CHOICES,
         error_messages={
-            'required': 'Select valid choice: In Progress, On Hold, Completed',
-            'invalid_choice': 'Select valid choice: Critical,High,Medium,Low'})
+            "required": "Select valid choice: In Progress, On Hold, Completed",
+            "invalid_choice": "Select valid choice: Critical,High,Medium,Low"})
     mitigation = forms.CharField(widget=forms.Textarea, required=False)
     impact = forms.CharField(widget=forms.Textarea, required=False)
-    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects.none(), required=False, label='Systems / Endpoints')
+    endpoints = forms.ModelMultipleChoiceField(Endpoint.objects.none(), required=False, label="Systems / Endpoints")
     endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
                                        help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
                                                  "Each must be valid.",
-                                       widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
+                                       widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"}))
     references = forms.CharField(widget=forms.Textarea, required=False)

     # the onyl reliable way without hacking internal fields to get predicatble ordering is to make it explicit
-    field_order = ('title', 'group', 'date', 'sla_start_date', 'sla_expiration_date', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3',
-                   'cvssv3_score', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce', 'severity_justification',
-                   'endpoints', 'endpoints_to_add', 'references', 'active', 'mitigated', 'mitigated_by', 'verified', 'false_p', 'duplicate',
-                   'out_of_scope', 'risk_accept', 'under_defect_review')
+    field_order = ("title", "group", "date", "sla_start_date", "sla_expiration_date", "cwe", "vulnerability_ids", "severity", "cvssv3",
+                   "cvssv3_score", "description", "mitigation", "impact", "request", "response", "steps_to_reproduce", "severity_justification",
+                   "endpoints", "endpoints_to_add", "references", "active", "mitigated", "mitigated_by", "verified", "false_p", "duplicate",
+                   "out_of_scope", "risk_accept", "under_defect_review")

     def __init__(self, *args, **kwargs):
         product = None
-        if 'product' in kwargs:
-            product = kwargs.pop('product')
+        if "product" in kwargs:
+            product = kwargs.pop("product")

         super().__init__(*args, **kwargs)

         if product:
-            self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
+            self.fields["endpoints"].queryset = Endpoint.objects.filter(product=product)

         self.endpoints_to_add_list = []

     def clean(self):
         cleaned_data = super().clean()

-        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
+        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
         if errors:
             raise forms.ValidationError(errors)
         else:
@@ -1285,121 +1285,121 @@ def clean(self):

     class Meta:
         model = Finding
-        exclude = ('reporter', 'url', 'numerical_severity', 'active', 'false_p', 'verified', 'endpoint_status', 'cve', 'inherited_tags',
-                   'duplicate', 'out_of_scope', 'under_review', 'reviewers', 'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'planned_remediation_date', 'planned_remediation_version', 'effort_for_fixing')
+        exclude = ("reporter", "url", "numerical_severity", "active", "false_p", "verified", "endpoint_status", "cve", "inherited_tags",
+                   "duplicate", "out_of_scope", "under_review", "reviewers", "review_requested_by", "is_mitigated", "jira_creation", "jira_change", "planned_remediation_date", "planned_remediation_version", "effort_for_fixing")


 class FindingForm(forms.ModelForm):
     title = forms.CharField(max_length=1000)
-    group = forms.ModelChoiceField(required=False, queryset=Finding_Group.objects.none(), help_text='The Finding Group to which this finding belongs, leave empty to remove the finding from the group. Groups can only be created via Bulk Edit for now.')
+    group = forms.ModelChoiceField(required=False, queryset=Finding_Group.objects.none(), help_text="The Finding Group to which this finding belongs, leave empty to remove the finding from the group. Groups can only be created via Bulk Edit for now.")
     date = forms.DateField(required=True,
-                           widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+                           widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}))
     cwe = forms.IntegerField(required=False)
     vulnerability_ids = vulnerability_ids_field
-    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
+    cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={"class": "cvsscalculator", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"}))
     cvssv3_score = forms.FloatField(required=False, max_value=10.0, min_value=0.0)
     description = forms.CharField(widget=forms.Textarea)
     severity = forms.ChoiceField(
         choices=SEVERITY_CHOICES,
         error_messages={
-            'required': 'Select valid choice: In Progress, On Hold, Completed',
-            'invalid_choice': 'Select valid choice: Critical,High,Medium,Low'})
+            "required": "Select valid choice: In Progress, On Hold, Completed",
+            "invalid_choice": "Select valid choice: Critical,High,Medium,Low"})
     mitigation = forms.CharField(widget=forms.Textarea, required=False)
     impact = forms.CharField(widget=forms.Textarea, required=False)
     request = forms.CharField(widget=forms.Textarea, required=False)
     response = forms.CharField(widget=forms.Textarea, required=False)
-    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
+    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label="Systems / Endpoints")
     endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
                                        help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
                                                  "Each must be valid.",
-                                       widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
+                                       widget=forms.widgets.Textarea(attrs={"rows": "3", "cols": "400"}))
     references = forms.CharField(widget=forms.Textarea, required=False)

-    mitigated = forms.DateField(required=False, help_text='Date and time when the flaw has been fixed', widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
+    mitigated = forms.DateField(required=False, help_text="Date and time when the flaw has been fixed", widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}))
     mitigated_by = forms.ModelChoiceField(required=False, queryset=Dojo_User.objects.none())

-    publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
-    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
+    publish_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
+    planned_remediation_date = forms.DateField(widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"}), required=False)
     planned_remediation_version = forms.CharField(max_length=99, required=False)
     effort_for_fixing = forms.ChoiceField(
         required=False,
         choices=EFFORT_FOR_FIXING_CHOICES,
         error_messages={
-            'invalid_choice': EFFORT_FOR_FIXING_INVALID_CHOICE})
+            "invalid_choice": EFFORT_FOR_FIXING_INVALID_CHOICE})

     # the only reliable way without hacking internal fields to get predicatble ordering is to make it explicit
-    field_order = ('title', 'group', 'date', 'sla_start_date', 'sla_expiration_date', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3',
-                   'cvssv3_score', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce', 'severity_justification',
-                   'endpoints', 'endpoints_to_add', 'references', 'active', 'mitigated', 'mitigated_by', 'verified', 'false_p', 'duplicate',
-                   'out_of_scope', 'risk_accept', 'under_defect_review')
+    field_order = ("title", "group", "date", "sla_start_date", "sla_expiration_date", "cwe", "vulnerability_ids", "severity", "cvssv3",
+                   "cvssv3_score", "description", "mitigation", "impact", "request", "response", "steps_to_reproduce", "severity_justification",
+                   "endpoints", "endpoints_to_add", "references", "active", "mitigated", "mitigated_by", "verified", "false_p", "duplicate",
+                   "out_of_scope", "risk_accept", "under_defect_review")

     def __init__(self, *args, **kwargs):
         req_resp = None
-        if 'req_resp' in kwargs:
-            req_resp = kwargs.pop('req_resp')
+        if "req_resp" in kwargs:
+            req_resp = kwargs.pop("req_resp")

-        self.can_edit_mitigated_data = kwargs.pop('can_edit_mitigated_data') if 'can_edit_mitigated_data' in kwargs \
+        self.can_edit_mitigated_data = kwargs.pop("can_edit_mitigated_data") if "can_edit_mitigated_data" in kwargs \
             else False

         super().__init__(*args, **kwargs)

-        self.fields['endpoints'].queryset = Endpoint.objects.filter(product=self.instance.test.engagement.product)
-        self.fields['mitigated_by'].queryset = get_authorized_users(Permissions.Test_Edit)
+        self.fields["endpoints"].queryset = Endpoint.objects.filter(product=self.instance.test.engagement.product)
+        self.fields["mitigated_by"].queryset = get_authorized_users(Permissions.Test_Edit)

         # do not show checkbox if finding is not accepted and simple risk acceptance is disabled
         # if checked, always show to allow unaccept also with full risk acceptance enabled
         # when adding from template, we don't have access to the test. quickfix for now to just hide simple risk acceptance
-        if not hasattr(self.instance, 'test') or (not self.instance.risk_accepted and not self.instance.test.engagement.product.enable_simple_risk_acceptance):
-            del self.fields['risk_accepted']
+        if not hasattr(self.instance, "test") or (not self.instance.risk_accepted and not self.instance.test.engagement.product.enable_simple_risk_acceptance):
+            del self.fields["risk_accepted"]
         else:
             if self.instance.risk_accepted:
-                self.fields['risk_accepted'].help_text = "Uncheck to unaccept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."
+                self.fields["risk_accepted"].help_text = "Uncheck to unaccept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."
             elif self.instance.test.engagement.product.enable_simple_risk_acceptance:
-                self.fields['risk_accepted'].help_text = "Check to accept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."
+                self.fields["risk_accepted"].help_text = "Check to accept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."

         # self.fields['tags'].widget.choices = t
         if req_resp:
-            self.fields['request'].initial = req_resp[0]
-            self.fields['response'].initial = req_resp[1]
+            self.fields["request"].initial = req_resp[0]
+            self.fields["response"].initial = req_resp[1]

         if self.instance.duplicate:
-            self.fields['duplicate'].help_text = "Original finding that is being duplicated here (readonly). Use view finding page to manage duplicate relationships. Unchecking duplicate here will reset this findings duplicate status, but will trigger deduplication logic."
+            self.fields["duplicate"].help_text = "Original finding that is being duplicated here (readonly). Use view finding page to manage duplicate relationships. Unchecking duplicate here will reset this findings duplicate status, but will trigger deduplication logic."
         else:
-            self.fields['duplicate'].help_text = "You can mark findings as duplicate only from the view finding page."
+            self.fields["duplicate"].help_text = "You can mark findings as duplicate only from the view finding page."

-        self.fields['sla_start_date'].disabled = True
-        self.fields['sla_expiration_date'].disabled = True
+        self.fields["sla_start_date"].disabled = True
+        self.fields["sla_expiration_date"].disabled = True

         if self.can_edit_mitigated_data:
-            if hasattr(self, 'instance'):
-                self.fields['mitigated'].initial = self.instance.mitigated
-                self.fields['mitigated_by'].initial = self.instance.mitigated_by
+            if hasattr(self, "instance"):
+                self.fields["mitigated"].initial = self.instance.mitigated
+                self.fields["mitigated_by"].initial = self.instance.mitigated_by
         else:
-            del self.fields['mitigated']
-            del self.fields['mitigated_by']
+            del self.fields["mitigated"]
+            del self.fields["mitigated_by"]

-        if not is_finding_groups_enabled() or not hasattr(self.instance, 'test'):
-            del self.fields['group']
+        if not is_finding_groups_enabled() or not hasattr(self.instance, "test"):
+            del self.fields["group"]
         else:
-            self.fields['group'].queryset = self.instance.test.finding_group_set.all()
-            self.fields['group'].initial = self.instance.finding_group
+            self.fields["group"].queryset = self.instance.test.finding_group_set.all()
+            self.fields["group"].initial = self.instance.finding_group

         self.endpoints_to_add_list = []

     def clean(self):
         cleaned_data = super().clean()
-        if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']:
-            msg = 'Duplicate findings cannot be verified or active'
+        if (cleaned_data["active"] or cleaned_data["verified"]) and cleaned_data["duplicate"]:
+            msg = "Duplicate findings cannot be verified or active"
             raise forms.ValidationError(msg)
-        if cleaned_data['false_p'] and cleaned_data['verified']:
-            msg = 'False positive findings cannot be verified.'
+        if cleaned_data["false_p"] and cleaned_data["verified"]:
+            msg = "False positive findings cannot be verified."
            raise forms.ValidationError(msg)
-        if cleaned_data['active'] and 'risk_accepted' in cleaned_data and cleaned_data['risk_accepted']:
-            msg = 'Active findings cannot be risk accepted.'
+        if cleaned_data["active"] and "risk_accepted" in cleaned_data and cleaned_data["risk_accepted"]:
+            msg = "Active findings cannot be risk accepted."
             raise forms.ValidationError(msg)

-        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
+        endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
         if errors:
             raise forms.ValidationError(errors)
         else:
@@ -1413,15 +1413,15 @@ def _post_clean(self):
         if self.can_edit_mitigated_data:
             opts = self.instance._meta
             try:
-                opts.get_field('mitigated').save_form_data(self.instance, self.cleaned_data.get('mitigated'))
-                opts.get_field('mitigated_by').save_form_data(self.instance, self.cleaned_data.get('mitigated_by'))
+                opts.get_field("mitigated").save_form_data(self.instance, self.cleaned_data.get("mitigated"))
+                opts.get_field("mitigated_by").save_form_data(self.instance, self.cleaned_data.get("mitigated_by"))
             except forms.ValidationError as e:
                 self._update_errors(e)

     class Meta:
         model = Finding
-        exclude = ('reporter', 'url', 'numerical_severity', 'under_review', 'reviewers', 'cve', 'inherited_tags',
-                   'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'sonarqube_issue', 'endpoint_status')
+        exclude = ("reporter", "url", "numerical_severity", "under_review", "reviewers", "cve", "inherited_tags",
+                   "review_requested_by", "is_mitigated", "jira_creation", "jira_change", "sonarqube_issue", "endpoint_status")


 class StubFindingForm(forms.ModelForm):
@@ -1429,14 +1429,14 @@ class StubFindingForm(forms.ModelForm):

     class Meta:
         model = Stub_Finding
-        order = ('title',)
+        order = ("title",)
         exclude = (
-            'date', 'description', 'severity', 'reporter', 'test', 'is_mitigated')
+            "date", "description", "severity", "reporter", "test", "is_mitigated")

     def clean(self):
         cleaned_data = super().clean()
-        if 'title' in cleaned_data:
-            if len(cleaned_data['title']) <= 0:
+        if "title" in cleaned_data:
+            if len(cleaned_data["title"]) <= 0:
                 msg = "The title is required."
                 raise forms.ValidationError(msg)
         else:
@@ -1452,29 +1452,29 @@ class ApplyFindingTemplateForm(forms.Form):

     cwe = forms.IntegerField(label="CWE", required=False)
     vulnerability_ids = vulnerability_ids_field
-    cvssv3 = forms.CharField(label="CVSSv3", max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'btn btn-secondary dropdown-toggle', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
+    cvssv3 = forms.CharField(label="CVSSv3", max_length=117, required=False, widget=forms.TextInput(attrs={"class": "btn btn-secondary dropdown-toggle", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"}))

-    severity = forms.ChoiceField(required=False, choices=SEVERITY_CHOICES, error_messages={'required': 'Select valid choice: In Progress, On Hold, Completed', 'invalid_choice': 'Select valid choice: Critical,High,Medium,Low'})
+    severity = forms.ChoiceField(required=False, choices=SEVERITY_CHOICES, error_messages={"required": "Select valid choice: In Progress, On Hold, Completed", "invalid_choice": "Select valid choice: Critical,High,Medium,Low"})

     description = forms.CharField(widget=forms.Textarea)
     mitigation = forms.CharField(widget=forms.Textarea, required=False)
     impact = forms.CharField(widget=forms.Textarea, required=False)
     references = forms.CharField(widget=forms.Textarea, required=False)

-    tags = TagField(required=False, help_text="Add tags that help describe this finding template. 
Choose from the list or add new tags. Press Enter key to add.", initial=Finding.tags.tag_model.objects.all().order_by("name")) def __init__(self, template=None, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name') + self.fields["tags"].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by("name") self.template = template if template: - self.template.vulnerability_ids = '\n'.join(template.vulnerability_ids) + self.template.vulnerability_ids = "\n".join(template.vulnerability_ids) def clean(self): cleaned_data = super().clean() - if 'title' in cleaned_data: - if len(cleaned_data['title']) <= 0: + if "title" in cleaned_data: + if len(cleaned_data["title"]) <= 0: msg = "The title is required." raise forms.ValidationError(msg) else: @@ -1484,8 +1484,8 @@ def clean(self): return cleaned_data class Meta: - fields = ['title', 'cwe', 'vulnerability_ids', 'cvssv3', 'severity', 'description', 'mitigation', 'impact', 'references', 'tags'] - order = ('title', 'cwe', 'vulnerability_ids', 'cvssv3', 'severity', 'description', 'impact', 'is_mitigated') + fields = ["title", "cwe", "vulnerability_ids", "cvssv3", "severity", "description", "mitigation", "impact", "references", "tags"] + order = ("title", "cwe", "vulnerability_ids", "cvssv3", "severity", "description", "impact", "is_mitigated") class FindingTemplateForm(forms.ModelForm): @@ -1494,24 +1494,24 @@ class FindingTemplateForm(forms.ModelForm): cwe = forms.IntegerField(label="CWE", required=False) vulnerability_ids = vulnerability_ids_field - cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'btn btn-secondary dropdown-toggle', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'})) + cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={"class": "btn btn-secondary dropdown-toggle", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false"})) severity = forms.ChoiceField( required=False, choices=SEVERITY_CHOICES, error_messages={ - 'required': 'Select valid choice: In Progress, On Hold, Completed', - 'invalid_choice': 'Select valid choice: Critical,High,Medium,Low'}) + "required": "Select valid choice: In Progress, On Hold, Completed", + "invalid_choice": "Select valid choice: Critical,High,Medium,Low"}) - field_order = ['title', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'references', 'tags', 'template_match', 'template_match_cwe', 'template_match_title', 'apply_to_findings'] + field_order = ["title", "cwe", "vulnerability_ids", "severity", "cvssv3", "description", "mitigation", "impact", "references", "tags", "template_match", "template_match_cwe", "template_match_title", "apply_to_findings"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name') + self.fields["tags"].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by("name") class Meta: model = Finding_Template - order = ('title', 'cwe', 'vulnerability_ids', 'cvssv3', 'severity', 'description', 'impact') - exclude = ('numerical_severity', 'is_mitigated', 'last_used', 'endpoint_status', 'cve') + order = ("title", "cwe", "vulnerability_ids", "cvssv3", "severity", "description", "impact") + exclude = ("numerical_severity", "is_mitigated", "last_used", "endpoint_status", "cve") class 
DeleteFindingTemplateForm(forms.ModelForm): @@ -1520,7 +1520,7 @@ class DeleteFindingTemplateForm(forms.ModelForm): class Meta: model = Finding_Template - fields = ['id'] + fields = ["id"] class FindingBulkUpdateForm(forms.ModelForm): @@ -1529,9 +1529,9 @@ class FindingBulkUpdateForm(forms.ModelForm): risk_accept = forms.BooleanField(required=False) risk_unaccept = forms.BooleanField(required=False) - date = forms.DateField(required=False, widget=forms.DateInput(attrs={'class': 'datepicker'})) - planned_remediation_date = forms.DateField(required=False, widget=forms.DateInput(attrs={'class': 'datepicker'})) - planned_remediation_version = forms.CharField(required=False, max_length=99, widget=forms.TextInput(attrs={'class': 'form-control'})) + date = forms.DateField(required=False, widget=forms.DateInput(attrs={"class": "datepicker"})) + planned_remediation_date = forms.DateField(required=False, widget=forms.DateInput(attrs={"class": "datepicker"})) + planned_remediation_version = forms.CharField(required=False, max_length=99, widget=forms.TextInput(attrs={"class": "form-control"})) finding_group = forms.BooleanField(required=False) finding_group_create = forms.BooleanField(required=False) finding_group_create_name = forms.CharField(required=False) @@ -1544,43 +1544,43 @@ class FindingBulkUpdateForm(forms.ModelForm): push_to_jira = forms.BooleanField(required=False) # unlink_from_jira = forms.BooleanField(required=False) push_to_github = forms.BooleanField(required=False) - tags = TagField(required=False, autocomplete_tags=Finding.tags.tag_model.objects.all().order_by('name')) - notes = forms.CharField(required=False, max_length=1024, widget=forms.TextInput(attrs={'class': 'form-control'})) + tags = TagField(required=False, autocomplete_tags=Finding.tags.tag_model.objects.all().order_by("name")) + notes = forms.CharField(required=False, max_length=1024, widget=forms.TextInput(attrs={"class": "form-control"})) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['severity'].required = False + self.fields["severity"].required = False # we need to defer initialization to prevent multiple initializations if other forms are shown - self.fields['tags'].widget.tag_options = tagulous.models.options.TagOptions(autocomplete_settings={'width': '200px', 'defer': True}) + self.fields["tags"].widget.tag_options = tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True}) def clean(self): cleaned_data = super().clean() - if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']: - msg = 'Duplicate findings cannot be verified or active' + if (cleaned_data["active"] or cleaned_data["verified"]) and cleaned_data["duplicate"]: + msg = "Duplicate findings cannot be verified or active" raise forms.ValidationError(msg) - if cleaned_data['false_p'] and cleaned_data['verified']: - msg = 'False positive findings cannot be verified.' + if cleaned_data["false_p"] and cleaned_data["verified"]: + msg = "False positive findings cannot be verified." 
raise forms.ValidationError(msg) return cleaned_data class Meta: model = Finding - fields = ('severity', 'date', 'planned_remediation_date', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope', - 'is_mitigated') + fields = ("severity", "date", "planned_remediation_date", "active", "verified", "false_p", "duplicate", "out_of_scope", + "is_mitigated") class EditEndpointForm(forms.ModelForm): class Meta: model = Endpoint - exclude = ['product', 'inherited_tags'] + exclude = ["product", "inherited_tags"] def __init__(self, *args, **kwargs): self.product = None self.endpoint_instance = None super().__init__(*args, **kwargs) - if 'instance' in kwargs: - self.endpoint_instance = kwargs.pop('instance') + if "instance" in kwargs: + self.endpoint_instance = kwargs.pop("instance") self.product = self.endpoint_instance.product product_id = self.endpoint_instance.product.pk findings = Finding.objects.filter(test__engagement__product__id=product_id) @@ -1590,13 +1590,13 @@ def clean(self): cleaned_data = super().clean() - protocol = cleaned_data['protocol'] - userinfo = cleaned_data['userinfo'] - host = cleaned_data['host'] - port = cleaned_data['port'] - path = cleaned_data['path'] - query = cleaned_data['query'] - fragment = cleaned_data['fragment'] + protocol = cleaned_data["protocol"] + userinfo = cleaned_data["userinfo"] + host = cleaned_data["host"] + port = cleaned_data["port"] + path = cleaned_data["path"] + query = cleaned_data["query"] + fragment = cleaned_data["fragment"] endpoint = endpoint_filter( protocol=protocol, @@ -1609,8 +1609,8 @@ def clean(self): product=self.product, ) if endpoint.count() > 1 or (endpoint.count() == 1 and endpoint.first().pk != self.endpoint_instance.pk): - msg = 'It appears as though an endpoint with this data already exists for this product.' - raise forms.ValidationError(msg, code='invalid') + msg = "It appears as though an endpoint with this data already exists for this product." + raise forms.ValidationError(msg, code="invalid") return cleaned_data @@ -1619,7 +1619,7 @@ class AddEndpointForm(forms.Form): endpoint = forms.CharField(max_length=5000, required=True, label="Endpoint(s)", help_text="The IP address, host name or full URL. You may enter one endpoint per line. 
" "Each must be valid.", - widget=forms.widgets.Textarea(attrs={'rows': '15', 'cols': '400'})) + widget=forms.widgets.Textarea(attrs={"rows": "15", "cols": "400"})) product = forms.CharField(required=True, widget=forms.widgets.HiddenInput(), help_text="The product this endpoint should be " "associated with.") @@ -1629,12 +1629,12 @@ class AddEndpointForm(forms.Form): def __init__(self, *args, **kwargs): product = None - if 'product' in kwargs: - product = kwargs.pop('product') + if "product" in kwargs: + product = kwargs.pop("product") super().__init__(*args, **kwargs) - self.fields['product'] = forms.ModelChoiceField(queryset=get_authorized_products(Permissions.Endpoint_Add)) + self.fields["product"] = forms.ModelChoiceField(queryset=get_authorized_products(Permissions.Endpoint_Add)) if product is not None: - self.fields['product'].initial = product.id + self.fields["product"].initial = product.id self.product = product self.endpoints_to_process = [] @@ -1659,16 +1659,16 @@ def clean(self): cleaned_data = super().clean() - if 'endpoint' in cleaned_data and 'product' in cleaned_data: - endpoint = cleaned_data['endpoint'] - product = cleaned_data['product'] + if "endpoint" in cleaned_data and "product" in cleaned_data: + endpoint = cleaned_data["endpoint"] + product = cleaned_data["product"] if isinstance(product, Product): self.product = product else: self.product = Product.objects.get(id=int(product)) else: - msg = 'Please enter a valid URL or IP address.' - raise forms.ValidationError(msg, code='invalid') + msg = "Please enter a valid URL or IP address." + raise forms.ValidationError(msg, code="invalid") endpoints_to_add_list, errors = validate_endpoints_to_add(endpoint) if errors: @@ -1685,28 +1685,28 @@ class DeleteEndpointForm(forms.ModelForm): class Meta: model = Endpoint - fields = ['id'] + fields = ["id"] class NoteForm(forms.ModelForm): - entry = forms.CharField(max_length=2400, widget=forms.Textarea(attrs={'rows': 4, 'cols': 15}), - label='Notes:') + entry = forms.CharField(max_length=2400, widget=forms.Textarea(attrs={"rows": 4, "cols": 15}), + label="Notes:") class Meta: model = Notes - fields = ['entry', 'private'] + fields = ["entry", "private"] class TypedNoteForm(NoteForm): def __init__(self, *args, **kwargs): - queryset = kwargs.pop('available_note_types') + queryset = kwargs.pop("available_note_types") super().__init__(*args, **kwargs) - self.fields['note_type'] = forms.ModelChoiceField(queryset=queryset, label='Note Type', required=True) + self.fields["note_type"] = forms.ModelChoiceField(queryset=queryset, label="Note Type", required=True) class Meta: model = Notes - fields = ['note_type', 'entry', 'private'] + fields = ["note_type", "entry", "private"] class DeleteNoteForm(forms.ModelForm): @@ -1715,73 +1715,73 @@ class DeleteNoteForm(forms.ModelForm): class Meta: model = Notes - fields = ['id'] + fields = ["id"] class CloseFindingForm(forms.ModelForm): entry = forms.CharField( required=True, max_length=2400, - widget=forms.Textarea, label='Notes:', - error_messages={'required': ('The reason for closing a finding is ' - 'required, please use the text area ' - 'below to provide documentation.')}) + widget=forms.Textarea, label="Notes:", + error_messages={"required": ("The reason for closing a finding is " + "required, please use the text area " + "below to provide documentation.")}) - mitigated = forms.DateField(required=False, help_text='Date and time when the flaw has been fixed', widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'})) + 
mitigated = forms.DateField(required=False, help_text="Date and time when the flaw has been fixed", widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"})) mitigated_by = forms.ModelChoiceField(required=False, queryset=Dojo_User.objects.none()) - false_p = forms.BooleanField(initial=False, required=False, label='False Positive') - out_of_scope = forms.BooleanField(initial=False, required=False, label='Out of Scope') - duplicate = forms.BooleanField(initial=False, required=False, label='Duplicate') + false_p = forms.BooleanField(initial=False, required=False, label="False Positive") + out_of_scope = forms.BooleanField(initial=False, required=False, label="Out of Scope") + duplicate = forms.BooleanField(initial=False, required=False, label="Duplicate") def __init__(self, *args, **kwargs): - queryset = kwargs.pop('missing_note_types') + queryset = kwargs.pop("missing_note_types") super().__init__(*args, **kwargs) if len(queryset) == 0: - self.fields['note_type'].widget = forms.HiddenInput() + self.fields["note_type"].widget = forms.HiddenInput() else: - self.fields['note_type'] = forms.ModelChoiceField(queryset=queryset, label='Note Type', required=True) + self.fields["note_type"] = forms.ModelChoiceField(queryset=queryset, label="Note Type", required=True) - self.can_edit_mitigated_data = kwargs.pop('can_edit_mitigated_data') if 'can_edit_mitigated_data' in kwargs \ + self.can_edit_mitigated_data = kwargs.pop("can_edit_mitigated_data") if "can_edit_mitigated_data" in kwargs \ else False if self.can_edit_mitigated_data: - self.fields['mitigated_by'].queryset = get_authorized_users(Permissions.Test_Edit) - self.fields['mitigated'].initial = self.instance.mitigated - self.fields['mitigated_by'].initial = self.instance.mitigated_by + self.fields["mitigated_by"].queryset = get_authorized_users(Permissions.Test_Edit) + self.fields["mitigated"].initial = self.instance.mitigated + self.fields["mitigated_by"].initial = self.instance.mitigated_by def _post_clean(self): super()._post_clean() if self.can_edit_mitigated_data: opts = self.instance._meta - if not self.cleaned_data.get('active'): + if not self.cleaned_data.get("active"): try: - opts.get_field('mitigated').save_form_data(self.instance, self.cleaned_data.get('mitigated')) - opts.get_field('mitigated_by').save_form_data(self.instance, self.cleaned_data.get('mitigated_by')) + opts.get_field("mitigated").save_form_data(self.instance, self.cleaned_data.get("mitigated")) + opts.get_field("mitigated_by").save_form_data(self.instance, self.cleaned_data.get("mitigated_by")) except forms.ValidationError as e: self._update_errors(e) class Meta: model = Notes - fields = ['note_type', 'entry', 'mitigated', 'mitigated_by', 'false_p', 'out_of_scope', 'duplicate'] + fields = ["note_type", "entry", "mitigated", "mitigated_by", "false_p", "out_of_scope", "duplicate"] class EditPlannedRemediationDateFindingForm(forms.ModelForm): def __init__(self, *args, **kwargs): finding = None - if 'finding' in kwargs: - finding = kwargs.pop('finding') + if "finding" in kwargs: + finding = kwargs.pop("finding") super().__init__(*args, **kwargs) - self.fields['planned_remediation_date'].required = True - self.fields['planned_remediation_date'].widget = forms.DateInput(attrs={'class': 'datepicker'}) + self.fields["planned_remediation_date"].required = True + self.fields["planned_remediation_date"].widget = forms.DateInput(attrs={"class": "datepicker"}) if finding is not None: - self.fields['planned_remediation_date'].initial = 
finding.planned_remediation_date + self.fields["planned_remediation_date"].initial = finding.planned_remediation_date class Meta: model = Finding - fields = ['planned_remediation_date'] + fields = ["planned_remediation_date"] class DefectFindingForm(forms.ModelForm): @@ -1790,28 +1790,28 @@ class DefectFindingForm(forms.ModelForm): entry = forms.CharField( required=True, max_length=2400, - widget=forms.Textarea, label='Notes:', - error_messages={'required': ('The reason for closing a finding is ' - 'required, please use the text area ' - 'below to provide documentation.')}) + widget=forms.Textarea, label="Notes:", + error_messages={"required": ("The reason for closing a finding is " + "required, please use the text area " + "below to provide documentation.")}) class Meta: model = Notes - fields = ['entry'] + fields = ["entry"] class ClearFindingReviewForm(forms.ModelForm): entry = forms.CharField( required=True, max_length=2400, - help_text='Please provide a message.', - widget=forms.Textarea, label='Notes:', - error_messages={'required': ('The reason for clearing a review is ' - 'required, please use the text area ' - 'below to provide documentation.')}) + help_text="Please provide a message.", + widget=forms.Textarea, label="Notes:", + error_messages={"required": ("The reason for clearing a review is " + "required, please use the text area " + "below to provide documentation.")}) class Meta: model = Finding - fields = ['active', 'verified', 'false_p', 'out_of_scope', 'duplicate'] + fields = ["active", "verified", "false_p", "out_of_scope", "duplicate"] class ReviewFindingForm(forms.Form): @@ -1889,7 +1889,7 @@ def __init__(self, *args, **kwargs): wmf_options = tuple(wmf_options) - self.fields['dates'].choices = wmf_options + self.fields["dates"].choices = wmf_options class SimpleMetricsForm(forms.Form): @@ -1904,43 +1904,43 @@ class SimpleSearchForm(forms.Form): class DateRangeMetrics(forms.Form): start_date = forms.DateField(required=True, label="To", - widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'})) + widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"})) end_date = forms.DateField(required=True, label="From", - widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'})) + widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"})) class MetricsFilterForm(forms.Form): start_date = forms.DateField(required=False, label="To", - widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'})) + widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"})) end_date = forms.DateField(required=False, label="From", - widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'})) + widget=forms.TextInput(attrs={"class": "datepicker", "autocomplete": "off"})) finding_status = forms.MultipleChoiceField( required=False, widget=forms.CheckboxSelectMultiple, choices=FINDING_STATUS, label="Status") severity = forms.MultipleChoiceField(required=False, - choices=(('Low', 'Low'), - ('Medium', 'Medium'), - ('High', 'High'), - ('Critical', 'Critical')), + choices=(("Low", "Low"), + ("Medium", "Medium"), + ("High", "High"), + ("Critical", "Critical")), help_text=('Hold down "Control", or ' '"Command" on a Mac, to ' 'select more than one.')) exclude_product_types = forms.ModelMultipleChoiceField( - required=False, queryset=Product_Type.objects.all().order_by('name')) + required=False, queryset=Product_Type.objects.all().order_by("name")) # add the ability to exclude the 
exclude_product_types field def __init__(self, *args, **kwargs): - exclude_product_types = kwargs.get('exclude_product_types', False) - if 'exclude_product_types' in kwargs: - del kwargs['exclude_product_types'] + exclude_product_types = kwargs.get("exclude_product_types", False) + if "exclude_product_types" in kwargs: + del kwargs["exclude_product_types"] super().__init__(*args, **kwargs) if exclude_product_types: - del self.fields['exclude_product_types'] + del self.fields["exclude_product_types"] class DojoGroupForm(forms.ModelForm): @@ -1950,8 +1950,8 @@ class DojoGroupForm(forms.ModelForm): class Meta: model = Dojo_Group - fields = ['name', 'description'] - exclude = ['users'] + fields = ["name", "description"] + exclude = ["users"] class DeleteGroupForm(forms.ModelForm): @@ -1960,169 +1960,169 @@ class DeleteGroupForm(forms.ModelForm): class Meta: model = Dojo_Group - fields = ['id'] + fields = ["id"] class Add_Group_MemberForm(forms.ModelForm): - users = forms.ModelMultipleChoiceField(queryset=Dojo_Group_Member.objects.none(), required=True, label='Users') + users = forms.ModelMultipleChoiceField(queryset=Dojo_Group_Member.objects.none(), required=True, label="Users") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['group'].disabled = True - current_members = Dojo_Group_Member.objects.filter(group=self.initial['group']).values_list('user', flat=True) - self.fields['users'].queryset = Dojo_User.objects.exclude( + self.fields["group"].disabled = True + current_members = Dojo_Group_Member.objects.filter(group=self.initial["group"]).values_list("user", flat=True) + self.fields["users"].queryset = Dojo_User.objects.exclude( Q(is_superuser=True) - | Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name') - self.fields['role'].queryset = get_group_member_roles() + | Q(id__in=current_members)).exclude(is_active=False).order_by("first_name", "last_name") + self.fields["role"].queryset = get_group_member_roles() class Meta: model = Dojo_Group_Member - fields = ['group', 'users', 'role'] + fields = ["group", "users", "role"] class Add_Group_Member_UserForm(forms.ModelForm): - groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') + groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label="Groups") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['user'].disabled = True - current_groups = Dojo_Group_Member.objects.filter(user=self.initial['user']).values_list('group', flat=True) - self.fields['groups'].queryset = Dojo_Group.objects.exclude(id__in=current_groups) - self.fields['role'].queryset = get_group_member_roles() + self.fields["user"].disabled = True + current_groups = Dojo_Group_Member.objects.filter(user=self.initial["user"]).values_list("group", flat=True) + self.fields["groups"].queryset = Dojo_Group.objects.exclude(id__in=current_groups) + self.fields["role"].queryset = get_group_member_roles() class Meta: model = Dojo_Group_Member - fields = ['groups', 'user', 'role'] + fields = ["groups", "user", "role"] class Edit_Group_MemberForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['group'].disabled = True - self.fields['user'].disabled = True - self.fields['role'].queryset = get_group_member_roles() + self.fields["group"].disabled = True + self.fields["user"].disabled = True + self.fields["role"].queryset = get_group_member_roles() 
class Meta: model = Dojo_Group_Member - fields = ['group', 'user', 'role'] + fields = ["group", "user", "role"] class Delete_Group_MemberForm(Edit_Group_MemberForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['role'].disabled = True + self.fields["role"].disabled = True class Add_Product_GroupForm(forms.ModelForm): - groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') + groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label="Groups") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product'].disabled = True - current_groups = Product_Group.objects.filter(product=self.initial["product"]).values_list('group', flat=True) + self.fields["product"].disabled = True + current_groups = Product_Group.objects.filter(product=self.initial["product"]).values_list("group", flat=True) authorized_groups = get_authorized_groups(Permissions.Group_View) authorized_groups = authorized_groups.exclude(id__in=current_groups) - self.fields['groups'].queryset = authorized_groups + self.fields["groups"].queryset = authorized_groups class Meta: model = Product_Group - fields = ['product', 'groups', 'role'] + fields = ["product", "groups", "role"] class Add_Product_Group_GroupForm(forms.ModelForm): - products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products') + products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label="Products") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - current_members = Product_Group.objects.filter(group=self.initial["group"]).values_list('product', flat=True) - self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \ + current_members = Product_Group.objects.filter(group=self.initial["group"]).values_list("product", flat=True) + self.fields["products"].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \ .exclude(id__in=current_members) - self.fields['group'].disabled = True + self.fields["group"].disabled = True class Meta: model = Product_Group - fields = ['products', 'group', 'role'] + fields = ["products", "group", "role"] class Edit_Product_Group_Form(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product'].disabled = True - self.fields['group'].disabled = True + self.fields["product"].disabled = True + self.fields["group"].disabled = True class Meta: model = Product_Group - fields = ['product', 'group', 'role'] + fields = ["product", "group", "role"] class Delete_Product_GroupForm(Edit_Product_Group_Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['role'].disabled = True + self.fields["role"].disabled = True class Add_Product_Type_GroupForm(forms.ModelForm): - groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') + groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label="Groups") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - current_groups = Product_Type_Group.objects.filter(product_type=self.initial["product_type"]).values_list('group', flat=True) + current_groups = Product_Type_Group.objects.filter(product_type=self.initial["product_type"]).values_list("group", flat=True) authorized_groups = 
get_authorized_groups(Permissions.Group_View) authorized_groups = authorized_groups.exclude(id__in=current_groups) - self.fields['groups'].queryset = authorized_groups - self.fields['product_type'].disabled = True + self.fields["groups"].queryset = authorized_groups + self.fields["product_type"].disabled = True class Meta: model = Product_Type_Group - fields = ['product_type', 'groups', 'role'] + fields = ["product_type", "groups", "role"] class Add_Product_Type_Group_GroupForm(forms.ModelForm): - product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types') + product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label="Product Types") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - current_members = Product_Type_Group.objects.filter(group=self.initial['group']).values_list('product_type', flat=True) - self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \ + current_members = Product_Type_Group.objects.filter(group=self.initial["group"]).values_list("product_type", flat=True) + self.fields["product_types"].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \ .exclude(id__in=current_members) - self.fields['group'].disabled = True + self.fields["group"].disabled = True class Meta: model = Product_Type_Group - fields = ['product_types', 'group', 'role'] + fields = ["product_types", "group", "role"] class Edit_Product_Type_Group_Form(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product_type'].disabled = True - self.fields['group'].disabled = True + self.fields["product_type"].disabled = True + self.fields["group"].disabled = True class Meta: model = Product_Type_Group - fields = ['product_type', 'group', 'role'] + fields = ["product_type", "group", "role"] class Delete_Product_Type_GroupForm(Edit_Product_Type_Group_Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['role'].disabled = True + self.fields["role"].disabled = True class DojoUserForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - if not get_current_user().is_superuser and not get_system_setting('enable_user_profile_editable'): + if not get_current_user().is_superuser and not get_system_setting("enable_user_profile_editable"): for field in self.fields: self.fields[field].disabled = True class Meta: model = Dojo_User - exclude = ['password', 'last_login', 'is_superuser', 'groups', - 'username', 'is_staff', 'is_active', 'date_joined', - 'user_permissions'] + exclude = ["password", "last_login", "is_superuser", "groups", + "username", "is_staff", "is_active", "date_joined", + "user_permissions"] class ChangePasswordForm(forms.Form): @@ -2131,34 +2131,34 @@ class ChangePasswordForm(forms.Form): new_password = forms.CharField(widget=forms.PasswordInput, required=True, validators=[validate_password], - help_text='') + help_text="") confirm_password = forms.CharField(widget=forms.PasswordInput, required=True, validators=[validate_password], - help_text='Password must match the new password entered above.') + help_text="Password must match the new password entered above.") def __init__(self, *args, **kwargs): self.user = None - if 'user' in kwargs: - self.user = kwargs.pop('user') + if "user" in kwargs: + self.user = kwargs.pop("user") super().__init__(*args, **kwargs) - 
self.fields['new_password'].help_text = get_password_requirements_string() + self.fields["new_password"].help_text = get_password_requirements_string() def clean(self): cleaned_data = super().clean() - current_password = self.cleaned_data.get('current_password') - new_password = self.cleaned_data.get('new_password') - confirm_password = self.cleaned_data.get('confirm_password') + current_password = self.cleaned_data.get("current_password") + new_password = self.cleaned_data.get("new_password") + confirm_password = self.cleaned_data.get("confirm_password") if not self.user.check_password(current_password): - msg = 'Current password is incorrect.' + msg = "Current password is incorrect." raise forms.ValidationError(msg) if new_password == current_password: - msg = 'New password must be different from current password.' + msg = "New password must be different from current password." raise forms.ValidationError(msg) if new_password != confirm_password: - msg = 'Passwords do not match.' + msg = "Passwords do not match." raise forms.ValidationError(msg) return cleaned_data @@ -2168,31 +2168,31 @@ class AddDojoUserForm(forms.ModelForm): password = forms.CharField(widget=forms.PasswordInput, required=False, validators=[validate_password], - help_text='') + help_text="") class Meta: model = Dojo_User - fields = ['username', 'password', 'first_name', 'last_name', 'email', 'is_active', 'is_superuser'] + fields = ["username", "password", "first_name", "last_name", "email", "is_active", "is_superuser"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) current_user = get_current_user() if not current_user.is_superuser: - self.fields['is_superuser'].disabled = True - self.fields['password'].help_text = get_password_requirements_string() + self.fields["is_superuser"].disabled = True + self.fields["password"].help_text = get_password_requirements_string() class EditDojoUserForm(forms.ModelForm): class Meta: model = Dojo_User - fields = ['username', 'first_name', 'last_name', 'email', 'is_active', 'is_superuser'] + fields = ["username", "first_name", "last_name", "email", "is_active", "is_superuser"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) current_user = get_current_user() if not current_user.is_superuser: - self.fields['is_superuser'].disabled = True + self.fields["is_superuser"].disabled = True class DeleteUserForm(forms.ModelForm): @@ -2201,20 +2201,20 @@ class DeleteUserForm(forms.ModelForm): class Meta: model = User - fields = ['id'] + fields = ["id"] class UserContactInfoForm(forms.ModelForm): class Meta: model = UserContactInfo - exclude = ['user', 'slack_user_id'] + exclude = ["user", "slack_user_id"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) current_user = get_current_user() if not current_user.is_superuser: - del self.fields['force_password_reset'] - if not get_system_setting('enable_user_profile_editable'): + del self.fields["force_password_reset"] + if not get_system_setting("enable_user_profile_editable"): for field in self.fields: self.fields[field].disabled = True @@ -2222,13 +2222,13 @@ def __init__(self, *args, **kwargs): class GlobalRoleForm(forms.ModelForm): class Meta: model = Global_Role - exclude = ['user', 'group'] + exclude = ["user", "group"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) current_user = get_current_user() if not current_user.is_superuser: - self.fields['role'].disabled = True + self.fields["role"].disabled = True def get_years(): @@ -2238,33 +2238,33 @@ def 
get_years(): class ProductCountsFormBase(forms.Form): month = forms.ChoiceField(choices=list(MONTHS.items()), required=True, error_messages={ - 'required': '*'}) + "required": "*"}) year = forms.ChoiceField(choices=get_years, required=True, error_messages={ - 'required': '*'}) + "required": "*"}) class ProductTypeCountsForm(ProductCountsFormBase): product_type = forms.ModelChoiceField(required=True, queryset=Product_Type.objects.none(), error_messages={ - 'required': '*'}) + "required": "*"}) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.fields["product_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) class ProductTagCountsForm(ProductCountsFormBase): product_tag = forms.ModelChoiceField(required=True, - queryset=Product.tags.tag_model.objects.none().order_by('name'), + queryset=Product.tags.tag_model.objects.none().order_by("name"), error_messages={ - 'required': '*'}) + "required": "*"}) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) prods = get_authorized_products(Permissions.Product_View) tags_available_to_user = Product.tags.tag_model.objects.filter(product__in=prods) - self.fields['product_tag'].queryset = tags_available_to_user + self.fields["product_tag"].queryset = tags_available_to_user class APIKeyForm(forms.ModelForm): @@ -2273,27 +2273,27 @@ class APIKeyForm(forms.ModelForm): class Meta: model = User - exclude = ['username', 'first_name', 'last_name', 'email', 'is_active', - 'is_staff', 'is_superuser', 'password', 'last_login', 'groups', - 'date_joined', 'user_permissions'] + exclude = ["username", "first_name", "last_name", "email", "is_active", + "is_staff", "is_superuser", "password", "last_login", "groups", + "date_joined", "user_permissions"] class ReportOptionsForm(forms.Form): - yes_no = (('0', 'No'), ('1', 'Yes')) + yes_no = (("0", "No"), ("1", "Yes")) include_finding_notes = forms.ChoiceField(choices=yes_no, label="Finding Notes") include_finding_images = forms.ChoiceField(choices=yes_no, label="Finding Images") include_executive_summary = forms.ChoiceField(choices=yes_no, label="Executive Summary") include_table_of_contents = forms.ChoiceField(choices=yes_no, label="Table of Contents") include_disclaimer = forms.ChoiceField(choices=yes_no, label="Disclaimer") - report_type = forms.ChoiceField(choices=(('HTML', 'HTML'), ('AsciiDoc', 'AsciiDoc'))) + report_type = forms.ChoiceField(choices=(("HTML", "HTML"), ("AsciiDoc", "AsciiDoc"))) class CustomReportOptionsForm(forms.Form): - yes_no = (('0', 'No'), ('1', 'Yes')) + yes_no = (("0", "No"), ("1", "Yes")) report_name = forms.CharField(required=False, max_length=100) include_finding_notes = forms.ChoiceField(required=False, choices=yes_no) include_finding_images = forms.ChoiceField(choices=yes_no, label="Finding Images") - report_type = forms.ChoiceField(choices=(('HTML', 'HTML'), ('AsciiDoc', 'AsciiDoc'))) + report_type = forms.ChoiceField(choices=(("HTML", "HTML"), ("AsciiDoc", "AsciiDoc"))) class DeleteFindingForm(forms.ModelForm): @@ -2302,19 +2302,19 @@ class DeleteFindingForm(forms.ModelForm): class Meta: model = Finding - fields = ['id'] + fields = ["id"] class CopyFindingForm(forms.Form): test = forms.ModelChoiceField( required=True, queryset=Test.objects.none(), - error_messages={'required': '*'}) + error_messages={"required": "*"}) def __init__(self, *args, **kwargs): - authorized_lists = kwargs.pop('tests', None) + 
authorized_lists = kwargs.pop("tests", None) super().__init__(*args, **kwargs) - self.fields['test'].queryset = authorized_lists + self.fields["test"].queryset = authorized_lists class FindingFormID(forms.ModelForm): @@ -2323,7 +2323,7 @@ class FindingFormID(forms.ModelForm): class Meta: model = Finding - fields = ('id',) + fields = ("id",) class DeleteStubFindingForm(forms.ModelForm): @@ -2332,14 +2332,14 @@ class DeleteStubFindingForm(forms.ModelForm): class Meta: model = Stub_Finding - fields = ['id'] + fields = ["id"] class GITHUB_IssueForm(forms.ModelForm): class Meta: model = GITHUB_Issue - exclude = ['product'] + exclude = ["product"] class GITHUBForm(forms.ModelForm): @@ -2347,7 +2347,7 @@ class GITHUBForm(forms.ModelForm): class Meta: model = GITHUB_Conf - exclude = ['product'] + exclude = ["product"] class DeleteGITHUBConfForm(forms.ModelForm): @@ -2356,24 +2356,24 @@ class DeleteGITHUBConfForm(forms.ModelForm): class Meta: model = GITHUB_Conf - fields = ['id'] + fields = ["id"] class ExpressGITHUBForm(forms.ModelForm): password = forms.CharField(widget=forms.PasswordInput, required=True) - issue_key = forms.CharField(required=True, help_text='A valid issue ID is required to gather the necessary information.') + issue_key = forms.CharField(required=True, help_text="A valid issue ID is required to gather the necessary information.") class Meta: model = GITHUB_Conf - exclude = ['product', 'epic_name_id', 'open_status_key', - 'close_status_key', 'info_mapping_severity', - 'low_mapping_severity', 'medium_mapping_severity', - 'high_mapping_severity', 'critical_mapping_severity', 'finding_text'] + exclude = ["product", "epic_name_id", "open_status_key", + "close_status_key", "info_mapping_severity", + "low_mapping_severity", "medium_mapping_severity", + "high_mapping_severity", "critical_mapping_severity", "finding_text"] def get_jira_issue_template_dir_choices(): template_root = settings.JIRA_TEMPLATE_ROOT - template_dir_list = [('', '---')] + template_dir_list = [("", "---")] for base_dir, dirnames, filenames in os.walk(template_root): # for filename in filenames: # if base_dir.startswith(settings.TEMPLATE_DIR_PREFIX): @@ -2385,7 +2385,7 @@ def get_jira_issue_template_dir_choices(): base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):] template_dir_list.append((os.path.join(base_dir, dirname), dirname)) - logger.debug('templates: %s', template_dir_list) + logger.debug("templates: %s", template_dir_list) return template_dir_list @@ -2396,7 +2396,7 @@ class JIRA_IssueForm(forms.ModelForm): class Meta: model = JIRA_Issue - exclude = ['product'] + exclude = ["product"] class BaseJiraForm(forms.ModelForm): @@ -2406,16 +2406,16 @@ def test_jira_connection(self): import dojo.jira_link.helper as jira_helper try: # Attempt to validate the credentials before moving forward - jira_helper.get_jira_connection_raw(self.cleaned_data['url'], - self.cleaned_data['username'], - self.cleaned_data['password']) - logger.debug('valid JIRA config!') + jira_helper.get_jira_connection_raw(self.cleaned_data["url"], + self.cleaned_data["username"], + self.cleaned_data["password"]) + logger.debug("valid JIRA config!") except Exception as e: # form only used by admins, so we can show full error message using str(e) which can help debug any problems - message = 'Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. ' + str( + message = "Unable to authenticate to JIRA. 
Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. " + str( e) - self.add_error('username', message) - self.add_error('password', message) + self.add_error("username", message) + self.add_error("password", message) def clean(self): self.test_jira_connection() @@ -2425,39 +2425,39 @@ def clean(self): class JIRAForm(BaseJiraForm): issue_template_dir = forms.ChoiceField(required=False, choices=JIRA_TEMPLATE_CHOICES, - help_text='Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.') + help_text="Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.instance: - self.fields['password'].required = False + self.fields["password"].required = False def clean(self): - if self.instance and not self.cleaned_data['password']: - self.cleaned_data['password'] = self.instance.password + if self.instance and not self.cleaned_data["password"]: + self.cleaned_data["password"] = self.instance.password return super().clean() class Meta: model = JIRA_Instance - exclude = [''] + exclude = [""] class ExpressJIRAForm(BaseJiraForm): - issue_key = forms.CharField(required=True, help_text='A valid issue ID is required to gather the necessary information.') + issue_key = forms.CharField(required=True, help_text="A valid issue ID is required to gather the necessary information.") class Meta: model = JIRA_Instance - exclude = ['product', 'epic_name_id', 'open_status_key', - 'close_status_key', 'info_mapping_severity', - 'low_mapping_severity', 'medium_mapping_severity', - 'high_mapping_severity', 'critical_mapping_severity', 'finding_text'] + exclude = ["product", "epic_name_id", "open_status_key", + "close_status_key", "info_mapping_severity", + "low_mapping_severity", "medium_mapping_severity", + "high_mapping_severity", "critical_mapping_severity", "finding_text"] class Benchmark_Product_SummaryForm(forms.ModelForm): class Meta: model = Benchmark_Product_Summary - exclude = ['product', 'current_level', 'benchmark_type', 'asvs_level_1_benchmark', 'asvs_level_1_score', 'asvs_level_2_benchmark', 'asvs_level_2_score', 'asvs_level_3_benchmark', 'asvs_level_3_score'] + exclude = ["product", "current_level", "benchmark_type", "asvs_level_1_benchmark", "asvs_level_1_score", "asvs_level_2_benchmark", "asvs_level_2_score", "asvs_level_3_benchmark", "asvs_level_3_score"] class DeleteBenchmarkForm(forms.ModelForm): @@ -2466,7 +2466,7 @@ class DeleteBenchmarkForm(forms.ModelForm): class Meta: model = Benchmark_Product_Summary - fields = ['id'] + fields = ["id"] # class JIRA_ProjectForm(forms.ModelForm): @@ -2482,14 +2482,14 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) tool_configuration = forms.ModelChoiceField( - label='Tool Configuration', - queryset=Tool_Configuration.objects.all().order_by('name'), + label="Tool Configuration", + queryset=Tool_Configuration.objects.all().order_by("name"), required=True, ) class Meta: model = Product_API_Scan_Configuration - exclude = ['product'] + exclude = ["product"] class DeleteProduct_API_Scan_ConfigurationForm(forms.ModelForm): @@ -2497,7 +2497,7 @@ class DeleteProduct_API_Scan_ConfigurationForm(forms.ModelForm): class Meta: model = 
Product_API_Scan_Configuration - fields = ['id'] + fields = ["id"] class DeleteJIRAInstanceForm(forms.ModelForm): @@ -2506,16 +2506,16 @@ class DeleteJIRAInstanceForm(forms.ModelForm): class Meta: model = JIRA_Instance - fields = ['id'] + fields = ["id"] class ToolTypeForm(forms.ModelForm): class Meta: model = Tool_Type - exclude = ['product'] + exclude = ["product"] def __init__(self, *args, **kwargs): - instance = kwargs.get('instance', None) + instance = kwargs.get("instance", None) self.newly_created = True if instance is not None: self.newly_created = instance.pk is None @@ -2527,7 +2527,7 @@ def clean(self): name = form_data.get("name") # Make sure this will not create a duplicate test type if Tool_Type.objects.filter(name=name).count() > 0: - msg = 'A Tool Type with the name already exists' + msg = "A Tool Type with the name already exists" raise forms.ValidationError(msg) return form_data @@ -2536,40 +2536,40 @@ def clean(self): class RegulationForm(forms.ModelForm): class Meta: model = Regulation - exclude = ['product'] + exclude = ["product"] class AppAnalysisForm(forms.ModelForm): - user = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=True) + user = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by("first_name", "last_name"), required=True) class Meta: model = App_Analysis - exclude = ['product'] + exclude = ["product"] class DeleteAppAnalysisForm(forms.ModelForm): class Meta: model = App_Analysis - exclude = ['product', 'tags'] + exclude = ["product", "tags"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['name'].disabled = True - self.fields['user'].disabled = True - self.fields['confidence'].disabled = True - self.fields['version'].disabled = True - self.fields['icon'].disabled = True - self.fields['website'].disabled = True - self.fields['website_found'].disabled = True + self.fields["name"].disabled = True + self.fields["user"].disabled = True + self.fields["confidence"].disabled = True + self.fields["version"].disabled = True + self.fields["icon"].disabled = True + self.fields["website"].disabled = True + self.fields["website_found"].disabled = True class ToolConfigForm(forms.ModelForm): - tool_type = forms.ModelChoiceField(queryset=Tool_Type.objects.all(), label='Tool Type') - ssh = forms.CharField(widget=forms.Textarea(attrs={}), required=False, label='SSH Key') + tool_type = forms.ModelChoiceField(queryset=Tool_Type.objects.all(), label="Tool Type") + ssh = forms.CharField(widget=forms.Textarea(attrs={}), required=False, label="SSH Key") class Meta: model = Tool_Configuration - exclude = ['product'] + exclude = ["product"] def clean(self): from django.core.validators import URLValidator @@ -2577,11 +2577,11 @@ def clean(self): try: if form_data["url"] is not None: - url_validator = URLValidator(schemes=['ssh', 'http', 'https']) + url_validator = URLValidator(schemes=["ssh", "http", "https"]) url_validator(form_data["url"]) except forms.ValidationError: - msg = 'It does not appear as though this endpoint is a valid URL/SSH or IP address.' - raise forms.ValidationError(msg, code='invalid') + msg = "It does not appear as though this endpoint is a valid URL/SSH or IP address." 
+ raise forms.ValidationError(msg, code="invalid") return form_data @@ -2592,24 +2592,24 @@ def __init__(self, *args, **kwargs): # if this sla config has findings being asynchronously updated, disable the days by severity fields if self.instance.async_updating: - msg = 'Finding SLA expiration dates are currently being recalculated. ' + \ - 'This field cannot be changed until the calculation is complete.' - self.fields['critical'].disabled = True - self.fields['enforce_critical'].disabled = True - self.fields['critical'].widget.attrs['message'] = msg - self.fields['high'].disabled = True - self.fields['enforce_high'].disabled = True - self.fields['high'].widget.attrs['message'] = msg - self.fields['medium'].disabled = True - self.fields['enforce_medium'].disabled = True - self.fields['medium'].widget.attrs['message'] = msg - self.fields['low'].disabled = True - self.fields['enforce_low'].disabled = True - self.fields['low'].widget.attrs['message'] = msg + msg = "Finding SLA expiration dates are currently being recalculated. " + \ + "This field cannot be changed until the calculation is complete." + self.fields["critical"].disabled = True + self.fields["enforce_critical"].disabled = True + self.fields["critical"].widget.attrs["message"] = msg + self.fields["high"].disabled = True + self.fields["enforce_high"].disabled = True + self.fields["high"].widget.attrs["message"] = msg + self.fields["medium"].disabled = True + self.fields["enforce_medium"].disabled = True + self.fields["medium"].widget.attrs["message"] = msg + self.fields["low"].disabled = True + self.fields["enforce_low"].disabled = True + self.fields["low"].widget.attrs["message"] = msg class Meta: model = SLA_Configuration - fields = ['name', 'description', 'critical', 'enforce_critical', 'high', 'enforce_high', 'medium', 'enforce_medium', 'low', 'enforce_low'] + fields = ["name", "description", "critical", "enforce_critical", "high", "enforce_high", "medium", "enforce_medium", "low", "enforce_low"] class DeleteSLAConfigForm(forms.ModelForm): @@ -2618,7 +2618,7 @@ class DeleteSLAConfigForm(forms.ModelForm): class Meta: model = SLA_Configuration - fields = ['id'] + fields = ["id"] class DeleteObjectsSettingsForm(forms.ModelForm): @@ -2627,7 +2627,7 @@ class DeleteObjectsSettingsForm(forms.ModelForm): class Meta: model = Objects_Product - fields = ['id'] + fields = ["id"] class DeleteToolProductSettingsForm(forms.ModelForm): @@ -2636,17 +2636,17 @@ class DeleteToolProductSettingsForm(forms.ModelForm): class Meta: model = Tool_Product_Settings - fields = ['id'] + fields = ["id"] class ToolProductSettingsForm(forms.ModelForm): - tool_configuration = forms.ModelChoiceField(queryset=Tool_Configuration.objects.all(), label='Tool Configuration') + tool_configuration = forms.ModelChoiceField(queryset=Tool_Configuration.objects.all(), label="Tool Configuration") class Meta: model = Tool_Product_Settings - fields = ['name', 'description', 'url', 'tool_configuration', 'tool_project_id'] - exclude = ['tool_type'] - order = ['name'] + fields = ["name", "description", "url", "tool_configuration", "tool_project_id"] + exclude = ["tool_type"] + order = ["name"] def clean(self): from django.core.validators import URLValidator @@ -2654,11 +2654,11 @@ def clean(self): try: if form_data["url"] is not None: - url_validator = URLValidator(schemes=['ssh', 'http', 'https']) + url_validator = URLValidator(schemes=["ssh", "http", "https"]) url_validator(form_data["url"]) except forms.ValidationError: - msg = 'It does not appear as though this endpoint is a 
valid URL/SSH or IP address.' - raise forms.ValidationError(msg, code='invalid') + msg = "It does not appear as though this endpoint is a valid URL/SSH or IP address." + raise forms.ValidationError(msg, code="invalid") return form_data @@ -2672,8 +2672,8 @@ class ObjectSettingsForm(forms.ModelForm): class Meta: model = Objects_Product - fields = ['path', 'folder', 'artifact', 'name', 'review_status', 'tags'] - exclude = ['product'] + fields = ["path", "folder", "artifact", "name", "review_status", "tags"] + exclude = ["product"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2686,15 +2686,15 @@ def clean(self): class CredMappingForm(forms.ModelForm): cred_user = forms.ModelChoiceField( - queryset=Cred_Mapping.objects.all().select_related('cred_id'), + queryset=Cred_Mapping.objects.all().select_related("cred_id"), required=False, - label='Select a Credential', + label="Select a Credential", ) class Meta: model = Cred_Mapping - fields = ['cred_user'] - exclude = ['product', 'finding', 'engagement', 'test', 'url', 'is_authn_provider'] + fields = ["cred_user"] + exclude = ["product", "finding", "engagement", "test", "url", "is_authn_provider"] def __init__(self, *args, **kwargs): cred_user_queryset = kwargs.pop("cred_user_queryset", None) @@ -2706,8 +2706,8 @@ def __init__(self, *args, **kwargs): class CredMappingFormProd(forms.ModelForm): class Meta: model = Cred_Mapping - fields = ['cred_id', 'url', 'is_authn_provider'] - exclude = ['product', 'finding', 'engagement', 'test'] + fields = ["cred_id", "url", "is_authn_provider"] + exclude = ["product", "finding", "engagement", "test"] class EngagementPresetsForm(forms.ModelForm): @@ -2720,7 +2720,7 @@ class EngagementPresetsForm(forms.ModelForm): class Meta: model = Engagement_Presets - exclude = ['product'] + exclude = ["product"] class DeleteEngagementPresetsForm(forms.ModelForm): @@ -2729,7 +2729,7 @@ class DeleteEngagementPresetsForm(forms.ModelForm): class Meta: model = Engagement_Presets - fields = ['id'] + fields = ["id"] class SystemSettingsForm(forms.ModelForm): @@ -2737,42 +2737,42 @@ class SystemSettingsForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['default_group_role'].queryset = get_group_member_roles() + self.fields["default_group_role"].queryset = get_group_member_roles() def clean(self): cleaned_data = super().clean() - enable_jira_value = cleaned_data.get('enable_jira') - jira_webhook_secret_value = cleaned_data.get('jira_webhook_secret').strip() + enable_jira_value = cleaned_data.get("enable_jira") + jira_webhook_secret_value = cleaned_data.get("jira_webhook_secret").strip() if enable_jira_value and not jira_webhook_secret_value: - self.add_error('jira_webhook_secret', 'This field is required when enable Jira Integration is True') + self.add_error("jira_webhook_secret", "This field is required when enable Jira Integration is True") return cleaned_data class Meta: model = System_Settings - exclude = ['product_grade'] + exclude = ["product_grade"] class BenchmarkForm(forms.ModelForm): class Meta: model = Benchmark_Product - exclude = ['product', 'control'] + exclude = ["product", "control"] class Benchmark_RequirementForm(forms.ModelForm): class Meta: model = Benchmark_Requirement - exclude = [''] + exclude = [""] class NotificationsForm(forms.ModelForm): class Meta: model = Notifications - exclude = ['template'] + exclude = ["template"] class ProductNotificationsForm(forms.ModelForm): @@ -2780,17 +2780,17 @@ class 
ProductNotificationsForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self.instance.id: - self.initial['engagement_added'] = '' - self.initial['close_engagement'] = '' - self.initial['test_added'] = '' - self.initial['scan_added'] = '' - self.initial['sla_breach'] = '' - self.initial['sla_breach_combined'] = '' - self.initial['risk_acceptance_expiration'] = '' + self.initial["engagement_added"] = "" + self.initial["close_engagement"] = "" + self.initial["test_added"] = "" + self.initial["scan_added"] = "" + self.initial["sla_breach"] = "" + self.initial["sla_breach_combined"] = "" + self.initial["risk_acceptance_expiration"] = "" class Meta: model = Notifications - fields = ['engagement_added', 'close_engagement', 'test_added', 'scan_added', 'sla_breach', 'sla_breach_combined', 'risk_acceptance_expiration'] + fields = ["engagement_added", "close_engagement", "test_added", "scan_added", "sla_breach", "sla_breach_combined", "risk_acceptance_expiration"] class AjaxChoiceField(forms.ChoiceField): @@ -2805,133 +2805,133 @@ class CredUserForm(forms.ModelForm): class Meta: model = Cred_User - exclude = [''] + exclude = [""] # fields = ['selenium_script'] class GITHUB_Product_Form(forms.ModelForm): - git_conf = forms.ModelChoiceField(queryset=GITHUB_Conf.objects.all(), label='GITHUB Configuration', required=False) + git_conf = forms.ModelChoiceField(queryset=GITHUB_Conf.objects.all(), label="GITHUB Configuration", required=False) class Meta: model = GITHUB_PKey - exclude = ['product'] + exclude = ["product"] class JIRAProjectForm(forms.ModelForm): - inherit_from_product = forms.BooleanField(label='inherit JIRA settings from product', required=False) - jira_instance = forms.ModelChoiceField(queryset=JIRA_Instance.objects.all(), label='JIRA Instance', required=False) + inherit_from_product = forms.BooleanField(label="inherit JIRA settings from product", required=False) + jira_instance = forms.ModelChoiceField(queryset=JIRA_Instance.objects.all(), label="JIRA Instance", required=False) issue_template_dir = forms.ChoiceField(required=False, choices=JIRA_TEMPLATE_CHOICES, - help_text='Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.') + help_text="Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. 
Leave empty to use the default jira_full templates.") - prefix = 'jira-project-form' + prefix = "jira-project-form" class Meta: model = JIRA_Project - exclude = ['product', 'engagement'] - fields = ['inherit_from_product', 'jira_instance', 'project_key', 'issue_template_dir', 'epic_issue_type_name', 'component', 'custom_fields', 'jira_labels', 'default_assignee', 'add_vulnerability_id_to_jira_label', 'push_all_issues', 'enable_engagement_epic_mapping', 'push_notes', 'product_jira_sla_notification', 'risk_acceptance_expiration_notification'] + exclude = ["product", "engagement"] + fields = ["inherit_from_product", "jira_instance", "project_key", "issue_template_dir", "epic_issue_type_name", "component", "custom_fields", "jira_labels", "default_assignee", "add_vulnerability_id_to_jira_label", "push_all_issues", "enable_engagement_epic_mapping", "push_notes", "product_jira_sla_notification", "risk_acceptance_expiration_notification"] def __init__(self, *args, **kwargs): from dojo.jira_link import helper as jira_helper # if the form is shown for an engagement, we set a placeholder text around inherited settings from product - self.target = kwargs.pop('target', 'product') - self.product = kwargs.pop('product', None) - self.engagement = kwargs.pop('engagement', None) + self.target = kwargs.pop("target", "product") + self.product = kwargs.pop("product", None) + self.engagement = kwargs.pop("engagement", None) super().__init__(*args, **kwargs) - logger.debug('self.target: %s, self.product: %s, self.instance: %s', self.target, self.product, self.instance) - logger.debug('data: %s', self.data) + logger.debug("self.target: %s, self.product: %s, self.instance: %s", self.target, self.product, self.instance) + logger.debug("data: %s", self.data) - if self.target == 'engagement': - product_name = self.product.name if self.product else self.engagement.product.name if self.engagement.product else '' + if self.target == "engagement": + product_name = self.product.name if self.product else self.engagement.product.name if self.engagement.product else "" - self.fields['project_key'].widget = forms.TextInput(attrs={'placeholder': f"JIRA settings inherited from product '{product_name}'"}) - self.fields['project_key'].help_text = f"JIRA settings are inherited from product '{product_name}', unless configured differently here." - self.fields['jira_instance'].help_text = f"JIRA settings are inherited from product '{product_name}' , unless configured differently here." + self.fields["project_key"].widget = forms.TextInput(attrs={"placeholder": f"JIRA settings inherited from product '{product_name}'"}) + self.fields["project_key"].help_text = f"JIRA settings are inherited from product '{product_name}', unless configured differently here." + self.fields["jira_instance"].help_text = f"JIRA settings are inherited from product '{product_name}', unless configured differently here."
# if we don't have an instance, django will insert a blank empty one :-( # so we have to check for id to make sure we only trigger this when there is a real instance from db if self.instance.id: - logger.debug('jira project instance found for engagement, unchecking inherit checkbox') - self.fields['jira_instance'].required = True - self.fields['project_key'].required = True - self.initial['inherit_from_product'] = False + logger.debug("jira project instance found for engagement, unchecking inherit checkbox") + self.fields["jira_instance"].required = True + self.fields["project_key"].required = True + self.initial["inherit_from_product"] = False # once a jira project config is attached to an engagement, we can't go back to inheriting # because the config needs to remain in place for the existing jira issues - self.fields['inherit_from_product'].disabled = True - self.fields['inherit_from_product'].help_text = 'Once an engagement has a JIRA Project stored, you cannot switch back to inheritance to avoid breaking existing JIRA issues' - self.fields['jira_instance'].disabled = False - self.fields['project_key'].disabled = False - self.fields['issue_template_dir'].disabled = False - self.fields['epic_issue_type_name'].disabled = False - self.fields['component'].disabled = False - self.fields['custom_fields'].disabled = False - self.fields['default_assignee'].disabled = False - self.fields['jira_labels'].disabled = False - self.fields['add_vulnerability_id_to_jira_label'].disabled = False - self.fields['push_all_issues'].disabled = False - self.fields['enable_engagement_epic_mapping'].disabled = False - self.fields['push_notes'].disabled = False - self.fields['product_jira_sla_notification'].disabled = False - self.fields['risk_acceptance_expiration_notification'].disabled = False + self.fields["inherit_from_product"].disabled = True + self.fields["inherit_from_product"].help_text = "Once an engagement has a JIRA Project stored, you cannot switch back to inheritance to avoid breaking existing JIRA issues" + self.fields["jira_instance"].disabled = False + self.fields["project_key"].disabled = False + self.fields["issue_template_dir"].disabled = False + self.fields["epic_issue_type_name"].disabled = False + self.fields["component"].disabled = False + self.fields["custom_fields"].disabled = False + self.fields["default_assignee"].disabled = False + self.fields["jira_labels"].disabled = False + self.fields["add_vulnerability_id_to_jira_label"].disabled = False + self.fields["push_all_issues"].disabled = False + self.fields["enable_engagement_epic_mapping"].disabled = False + self.fields["push_notes"].disabled = False + self.fields["product_jira_sla_notification"].disabled = False + self.fields["risk_acceptance_expiration_notification"].disabled = False elif self.product: - logger.debug('setting jira project fields from product1') - self.initial['inherit_from_product'] = True + logger.debug("setting jira project fields from product1") + self.initial["inherit_from_product"] = True jira_project_product = jira_helper.get_jira_project(self.product) # we have to check that we are not in a POST request where jira project config data is posted # this is because initial values will overwrite the actual values entered by the user # makes no sense, but seems to be accepted behaviour: https://code.djangoproject.com/ticket/30407 - if jira_project_product and (self.prefix + '-jira_instance') not in self.data: - logger.debug('setting jira project fields from product2') - self.initial['jira_instance'] = 
jira_project_product.jira_instance.id if jira_project_product.jira_instance else None - self.initial['project_key'] = jira_project_product.project_key - self.initial['issue_template_dir'] = jira_project_product.issue_template_dir - self.initial['epic_issue_type_name'] = jira_project_product.epic_issue_type_name - self.initial['component'] = jira_project_product.component - self.initial['custom_fields'] = jira_project_product.custom_fields - self.initial['default_assignee'] = jira_project_product.default_assignee - self.initial['jira_labels'] = jira_project_product.jira_labels - self.initial['add_vulnerability_id_to_jira_label'] = jira_project_product.add_vulnerability_id_to_jira_label - self.initial['push_all_issues'] = jira_project_product.push_all_issues - self.initial['enable_engagement_epic_mapping'] = jira_project_product.enable_engagement_epic_mapping - self.initial['push_notes'] = jira_project_product.push_notes - self.initial['product_jira_sla_notification'] = jira_project_product.product_jira_sla_notification - self.initial['risk_acceptance_expiration_notification'] = jira_project_product.risk_acceptance_expiration_notification - - self.fields['jira_instance'].disabled = True - self.fields['project_key'].disabled = True - self.fields['issue_template_dir'].disabled = True - self.fields['epic_issue_type_name'].disabled = True - self.fields['component'].disabled = True - self.fields['custom_fields'].disabled = True - self.fields['default_assignee'].disabled = True - self.fields['jira_labels'].disabled = True - self.fields['add_vulnerability_id_to_jira_label'].disabled = True - self.fields['push_all_issues'].disabled = True - self.fields['enable_engagement_epic_mapping'].disabled = True - self.fields['push_notes'].disabled = True - self.fields['product_jira_sla_notification'].disabled = True - self.fields['risk_acceptance_expiration_notification'].disabled = True + if jira_project_product and (self.prefix + "-jira_instance") not in self.data: + logger.debug("setting jira project fields from product2") + self.initial["jira_instance"] = jira_project_product.jira_instance.id if jira_project_product.jira_instance else None + self.initial["project_key"] = jira_project_product.project_key + self.initial["issue_template_dir"] = jira_project_product.issue_template_dir + self.initial["epic_issue_type_name"] = jira_project_product.epic_issue_type_name + self.initial["component"] = jira_project_product.component + self.initial["custom_fields"] = jira_project_product.custom_fields + self.initial["default_assignee"] = jira_project_product.default_assignee + self.initial["jira_labels"] = jira_project_product.jira_labels + self.initial["add_vulnerability_id_to_jira_label"] = jira_project_product.add_vulnerability_id_to_jira_label + self.initial["push_all_issues"] = jira_project_product.push_all_issues + self.initial["enable_engagement_epic_mapping"] = jira_project_product.enable_engagement_epic_mapping + self.initial["push_notes"] = jira_project_product.push_notes + self.initial["product_jira_sla_notification"] = jira_project_product.product_jira_sla_notification + self.initial["risk_acceptance_expiration_notification"] = jira_project_product.risk_acceptance_expiration_notification + + self.fields["jira_instance"].disabled = True + self.fields["project_key"].disabled = True + self.fields["issue_template_dir"].disabled = True + self.fields["epic_issue_type_name"].disabled = True + self.fields["component"].disabled = True + self.fields["custom_fields"].disabled = True + 
self.fields["default_assignee"].disabled = True + self.fields["jira_labels"].disabled = True + self.fields["add_vulnerability_id_to_jira_label"].disabled = True + self.fields["push_all_issues"].disabled = True + self.fields["enable_engagement_epic_mapping"].disabled = True + self.fields["push_notes"].disabled = True + self.fields["product_jira_sla_notification"].disabled = True + self.fields["risk_acceptance_expiration_notification"].disabled = True else: - del self.fields['inherit_from_product'] + del self.fields["inherit_from_product"] # if we don't have an instance, django will insert a blank empty one :-( # so we have to check for id to make sure we only trigger this when there is a real instance from db if self.instance.id: - self.fields['jira_instance'].required = True - self.fields['project_key'].required = True - self.fields['epic_issue_type_name'].required = True + self.fields["jira_instance"].required = True + self.fields["project_key"].required = True + self.fields["epic_issue_type_name"].required = True def clean(self): - logger.debug('validating jira project form') + logger.debug("validating jira project form") cleaned_data = super().clean() - logger.debug('clean: inherit: %s', self.cleaned_data.get('inherit_from_product', False)) - if not self.cleaned_data.get('inherit_from_product', False): - jira_instance = self.cleaned_data.get('jira_instance') - project_key = self.cleaned_data.get('project_key') - epic_issue_type_name = self.cleaned_data.get('epic_issue_type_name') + logger.debug("clean: inherit: %s", self.cleaned_data.get("inherit_from_product", False)) + if not self.cleaned_data.get("inherit_from_product", False): + jira_instance = self.cleaned_data.get("jira_instance") + project_key = self.cleaned_data.get("project_key") + epic_issue_type_name = self.cleaned_data.get("epic_issue_type_name") if project_key and jira_instance and epic_issue_type_name: return cleaned_data @@ -2939,100 +2939,100 @@ def clean(self): if not project_key and not jira_instance and not epic_issue_type_name: return cleaned_data - if self.target == 'engagement': - msg = 'JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, or choose to inherit settings from product' + if self.target == "engagement": + msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, or choose to inherit settings from product" raise ValidationError(msg) else: - msg = 'JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup' + msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup" raise ValidationError(msg) class GITHUBFindingForm(forms.Form): def __init__(self, *args, **kwargs): - self.enabled = kwargs.pop('enabled') + self.enabled = kwargs.pop("enabled") super().__init__(*args, **kwargs) - self.fields['push_to_github'] = forms.BooleanField() - self.fields['push_to_github'].required = False - self.fields['push_to_github'].help_text = "Checking this will overwrite content of your Github issue, or create one." + self.fields["push_to_github"] = forms.BooleanField() + self.fields["push_to_github"].required = False + self.fields["push_to_github"].help_text = "Checking this will overwrite content of your Github issue, or create one." 
push_to_github = forms.BooleanField(required=False) class JIRAFindingForm(forms.Form): def __init__(self, *args, **kwargs): - self.push_all = kwargs.pop('push_all', False) - self.instance = kwargs.pop('instance', None) - self.jira_project = kwargs.pop('jira_project', None) + self.push_all = kwargs.pop("push_all", False) + self.instance = kwargs.pop("instance", None) + self.jira_project = kwargs.pop("jira_project", None) # we provide the finding_form from the same page so we can add validation errors # if the finding doesn't satisfy the rules to be pushed to JIRA - self.finding_form = kwargs.pop('finding_form', None) + self.finding_form = kwargs.pop("finding_form", None) if self.instance is None and self.jira_project is None: - msg = 'either and finding instance or jira_project is needed' + msg = "either a finding instance or jira_project is needed" raise ValueError(msg) super().__init__(*args, **kwargs) - self.fields['push_to_jira'] = forms.BooleanField() - self.fields['push_to_jira'].required = False + self.fields["push_to_jira"] = forms.BooleanField() + self.fields["push_to_jira"].required = False if is_finding_groups_enabled(): - self.fields['push_to_jira'].help_text = "Checking this will overwrite content of your JIRA issue, or create one. If this finding is part of a Finding Group, the group will pushed instead of the finding." + self.fields["push_to_jira"].help_text = "Checking this will overwrite content of your JIRA issue, or create one. If this finding is part of a Finding Group, the group will be pushed instead of the finding." else: - self.fields['push_to_jira'].help_text = "Checking this will overwrite content of your JIRA issue, or create one." + self.fields["push_to_jira"].help_text = "Checking this will overwrite content of your JIRA issue, or create one." - self.fields['push_to_jira'].label = "Push to JIRA" + self.fields["push_to_jira"].label = "Push to JIRA" if self.push_all: # This will show the checkbox as checked and greyed out, this way the user is aware # that issues will be pushed to JIRA, given their product-level settings. - self.fields['push_to_jira'].help_text = \ + self.fields["push_to_jira"].help_text = \ "Push all issues is enabled on this product. If you do not wish to push all issues" \ " to JIRA, please disable Push all issues on this product."
- self.fields['push_to_jira'].widget.attrs['checked'] = 'checked' - self.fields['push_to_jira'].disabled = True + self.fields["push_to_jira"].widget.attrs["checked"] = "checked" + self.fields["push_to_jira"].disabled = True if self.instance: - if hasattr(self.instance, 'has_jira_issue') and self.instance.has_jira_issue: - self.initial['jira_issue'] = self.instance.jira_issue.jira_key - self.fields['push_to_jira'].widget.attrs['checked'] = 'checked' + if hasattr(self.instance, "has_jira_issue") and self.instance.has_jira_issue: + self.initial["jira_issue"] = self.instance.jira_issue.jira_key + self.fields["push_to_jira"].widget.attrs["checked"] = "checked" if is_finding_groups_enabled(): - self.fields['jira_issue'].widget = forms.TextInput(attrs={'placeholder': 'Leave empty and check push to jira to create a new JIRA issue for this finding, or the group this finding is in.'}) + self.fields["jira_issue"].widget = forms.TextInput(attrs={"placeholder": "Leave empty and check push to jira to create a new JIRA issue for this finding, or the group this finding is in."}) else: - self.fields['jira_issue'].widget = forms.TextInput(attrs={'placeholder': 'Leave empty and check push to jira to create a new JIRA issue for this finding.'}) + self.fields["jira_issue"].widget = forms.TextInput(attrs={"placeholder": "Leave empty and check push to jira to create a new JIRA issue for this finding."}) - if self.instance and hasattr(self.instance, 'has_jira_group_issue') and self.instance.has_jira_group_issue: - self.fields['push_to_jira'].widget.attrs['checked'] = 'checked' - self.fields['jira_issue'].help_text = 'Changing the linked JIRA issue for finding groups is not (yet) supported.' - self.initial['jira_issue'] = self.instance.finding_group.jira_issue.jira_key - self.fields['jira_issue'].disabled = True + if self.instance and hasattr(self.instance, "has_jira_group_issue") and self.instance.has_jira_group_issue: + self.fields["push_to_jira"].widget.attrs["checked"] = "checked" + self.fields["jira_issue"].help_text = "Changing the linked JIRA issue for finding groups is not (yet) supported." 
+ self.initial["jira_issue"] = self.instance.finding_group.jira_issue.jira_key + self.fields["jira_issue"].disabled = True def clean(self): - logger.debug('jform clean') + logger.debug("jform clean") super().clean() - jira_issue_key_new = self.cleaned_data.get('jira_issue') + jira_issue_key_new = self.cleaned_data.get("jira_issue") finding = self.instance jira_project = self.jira_project - logger.debug('self.cleaned_data.push_to_jira: %s', self.cleaned_data.get('push_to_jira', None)) + logger.debug("self.cleaned_data.push_to_jira: %s", self.cleaned_data.get("push_to_jira", None)) - if self.cleaned_data.get('push_to_jira', None) and finding and finding.has_jira_group_issue: + if self.cleaned_data.get("push_to_jira", None) and finding and finding.has_jira_group_issue: can_be_pushed_to_jira, error_message, error_code = jira_helper.can_be_pushed_to_jira(finding.finding_group, self.finding_form) if not can_be_pushed_to_jira: - self.add_error('push_to_jira', ValidationError(error_message, code=error_code)) + self.add_error("push_to_jira", ValidationError(error_message, code=error_code)) # for field in error_fields: # self.finding_form.add_error(field, error) - elif self.cleaned_data.get('push_to_jira', None) and finding: + elif self.cleaned_data.get("push_to_jira", None) and finding: can_be_pushed_to_jira, error_message, error_code = jira_helper.can_be_pushed_to_jira(finding, self.finding_form) if not can_be_pushed_to_jira: - self.add_error('push_to_jira', ValidationError(error_message, code=error_code)) + self.add_error("push_to_jira", ValidationError(error_message, code=error_code)) # for field in error_fields: # self.finding_form.add_error(field, error) - elif self.cleaned_data.get('push_to_jira', None): - active = self.finding_form['active'].value() - verified = self.finding_form['verified'].value() + elif self.cleaned_data.get("push_to_jira", None): + active = self.finding_form["active"].value() + verified = self.finding_form["verified"].value() if not active or not verified: - logger.debug('Findings must be active and verified to be pushed to JIRA') - error_message = 'Findings must be active and verified to be pushed to JIRA' - self.add_error('push_to_jira', ValidationError(error_message, code='not_active_or_verified')) + logger.debug("Findings must be active and verified to be pushed to JIRA") + error_message = "Findings must be active and verified to be pushed to JIRA" + self.add_error("push_to_jira", ValidationError(error_message, code="not_active_or_verified")) if jira_issue_key_new and (not finding or not finding.has_jira_group_issue): # when there is a group jira issue, we skip all the linking/unlinking as this is not supported (yet) @@ -3059,9 +3059,9 @@ def clean(self): if jira_issue_need_to_exist: jira_issue_new = jira_helper.jira_get_issue(jira_project, jira_issue_key_new) if not jira_issue_new: - raise ValidationError('JIRA issue ' + jira_issue_key_new + ' does not exist or cannot be retrieved') + raise ValidationError("JIRA issue " + jira_issue_key_new + " does not exist or cannot be retrieved") - logger.debug('checking if provided jira issue id already is linked to another finding') + logger.debug("checking if provided jira issue id already is linked to another finding") jira_issues = JIRA_Issue.objects.filter(jira_id=jira_issue_new.id, jira_key=jira_issue_key_new).exclude(engagement__isnull=False) if self.instance: @@ -3069,45 +3069,45 @@ def clean(self): jira_issues = jira_issues.exclude(finding=finding) if len(jira_issues) > 0: - raise ValidationError('JIRA issue ' + 
jira_issue_key_new + ' already linked to ' + reverse('view_finding', args=(jira_issues[0].finding_id,))) + raise ValidationError("JIRA issue " + jira_issue_key_new + " already linked to " + reverse("view_finding", args=(jira_issues[0].finding_id,))) jira_issue = forms.CharField(required=False, label="Linked JIRA Issue", validators=[validators.RegexValidator( - regex=r'^[A-Z][A-Z_0-9]+-\d+$', - message='JIRA issue key must be in XXXX-nnnn format ([A-Z][A-Z_0-9]+-\\d+)')]) + regex=r"^[A-Z][A-Z_0-9]+-\d+$", + message="JIRA issue key must be in XXXX-nnnn format ([A-Z][A-Z_0-9]+-\\d+)")]) push_to_jira = forms.BooleanField(required=False, label="Push to JIRA") class JIRAImportScanForm(forms.Form): def __init__(self, *args, **kwargs): - self.push_all = kwargs.pop('push_all', False) + self.push_all = kwargs.pop("push_all", False) super().__init__(*args, **kwargs) if self.push_all: # This will show the checkbox as checked and greyed out, this way the user is aware # that issues will be pushed to JIRA, given their product-level settings. - self.fields['push_to_jira'].help_text = \ + self.fields["push_to_jira"].help_text = \ "Push all issues is enabled on this product. If you do not wish to push all issues" \ " to JIRA, please disable Push all issues on this product." - self.fields['push_to_jira'].widget.attrs['checked'] = 'checked' - self.fields['push_to_jira'].disabled = True + self.fields["push_to_jira"].widget.attrs["checked"] = "checked" + self.fields["push_to_jira"].disabled = True push_to_jira = forms.BooleanField(required=False, label="Push to JIRA", help_text="Checking this will create a new jira issue for each new finding.") class JIRAEngagementForm(forms.Form): - prefix = 'jira-epic-form' + prefix = "jira-epic-form" def __init__(self, *args, **kwargs): - self.instance = kwargs.pop('instance', None) + self.instance = kwargs.pop("instance", None) super().__init__(*args, **kwargs) if self.instance: if self.instance.has_jira_issue: - self.fields['push_to_jira'].widget.attrs['checked'] = 'checked' - self.fields['push_to_jira'].label = 'Update JIRA Epic' - self.fields['push_to_jira'].help_text = 'Checking this will update the existing EPIC in JIRA.' + self.fields["push_to_jira"].widget.attrs["checked"] = "checked" + self.fields["push_to_jira"].label = "Update JIRA Epic" + self.fields["push_to_jira"].help_text = "Checking this will update the existing EPIC in JIRA." push_to_jira = forms.BooleanField(required=False, label="Create EPIC", help_text="Checking this will create an EPIC in JIRA for this engagement.") epic_name = forms.CharField(max_length=200, required=False, help_text="EPIC name in JIRA. 
If not specified, it defaults to the engagement name") @@ -3119,7 +3119,7 @@ class LoginBanner(forms.Form): label="Enable login banner", initial=False, required=False, - help_text='Tick this box to enable a text banner on the login page', + help_text="Tick this box to enable a text banner on the login page", ) banner_message = forms.CharField( @@ -3141,9 +3141,9 @@ class Meta: class AnnouncementRemoveForm(AnnouncementCreateForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['dismissable'].disabled = True - self.fields['message'].disabled = True - self.fields['style'].disabled = True + self.fields["dismissable"].disabled = True + self.fields["message"].disabled = True + self.fields["style"].disabled = True # ============================== @@ -3159,31 +3159,31 @@ class QuestionForm(forms.Form): def __init__(self, *args, **kwargs): self.helper = FormHelper() - self.helper.form_method = 'post' + self.helper.form_method = "post" # If true crispy-forms will render a
<form>..</form>
tags - self.helper.form_tag = kwargs.get('form_tag', True) + self.helper.form_tag = kwargs.get("form_tag", True) - if 'form_tag' in kwargs: - del kwargs['form_tag'] + if "form_tag" in kwargs: + del kwargs["form_tag"] - self.engagement_survey = kwargs.get('engagement_survey') + self.engagement_survey = kwargs.get("engagement_survey") - self.answered_survey = kwargs.get('answered_survey') + self.answered_survey = kwargs.get("answered_survey") if not self.answered_survey: - del kwargs['engagement_survey'] + del kwargs["engagement_survey"] else: - del kwargs['answered_survey'] + del kwargs["answered_survey"] - self.helper.form_class = kwargs.get('form_class', '') + self.helper.form_class = kwargs.get("form_class", "") - self.question = kwargs.get('question') + self.question = kwargs.get("question") if not self.question: - msg = 'Need a question to render' + msg = "Need a question to render" raise ValueError(msg) - del kwargs['question'] + del kwargs["question"] super().__init__(*args, **kwargs) @@ -3201,9 +3201,9 @@ def __init__(self, *args, **kwargs): if initial_answer.exists(): initial_answer = initial_answer[0].answer else: - initial_answer = '' + initial_answer = "" - self.fields['answer'] = forms.CharField( + self.fields["answer"] = forms.CharField( label=self.question.text, widget=forms.Textarea(attrs={"rows": 3, "cols": 10}), required=not self.question.optional, @@ -3212,14 +3212,14 @@ def __init__(self, *args, **kwargs): def save(self): if not self.is_valid(): - msg = 'form is not valid' + msg = "form is not valid" raise forms.ValidationError(msg) - answer = self.cleaned_data.get('answer') + answer = self.cleaned_data.get("answer") if not answer: - if self.fields['answer'].required: - msg = 'Required' + if self.fields["answer"].required: + msg = "Required" raise forms.ValidationError(msg) return @@ -3246,12 +3246,12 @@ def __init__(self, *args, **kwargs): choice_answer = ChoiceAnswer.objects.filter( answered_survey=self.answered_survey, question=self.question, - ).annotate(a=Count('answer')).filter(a__gt=0) + ).annotate(a=Count("answer")).filter(a__gt=0) # we have ChoiceAnswer instance if choice_answer: choice_answer = choice_answer[0] - initial_choices = list(choice_answer.answer.all().values_list('id', flat=True)) + initial_choices = list(choice_answer.answer.all().values_list("id", flat=True)) if self.question.multichoice is False: initial_choices = initial_choices[0] @@ -3273,15 +3273,15 @@ def __init__(self, *args, **kwargs): widget=widget, ) - self.fields['answer'] = field + self.fields["answer"] = field # Render choice buttons inline self.helper.layout = Layout( - inline_type('answer'), + inline_type("answer"), ) def clean_answer(self): - real_answer = self.cleaned_data.get('answer') + real_answer = self.cleaned_data.get("answer") # for single choice questions, the selected answer is a single string if not isinstance(real_answer, list): @@ -3290,14 +3290,14 @@ def clean_answer(self): def save(self): if not self.is_valid(): - msg = 'Form is not valid' + msg = "Form is not valid" raise forms.ValidationError(msg) - real_answer = self.cleaned_data.get('answer') + real_answer = self.cleaned_data.get("answer") if not real_answer: - if self.fields['answer'].required: - msg = 'Required' + if self.fields["answer"].required: + msg = "Required" raise forms.ValidationError(msg) return @@ -3331,15 +3331,15 @@ class Add_Questionnaire_Form(forms.ModelForm): queryset=Engagement_Survey.objects.all(), required=True, widget=forms.widgets.Select(), - help_text='Select the Questionnaire to add.') + 
help_text="Select the Questionnaire to add.") class Meta: model = Answered_Survey - exclude = ('responder', - 'completed', - 'engagement', - 'answered_on', - 'assignee') + exclude = ("responder", + "completed", + "engagement", + "answered_on", + "assignee") class AddGeneralQuestionnaireForm(forms.ModelForm): @@ -3347,17 +3347,17 @@ class AddGeneralQuestionnaireForm(forms.ModelForm): queryset=Engagement_Survey.objects.all(), required=True, widget=forms.widgets.Select(), - help_text='Select the Questionnaire to add.') + help_text="Select the Questionnaire to add.") expiration = forms.DateField(widget=forms.TextInput( - attrs={'class': 'datepicker', 'autocomplete': 'off'})) + attrs={"class": "datepicker", "autocomplete": "off"})) class Meta: model = General_Survey - exclude = ('num_responses', 'generated') + exclude = ("num_responses", "generated") # date can only be today or in the future, not the past def clean_expiration(self): - expiration = self.cleaned_data.get('expiration', None) + expiration = self.cleaned_data.get("expiration", None) if expiration: today = datetime.today().date() if expiration < today: @@ -3378,7 +3378,7 @@ class Delete_Questionnaire_Form(forms.ModelForm): class Meta: model = Answered_Survey - fields = ['id'] + fields = ["id"] class DeleteGeneralQuestionnaireForm(forms.ModelForm): @@ -3387,7 +3387,7 @@ class DeleteGeneralQuestionnaireForm(forms.ModelForm): class Meta: model = General_Survey - fields = ['id'] + fields = ["id"] class Delete_Eng_Survey_Form(forms.ModelForm): @@ -3396,13 +3396,13 @@ class Delete_Eng_Survey_Form(forms.ModelForm): class Meta: model = Engagement_Survey - fields = ['id'] + fields = ["id"] class CreateQuestionnaireForm(forms.ModelForm): class Meta: model = Engagement_Survey - exclude = ['questions'] + exclude = ["questions"] with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning): @@ -3411,11 +3411,11 @@ class EditQuestionnaireQuestionsForm(forms.ModelForm): Question.polymorphic.all(), required=True, help_text="Select questions to include on this questionnaire.
Field can be used to search available questions.", - widget=MultipleSelectWithPop(attrs={'size': '11'})) + widget=MultipleSelectWithPop(attrs={"size": "11"})) class Meta: model = Engagement_Survey - exclude = ['name', 'description', 'active'] + exclude = ["name", "description", "active"] class CreateQuestionForm(forms.Form): @@ -3423,13 +3423,13 @@ class CreateQuestionForm(forms.Form): choices=(("---", "-----"), ("text", "Text"), ("choice", "Choice"))) order = forms.IntegerField( min_value=1, - widget=forms.TextInput(attrs={'data-type': 'both'}), + widget=forms.TextInput(attrs={"data-type": "both"}), help_text="The order the question will appear on the questionnaire") optional = forms.BooleanField(help_text="If selected, user doesn't have to answer this question", initial=False, required=False, - widget=forms.CheckboxInput(attrs={'data-type': 'both'})) - text = forms.CharField(widget=forms.Textarea(attrs={'data-type': 'text'}), + widget=forms.CheckboxInput(attrs={"data-type": "both"})) + text = forms.CharField(widget=forms.Textarea(attrs={"data-type": "text"}), label="Question Text", help_text="The actual question.") @@ -3437,17 +3437,17 @@ class CreateQuestionForm(forms.Form): class CreateTextQuestionForm(forms.Form): class Meta: model = TextQuestion - exclude = ['order', 'optional'] + exclude = ["order", "optional"] class MultiWidgetBasic(forms.widgets.MultiWidget): def __init__(self, attrs=None): - widgets = [forms.TextInput(attrs={'data-type': 'choice'}), - forms.TextInput(attrs={'data-type': 'choice'}), - forms.TextInput(attrs={'data-type': 'choice'}), - forms.TextInput(attrs={'data-type': 'choice'}), - forms.TextInput(attrs={'data-type': 'choice'}), - forms.TextInput(attrs={'data-type': 'choice'})] + widgets = [forms.TextInput(attrs={"data-type": "choice"}), + forms.TextInput(attrs={"data-type": "choice"}), + forms.TextInput(attrs={"data-type": "choice"}), + forms.TextInput(attrs={"data-type": "choice"}), + forms.TextInput(attrs={"data-type": "choice"}), + forms.TextInput(attrs={"data-type": "choice"})] super().__init__(widgets, attrs) def decompress(self, value): @@ -3457,7 +3457,7 @@ def decompress(self, value): return [None, None, None, None, None, None] def format_output(self, rendered_widgets): - return '
<br/>'.join(rendered_widgets) + return "<br/>
".join(rendered_widgets) class MultiExampleField(forms.fields.MultiValueField): @@ -3479,14 +3479,14 @@ def compress(self, values): class CreateChoiceQuestionForm(forms.Form): multichoice = forms.BooleanField(required=False, initial=False, - widget=forms.CheckboxInput(attrs={'data-type': 'choice'}), + widget=forms.CheckboxInput(attrs={"data-type": "choice"}), help_text="Can more than one choice be selected?") - answer_choices = MultiExampleField(required=False, widget=MultiWidgetBasic(attrs={'data-type': 'choice'})) + answer_choices = MultiExampleField(required=False, widget=MultiWidgetBasic(attrs={"data-type": "choice"})) class Meta: model = ChoiceQuestion - exclude = ['order', 'optional', 'choices'] + exclude = ["order", "optional", "choices"] class EditQuestionForm(forms.ModelForm): @@ -3506,7 +3506,7 @@ class EditChoiceQuestionForm(EditQuestionForm): Choice.objects.all(), required=True, help_text="Select choices to include on this question. Field can be used to search available choices.", - widget=MultipleSelectWithPop(attrs={'size': '11'})) + widget=MultipleSelectWithPop(attrs={"size": "11"})) class Meta: model = ChoiceQuestion @@ -3525,17 +3525,17 @@ class AssignUserForm(forms.ModelForm): def __init__(self, *args, **kwargs): assignee = None - if 'assignee' in kwargs: - assignee = kwargs.pop('asignees') + if "assignee" in kwargs: + assignee = kwargs.pop("assignee") super().__init__(*args, **kwargs) if assignee is None: - self.fields['assignee'] = forms.ModelChoiceField(queryset=get_authorized_users(Permissions.Engagement_View), empty_label='Not Assigned', required=False) + self.fields["assignee"] = forms.ModelChoiceField(queryset=get_authorized_users(Permissions.Engagement_View), empty_label="Not Assigned", required=False) else: - self.fields['assignee'].initial = assignee + self.fields["assignee"].initial = assignee class Meta: model = Answered_Survey - exclude = ['engagement', 'survey', 'responder', 'completed', 'answered_on'] + exclude = ["engagement", "survey", "responder", "completed", "answered_on"] class AddEngagementForm(forms.Form): @@ -3543,18 +3543,18 @@ class AddEngagementForm(forms.Form): queryset=Product.objects.none(), required=True, widget=forms.widgets.Select(), - help_text='Select which product to attach Engagement') + help_text="Select which product to attach the Engagement to") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.fields['product'].queryset = get_authorized_products(Permissions.Engagement_Add) + self.fields["product"].queryset = get_authorized_products(Permissions.Engagement_Add) class ConfigurationPermissionsForm(forms.Form): def __init__(self, *args, **kwargs): - self.user = kwargs.pop('user', None) - self.group = kwargs.pop('group', None) + self.user = kwargs.pop("user", None) + self.group = kwargs.pop("group", None) super().__init__(*args, **kwargs) self.permission_fields = get_configuration_permissions_fields() @@ -3562,7 +3562,7 @@ def __init__(self, *args, **kwargs): for permission_field in self.permission_fields: for codename in permission_field.codenames(): self.fields[codename] = forms.BooleanField(required=False) - if not get_current_user().has_perm('auth.change_permission'): + if not get_current_user().has_perm("auth.change_permission"): self.fields[codename].disabled = True permissions_list = Permission.objects.all() @@ -3584,7 +3584,7 @@ def set_permission(self, codename): elif self.group: self.group.auth_group.permissions.add(self.permissions[codename]) else: - msg = 'Neither user or group are set' + msg =
"Neither user nor group is set" raise Exception(msg) else: # Checkbox is unset @@ -3593,5 +3593,5 @@ def set_permission(self, codename): elif self.group: self.group.auth_group.permissions.remove(self.permissions[codename]) else: - msg = 'Neither user or group are set' + msg = "Neither user nor group is set" raise Exception(msg) diff --git a/dojo/github.py b/dojo/github.py index d6737bf639..5fe1ca35c1 100644 --- a/dojo/github.py +++ b/dojo/github.py @@ -17,7 +17,7 @@ def reopen_external_issue_github(find, note, prod, eng): from dojo.utils import get_system_setting - if not get_system_setting('enable_github'): + if not get_system_setting("enable_github"): return # Check if we have github info related to the product @@ -38,17 +38,17 @@ def reopen_external_issue_github(find, note, prod, eng): issue = repo.get_issue(int(g_issue.issue_id)) except: e = sys.exc_info()[0] - logger.error('cannot update finding in github: ' + e) + logger.error("cannot update finding in github: " + str(e)) - logger.info('Will close github issue ' + g_issue.issue_id) - issue.edit(state='open') + logger.info("Will reopen github issue " + g_issue.issue_id) + issue.edit(state="open") issue.create_comment(note) def close_external_issue_github(find, note, prod, eng): from dojo.utils import get_system_setting - if not get_system_setting('enable_github'): + if not get_system_setting("enable_github"): return # Check if we have github info related to the product @@ -69,17 +69,17 @@ def close_external_issue_github(find, note, prod, eng): issue = repo.get_issue(int(g_issue.issue_id)) except: e = sys.exc_info()[0] - logger.error('cannot update finding in github: ' + e) + logger.error("cannot update finding in github: " + str(e)) - logger.info('Will close github issue ' + g_issue.issue_id) - issue.edit(state='closed') + logger.info("Will close github issue " + g_issue.issue_id) + issue.edit(state="closed") issue.create_comment(note) def update_external_issue_github(find, prod, eng): from dojo.utils import get_system_setting - if not get_system_setting('enable_github'): + if not get_system_setting("enable_github"): return # Check if we have github info related to the product @@ -101,18 +101,18 @@ def update_external_issue_github(find, prod, eng): issue.edit(title=find.title, body=github_body(find), labels=["defectdojo", "security / " + find.severity]) except: e = sys.exc_info()[0] - logger.error('cannot update finding in github: ' + e) + logger.error("cannot update finding in github: " + str(e)) def add_external_issue_github(find, prod, eng): from dojo.utils import get_system_setting - if not get_system_setting('enable_github'): + if not get_system_setting("enable_github"): return # Check if we have github info related to the product if GITHUB_PKey.objects.filter(product=prod).count() == 0: - logger.debug('cannot find github conf for this product') + logger.debug("cannot find github conf for this product") return github_pkey = GITHUB_PKey.objects.get(product=prod) @@ -123,30 +123,30 @@ def add_external_issue_github(find, prod, eng): github_conf = github_pkey.git_conf # We push only active and verified issues - if 'Active' in find.status() and 'Verified' in find.status(): + if "Active" in find.status() and "Verified" in find.status(): eng = Engagement.objects.get(test=find.test) prod = Product.objects.get(engagement=eng) github_product_key = GITHUB_PKey.objects.get(product=prod) - logger.info('Create issue with github profile: ' + str(github_conf) + ' on product: ' + str(github_product_key)) + logger.info("Create issue with github profile: " +
str(github_conf) + " on product: " + str(github_product_key)) try: g = Github(github_conf.api_key) user = g.get_user() - logger.debug('logged in with github user: ' + user.login) - logger.debug('Look for project: ' + github_product_key.git_project) + logger.debug("logged in with github user: " + user.login) + logger.debug("Look for project: " + github_product_key.git_project) repo = g.get_repo(github_product_key.git_project) - logger.debug('Found repo: ' + str(repo.url)) + logger.debug("Found repo: " + str(repo.url)) issue = repo.create_issue(title=find.title, body=github_body(find), labels=["defectdojo", "security / " + find.severity]) - logger.debug('created issue: ' + str(issue.html_url)) + logger.debug("created issue: " + str(issue.html_url)) g_issue = GITHUB_Issue(issue_id=issue.number, issue_url=issue.html_url, finding=find) g_issue.save() except: e = sys.exc_info()[0] - logger.error('cannot create finding in github: ' + e) + logger.error("cannot create finding in github: " + str(e)) def github_body(find): - template = 'issue-trackers/jira_full/jira-description.tpl' + template = "issue-trackers/jira_full/jira-description.tpl" kwargs = {} - kwargs['finding'] = find + kwargs["finding"] = find return render_to_string(template, kwargs) diff --git a/dojo/github_issue_link/urls.py b/dojo/github_issue_link/urls.py index e5ffebb94c..61c3fc7926 100644 --- a/dojo/github_issue_link/urls.py +++ b/dojo/github_issue_link/urls.py @@ -3,8 +3,8 @@ from . import views urlpatterns = [ - re_path(r'^github-webhook', views.webhook, name='github_web_hook'), - re_path(r'^github/add', views.new_github, name='add_github'), - re_path(r'^github/(?P<tid>\d+)/delete$', views.delete_github, - name='delete_github'), - re_path(r'^github$', views.github, name='github')] + re_path(r"^github-webhook", views.webhook, name="github_web_hook"), + re_path(r"^github/add", views.new_github, name="add_github"), + re_path(r"^github/(?P<tid>\d+)/delete$", views.delete_github, + name="delete_github"), + re_path(r"^github$", views.github, name="github")] diff --git a/dojo/github_issue_link/views.py b/dojo/github_issue_link/views.py index f575cf2d4d..22c5ae0d7f 100644 --- a/dojo/github_issue_link/views.py +++ b/dojo/github_issue_link/views.py @@ -23,78 +23,78 @@ @csrf_exempt def webhook(request): - return HttpResponse('') + return HttpResponse("") -@user_is_configuration_authorized('dojo.add_github_conf') +@user_is_configuration_authorized("dojo.add_github_conf") def new_github(request): - if request.method == 'POST': + if request.method == "POST": gform = GITHUBForm(request.POST, instance=GITHUB_Conf()) if gform.is_valid(): try: - api_key = gform.cleaned_data.get('api_key') + api_key = gform.cleaned_data.get("api_key") g = Github(api_key) user = g.get_user() - logger.debug('Using user ' + user.login) + logger.debug("Using user " + user.login) new_j = gform.save(commit=False) new_j.api_key = api_key new_j.save() messages.add_message(request, messages.SUCCESS, - 'GitHub Configuration Successfully Created.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('github')) + "GitHub Configuration Successfully Created.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("github")) except Exception as info: logger.error(info) messages.add_message(request, messages.ERROR, - 'Unable to authenticate on GitHub.', - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('github')) + "Unable to authenticate on GitHub.", + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("github")) else: gform =
GITHUBForm() add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request) - return render(request, 'dojo/new_github.html', - {'gform': gform}) + return render(request, "dojo/new_github.html", + {"gform": gform}) -@user_is_configuration_authorized('dojo.view_github_conf') +@user_is_configuration_authorized("dojo.view_github_conf") def github(request): confs = GITHUB_Conf.objects.all() add_breadcrumb(title="GitHub List", top_level=not len(request.GET), request=request) return render(request, - 'dojo/github.html', - {'confs': confs, + "dojo/github.html", + {"confs": confs, }) -@user_is_configuration_authorized('dojo.delete_github_conf') +@user_is_configuration_authorized("dojo.delete_github_conf") def delete_github(request, tid): github_instance = get_object_or_404(GITHUB_Conf, pk=tid) # eng = test.engagement # TODO Make Form form = DeleteGITHUBConfForm(instance=github_instance) - if request.method == 'POST': - if 'id' in request.POST and str(github_instance.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(github_instance.id) == request.POST["id"]: form = DeleteGITHUBConfForm(request.POST, instance=github_instance) if form.is_valid(): github_instance.delete() messages.add_message(request, messages.SUCCESS, - 'GitHub Conf and relationships removed.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('github')) + "GitHub Conf and relationships removed.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("github")) collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([github_instance]) rels = collector.nested() add_breadcrumb(title="Delete", top_level=False, request=request) - return render(request, 'dojo/delete_github.html', - {'inst': github_instance, - 'form': form, - 'rels': rels, - 'deletable_objects': rels, + return render(request, "dojo/delete_github.html", + {"inst": github_instance, + "form": form, + "rels": rels, + "deletable_objects": rels, }) diff --git a/dojo/group/queries.py b/dojo/group/queries.py index db4d8d633d..a8b70e6b76 100644 --- a/dojo/group/queries.py +++ b/dojo/group/queries.py @@ -13,13 +13,13 @@ def get_authorized_groups(permission): return Dojo_Group.objects.none() if user.is_superuser: - return Dojo_Group.objects.all().order_by('name') + return Dojo_Group.objects.all().order_by("name") roles = get_roles_for_permission(permission) - authorized_roles = Dojo_Group_Member.objects.filter(group=OuterRef('pk'), + authorized_roles = Dojo_Group_Member.objects.filter(group=OuterRef("pk"), user=user, role__in=roles) - groups = Dojo_Group.objects.annotate(user=Exists(authorized_roles)).order_by('name') + groups = Dojo_Group.objects.annotate(user=Exists(authorized_roles)).order_by("name") return groups.filter(user=True) @@ -30,29 +30,29 @@ def get_authorized_group_members(permission): return Dojo_Group_Member.objects.none() if user.is_superuser: - return Dojo_Group_Member.objects.all().select_related('role') + return Dojo_Group_Member.objects.all().order_by("id").select_related("role") groups = get_authorized_groups(permission) - return Dojo_Group_Member.objects.filter(group__in=groups).select_related('role') + return Dojo_Group_Member.objects.filter(group__in=groups).order_by("id").select_related("role") def get_authorized_group_members_for_user(user): groups = get_authorized_groups(Permissions.Group_View) - group_members = Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by('group__name').select_related('role', 'group') + group_members = 
Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by("group__name").select_related("role", "group") return group_members def get_group_members_for_group(group): - return Dojo_Group_Member.objects.filter(group=group).select_related('role') + return Dojo_Group_Member.objects.filter(group=group).select_related("role") def get_product_groups_for_group(group): - return Product_Group.objects.filter(group=group).select_related('role') + return Product_Group.objects.filter(group=group).select_related("role") def get_product_type_groups_for_group(group): - return Product_Type_Group.objects.filter(group=group).select_related('role') + return Product_Type_Group.objects.filter(group=group).select_related("role") def get_group_member_roles(): - return Role.objects.exclude(name='API_Importer').exclude(name='Writer') + return Role.objects.exclude(name="API_Importer").exclude(name="Writer") diff --git a/dojo/group/urls.py b/dojo/group/urls.py index ddf7f03bd9..2839846dba 100644 --- a/dojo/group/urls.py +++ b/dojo/group/urls.py @@ -3,15 +3,15 @@ from dojo.group import views urlpatterns = [ - re_path(r'^group$', views.ListGroups.as_view(), name='groups'), - re_path(r'^group/add$', views.AddGroup.as_view(), name='add_group'), - re_path(r'^group/(?P<group_id>\d+)$', views.ViewGroup.as_view(), name='view_group'), - re_path(r'^group/(?P<group_id>\d+)/edit$', views.EditGroup.as_view(), name='edit_group'), - re_path(r'^group/(?P<group_id>\d+)/delete$', views.DeleteGroup.as_view(), name='delete_group'), - re_path(r'^group/(?P<gid>\d+)/add_product_group$', views.add_product_group, name='add_product_group_group'), - re_path(r'^group/(?P<gid>\d+)/add_product_type_group$', views.add_product_type_group, name='add_product_type_group_group'), - re_path(r'^group/(?P<gid>\d+)/add_group_member$', views.add_group_member, name='add_group_member'), - re_path(r'group/member/(?P<mid>\d+)/edit_group_member$', views.edit_group_member, name='edit_group_member'), - re_path(r'group/member/(?P<mid>\d+)/delete_group_member$', views.delete_group_member, name='delete_group_member'), - re_path(r'^group/(?P<gid>\d+)/edit_permissions$', views.edit_permissions, name='edit_group_permissions'), + re_path(r"^group$", views.ListGroups.as_view(), name="groups"), + re_path(r"^group/add$", views.AddGroup.as_view(), name="add_group"), + re_path(r"^group/(?P<group_id>\d+)$", views.ViewGroup.as_view(), name="view_group"), + re_path(r"^group/(?P<group_id>\d+)/edit$", views.EditGroup.as_view(), name="edit_group"), + re_path(r"^group/(?P<group_id>\d+)/delete$", views.DeleteGroup.as_view(), name="delete_group"), + re_path(r"^group/(?P<gid>\d+)/add_product_group$", views.add_product_group, name="add_product_group_group"), + re_path(r"^group/(?P<gid>\d+)/add_product_type_group$", views.add_product_type_group, name="add_product_type_group_group"), + re_path(r"^group/(?P<gid>\d+)/add_group_member$", views.add_group_member, name="add_group_member"), + re_path(r"group/member/(?P<mid>\d+)/edit_group_member$", views.edit_group_member, name="edit_group_member"), + re_path(r"group/member/(?P<mid>\d+)/delete_group_member$", views.delete_group_member, name="delete_group_member"), + re_path(r"^group/(?P<gid>\d+)/edit_permissions$", views.edit_permissions, name="edit_group_permissions"), ] diff --git a/dojo/group/utils.py b/dojo/group/utils.py index 09ea0e7939..1a6bc68b13 100644 --- a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -9,12 +9,12 @@ def get_auth_group_name(group, attempt=0): if attempt > 999: - msg = f'Cannot find name for authorization group for Dojo_Group {group.name}, aborted after 999 attempts.'
+ msg = f"Cannot find name for authorization group for Dojo_Group {group.name}, aborted after 999 attempts." raise Exception(msg) if attempt == 0: auth_group_name = group.name else: - auth_group_name = group.name + '_' + str(attempt) + auth_group_name = group.name + "_" + str(attempt) try: # Attempt to fetch an existing group before moving forward with the real operation @@ -26,8 +26,8 @@ def get_auth_group_name(group, attempt=0): @receiver(post_save, sender=Dojo_Group) def group_post_save_handler(sender, **kwargs): - created = kwargs.pop('created') - group = kwargs.pop('instance') + created = kwargs.pop("created") + group = kwargs.pop("instance") if created: # Create authentication group auth_group = Group(name=get_auth_group_name(group)) @@ -48,7 +48,7 @@ def group_post_save_handler(sender, **kwargs): @receiver(post_delete, sender=Dojo_Group) def group_post_delete_handler(sender, **kwargs): - group = kwargs.pop('instance') + group = kwargs.pop("instance") # Authorization group doesn't get deleted automatically if group.auth_group: group.auth_group.delete() @@ -56,8 +56,8 @@ def group_post_delete_handler(sender, **kwargs): @receiver(post_save, sender=Dojo_Group_Member) def group_member_post_save_handler(sender, **kwargs): - created = kwargs.pop('created') - group_member = kwargs.pop('instance') + created = kwargs.pop("created") + group_member = kwargs.pop("instance") if created: # Add user to authentication group as well if group_member.group.auth_group: @@ -66,7 +66,7 @@ def group_member_post_save_handler(sender, **kwargs): @receiver(post_delete, sender=Dojo_Group_Member) def group_member_post_delete_handler(sender, **kwargs): - group_member = kwargs.pop('instance') + group_member = kwargs.pop("instance") # Remove user from the authentication group as well if group_member.group.auth_group: group_member.group.auth_group.user_set.remove(group_member.user) diff --git a/dojo/group/views.py b/dojo/group/views.py index 1aea50fe35..2e59ffb644 100644 --- a/dojo/group/views.py +++ b/dojo/group/views.py @@ -62,7 +62,7 @@ def get_template(self): def get(self, request: HttpRequest): # quick permission check - if not user_has_configuration_permission(request.user, 'auth.view_group'): + if not user_has_configuration_permission(request.user, "auth.view_group"): raise PermissionDenied # Fetch the groups groups = self.get_groups() @@ -108,7 +108,7 @@ def get(self, request: HttpRequest, group_id: int): # Fetch the group group = self.get_group(group_id) # quick permission check - if not user_has_configuration_permission(request.user, 'auth.view_group'): + if not user_has_configuration_permission(request.user, "auth.view_group"): raise PermissionDenied user_has_permission_or_403(request.user, group, Permissions.Group_View) # Set up the initial context @@ -127,7 +127,7 @@ def get_group(self, group_id: int): def get_global_role(self, group: Dojo_Group): # Try to pull the global role from the group object - return group.global_role if hasattr(group, 'global_role') else None + return group.global_role if hasattr(group, "global_role") else None def get_group_form(self, request: HttpRequest, group: Dojo_Group): # Set up the args for the form @@ -161,12 +161,12 @@ def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict): # Validate the forms if context["form"].is_valid() and context["global_role_form"].is_valid(): # Determine if the previous global roles was changed with proper authorization - if context["global_role_form"].cleaned_data['role'] != context["previous_global_role"] and not 
request.user.is_superuser: + if context["global_role_form"].cleaned_data["role"] != context["previous_global_role"] and not request.user.is_superuser: messages.add_message( request, messages.WARNING, - 'Only superusers are allowed to change the global role.', - extra_tags='alert-warning') + "Only superusers are allowed to change the global role.", + extra_tags="alert-warning") else: context["form"].save() global_role = context["global_role_form"].save(commit=False) @@ -175,16 +175,16 @@ def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict): messages.add_message( request, messages.SUCCESS, - 'Group saved successfully.', - extra_tags='alert-success') + "Group saved successfully.", + extra_tags="alert-success") return request, True else: messages.add_message( request, messages.ERROR, - 'Group was not saved successfully.', - extra_tags='alert_danger') + "Group was not saved successfully.", + extra_tags="alert-danger") return request, False @@ -255,14 +255,14 @@ def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict): messages.add_message( request, messages.SUCCESS, - 'Group and relationships successfully removed.', - extra_tags='alert-success') + "Group and relationships successfully removed.", + extra_tags="alert-success") except RestrictedError as err: messages.add_message( request, messages.WARNING, - f'Group cannot be deleted: {err}', - extra_tags='alert-warning', + f"Group cannot be deleted: {err}", + extra_tags="alert-warning", ) return request, False @@ -329,12 +329,12 @@ def process_forms(self, request: HttpRequest, context: dict): group = None # Validate the forms if context["form"].is_valid() and context["global_role_form"].is_valid(): - if context["global_role_form"].cleaned_data['role'] is not None and not request.user.is_superuser: + if context["global_role_form"].cleaned_data["role"] is not None and not request.user.is_superuser: messages.add_message( request, messages.ERROR, - 'Only superusers are allowed to set global role.', - extra_tags='alert-warning') + "Only superusers are allowed to set global role.", + extra_tags="alert-warning") else: group = context["form"].save() global_role = context["global_role_form"].save(commit=False) @@ -343,15 +343,15 @@ def process_forms(self, request: HttpRequest, context: dict): messages.add_message( request, messages.SUCCESS, - 'Group was added successfully.', - extra_tags='alert-success') + "Group was added successfully.", + extra_tags="alert-success") return request, group, True else: messages.add_message( request, messages.ERROR, - 'Group was not added successfully.', - extra_tags='alert-danger') + "Group was not added successfully.", + extra_tags="alert-danger") return request, group, False @@ -360,7 +360,7 @@ def get_template(self): def get(self, request: HttpRequest): # quick permission check - if not user_has_configuration_permission(request.user, 'auth.add_group'): + if not user_has_configuration_permission(request.user, "auth.add_group"): raise PermissionDenied # Set up the initial context context = self.get_initial_context(request) @@ -371,7 +371,7 @@ def get(self, request: HttpRequest): def post(self, request: HttpRequest): # quick permission check - if not user_has_configuration_permission(request.user, 'auth.add_group'): + if not user_has_configuration_permission(request.user, "auth.add_group"): raise PermissionDenied # Set up the initial context context = self.get_initial_context(request) @@ -386,48 +386,48 @@ def post(self, request: HttpRequest): return render(request,
self.get_template(), context) -@user_is_authorized(Dojo_Group, Permissions.Group_Manage_Members, 'gid') +@user_is_authorized(Dojo_Group, Permissions.Group_Manage_Members, "gid") def add_group_member(request, gid): group = get_object_or_404(Dojo_Group, id=gid) - groupform = Add_Group_MemberForm(initial={'group': group.id}) + groupform = Add_Group_MemberForm(initial={"group": group.id}) - if request.method == 'POST': - groupform = Add_Group_MemberForm(request.POST, initial={'group': group.id}) + if request.method == "POST": + groupform = Add_Group_MemberForm(request.POST, initial={"group": group.id}) if groupform.is_valid(): - if groupform.cleaned_data['role'].is_owner and not user_has_permission(request.user, group, Permissions.Group_Add_Owner): + if groupform.cleaned_data["role"].is_owner and not user_has_permission(request.user, group, Permissions.Group_Add_Owner): messages.add_message(request, messages.WARNING, - 'You are not permitted to add users as owners.', - extra_tags='alert-warning') + "You are not permitted to add users as owners.", + extra_tags="alert-warning") else: - if 'users' in groupform.cleaned_data and len(groupform.cleaned_data['users']) > 0: - for user in groupform.cleaned_data['users']: + if "users" in groupform.cleaned_data and len(groupform.cleaned_data["users"]) > 0: + for user in groupform.cleaned_data["users"]: existing_users = Dojo_Group_Member.objects.filter(group=group, user=user) if existing_users.count() == 0: group_member = Dojo_Group_Member() group_member.group = group group_member.user = user - group_member.role = groupform.cleaned_data['role'] + group_member.role = groupform.cleaned_data["role"] group_member.save() messages.add_message(request, messages.SUCCESS, - 'Group members added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_group', args=(gid, ))) + "Group members added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_group", args=(gid, ))) add_breadcrumb(title="Add Group Member", top_level=False, request=request) - return render(request, 'dojo/new_group_member.html', { - 'group': group, - 'form': groupform, + return render(request, "dojo/new_group_member.html", { + "group": group, + "form": groupform, }) -@user_is_authorized(Dojo_Group_Member, Permissions.Group_Manage_Members, 'mid') +@user_is_authorized(Dojo_Group_Member, Permissions.Group_Manage_Members, "mid") def edit_group_member(request, mid): member = get_object_or_404(Dojo_Group_Member, pk=mid) memberform = Edit_Group_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Edit_Group_MemberForm(request.POST, instance=member) if memberform.is_valid(): if not member.role.is_owner: @@ -435,41 +435,41 @@ def edit_group_member(request, mid): if owners < 1: messages.add_message(request, messages.WARNING, - f'There must be at least one owner for group {member.group.name}.', - extra_tags='alert-warning') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + f"There must be at least one owner for group {member.group.name}.", + extra_tags="alert-warning") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: - return HttpResponseRedirect(reverse('view_group', args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) if member.role.is_owner and not user_has_permission(request.user, 
member.group, Permissions.Group_Add_Owner): messages.add_message(request, messages.WARNING, - 'You are not permitted to make users owners.', - extra_tags='alert-warning') + "You are not permitted to make users owners.", + extra_tags="alert-warning") else: memberform.save() messages.add_message(request, messages.SUCCESS, - 'Group member updated successfully', - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + "Group member updated successfully", + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: - return HttpResponseRedirect(reverse('view_group', args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) add_breadcrumb(title="Edit a Group Member", top_level=False, request=request) - return render(request, 'dojo/edit_group_member.html', { - 'memberid': mid, - 'form': memberform, + return render(request, "dojo/edit_group_member.html", { + "memberid": mid, + "form": memberform, }) -@user_is_authorized(Dojo_Group_Member, Permissions.Group_Member_Delete, 'mid') +@user_is_authorized(Dojo_Group_Member, Permissions.Group_Member_Delete, "mid") def delete_group_member(request, mid): member = get_object_or_404(Dojo_Group_Member, pk=mid) memberform = Delete_Group_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Delete_Group_MemberForm(request.POST, instance=member) member = memberform.instance if member.role.is_owner: @@ -477,103 +477,103 @@ def delete_group_member(request, mid): if owners <= 1: messages.add_message(request, messages.WARNING, - f'There must be at least one owner for group {member.group.name}.', - extra_tags='alert-warning') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + f"There must be at least one owner for group {member.group.name}.", + extra_tags="alert-warning") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: - return HttpResponseRedirect(reverse('view_group', args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) user = member.user member.delete() messages.add_message(request, messages.SUCCESS, - 'Group member deleted successfully.', - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + "Group member deleted successfully.", + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: if user == request.user: - return HttpResponseRedirect(reverse('groups')) + return HttpResponseRedirect(reverse("groups")) else: - return HttpResponseRedirect(reverse('view_group', args=(member.group.id, ))) + return HttpResponseRedirect(reverse("view_group", args=(member.group.id, ))) add_breadcrumb("Delete a group member", top_level=False, request=request) - return render(request, 'dojo/delete_group_member.html', { - 'memberid': mid, - 'form': memberform, + return render(request, "dojo/delete_group_member.html", { + "memberid": mid, + "form": memberform, }) @user_passes_test(lambda u: u.is_superuser) def add_product_group(request, gid): group = get_object_or_404(Dojo_Group, id=gid) - group_form = 
Add_Product_Group_GroupForm(initial={'group': group.id}) + group_form = Add_Product_Group_GroupForm(initial={"group": group.id}) - if request.method == 'POST': - group_form = Add_Product_Group_GroupForm(request.POST, initial={'group': group.id}) + if request.method == "POST": + group_form = Add_Product_Group_GroupForm(request.POST, initial={"group": group.id}) if group_form.is_valid(): - if 'products' in group_form.cleaned_data and len(group_form.cleaned_data['products']) > 0: - for product in group_form.cleaned_data['products']: + if "products" in group_form.cleaned_data and len(group_form.cleaned_data["products"]) > 0: + for product in group_form.cleaned_data["products"]: existing_groups = Product_Group.objects.filter(product=product, group=group) if existing_groups.count() == 0: product_group = Product_Group() product_group.product = product product_group.group = group - product_group.role = group_form.cleaned_data['role'] + product_group.role = group_form.cleaned_data["role"] product_group.save() messages.add_message(request, messages.SUCCESS, - 'Product groups added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_group', args=(gid, ))) + "Product groups added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_group", args=(gid, ))) add_breadcrumb(title="Add Product Group", top_level=False, request=request) - return render(request, 'dojo/new_product_group_group.html', { - 'group': group, - 'form': group_form, + return render(request, "dojo/new_product_group_group.html", { + "group": group, + "form": group_form, }) @user_passes_test(lambda u: u.is_superuser) def add_product_type_group(request, gid): group = get_object_or_404(Dojo_Group, id=gid) - group_form = Add_Product_Type_Group_GroupForm(initial={'group': group.id}) + group_form = Add_Product_Type_Group_GroupForm(initial={"group": group.id}) - if request.method == 'POST': - group_form = Add_Product_Type_Group_GroupForm(request.POST, initial={'group': group.id}) + if request.method == "POST": + group_form = Add_Product_Type_Group_GroupForm(request.POST, initial={"group": group.id}) if group_form.is_valid(): - if 'product_types' in group_form.cleaned_data and len(group_form.cleaned_data['product_types']) > 0: - for product_type in group_form.cleaned_data['product_types']: + if "product_types" in group_form.cleaned_data and len(group_form.cleaned_data["product_types"]) > 0: + for product_type in group_form.cleaned_data["product_types"]: existing_groups = Product_Type_Group.objects.filter(product_type=product_type, group=group) if existing_groups.count() == 0: product_type_group = Product_Type_Group() product_type_group.product_type = product_type product_type_group.group = group - product_type_group.role = group_form.cleaned_data['role'] + product_type_group.role = group_form.cleaned_data["role"] product_type_group.save() messages.add_message(request, messages.SUCCESS, - 'Product type groups added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_group', args=(gid, ))) + "Product type groups added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_group", args=(gid, ))) add_breadcrumb(title="Add Product Type Group", top_level=False, request=request) - return render(request, 'dojo/new_product_type_group_group.html', { - 'group': group, - 'form': group_form, + return render(request, "dojo/new_product_type_group_group.html", { + "group": group, + "form": group_form, }) 
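Note: add_group_member, add_product_group, and add_product_type_group above all guard creation the same way: filter for an existing membership row and create one only when none is found. A minimal sketch of the same idiom using Django's get_or_create (the ensure_product_group helper is hypothetical; Product_Group and its fields are the models used above):

    from dojo.models import Product_Group

    def ensure_product_group(product, group, role):
        # Create the membership only when it does not exist yet; an existing
        # row keeps its current role, matching the count()-guarded loops above.
        return Product_Group.objects.get_or_create(
            product=product,
            group=group,
            defaults={"role": role},
        )

The defaults dict is applied only on creation, so re-running the helper never overwrites a role that an admin has since changed, which is the same behavior the views implement by skipping existing rows.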

-@user_is_configuration_authorized('auth.change_permission')
+@user_is_configuration_authorized("auth.change_permission")
 def edit_permissions(request, gid):
     group = get_object_or_404(Dojo_Group, id=gid)
-    if request.method == 'POST':
+    if request.method == "POST":
         form = ConfigurationPermissionsForm(request.POST, group=group)
         if form.is_valid():
             form.save()
             messages.add_message(request,
                                  messages.SUCCESS,
-                                 'Permissions updated.',
-                                 extra_tags='alert-success')
-    return HttpResponseRedirect(reverse('view_group', args=(gid,)))
+                                 "Permissions updated.",
+                                 extra_tags="alert-success")
+    return HttpResponseRedirect(reverse("view_group", args=(gid,)))
diff --git a/dojo/home/urls.py b/dojo/home/urls.py
index 70ac70c4ed..888ed33549 100644
--- a/dojo/home/urls.py
+++ b/dojo/home/urls.py
@@ -4,7 +4,7 @@

 urlpatterns = [
     # dojo home pages
-    re_path(r'^$', views.home, name='home'),
-    re_path(r'^dashboard$', views.dashboard, name='dashboard'),
-    re_path(r'^support$', views.support, name='support'),
+    re_path(r"^$", views.home, name="home"),
+    re_path(r"^dashboard$", views.dashboard, name="dashboard"),
+    re_path(r"^support$", views.support, name="support"),
 ]
diff --git a/dojo/home/views.py b/dojo/home/views.py
index b79a5bf843..cd06a98b72 100644
--- a/dojo/home/views.py
+++ b/dojo/home/views.py
@@ -18,7 +18,7 @@

 def home(request: HttpRequest) -> HttpResponse:
-    return HttpResponseRedirect(reverse('dashboard'))
+    return HttpResponseRedirect(reverse("dashboard"))


 def dashboard(request: HttpRequest) -> HttpResponse:
@@ -46,60 +46,60 @@ def dashboard(request: HttpRequest) -> HttpResponse:
     severity_count_by_month = get_severities_by_month(findings, today)
     punchcard, ticks = get_punchcard_data(findings, today - relativedelta(weeks=26), 26)

-    if user_has_configuration_permission(request.user, 'dojo.view_engagement_survey'):
+    if user_has_configuration_permission(request.user, "dojo.view_engagement_survey"):
         unassigned_surveys = Answered_Survey.objects.filter(assignee_id__isnull=True, completed__gt=0) \
             .filter(Q(engagement__isnull=True) | Q(engagement__in=engagements))
     else:
         unassigned_surveys = None

     add_breadcrumb(request=request, clear=True)
-    return render(request, 'dojo/dashboard.html', {
-        'engagement_count': engagement_count,
-        'finding_count': finding_count,
-        'mitigated_count': mitigated_count,
-        'accepted_count': accepted_count,
-        'critical': severity_count_all['Critical'],
-        'high': severity_count_all['High'],
-        'medium': severity_count_all['Medium'],
-        'low': severity_count_all['Low'],
-        'info': severity_count_all['Info'],
-        'by_month': severity_count_by_month,
-        'punchcard': punchcard,
-        'ticks': ticks,
-        'surveys': unassigned_surveys,
+    return render(request, "dojo/dashboard.html", {
+        "engagement_count": engagement_count,
+        "finding_count": finding_count,
+        "mitigated_count": mitigated_count,
+        "accepted_count": accepted_count,
+        "critical": severity_count_all["Critical"],
+        "high": severity_count_all["High"],
+        "medium": severity_count_all["Medium"],
+        "low": severity_count_all["Low"],
+        "info": severity_count_all["Info"],
+        "by_month": severity_count_by_month,
+        "punchcard": punchcard,
+        "ticks": ticks,
+        "surveys": unassigned_surveys,
     })


 def support(request: HttpRequest) -> HttpResponse:
     add_breadcrumb(title="Support", top_level=not len(request.GET), request=request)
-    return render(request, 'dojo/support.html', {})
+    return render(request, "dojo/support.html", {})


 def get_severities_all(findings) -> Dict[str, int]:
-    severities_all = findings.values('severity').annotate(count=Count('severity')).order_by()
-    return defaultdict(lambda: 0, {s['severity']: s['count'] for s in severities_all})
+    severities_all = findings.values("severity").annotate(count=Count("severity")).order_by()
+    return defaultdict(lambda: 0, {s["severity"]: s["count"] for s in severities_all})


 def get_severities_by_month(findings, today):
     severities_by_month = findings\
         .filter(created__date__gte=(today - relativedelta(months=6)))\
-        .values('created__year', 'created__month', 'severity')\
-        .annotate(count=Count('severity'))\
+        .values("created__year", "created__month", "severity")\
+        .annotate(count=Count("severity"))\
         .order_by()
     # The chart expects a, b, c, d, e instead of Critical, High, ...
     SEVERITY_MAP = {
-        'Critical': 'a',
-        'High':     'b',  # noqa: E241
-        'Medium':   'c',  # noqa: E241
-        'Low':      'd',  # noqa: E241
-        'Info':     'e',  # noqa: E241
+        "Critical": "a",
+        "High":     "b",  # noqa: E241
+        "Medium":   "c",  # noqa: E241
+        "Low":      "d",  # noqa: E241
+        "Info":     "e",  # noqa: E241
     }

     results = {}
     for ms in severities_by_month:
         key = f"{ms['created__year']}-{ms['created__month']:02}"
-        month_stats = results.setdefault(key, {'y': key, 'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, None: 0})
-        month_stats[SEVERITY_MAP.get(ms['severity'])] += ms['count']
+        month_stats = results.setdefault(key, {"y": key, "a": 0, "b": 0, "c": 0, "d": 0, "e": 0, None: 0})
+        month_stats[SEVERITY_MAP.get(ms["severity"])] += ms["count"]

     return [v for k, v in sorted(results.items())]
diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py
index a0c24bffa8..186109314f 100644
--- a/dojo/importers/auto_create_context.py
+++ b/dojo/importers/auto_create_context.py
@@ -178,7 +178,7 @@ def get_target_engagement_if_exists(
     If a match is not found, and a product is not supplied, return None
     """
     if engagement := get_object_or_none(Engagement, pk=engagement_id):
-        logger.debug('Using existing engagement by id: %s', engagement_id)
+        logger.debug("Using existing engagement by id: %s", engagement_id)
         return engagement
     # if there's no product, then for sure there's no engagement either
     if product is None:
@@ -200,7 +200,7 @@ def get_target_test_if_exists(
     the provided scan_type and test_title.
     """
     if test := get_object_or_none(Test, pk=test_id):
-        logger.debug('Using existing Test by id: %s', test_id)
+        logger.debug("Using existing Test by id: %s", test_id)
         return test
     # If the engagement is not supplied, we cannot do anything
     if not engagement:
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index a2f4bb6794..e58c789e99 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -215,7 +215,7 @@ def parse_findings(
         # Attempt any preprocessing before generating findings
         if len(self.parsed_findings) == 0 or self.test is None:
             scan = self.process_scan_file(scan)
-            if hasattr(parser, 'get_tests'):
+            if hasattr(parser, "get_tests"):
                 self.parsed_findings = self.parse_findings_dynamic_test_type(scan, parser)
             else:
                 self.parsed_findings = self.parse_findings_static_test_type(scan, parser)
@@ -297,7 +297,7 @@ def update_timestamps(self):
         # Update the target end of the engagement if it is a CI/CD engagement
         # If the supplied scan date is greater than the current configured
         # target end date on the engagement
-        if self.test.engagement.engagement_type == 'CI/CD':
+        if self.test.engagement.engagement_type == "CI/CD":
             self.test.engagement.target_end = max_safe(
                 [self.scan_date.date(), self.test.engagement.target_end],
             )
@@ -342,15 +342,15 @@ def update_import_history(
         )
         # Create a dictionary to stuff into the test import object
         import_settings = {}
-        import_settings['active'] = self.active
-        import_settings['verified'] = self.verified
-        import_settings['minimum_severity'] = self.minimum_severity
-        import_settings['close_old_findings'] = self.close_old_findings_toggle
-        import_settings['push_to_jira'] = self.push_to_jira
-        import_settings['tags'] = self.tags
+        import_settings["active"] = self.active
+        import_settings["verified"] = self.verified
+        import_settings["minimum_severity"] = self.minimum_severity
+        import_settings["close_old_findings"] = self.close_old_findings_toggle
+        import_settings["push_to_jira"] = self.push_to_jira
+        import_settings["tags"] = self.tags
         # Add the list of endpoints that were added exclusively at import time
         if len(self.endpoints_to_add) > 0:
-            import_settings['endpoints'] = [str(endpoint) for endpoint in self.endpoints_to_add]
+            import_settings["endpoints"] = [str(endpoint) for endpoint in self.endpoints_to_add]
         # Create the test import object
         test_import = Test_Import.objects.create(
             test=self.test,
@@ -557,13 +557,13 @@ def sanitize_severity(
         If not, raise a ValidationError explaining as such
         """
         # Checks around Informational/Info severity
-        starts_with_info = finding.severity.lower().startswith('info')
-        lower_none = finding.severity.lower() == 'none'
-        not_info = finding.severity != 'Info'
+        starts_with_info = finding.severity.lower().startswith("info")
+        lower_none = finding.severity.lower() == "none"
+        not_info = finding.severity != "Info"
         # Make the comparisons
         if not_info and (starts_with_info or lower_none):
             # Correct the severity
-            finding.severity = 'Info'
+            finding.severity = "Info"
         # Ensure the final severity is one of the supported options
         if finding.severity not in SEVERITIES:
             msg = (
@@ -608,7 +608,7 @@ def process_request_response_pairs(
         Create BurpRawRequestResponse objects linked to the finding
         without returning the finding afterward
         """
-        if len(unsaved_req_resp := getattr(finding, 'unsaved_req_resp', [])) > 0:
+        if len(unsaved_req_resp := getattr(finding, "unsaved_req_resp", [])) > 0:
             for req_resp in unsaved_req_resp:
                 burp_rr = BurpRawRequestResponse(
                     finding=finding,
@@ -643,7 +643,7 @@ def process_endpoints(
         self.endpoint_manager.chunk_endpoints_and_disperse(finding, finding.unsaved_endpoints)
         # Check for any that were added in the form
         if len(endpoints_to_add) > 0:
-            logger.debug('endpoints_to_add: %s', endpoints_to_add)
+            logger.debug("endpoints_to_add: %s", endpoints_to_add)
             self.endpoint_manager.chunk_endpoints_and_disperse(finding, endpoints_to_add)

     def process_vulnerability_ids(
@@ -687,8 +687,8 @@ def process_files(
         """
         if finding.unsaved_files:
             for unsaved_file in finding.unsaved_files:
-                data = base64.b64decode(unsaved_file.get('data'))
-                title = unsaved_file.get('title', '')
+                data = base64.b64decode(unsaved_file.get("data"))
+                title = unsaved_file.get("title", "")
                 file_upload, _ = FileUpload.objects.get_or_create(title=title)
                 file_upload.file.save(title, ContentFile(data))
                 file_upload.save()
diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py
index 78bb761feb..73f9341ef2 100644
--- a/dojo/importers/default_importer.py
+++ b/dojo/importers/default_importer.py
@@ -96,7 +96,7 @@ def process_scan(
         - Send out notifications
         - Update the test progress
         """
-        logger.debug(f'IMPORT_SCAN: parameters: {locals()}')
+        logger.debug(f"IMPORT_SCAN: parameters: {locals()}")
         # Validate the Tool_Configuration
         self.verify_tool_configuration_from_engagement()
         # Fetch the parser based upon the string version of the scan type
@@ -124,7 +124,7 @@ def process_scan(
             closed_findings=closed_findings,
         )
         # Send out some notifications to the user
-        logger.debug('IMPORT_SCAN: Generating notifications')
+        logger.debug("IMPORT_SCAN: Generating notifications")
         notifications_helper.notify_test_created(self.test)
         updated_count = len(new_findings) + len(closed_findings)
         notifications_helper.notify_scan_added(
@@ -134,9 +134,9 @@ def process_scan(
             findings_mitigated=closed_findings,
         )
         # Update the test progress to reflect that the import has completed
-        logger.debug('IMPORT_SCAN: Updating Test progress')
+        logger.debug("IMPORT_SCAN: Updating Test progress")
         self.update_test_progress()
-        logger.debug('IMPORT_SCAN: Done')
+        logger.debug("IMPORT_SCAN: Done")
         return self.test, 0, len(new_findings), len(closed_findings), 0, 0, test_import_history

     def process_findings(
@@ -152,7 +152,7 @@ def process_findings(
         at import time
         """
         new_findings = []
-        logger.debug('starting import of %i parsed findings.', len(parsed_findings) if parsed_findings else 0)
+        logger.debug("starting import of %i parsed findings.", len(parsed_findings) if parsed_findings else 0)
         group_names_to_findings_dict = {}

         for unsaved_finding in parsed_findings:
@@ -172,7 +172,7 @@ def process_findings(
             unsaved_finding.reporter = self.user
             unsaved_finding.last_reviewed_by = self.user
             unsaved_finding.last_reviewed = self.now
-            logger.debug('process_parsed_findings: active from report: %s, verified from report: %s', unsaved_finding.active, unsaved_finding.verified)
+            logger.debug("process_parsed_findings: active from report: %s, verified from report: %s", unsaved_finding.active, unsaved_finding.verified)
             # indicates an override. Otherwise, do not change the value of unsaved_finding.active
             if self.active is not None:
                 unsaved_finding.active = self.active
@@ -224,9 +224,9 @@ def process_findings(
             else:
                 jira_helper.push_to_jira(findings[0])

-        sync = kwargs.get('sync', True)
+        sync = kwargs.get("sync", True)
         if not sync:
-            return [serialize('json', [finding]) for finding in new_findings]
+            return [serialize("json", [finding]) for finding in new_findings]
         return new_findings

     def close_old_findings(
@@ -275,7 +275,7 @@ def close_old_findings(
         if self.service is not None:
             old_findings = old_findings.filter(service=self.service)
         else:
-            old_findings = old_findings.filter(Q(service__isnull=True) | Q(service__exact=''))
+            old_findings = old_findings.filter(Q(service__isnull=True) | Q(service__exact=""))
         # Update the status of the findings and any endpoints
         for old_finding in old_findings:
             self.mitigate_finding(
@@ -305,7 +305,7 @@ def parse_findings_static_test_type(
         """
         # by default test_type == scan_type
         self.test = self.create_test(self.scan_type)
-        logger.debug('IMPORT_SCAN: Parse findings')
+        logger.debug("IMPORT_SCAN: Parse findings")
         # Use the parent method for the rest of this
         return super().parse_findings_static_test_type(scan, parser)
@@ -319,12 +319,12 @@ def parse_findings_dynamic_test_type(
         by the API based parser, aggregates all findings from each test
         into a single test, and then renames the test if applicable
         """
-        logger.debug('IMPORT_SCAN parser v2: Create Test and parse findings')
+        logger.debug("IMPORT_SCAN parser v2: Create Test and parse findings")
         tests = self.parse_dynamic_test_type_tests(scan, parser)
         parsed_findings = []
         # Make sure we have at least one test returned
         if len(tests) == 0:
-            logger.info(f'No tests found in import for {self.scan_type}')
+            logger.info(f"No tests found in import for {self.scan_type}")
             self.test = None
             return parsed_findings
         # for now we only consider the first test in the list and artificially aggregate all findings of all tests
@@ -350,7 +350,7 @@ def parse_findings_dynamic_test_type(
         if test_raw.description:
             self.test.description = test_raw.description
         self.test.save()
-        logger.debug('IMPORT_SCAN parser v2: Parse findings (aggregate)')
+        logger.debug("IMPORT_SCAN parser v2: Parse findings (aggregate)")
         # Aggregate all the findings and return them with the newly created test
         return self.parse_dynamic_test_type_findings_from_tests(tests)
@@ -378,11 +378,11 @@ def async_process_findings(
             # So I can check on the task later
             results_list += [result]
         # After all tasks have been started, time to pull the results
-        logger.info('IMPORT_SCAN: Collecting Findings')
+        logger.info("IMPORT_SCAN: Collecting Findings")
         for results in results_list:
             serial_new_findings = results
             new_findings += [next(deserialize("json", finding)).object for finding in serial_new_findings]
-        logger.info('IMPORT_SCAN: All Findings Collected')
+        logger.info("IMPORT_SCAN: All Findings Collected")
         # Indicate that the test is not complete yet as endpoints will still be rolling in.
         self.test.percent_complete = 50
         self.test.save()
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index ad0260f714..46676ebaf1 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -83,7 +83,7 @@ def process_scan(
         - Send out notifications
         - Update the test progress
         """
-        logger.debug(f'REIMPORT_SCAN: parameters: {locals()}')
+        logger.debug(f"REIMPORT_SCAN: parameters: {locals()}")
         # Validate the Tool_Configuration
         self.verify_tool_configuration_from_test()
         # Fetch the parser based upon the string version of the scan type
@@ -123,7 +123,7 @@ def process_scan(
             untouched_findings=untouched_findings,
         )
         # Send out some notifications to the user
-        logger.debug('REIMPORT_SCAN: Generating notifications')
+        logger.debug("REIMPORT_SCAN: Generating notifications")
         updated_count = (
             len(closed_findings) + len(reactivated_findings) + len(new_findings)
         )
@@ -134,9 +134,9 @@ def process_scan(
             findings_mitigated=closed_findings,
         )
         # Update the test progress to reflect that the import has completed
-        logger.debug('REIMPORT_SCAN: Updating Test progress')
+        logger.debug("REIMPORT_SCAN: Updating Test progress")
         self.update_test_progress()
-        logger.debug('REIMPORT_SCAN: Done')
+        logger.debug("REIMPORT_SCAN: Done")
         return (
             self.test,
             updated_count,
@@ -368,26 +368,26 @@ def match_new_finding_to_existing_finding(
         """
         # This code should match the logic used for deduplication out of the re-import feature.
         # See utils.py deduplicate_* functions
-        deduplicationLogger.debug('return findings bases on algorithm: %s', self.deduplication_algorithm)
-        if self.deduplication_algorithm == 'hash_code':
+        deduplicationLogger.debug("return findings based on algorithm: %s", self.deduplication_algorithm)
+        if self.deduplication_algorithm == "hash_code":
             return Finding.objects.filter(
                 test=self.test,
                 hash_code=unsaved_finding.hash_code,
-            ).exclude(hash_code=None).order_by('id')
-        elif self.deduplication_algorithm == 'unique_id_from_tool':
+            ).exclude(hash_code=None).order_by("id")
+        elif self.deduplication_algorithm == "unique_id_from_tool":
             return Finding.objects.filter(
                 test=self.test,
                 unique_id_from_tool=unsaved_finding.unique_id_from_tool,
-            ).exclude(unique_id_from_tool=None).order_by('id')
-        elif self.deduplication_algorithm == 'unique_id_from_tool_or_hash_code':
+            ).exclude(unique_id_from_tool=None).order_by("id")
+        elif self.deduplication_algorithm == "unique_id_from_tool_or_hash_code":
             query = Finding.objects.filter(
                 Q(test=self.test),
                 (Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code))
                 | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool)),
-            ).order_by('id')
+            ).order_by("id")
             deduplicationLogger.debug(query.query)
             return query
-        elif self.deduplication_algorithm == 'legacy':
+        elif self.deduplication_algorithm == "legacy":
             # This is the legacy reimport behavior. Although it's pretty flawed and doesn't match the legacy algorithm for deduplication,
             # this is left as is for simplicity.
             # Re-writing the legacy deduplication here would be complicated and counter-productive.
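Note: in match_new_finding_to_existing_finding above, the unique_id_from_tool_or_hash_code branch matches an existing finding when either identifier is present and equal; the isnull=False guards keep findings with a missing identifier from pairing on NULL, and order_by("id") makes the first candidate deterministic. A minimal standalone sketch of that query shape (the candidate_matches helper is hypothetical; Finding and its fields are as used above):

    from django.db.models import Q

    from dojo.models import Finding

    def candidate_matches(test, unsaved_finding):
        # Match on hash_code OR unique_id_from_tool, but only consider a
        # field when it is actually set on the stored finding.
        return Finding.objects.filter(
            Q(test=test),
            (Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code))
            | (Q(unique_id_from_tool__isnull=False)
               & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool)),
        ).order_by("id")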
@@ -397,7 +397,7 @@ def match_new_finding_to_existing_finding(
                 title=unsaved_finding.title,
                 test=self.test,
                 severity=unsaved_finding.severity,
-                numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by('id')
+                numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by("id")
         else:
             logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"')
             return None
@@ -586,7 +586,7 @@ def process_matched_active_finding(
             if self.verified is not None:
                 existing_finding.verified = self.verified
         elif unsaved_finding.risk_accepted or unsaved_finding.false_p or unsaved_finding.out_of_scope:
-            logger.debug('Reimported mitigated item matches a finding that is currently open, closing.')
+            logger.debug("Reimported mitigated item matches a finding that is currently open, closing.")
             logger.debug(
                 f"Closing: {existing_finding.id}: {existing_finding.title} "
                 f"({existing_finding.component_name} - {existing_finding.component_version})",
diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py
index 2e885168aa..2ee3e7d300 100644
--- a/dojo/importers/endpoint_manager.py
+++ b/dojo/importers/endpoint_manager.py
@@ -55,7 +55,7 @@ def add_endpoints_to_unsaved_finding(
             Endpoint_Status.objects.get_or_create(
                 finding=finding,
                 endpoint=ep,
-                defaults={'date': finding.date})
+                defaults={"date": finding.date})
         logger.debug(f"IMPORT_SCAN: {len(endpoints)} imported")
         return None

diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index f2b869e55a..d7645851e3 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -46,22 +46,22 @@
 logger = logging.getLogger(__name__)

 RESOLVED_STATUS = [
-    'Inactive',
-    'Mitigated',
-    'False Positive',
-    'Out of Scope',
-    'Duplicate',
+    "Inactive",
+    "Mitigated",
+    "False Positive",
+    "Out of Scope",
+    "Duplicate",
 ]

 OPEN_STATUS = [
-    'Active',
-    'Verified',
+    "Active",
+    "Verified",
 ]


 def is_jira_enabled():
-    if not get_system_setting('enable_jira'):
-        logger.debug('JIRA is disabled, not doing anything')
+    if not get_system_setting("enable_jira"):
+        logger.debug("JIRA is disabled, not doing anything")
         return False
     return True
@@ -108,10 +108,10 @@ def is_push_all_issues(instance):
 def can_be_pushed_to_jira(obj, form=None):
     # logger.debug('can be pushed to JIRA: %s', finding_or_form)
     if not get_jira_project(obj):
-        return False, f'{to_str_typed(obj)} cannot be pushed to jira as there is no jira project configuration for this product.', 'error_no_jira_project'
+        return False, f"{to_str_typed(obj)} cannot be pushed to jira as there is no jira project configuration for this product.", "error_no_jira_project"

-    if not hasattr(obj, 'has_jira_issue'):
-        return False, f'{to_str_typed(obj)} cannot be pushed to jira as there is no jira_issue attribute.', 'error_no_jira_issue_attribute'
+    if not hasattr(obj, "has_jira_issue"):
+        return False, f"{to_str_typed(obj)} cannot be pushed to jira as there is no jira_issue attribute.", "error_no_jira_issue_attribute"

     if isinstance(obj, Stub_Finding):
         # stub findings don't have active/verified/etc and can always be pushed
@@ -123,35 +123,35 @@ def can_be_pushed_to_jira(obj, form=None):

     if isinstance(obj, Finding):
         if form:
-            active = form['active'].value()
-            verified = form['verified'].value()
-            severity = form['severity'].value()
+            active = form["active"].value()
+            verified = form["verified"].value()
+            severity = form["severity"].value()
         else:
             active = obj.active
             verified = obj.verified
             severity = obj.severity

-        logger.debug('can_be_pushed_to_jira: %s, %s, %s', active, verified, severity)
+        logger.debug("can_be_pushed_to_jira: %s, %s, %s", active, verified, severity)

         if not active or not verified:
-            logger.debug('Findings must be active and verified to be pushed to JIRA')
-            return False, 'Findings must be active and verified to be pushed to JIRA', 'not_active_or_verified'
+            logger.debug("Findings must be active and verified to be pushed to JIRA")
+            return False, "Findings must be active and verified to be pushed to JIRA", "not_active_or_verified"

         jira_minimum_threshold = None
         if System_Settings.objects.get().jira_minimum_severity:
             jira_minimum_threshold = Finding.get_number_severity(System_Settings.objects.get().jira_minimum_severity)

         if jira_minimum_threshold and jira_minimum_threshold > Finding.get_number_severity(severity):
-            logger.debug(f'Finding below the minimum JIRA severity threshold ({System_Settings.objects.get().jira_minimum_severity}).')
-            return False, f'Finding below the minimum JIRA severity threshold ({System_Settings.objects.get().jira_minimum_severity}).', 'below_minimum_threshold'
+            logger.debug(f"Finding below the minimum JIRA severity threshold ({System_Settings.objects.get().jira_minimum_severity}).")
+            return False, f"Finding below the minimum JIRA severity threshold ({System_Settings.objects.get().jira_minimum_severity}).", "below_minimum_threshold"
     elif isinstance(obj, Finding_Group):
         if not obj.findings.all():
-            return False, f'{to_str_typed(obj)} cannot be pushed to jira as it is empty.', 'error_empty'
-        if 'Active' not in obj.status():
-            return False, f'{to_str_typed(obj)} cannot be pushed to jira as it is not active.', 'error_inactive'
+            return False, f"{to_str_typed(obj)} cannot be pushed to jira as it is empty.", "error_empty"
+        if "Active" not in obj.status():
+            return False, f"{to_str_typed(obj)} cannot be pushed to jira as it is not active.", "error_inactive"

     else:
-        return False, f'{to_str_typed(obj)} cannot be pushed to jira as it is of unsupported type.', 'error_unsupported'
+        return False, f"{to_str_typed(obj)} cannot be pushed to jira as it is of unsupported type.", "error_unsupported"

     return True, None, None
@@ -173,9 +173,9 @@ def get_jira_project(obj, use_inheritance=True):
         if obj.jira_project:
             return obj.jira_project
         # some old jira_issue records don't have a jira_project, so try to go via the finding instead
-        elif hasattr(obj, 'finding') and obj.finding:
+        elif hasattr(obj, "finding") and obj.finding:
             return get_jira_project(obj.finding, use_inheritance=use_inheritance)
-        elif hasattr(obj, 'engagement') and obj.engagement:
+        elif hasattr(obj, "engagement") and obj.engagement:
             return get_jira_project(obj.finding, use_inheritance=use_inheritance)
         else:
             return None
@@ -197,16 +197,16 @@ def get_jira_project(obj, use_inheritance=True):
         try:
             jira_project = engagement.jira_project  # first() doesn't work with prefetching
             if jira_project:
-                logger.debug('found jira_project %s for %s', jira_project, engagement)
+                logger.debug("found jira_project %s for %s", jira_project, engagement)
                 return jira_project
         except JIRA_Project.DoesNotExist:
             pass  # leave jira_project as None

         if use_inheritance:
-            logger.debug('delegating to product %s for %s', engagement.product, engagement)
+            logger.debug("delegating to product %s for %s", engagement.product, engagement)
             return get_jira_project(engagement.product)
         else:
-            logger.debug('not delegating to product %s for %s', engagement.product, engagement)
+            logger.debug("not delegating to product %s for %s", engagement.product, engagement)
             return None

     if isinstance(obj, Product):
@@ -215,10 +215,10 @@ def get_jira_project(obj, use_inheritance=True):
         jira_projects = product.jira_project_set.all()  # first() doesn't work with prefetching
         jira_project = jira_projects[0] if len(jira_projects) > 0 else None
         if jira_project:
-            logger.debug('found jira_project %s for %s', jira_project, product)
+            logger.debug("found jira_project %s for %s", jira_project, product)
             return jira_project

-    logger.debug('no jira_project found for %s', obj)
+    logger.debug("no jira_project found for %s", obj)
     return None
@@ -228,14 +228,14 @@ def get_jira_instance(obj):

     jira_project = get_jira_project(obj)
     if jira_project:
-        logger.debug('found jira_instance %s for %s', jira_project.jira_instance, obj)
+        logger.debug("found jira_instance %s for %s", jira_project.jira_instance, obj)
         return jira_project.jira_instance

     return None


 def get_jira_url(obj):
-    logger.debug('getting jira url')
+    logger.debug("getting jira url")

     # finding + engagement
     issue = get_jira_issue(obj)
@@ -253,35 +253,35 @@ def get_jira_url(obj):

 def get_jira_issue_url(issue):
-    logger.debug('getting jira issue url')
+    logger.debug("getting jira issue url")
     jira_project = get_jira_project(issue)
     jira_instance = get_jira_instance(jira_project)
     if jira_instance is None:
         return None

     # example http://jira.com/browser/SEC-123
-    return jira_instance.url + '/browse/' + issue.jira_key
+    return jira_instance.url + "/browse/" + issue.jira_key


 def get_jira_project_url(obj):
-    logger.debug('getting jira project url')
+    logger.debug("getting jira project url")
     if not isinstance(obj, JIRA_Project):
         jira_project = get_jira_project(obj)
     else:
         jira_project = obj

     if jira_project:
-        logger.debug('getting jira project url2')
+        logger.debug("getting jira project url2")
         jira_instance = get_jira_instance(obj)
         if jira_project and jira_instance:
-            logger.debug('getting jira project url3')
-            return jira_project.jira_instance.url + '/browse/' + jira_project.project_key
+            logger.debug("getting jira project url3")
+            return jira_project.jira_instance.url + "/browse/" + jira_project.project_key

     return None


 def get_jira_key(obj):
-    if hasattr(obj, 'has_jira_issue') and obj.has_jira_issue:
+    if hasattr(obj, "has_jira_issue") and obj.has_jira_issue:
         return get_jira_issue_key(obj)

     if isinstance(obj, JIRA_Project):
@@ -316,12 +316,12 @@ def get_jira_issue_template(obj):

     # fallback to default as before
     if not template_dir:
-        template_dir = 'issue-trackers/jira_full/'
+        template_dir = "issue-trackers/jira_full/"

     if isinstance(obj, Finding_Group):
-        return os.path.join(template_dir, 'jira-finding-group-description.tpl')
+        return os.path.join(template_dir, "jira-finding-group-description.tpl")
     else:
-        return os.path.join(template_dir, 'jira-description.tpl')
+        return os.path.join(template_dir, "jira-description.tpl")


 def get_jira_creation(obj):
@@ -336,7 +336,7 @@ def get_jira_change(obj):
         if obj.has_jira_issue:
             return obj.jira_issue.jira_change
     else:
-        logger.debug('get_jira_change unsupported object type: %s', obj)
+        logger.debug("get_jira_change unsupported object type: %s", obj)
     return None
@@ -344,7 +344,7 @@ def get_epic_name_field_name(jira_instance):
     if not jira_instance or not jira_instance.epic_name_id:
         return None

-    return 'customfield_' + str(jira_instance.epic_name_id)
+    return "customfield_" + str(jira_instance.epic_name_id)


 def has_jira_issue(obj):
@@ -375,10 +375,10 @@ def connect_to_jira(jira_server, jira_username, jira_password):

 def get_jira_connect_method():
-    if hasattr(settings, 'JIRA_CONNECT_METHOD'):
+    if hasattr(settings, "JIRA_CONNECT_METHOD"):
         try:
             import importlib
-            mn, _, fn = settings.JIRA_CONNECT_METHOD.rpartition('.')
+            mn, _, fn = settings.JIRA_CONNECT_METHOD.rpartition(".")
             m = importlib.import_module(mn)
             return getattr(m, fn)
         except ModuleNotFoundError:
@@ -391,28 +391,28 @@ def get_jira_connection_raw(jira_server, jira_username, jira_password):
         connect_method = get_jira_connect_method()
         jira = connect_method(jira_server, jira_username, jira_password)

-        logger.debug('logged in to JIRA ''%s'' successfully', jira_server)
+        logger.debug("logged in to JIRA '%s' successfully", jira_server)

         return jira
     except JIRAError as e:
         logger.exception(e)
-        error_message = e.text if hasattr(e, 'text') else e.message if hasattr(e, 'message') else e.args[0]
+        error_message = e.text if hasattr(e, "text") else e.message if hasattr(e, "message") else e.args[0]

         if e.status_code in [401, 403]:
-            log_jira_generic_alert('JIRA Authentication Error', error_message)
+            log_jira_generic_alert("JIRA Authentication Error", error_message)
         else:
-            log_jira_generic_alert('Unknown JIRA Connection Error', error_message)
+            log_jira_generic_alert("Unknown JIRA Connection Error", error_message)

-        add_error_message_to_response('Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. ' + str(error_message))
+        add_error_message_to_response("Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. " + str(error_message))
         raise

     except requests.exceptions.RequestException as re:
         logger.exception(re)
-        error_message = re.text if hasattr(re, 'text') else re.message if hasattr(re, 'message') else re.args[0]
-        log_jira_generic_alert('Unknown JIRA Connection Error', re)
+        error_message = re.text if hasattr(re, "text") else re.message if hasattr(re, "message") else re.args[0]
+        log_jira_generic_alert("Unknown JIRA Connection Error", re)

-        add_error_message_to_response('Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. ' + str(error_message))
+        add_error_message_to_response("Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge, Network connection. Details in alert on top right. " + str(error_message))
         raise
@@ -430,11 +430,11 @@ def jira_get_resolution_id(jira, issue, status):
     transitions = jira.transitions(issue)
     resolution_id = None
     for t in transitions:
-        if t['name'] == "Resolve Issue":
-            resolution_id = t['id']
+        if t["name"] == "Resolve Issue":
+            resolution_id = t["id"]
             break
-        if t['name'] == "Reopen Issue":
-            resolution_id = t['id']
+        if t["name"] == "Reopen Issue":
+            resolution_id = t["id"]
             break

     return resolution_id
@@ -446,14 +446,14 @@ def jira_transition(jira, issue, transition_id):
             jira.transition_issue(issue, transition_id)
             return True
     except JIRAError as jira_error:
-        logger.debug('error transitioning jira issue ' + issue.key + ' ' + str(jira_error))
+        logger.debug("error transitioning jira issue " + issue.key + " " + str(jira_error))
         logger.exception(jira_error)
         alert_text = f"JiraError HTTP {jira_error.status_code}"
         if jira_error.url:
             alert_text += f" url: {jira_error.url}"
         if jira_error.text:
             alert_text += f"\ntext: {jira_error.text}"
-        log_jira_generic_alert('error transitioning jira issue ' + issue.key, alert_text)
+        log_jira_generic_alert("error transitioning jira issue " + issue.key, alert_text)
         return None
@@ -499,34 +499,34 @@ def get_jira_comments(finding):

 # Logs the error to the alerts table, which appears in the notification toolbar
 def log_jira_generic_alert(title, description):
     create_notification(
-        event='jira_update',
+        event="jira_update",
         title=title,
         description=description,
-        icon='bullseye',
-        source='JIRA')
+        icon="bullseye",
+        source="JIRA")


 # Logs the error to the alerts table, which appears in the notification toolbar
 def log_jira_alert(error, obj):
     create_notification(
-        event='jira_update',
-        title='Error pushing to JIRA ' + '(' + truncate_with_dots(prod_name(obj), 25) + ')',
-        description=to_str_typed(obj) + ', ' + error,
+        event="jira_update",
+        title="Error pushing to JIRA " + "(" + truncate_with_dots(prod_name(obj), 25) + ")",
+        description=to_str_typed(obj) + ", " + error,
         url=obj.get_absolute_url(),
-        icon='bullseye',
-        source='Push to JIRA',
+        icon="bullseye",
+        source="Push to JIRA",
         obj=obj)


 # Displays an alert for Jira notifications
 def log_jira_message(text, finding):
     create_notification(
-        event='jira_update',
-        title='Pushing to JIRA: ',
+        event="jira_update",
+        title="Pushing to JIRA: ",
         description=text + " Finding: " + str(finding.id),
-        url=reverse('view_finding', args=(finding.id, )),
-        icon='bullseye',
-        source='JIRA', finding=finding)
+        url=reverse("view_finding", args=(finding.id, )),
+        icon="bullseye",
+        source="JIRA", finding=finding)


 def get_labels(obj):
@@ -572,41 +572,41 @@ def get_tags(obj):
         obj_tags = obj.tags.all()
         if obj_tags:
             for tag in obj_tags:
-                tags.append(str(tag.name.replace(' ', '-')))
+                tags.append(str(tag.name.replace(" ", "-")))
     if isinstance(obj, Finding_Group):
         for finding in obj.findings.all():
             obj_tags = finding.tags.all()
             if obj_tags:
                 for tag in obj_tags:
                     if tag not in tags:
-                        tags.append(str(tag.name.replace(' ', '-')))
+                        tags.append(str(tag.name.replace(" ", "-")))

     return tags


 def jira_summary(obj):
-    summary = ''
+    summary = ""
     if isinstance(obj, Finding):
         summary = obj.title
     if isinstance(obj, Finding_Group):
         summary = obj.name

-    return summary.replace('\r', '').replace('\n', '')[:255]
+    return summary.replace("\r", "").replace("\n", "")[:255]


 def jira_description(obj):
     template = get_jira_issue_template(obj)
-    logger.debug('rendering description for jira from: %s', template)
+    logger.debug("rendering description for jira from: %s", template)

     kwargs = {}
     if isinstance(obj, Finding):
-        kwargs['finding'] = obj
+        kwargs["finding"] = obj
     elif isinstance(obj, Finding_Group):
-        kwargs['finding_group'] = obj
+        kwargs["finding_group"] = obj

     description = render_to_string(template, kwargs)
-    logger.debug('rendered description: %s', description)
+    logger.debug("rendered description: %s", description)
     return description
@@ -626,12 +626,12 @@ def jira_environment(obj):
         jira_environments = [env for env in envs if env]
         return "\n".join(jira_environments)
     else:
-        return ''
+        return ""


 def push_to_jira(obj, *args, **kwargs):
     if obj is None:
-        msg = 'Cannot push None to JIRA'
+        msg = "Cannot push None to JIRA"
         raise ValueError(msg)

     if isinstance(obj, Finding):
@@ -656,14 +656,14 @@ def push_to_jira(obj, *args, **kwargs):
             return add_jira_issue_for_finding_group(group, *args, **kwargs)

     else:
-        logger.error('unsupported object passed to push_to_jira: %s %i %s', obj.__name__, obj.id, obj)
+        logger.error("unsupported object passed to push_to_jira: %s %i %s", obj.__name__, obj.id, obj)


 def add_issues_to_epic(jira, obj, epic_id, issue_keys, ignore_epics=True):
     try:
         return jira.add_issues_to_epic(epic_id=epic_id, issue_keys=issue_keys, ignore_epics=ignore_epics)
     except JIRAError as e:
-        logger.error('error adding issues %s to epic %s for %s', issue_keys, epic_id, obj.id)
+        logger.error("error adding issues %s to epic %s for %s", issue_keys, epic_id, obj.id)
         logger.exception(e)
         log_jira_alert(e.text, obj)
         return False
@@ -703,35 +703,35 @@ def prepare_jira_issue_fields(
         issuetype_fields=[]):

     fields = {
-        'project': {'key': project_key},
-        'issuetype': {'name': issuetype_name},
-        'summary': summary,
-        'description': description,
+        "project": {"key": project_key},
+        "issuetype": {"name": issuetype_name},
+        "summary": summary,
+        "description": description,
     }

     if component_name:
-        fields['components'] = [{'name': component_name}]
+        fields["components"] = [{"name": component_name}]

     if custom_fields:
         fields.update(custom_fields)

-    if labels and 'labels' in issuetype_fields:
-        fields['labels'] = labels
+    if labels and "labels" in issuetype_fields:
+        fields["labels"] = labels

-    if environment and 'environment' in issuetype_fields:
-        fields['environment'] = environment
+    if environment and "environment" in issuetype_fields:
+        fields["environment"] = environment

-    if priority_name and 'priority' in issuetype_fields:
-        fields['priority'] = {'name': priority_name}
+    if priority_name and "priority" in issuetype_fields:
+        fields["priority"] = {"name": priority_name}

     if epic_name_field and epic_name_field in issuetype_fields:
         fields[epic_name_field] = summary

-    if duedate and 'duedate' in issuetype_fields:
-        fields['duedate'] = duedate.strftime('%Y-%m-%d')
+    if duedate and "duedate" in issuetype_fields:
+        fields["duedate"] = duedate.strftime("%Y-%m-%d")

     if default_assignee:
-        fields['assignee'] = {'name': default_assignee}
+        fields["assignee"] = {"name": default_assignee}

     return fields
@@ -744,13 +744,13 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
         log_jira_alert(message, obj)
         return False

-    logger.info('trying to create a new jira issue for %d:%s', obj.id, to_str_typed(obj))
+    logger.info("trying to create a new jira issue for %d:%s", obj.id, to_str_typed(obj))

     if not is_jira_enabled():
         return False

     if not is_jira_configured_and_enabled(obj):
-        message = f'Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}.'
+        message = f"Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}."
         return failure_to_add_message(message, None, obj)

     jira_project = get_jira_project(obj)
@@ -765,7 +765,7 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
         logger.warning("%s cannot be pushed to JIRA: %s.", to_str_typed(obj), error_message)
         logger.warning("The JIRA issue will NOT be created.")
         return False
-    logger.debug('Trying to create a new JIRA issue for %s...', to_str_typed(obj))
+    logger.debug("Trying to create a new JIRA issue for %s...", to_str_typed(obj))
     # Attempt to get the jira connection
     try:
         JIRAError.log_to_tempfile = False
@@ -806,23 +806,23 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
         return failure_to_add_message(message, e, obj)
     # Create a new issue in Jira with the fields set in the last step
     try:
-        logger.debug('sending fields to JIRA: %s', fields)
+        logger.debug("sending fields to JIRA: %s", fields)
         new_issue = jira.create_issue(fields)
-        logger.debug('saving JIRA_Issue for %s finding %s', new_issue.key, obj.id)
+        logger.debug("saving JIRA_Issue for %s finding %s", new_issue.key, obj.id)
         j_issue = JIRA_Issue(jira_id=new_issue.id, jira_key=new_issue.key, jira_project=jira_project)
         j_issue.set_obj(obj)
         j_issue.jira_creation = timezone.now()
         j_issue.jira_change = timezone.now()
         j_issue.save()
         jira.issue(new_issue.id)
-        logger.info('Created the following jira issue for %d:%s', obj.id, to_str_typed(obj))
+        logger.info("Created the following jira issue for %d:%s", obj.id, to_str_typed(obj))
     except Exception as e:
         message = f"Failed to create jira issue with the following payload: {fields} - {e}"
         return failure_to_add_message(message, e, obj)
     # Attempt to set a default assignee
     try:
         if jira_project.default_assignee:
-            created_assignee = str(new_issue.get_field('assignee'))
+            created_assignee = str(new_issue.get_field("assignee"))
             logger.debug("new issue created with assignee %s", created_assignee)
             if created_assignee != jira_project.default_assignee:
                 jira.assign_issue(new_issue.key, jira_project.default_assignee)
@@ -843,7 +843,7 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
             try:
                 jira_attachment(
                     find, jira, new_issue,
-                    settings.MEDIA_ROOT + '/' + pic)
+                    settings.MEDIA_ROOT + "/" + pic)
             except FileNotFoundError as e:
                 logger.info(e)
     except Exception as e:
@@ -864,12 +864,12 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
     try:
         if jira_project.enable_engagement_epic_mapping:
             eng = obj.test.engagement
-            logger.debug('Adding to EPIC Map: %s', eng.name)
+            logger.debug("Adding to EPIC Map: %s", eng.name)
             epic = get_jira_issue(eng)
             if epic:
                 add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(new_issue.id)], ignore_epics=True)
             else:
-                logger.info('The following EPIC does not exist: %s', eng.name)
+                logger.info("The following EPIC does not exist: %s", eng.name)
     except Exception as e:
         message = f"Failed to assign jira issue to existing epic: {e}"
         return failure_to_add_message(message, e, obj)
@@ -903,7 +903,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
         log_jira_alert(message, obj)
         return False

-    logger.debug('trying to update a linked jira issue for %d:%s', obj.id, to_str_typed(obj))
+    logger.debug("trying to update a linked jira issue for %d:%s", obj.id, to_str_typed(obj))

     if not is_jira_enabled():
         return False
@@ -912,7 +912,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
     jira_instance = get_jira_instance(obj)

     if not is_jira_configured_and_enabled(obj):
-        message = f'Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}.'
+        message = f"Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}."
         return failure_to_update_message(message, None, obj)

     j_issue = obj.jira_issue
@@ -946,10 +946,10 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
         return failure_to_update_message(message, e, obj)
     # Update the issue in jira
     try:
-        logger.debug('sending fields to JIRA: %s', fields)
+        logger.debug("sending fields to JIRA: %s", fields)
         issue.update(
-            summary=fields['summary'],
-            description=fields['description'],
+            summary=fields["summary"],
+            description=fields["description"],
             # Do not update the priority in jira after creation as this could have changed in jira, but should not change in dojo
             # priority=fields['priority'],
             fields=fields)
@@ -977,7 +977,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
             try:
                 jira_attachment(
                     find, jira, issue,
-                    settings.MEDIA_ROOT + '/' + pic)
+                    settings.MEDIA_ROOT + "/" + pic)
             except FileNotFoundError as e:
                 logger.info(e)
     except Exception as e:
@@ -988,12 +988,12 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
     try:
         if jira_project.enable_engagement_epic_mapping:
             eng = find.test.engagement
-            logger.debug('Adding to EPIC Map: %s', eng.name)
+            logger.debug("Adding to EPIC Map: %s", eng.name)
             epic = get_jira_issue(eng)
             if epic:
                 add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(j_issue.jira_id)], ignore_epics=True)
             else:
-                logger.info('The following EPIC does not exist: %s', eng.name)
+                logger.info("The following EPIC does not exist: %s", eng.name)
     except Exception as e:
         message = f"Failed to assign jira issue to existing epic: {e}"
         return failure_to_update_message(message, e, obj)
@@ -1002,7 +1002,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b

 def get_jira_issue_from_jira(find):
-    logger.debug('getting jira issue from JIRA for %d:%s', find.id, find)
+    logger.debug("getting jira issue from JIRA for %d:%s", find.id, find)

     if not is_jira_enabled():
         return False
@@ -1021,7 +1021,7 @@ def get_jira_issue_from_jira(find):
         JIRAError.log_to_tempfile = False
         jira = get_jira_connection(jira_instance)

-        logger.debug('getting issue from JIRA')
+        logger.debug("getting issue from JIRA")
         issue_from_jira = jira.issue(j_issue.jira_id)

         return issue_from_jira
@@ -1047,7 +1047,7 @@ def issue_from_jira_is_active(issue_from_jira):
     #         or
     #         "resolution": "None"

-    if not hasattr(issue_from_jira.fields, 'resolution'):
+    if not hasattr(issue_from_jira.fields, "resolution"):
         print(vars(issue_from_jira))
         return True
@@ -1067,19 +1067,19 @@ def push_status_to_jira(obj, jira_instance, jira, issue, save=False):
     # check RESOLVED_STATUS first to avoid corner cases with findings that are Inactive, but verified
     if any(item in status_list for item in RESOLVED_STATUS):
         if issue_from_jira_is_active(issue):
-            logger.debug('Transitioning Jira issue to Resolved')
+            logger.debug("Transitioning Jira issue to Resolved")
             updated = jira_transition(jira, issue, jira_instance.close_status_key)
         else:
-            logger.debug('Jira issue already Resolved')
+            logger.debug("Jira issue already Resolved")
             updated = False
         issue_closed = True

     if not issue_closed and any(item in status_list for item in OPEN_STATUS):
         if not issue_from_jira_is_active(issue):
-            logger.debug('Transitioning Jira issue to Active (Reopen)')
+            logger.debug("Transitioning Jira issue to Active (Reopen)")
             updated = jira_transition(jira, issue, jira_instance.open_status_key)
         else:
-            logger.debug('Jira issue already Active')
+            logger.debug("Jira issue already Active")
             updated = False

     if updated and save:
@@ -1094,7 +1094,7 @@ def get_issuetype_fields(
         issuetype_name):

     issuetype_fields = None
-    use_cloud_api = jira.deploymentType.lower() == 'cloud' or jira._version < (9, 0, 0)
+    use_cloud_api = jira.deploymentType.lower() == "cloud" or jira._version < (9, 0, 0)
     try:
         if use_cloud_api:
             try:
@@ -1108,13 +1108,13 @@ def get_issuetype_fields(

             project = None
             try:
-                project = meta['projects'][0]
+                project = meta["projects"][0]
             except Exception:
                 msg = "Project misconfigured or no permissions in Jira ?"
                 raise JIRAError(msg)

             try:
-                issuetype_fields = project['issuetypes'][0]['fields'].keys()
+                issuetype_fields = project["issuetypes"][0]["fields"].keys()
             except Exception:
                 msg = "Misconfigured default issue type ?"
                 raise JIRAError(msg)
@@ -1184,7 +1184,7 @@ def jira_attachment(finding, jira, issue, file, jira_filename=None):
                     issue=issue, attachment=attachment, filename=jira_filename)
             else:
                 # read and upload a file
-                with open(file, 'rb') as f:
+                with open(file, "rb") as f:
                     jira.add_attachment(issue=issue, attachment=f)
         return True
     except JIRAError as e:
@@ -1227,9 +1227,9 @@ def close_epic(eng, push_to_jira, **kwargs):
                 logger.warning("JIRA close epic failed: no issue found")
                 return False

-            req_url = jira_instance.url + '/rest/api/latest/issue/' + \
-                jissue.jira_id + '/transitions'
-            json_data = {'transition': {'id': jira_instance.close_status_key}}
+            req_url = jira_instance.url + "/rest/api/latest/issue/" + \
+                jissue.jira_id + "/transitions"
+            json_data = {"transition": {"id": jira_instance.close_status_key}}
             r = requests.post(
                 url=req_url,
                 auth=HTTPBasicAuth(jira_instance.username, jira_instance.password),
@@ -1240,10 +1240,10 @@ def close_epic(eng, push_to_jira, **kwargs):
             return True
         except JIRAError as e:
             logger.exception(e)
-            log_jira_generic_alert('Jira Engagement/Epic Close Error', str(e))
+            log_jira_generic_alert("Jira Engagement/Epic Close Error", str(e))
             return False
     else:
-        add_error_message_to_response('Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement')
+        add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement")

         return False
@@ -1252,12 +1252,12 @@ def close_epic(eng, push_to_jira, **kwargs):
 @app.task
 @dojo_model_from_id(model=Engagement)
 def update_epic(engagement, **kwargs):
-    logger.debug('trying to update jira EPIC for %d:%s', engagement.id, engagement.name)
+    logger.debug("trying to update jira EPIC for %d:%s", engagement.id, engagement.name)

     if not is_jira_configured_and_enabled(engagement):
         return False

-    logger.debug('config found')
+    logger.debug("config found")

     jira_project = get_jira_project(engagement)
     jira_instance = get_jira_instance(engagement)
@@ -1267,7 +1267,7 @@ def update_epic(engagement, **kwargs):
             j_issue = get_jira_issue(engagement)
             issue = jira.issue(j_issue.jira_id)

-            epic_name = kwargs.get('epic_name')
+            epic_name = kwargs.get("epic_name")
             if not epic_name:
                 epic_name = engagement.name

@@ -1275,10 +1275,10 @@ def update_epic(engagement, **kwargs):
             return True
         except JIRAError as e:
             logger.exception(e)
-            log_jira_generic_alert('Jira Engagement/Epic Update Error', str(e))
+            log_jira_generic_alert("Jira Engagement/Epic Update Error", str(e))
             return False
     else:
-        add_error_message_to_response('Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement')
+        add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement")

         return False
@@ -1288,35 +1288,35 @@ def update_epic(engagement, **kwargs):
 @app.task
 @dojo_model_from_id(model=Engagement)
 def add_epic(engagement, **kwargs):
-    logger.debug('trying to create a new jira EPIC for %d:%s', engagement.id, engagement.name)
+    logger.debug("trying to create a new jira EPIC for %d:%s", engagement.id, engagement.name)

     if not is_jira_configured_and_enabled(engagement):
         return False

-    logger.debug('config found')
+    logger.debug("config found")

     jira_project = get_jira_project(engagement)
     jira_instance = get_jira_instance(engagement)
     if jira_project.enable_engagement_epic_mapping:
-        epic_name = kwargs.get('epic_name')
+        epic_name = kwargs.get("epic_name")
         if not epic_name:
             epic_name = engagement.name
         issue_dict = {
-            'project': {
-                'key': jira_project.project_key,
+            "project": {
+                "key": jira_project.project_key,
             },
-            'summary': epic_name,
-            'description': epic_name,
-            'issuetype': {
-                'name': getattr(jira_project, "epic_issue_type_name", "Epic"),
+            "summary": epic_name,
+            "description": epic_name,
+            "issuetype": {
+                "name": getattr(jira_project, "epic_issue_type_name", "Epic"),
             },
             get_epic_name_field_name(jira_instance): epic_name,
         }
-        if kwargs.get('epic_priority'):
-            issue_dict['priority'] = {'name': kwargs.get('epic_priority')}
+        if kwargs.get("epic_priority"):
+            issue_dict["priority"] = {"name": kwargs.get("epic_priority")}
         try:
             jira = get_jira_connection(jira_instance)
-            logger.debug('add_epic: %s', issue_dict)
+            logger.debug("add_epic: %s", issue_dict)
             new_issue = jira.create_issue(fields=issue_dict)
             j_issue = JIRA_Issue(
                 jira_id=new_issue.id,
@@ -1338,11 +1338,11 @@ def add_epic(engagement, **kwargs):
                 message = "The 'Epic name id' in your DefectDojo Jira Configuration does not appear to be correct. Please visit, " + jira_instance.url + \
                     "/rest/api/2/field and search for Epic Name. Copy the number out of cf[number] and place in your DefectDojo settings for Jira and try again. For example, if your results are cf[100001] then copy 100001 and place it in 'Epic name id'. (Your Epic Id will be different.) \n\n"
-                log_jira_generic_alert('Jira Engagement/Epic Creation Error',
+                log_jira_generic_alert("Jira Engagement/Epic Creation Error",
                                        message + error)
             return False
     else:
-        add_error_message_to_response('Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement')
+        add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement")

         return False
@@ -1354,9 +1354,9 @@ def jira_get_issue(jira_project, issue_key):
         return issue

     except JIRAError as jira_error:
-        logger.debug('error retrieving jira issue ' + issue_key + ' ' + str(jira_error))
+        logger.debug("error retrieving jira issue " + issue_key + " " + str(jira_error))
         logger.exception(jira_error)
-        log_jira_generic_alert('error retrieving jira issue ' + issue_key, str(jira_error))
+        log_jira_generic_alert("error retrieving jira issue " + issue_key, str(jira_error))
         return None
@@ -1370,7 +1370,7 @@ def add_comment(obj, note, force_push=False, **kwargs):
     if not is_jira_configured_and_enabled(obj):
         return False

-    logger.debug('trying to add a comment to a linked jira issue for: %d:%s', obj.id, obj)
+    logger.debug("trying to add a comment to a linked jira issue for: %d:%s", obj.id, obj)
     if not note.private:
         jira_project = get_jira_project(obj)
         jira_instance = get_jira_instance(obj)
@@ -1381,10 +1381,10 @@ def add_comment(obj, note, force_push=False, **kwargs):
                 j_issue = obj.jira_issue
                 jira.add_comment(
                     j_issue.jira_id,
-                    f'({note.author.get_full_name() if note.author.get_full_name() else note.author.username}): {note.entry}')
+                    f"({note.author.get_full_name() if note.author.get_full_name() else note.author.username}): {note.entry}")
                 return True
             except JIRAError as e:
-                log_jira_generic_alert('Jira Add Comment Error', str(e))
+                log_jira_generic_alert("Jira Add Comment Error", str(e))
                 return False
@@ -1397,19 +1397,19 @@ def add_simple_jira_comment(jira_instance, jira_issue, comment):
         )
         return True
     except Exception as e:
-        log_jira_generic_alert('Jira Add Comment Error', str(e))
+        log_jira_generic_alert("Jira Add Comment Error", str(e))
         return False


 def finding_link_jira(request, finding, new_jira_issue_key):
-    logger.debug('linking existing jira issue %s for finding %i', new_jira_issue_key, finding.id)
+    logger.debug("linking existing jira issue %s for finding %i", new_jira_issue_key, finding.id)

     existing_jira_issue = jira_get_issue(get_jira_project(finding), new_jira_issue_key)

     jira_project = get_jira_project(finding)

     if not existing_jira_issue:
-        raise ValueError('JIRA issue not found or cannot be retrieved: ' + new_jira_issue_key)
+        raise ValueError("JIRA issue not found or cannot be retrieved: " + new_jira_issue_key)

     jira_issue = JIRA_Issue(
         jira_id=existing_jira_issue.id,
@@ -1432,14 +1432,14 @@ def finding_link_jira(request, finding, new_jira_issue_key):

 def finding_group_link_jira(request, finding_group, new_jira_issue_key):
-    logger.debug('linking existing jira issue %s for finding group %i', new_jira_issue_key, finding_group.id)
+    logger.debug("linking existing jira issue %s for finding group %i", new_jira_issue_key, finding_group.id)

     existing_jira_issue = jira_get_issue(get_jira_project(finding_group), new_jira_issue_key)

     jira_project = get_jira_project(finding_group)

     if not existing_jira_issue:
-        raise ValueError('JIRA issue not found or cannot be retrieved: ' + new_jira_issue_key)
+        raise ValueError("JIRA issue not found or cannot be retrieved: " + new_jira_issue_key)

     jira_issue = JIRA_Issue(
jira_id=existing_jira_issue.id, @@ -1466,14 +1466,14 @@ def finding_unlink_jira(request, finding): def unlink_jira(request, obj): - logger.debug('removing linked jira issue %s for %i:%s', obj.jira_issue.jira_key, obj.id, to_str_typed(obj)) + logger.debug("removing linked jira issue %s for %i:%s", obj.jira_issue.jira_key, obj.id, to_str_typed(obj)) obj.jira_issue.delete() # finding.save(push_to_jira=False, dedupe_option=False, issue_updater_option=False) # return True if no errors def process_jira_project_form(request, instance=None, target=None, product=None, engagement=None): - if not get_system_setting('enable_jira'): + if not get_system_setting("enable_jira"): return True, None error = False @@ -1482,25 +1482,25 @@ def process_jira_project_form(request, instance=None, target=None, product=None, # jform = JIRAProjectForm(request.POST, instance=instance if instance else JIRA_Project(), product=product) jform = JIRAProjectForm(request.POST, instance=instance, target=target, product=product, engagement=engagement) # logging has_changed because it sometimes doesn't do what we expect - logger.debug('jform has changed: %s', str(jform.has_changed())) + logger.debug("jform has changed: %s", str(jform.has_changed())) if jform.has_changed(): # if no data was changed, no need to do anything! - logger.debug('jform changed_data: %s', jform.changed_data) - logger.debug('jform: %s', vars(jform)) - logger.debug('request.POST: %s', request.POST) + logger.debug("jform changed_data: %s", jform.changed_data) + logger.debug("jform: %s", vars(jform)) + logger.debug("request.POST: %s", request.POST) # calling jform.is_valid() here with inheritance enabled would call clean() on the JIRA_Project model # resulting in a validation error if no jira_instance or project_key is provided # this validation is done because the form is a model form and cannot be skipped # so we check for inheritance checkbox before validating the form. # seems like it's impossible to write clean code with the Django forms framework. 
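The checkbox-before-validation pattern the comments above describe, reduced to a minimal sketch (a plain Form stands in for the real ModelForm here, and all names are illustrative, not part of this patch):

# Sketch: a ModelForm's is_valid() would run the model's clean() and reject a
# deliberately empty config, so the raw POST flag has to be inspected first.
from django import forms

class ConfigForm(forms.Form):
    project_key = forms.CharField()

def process_config_form(request):
    if request.POST.get("inherit_from_product"):
        return True, None  # inheriting: skip validation, store nothing
    form = ConfigForm(request.POST)
    return form.is_valid(), form  # safe now: an explicit config is expected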
-        if request.POST.get('jira-project-form-inherit_from_product', False):
-            logger.debug('inherit chosen')
+        if request.POST.get("jira-project-form-inherit_from_product", False):
+            logger.debug("inherit chosen")
             if not instance:
-                logger.debug('inheriting but no existing JIRA Project for engagement, so nothing to do')
+                logger.debug("inheriting but no existing JIRA Project for engagement, so nothing to do")
             else:
                 error = True
-                msg = 'Not allowed to remove existing JIRA Config for an engagement'
+                msg = "Not allowed to remove existing JIRA Config for an engagement"
                 raise ValueError(msg)
         elif jform.is_valid():
             try:
@@ -1514,14 +1514,14 @@ def process_jira_project_form(request, instance=None, target=None, product=None,
                     obj = product
                 if not jira_project.product_id and not jira_project.engagement_id:
-                    msg = 'encountered JIRA_Project without product_id and without engagement_id'
+                    msg = "encountered JIRA_Project without product_id and without engagement_id"
                     raise ValueError(msg)
                 # only check jira project if form is sufficiently populated
                 if jira_project.jira_instance and jira_project.project_key:
                     # is_jira_project_valid already adds messages if not a valid jira project
                     if not is_jira_project_valid(jira_project):
-                        logger.debug('unable to retrieve jira project from jira instance, invalid?!')
+                        logger.debug("unable to retrieve jira project from jira instance, invalid?!")
                         error = True
                     else:
                         logger.debug(vars(jira_project))
@@ -1533,10 +1533,10 @@ def process_jira_project_form(request, instance=None, target=None, product=None,
                         messages.add_message(request,
                                              messages.SUCCESS,
-                                             'JIRA Project config stored successfully.',
-                                             extra_tags='alert-success')
+                                             "JIRA Project config stored successfully.",
+                                             extra_tags="alert-success")
                         error = False
-                        logger.debug('stored JIRA_Project successfully')
+                        logger.debug("stored JIRA_Project successfully")
             except Exception as e:
                 error = True
                 logger.exception(e)
@@ -1547,17 +1547,17 @@ def process_jira_project_form(request, instance=None, target=None, product=None,
     if error:
         messages.add_message(request,
                              messages.ERROR,
-                             'JIRA Project config not stored due to errors.',
-                             extra_tags='alert-danger')
+                             "JIRA Project config not stored due to errors.",
+                             extra_tags="alert-danger")
     return not error, jform
 # return True if no errors
 def process_jira_epic_form(request, engagement=None):
-    if not get_system_setting('enable_jira'):
+    if not get_system_setting("enable_jira"):
         return True, None
-    logger.debug('checking jira epic form for engagement: %i:%s', engagement.id if engagement else 0, engagement)
+    logger.debug("checking jira epic form for engagement: %i:%s", engagement.id if engagement else 0, engagement)
     # push epic
     error = False
     jira_epic_form = JIRAEngagementForm(request.POST, instance=engagement)
@@ -1566,33 +1566,33 @@ def process_jira_epic_form(request, engagement=None):
     if jira_project:
         if jira_epic_form.is_valid():
-            if jira_epic_form.cleaned_data.get('push_to_jira'):
-                logger.debug('pushing engagement to JIRA')
+            if jira_epic_form.cleaned_data.get("push_to_jira"):
+                logger.debug("pushing engagement to JIRA")
                 epic_name = engagement.name
-                if jira_epic_form.cleaned_data.get('epic_name'):
-                    epic_name = jira_epic_form.cleaned_data.get('epic_name')
+                if jira_epic_form.cleaned_data.get("epic_name"):
+                    epic_name = jira_epic_form.cleaned_data.get("epic_name")
                 epic_priority = None
-                if jira_epic_form.cleaned_data.get('epic_priority'):
-                    epic_priority = jira_epic_form.cleaned_data.get('epic_priority')
+                if jira_epic_form.cleaned_data.get("epic_priority"):
+                    epic_priority = jira_epic_form.cleaned_data.get("epic_priority")
                 if push_to_jira(engagement, epic_name=epic_name, epic_priority=epic_priority):
-                    logger.debug('Push to JIRA for Epic queued successfully')
+                    logger.debug("Push to JIRA for Epic queued successfully")
                     messages.add_message(
                         request,
                         messages.SUCCESS,
-                        'Push to JIRA for Epic queued succesfully, check alerts on the top right for errors',
-                        extra_tags='alert-success')
+                        "Push to JIRA for Epic queued successfully, check alerts on the top right for errors",
+                        extra_tags="alert-success")
                 else:
                     error = True
-                    logger.debug('Push to JIRA for Epic failey')
+                    logger.debug("Push to JIRA for Epic failed")
                     messages.add_message(
                         request,
                         messages.ERROR,
-                        'Push to JIRA for Epic failed, check alerts on the top right for errors',
-                        extra_tags='alert-danger')
+                        "Push to JIRA for Epic failed, check alerts on the top right for errors",
+                        extra_tags="alert-danger")
         else:
-            logger.debug('invalid jira epic form')
+            logger.debug("invalid jira epic form")
     else:
-        logger.debug('no jira_project for this engagement, skipping epic push')
+        logger.debug("no jira_project for this engagement, skipping epic push")
     return not error, jira_epic_form
@@ -1600,7 +1600,7 @@ def process_jira_epic_form(request, engagement=None):
 # [name|url]. if name contains a '|' it will break it
 # so [%s|%s] % (escape_for_jira(name), url)
 def escape_for_jira(text):
-    return text.replace('|', '%7D')
+    return text.replace("|", "%7D")
 def process_resolution_from_jira(finding, resolution_id, resolution_name, assignee_name, jira_now, jira_issue) -> bool:
@@ -1642,7 +1642,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
             finding.active = False
             finding.mitigated = jira_now
             finding.is_mitigated = True
-            finding.mitigated_by, _created = User.objects.get_or_create(username='JIRA')
+            finding.mitigated_by, _created = User.objects.get_or_create(username="JIRA")
             finding.endpoints.clear()
             finding.false_p = False
             ra_helper.risk_unaccept(finding)
diff --git a/dojo/jira_link/queries.py b/dojo/jira_link/queries.py
index 4b9d9c09b7..6d41b3b6e2 100644
--- a/dojo/jira_link/queries.py
+++ b/dojo/jira_link/queries.py
@@ -13,7 +13,7 @@ def get_authorized_jira_projects(permission, user=None):
     if user is None:
         return JIRA_Project.objects.none()
-    jira_projects = JIRA_Project.objects.all()
+    jira_projects = JIRA_Project.objects.all().order_by("id")
     if user.is_superuser:
         return jira_projects
@@ -23,35 +23,35 @@ def get_authorized_jira_projects(permission, user=None):
     roles = get_roles_for_permission(permission)
     engagement_authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('engagement__product__prod_type_id'),
+        product_type=OuterRef("engagement__product__prod_type_id"),
         user=user,
         role__in=roles)
     engagement_authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('engagement__product_id'),
+        product=OuterRef("engagement__product_id"),
         user=user,
         role__in=roles)
     engagement_authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('engagement__product__prod_type_id'),
+        product_type=OuterRef("engagement__product__prod_type_id"),
         group__users=user,
         role__in=roles)
     engagement_authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('engagement__product_id'),
+        product=OuterRef("engagement__product_id"),
         group__users=user,
         role__in=roles)
     product_authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('product__prod_type_id'),
+        product_type=OuterRef("product__prod_type_id"),
         user=user,
         role__in=roles)
     product_authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('product_id'),
+        product=OuterRef("product_id"),
         user=user,
         role__in=roles)
     product_authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('product__prod_type_id'),
+        product_type=OuterRef("product__prod_type_id"),
         group__users=user,
         role__in=roles)
     product_authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('product_id'),
+        product=OuterRef("product_id"),
         group__users=user,
         role__in=roles)
     jira_projects = jira_projects.annotate(
@@ -82,7 +82,7 @@ def get_authorized_jira_issues(permission):
     if user is None:
         return JIRA_Issue.objects.none()
-    jira_issues = JIRA_Issue.objects.all()
+    jira_issues = JIRA_Issue.objects.all().order_by("id")
     if user.is_superuser:
         return jira_issues
@@ -92,51 +92,51 @@ def get_authorized_jira_issues(permission):
     roles = get_roles_for_permission(permission)
     engagement_authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('engagement__product__prod_type_id'),
+        product_type=OuterRef("engagement__product__prod_type_id"),
         user=user,
         role__in=roles)
     engagement_authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('engagement__product_id'),
+        product=OuterRef("engagement__product_id"),
         user=user,
         role__in=roles)
     engagement_authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('engagement__product__prod_type_id'),
+        product_type=OuterRef("engagement__product__prod_type_id"),
         group__users=user,
         role__in=roles)
     engagement_authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('engagement__product_id'),
+        product=OuterRef("engagement__product_id"),
         group__users=user,
         role__in=roles)
     finding_group_authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('finding_group__test__engagement__product__prod_type_id'),
+        product_type=OuterRef("finding_group__test__engagement__product__prod_type_id"),
         user=user,
         role__in=roles)
     finding_group_authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('finding_group__test__engagement__product_id'),
+        product=OuterRef("finding_group__test__engagement__product_id"),
         user=user,
         role__in=roles)
     finding_group_authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('finding_group__test__engagement__product__prod_type_id'),
+        product_type=OuterRef("finding_group__test__engagement__product__prod_type_id"),
         group__users=user,
         role__in=roles)
     finding_group_authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('finding_group__test__engagement__product_id'),
+        product=OuterRef("finding_group__test__engagement__product_id"),
         group__users=user,
         role__in=roles)
     finding_authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
+        product_type=OuterRef("finding__test__engagement__product__prod_type_id"),
         user=user,
         role__in=roles)
     finding_authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('finding__test__engagement__product_id'),
+        product=OuterRef("finding__test__engagement__product_id"),
         user=user,
         role__in=roles)
     finding_authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('finding__test__engagement__product__prod_type_id'),
+        product_type=OuterRef("finding__test__engagement__product__prod_type_id"),
         group__users=user,
         role__in=roles)
     finding_authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('finding__test__engagement__product_id'),
+        product=OuterRef("finding__test__engagement__product_id"),
         group__users=user,
         role__in=roles)
     jira_issues = jira_issues.annotate(
diff --git a/dojo/jira_link/urls.py b/dojo/jira_link/urls.py
index 40eef551f6..84abc6faef 100644
--- a/dojo/jira_link/urls.py
+++ b/dojo/jira_link/urls.py
@@ -3,12 +3,12 @@
 from . import views
 urlpatterns = [
-    re_path(r'^webhook/(?P<secret>[\w-]+)$', views.webhook, name='web_hook_secret'),
-    re_path(r'^webhook/', views.webhook, name='web_hook'),
-    re_path(r'^jira/webhook/(?P<secret>[\w-]+)$', views.webhook, name='jira_web_hook_secret'),
-    re_path(r'^jira/webhook/', views.webhook, name='jira_web_hook'),
-    re_path(r'^jira/add', views.NewJiraView.as_view(), name='add_jira'),
-    re_path(r'^jira/(?P<jid>\d+)/edit$', views.EditJiraView.as_view(), name='edit_jira'),
-    re_path(r'^jira/(?P<tid>\d+)/delete$', views.DeleteJiraView.as_view(), name='delete_jira'),
-    re_path(r'^jira$', views.ListJiraView.as_view(), name='jira'),
-    re_path(r'^jira/express', views.ExpressJiraView.as_view(), name='express_jira')]
+    re_path(r"^webhook/(?P<secret>[\w-]+)$", views.webhook, name="web_hook_secret"),
+    re_path(r"^webhook/", views.webhook, name="web_hook"),
+    re_path(r"^jira/webhook/(?P<secret>[\w-]+)$", views.webhook, name="jira_web_hook_secret"),
+    re_path(r"^jira/webhook/", views.webhook, name="jira_web_hook"),
+    re_path(r"^jira/add", views.NewJiraView.as_view(), name="add_jira"),
+    re_path(r"^jira/(?P<jid>\d+)/edit$", views.EditJiraView.as_view(), name="edit_jira"),
+    re_path(r"^jira/(?P<tid>\d+)/delete$", views.DeleteJiraView.as_view(), name="delete_jira"),
+    re_path(r"^jira$", views.ListJiraView.as_view(), name="jira"),
+    re_path(r"^jira/express", views.ExpressJiraView.as_view(), name="express_jira")]
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index 80065f78ad..8b50993542 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -85,12 +85,12 @@ def webhook(request, secret=None):
     try:
         parsed = json.loads(request.body.decode("utf-8"))
         # Check if the events supplied are supported
-        if parsed.get('webhookEvent') not in ['comment_created', 'jira:issue_updated']:
+        if parsed.get("webhookEvent") not in ["comment_created", "jira:issue_updated"]:
             return webhook_responser_handler("info", f"Unrecognized JIRA webhook event received: {parsed.get('webhookEvent')}")
-        if parsed.get('webhookEvent') == 'jira:issue_updated':
+        if parsed.get("webhookEvent") == "jira:issue_updated":
             # xml examples at the end of file
-            jid = parsed['issue']['id']
+            jid = parsed["issue"]["id"]
             # This may raise a 404, but it will be handled in the exception response
             try:
                 jissue = JIRA_Issue.objects.get(jira_id=jid)
@@ -109,11 +109,11 @@ def webhook(request, secret=None):
             else:
                 return webhook_responser_handler("info", f"Received issue update for {jissue.jira_key} for unknown object")
             # Process the assignee if present
-            assignee = parsed['issue']['fields'].get('assignee')
-            assignee_name = 'Jira User'
+            assignee = parsed["issue"]["fields"].get("assignee")
+            assignee_name = "Jira User"
             if assignee is not None:
                 # First look for the 'name' field. If not present, try 'displayName'. Else put None
-                assignee_name = assignee.get('name', assignee.get('displayName'))
+                assignee_name = assignee.get("name", assignee.get("displayName"))
             # "resolution":{
             #     "self":"http://www.testjira.com/rest/api/2/resolution/11",
@@ -128,11 +128,11 @@ def webhook(request, secret=None):
             # or
             # "resolution": "None"
-            resolution = parsed['issue']['fields']['resolution']
+            resolution = parsed["issue"]["fields"]["resolution"]
             resolution = resolution if resolution and resolution != "None" else None
-            resolution_id = resolution['id'] if resolution else None
-            resolution_name = resolution['name'] if resolution else None
-            jira_now = parse_datetime(parsed['issue']['fields']['updated'])
+            resolution_id = resolution["id"] if resolution else None
+            resolution_name = resolution["name"] if resolution else None
+            jira_now = parse_datetime(parsed["issue"]["fields"]["updated"])
             if findings:
                 for finding in findings:
@@ -141,7 +141,7 @@ def webhook(request, secret=None):
             if (error_response := check_for_and_create_comment(parsed)) is not None:
                 return error_response
-        if parsed.get('webhookEvent') == 'comment_created':
+        if parsed.get("webhookEvent") == "comment_created":
             if (error_response := check_for_and_create_comment(parsed)) is not None:
                 return error_response
@@ -212,25 +212,25 @@ def check_for_and_create_comment(parsed_json):
     if comment is None:
         return
-    comment_text = comment.get('body')
-    commenter = ''
-    if 'name' in comment.get('updateAuthor'):
-        commenter = comment.get('updateAuthor', {}).get('name')
-    elif 'emailAddress' in comment.get('updateAuthor'):
-        commenter = comment.get('updateAuthor', {}).get('emailAddress')
+    comment_text = comment.get("body")
+    commenter = ""
+    if "name" in comment.get("updateAuthor"):
+        commenter = comment.get("updateAuthor", {}).get("name")
+    elif "emailAddress" in comment.get("updateAuthor"):
+        commenter = comment.get("updateAuthor", {}).get("emailAddress")
     else:
-        logger.debug('Could not find the author of this jira comment!')
-    commenter_display_name = comment.get('updateAuthor', {}).get('displayName')
+        logger.debug("Could not find the author of this jira comment!")
+    commenter_display_name = comment.get("updateAuthor", {}).get("displayName")
     # example: body['comment']['self'] = "http://www.testjira.com/jira_under_a_path/rest/api/2/issue/666/comment/456843"
-    jid = comment.get('self', '').split('/')[-3]
+    jid = comment.get("self", "").split("/")[-3]
     try:
         jissue = JIRA_Issue.objects.get(jira_id=jid)
     except JIRA_Instance.DoesNotExist:
         return webhook_responser_handler("info", f"JIRA issue {jid} is not linked to a DefectDojo Finding")
     logging.debug(f"Received issue comment for {jissue.jira_key}")
-    logger.debug('jissue: %s', vars(jissue))
+    logger.debug("jissue: %s", vars(jissue))
-    jira_usernames = JIRA_Instance.objects.values_list('username', flat=True)
+    jira_usernames = JIRA_Instance.objects.values_list("username", flat=True)
     for jira_user_id in jira_usernames:
         # logger.debug('incoming username: %s jira config username: %s', commenter.lower(), jira_user_id.lower())
         if jira_user_id.lower() == commenter.lower():
@@ -239,19 +239,19 @@ def check_for_and_create_comment(parsed_json):
     findings = None
     if jissue.finding:
         findings = [jissue.finding]
-        create_notification(event='jira_comment', title=f'JIRA incoming comment - {jissue.finding}', finding=jissue.finding, url=reverse("view_finding", args=(jissue.finding.id,)), icon='check')
+        create_notification(event="jira_comment", title=f"JIRA incoming comment - {jissue.finding}", finding=jissue.finding, url=reverse("view_finding", args=(jissue.finding.id,)), icon="check")
     elif jissue.finding_group:
         findings = jissue.finding_group.findings.all()
         first_finding_group = findings.first()
         if first_finding_group:
-            create_notification(event='jira_comment', title=f'JIRA incoming comment - {jissue.finding_group}', finding=first_finding_group, url=reverse("view_finding_group", args=(jissue.finding_group.id,)), icon='check')
+            create_notification(event="jira_comment", title=f"JIRA incoming comment - {jissue.finding_group}", finding=first_finding_group, url=reverse("view_finding_group", args=(jissue.finding_group.id,)), icon="check")
     elif jissue.engagement:
         return webhook_responser_handler("debug", "Comment for engagement ignored")
     else:
         return webhook_responser_handler("info", f"Received issue update for {jissue.jira_key} for unknown object")
     # Set the fields for the notes
-    author, _ = User.objects.get_or_create(username='JIRA')
-    entry = f'({commenter_display_name} ({commenter})): {comment_text}'
+    author, _ = User.objects.get_or_create(username="JIRA")
+    entry = f"({commenter_display_name} ({commenter})): {comment_text}"
     # Iterate (potentially) over each of the findings the note should be added to
     for finding in findings:
         # Determine if this exact note was created within the last 30 seconds to avoid duplicate notes
@@ -273,11 +273,11 @@ def check_for_and_create_comment(parsed_json):
 def get_custom_field(jira, label):
-    url = jira._options["server"].strip('/') + '/rest/api/2/field'
+    url = jira._options["server"].strip("/") + "/rest/api/2/field"
     response = jira._session.get(url).json()
     for node in response:
-        if label in node['clauseNames']:
-            field = int(node['schema']['customId'])
+        if label in node["clauseNames"]:
+            field = int(node["schema"]["customId"])
             break
     return field
@@ -285,10 +285,10 @@ def get_custom_field(jira, label):
 class ExpressJiraView(View):
     def get_template(self):
-        return 'dojo/express_new_jira.html'
+        return "dojo/express_new_jira.html"
     def get_fallback_template(self):
-        return 'dojo/new_jira.html'
+        return "dojo/new_jira.html"
     def get_form_class(self):
         return ExpressJIRAForm
@@ -297,20 +297,20 @@ def get_fallback_form_class(self):
         return JIRAForm
     def get(self, request):
-        if not user_has_configuration_permission(request.user, 'dojo.add_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.add_jira_instance"):
             raise PermissionDenied
         jform = self.get_form_class()()
         add_breadcrumb(title="New Jira Configuration (Express)", top_level=False, request=request)
-        return render(request, self.get_template(), {'jform': jform})
+        return render(request, self.get_template(), {"jform": jform})
     def post(self, request):
-        if not user_has_configuration_permission(request.user, 'dojo.add_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.add_jira_instance"):
             raise PermissionDenied
         jform = self.get_form_class()(request.POST, instance=JIRA_Instance())
         if jform.is_valid():
-            jira_server = jform.cleaned_data.get('url').rstrip('/')
-            jira_username = jform.cleaned_data.get('username')
-            jira_password = jform.cleaned_data.get('password')
+            jira_server = jform.cleaned_data.get("url").rstrip("/")
+            jira_username = jform.cleaned_data.get("username")
+            jira_password = jform.cleaned_data.get("password")
             try:
                 jira = jira_helper.get_jira_connection_raw(jira_server, jira_username, jira_password)
@@ -319,100 +319,100 @@ def post(self, request):
                 messages.add_message(
                     request,
                     messages.ERROR,
-                    'Unable to authenticate. Please check credentials.',
-                    extra_tags='alert-danger')
-                return render(request, self.get_template(), {'jform': jform})
+                    "Unable to authenticate. Please check credentials.",
+                    extra_tags="alert-danger")
+                return render(request, self.get_template(), {"jform": jform})
             # authentication successful
             # Get the open and close keys
             try:
-                issue_id = jform.cleaned_data.get('issue_key')
-                key_url = jira_server.strip('/') + '/rest/api/latest/issue/' + issue_id + '/transitions?expand=transitions.fields'
+                issue_id = jform.cleaned_data.get("issue_key")
+                key_url = jira_server.strip("/") + "/rest/api/latest/issue/" + issue_id + "/transitions?expand=transitions.fields"
                 response = jira._session.get(key_url).json()
-                logger.debug('Retrieved JIRA issue successfully')
+                logger.debug("Retrieved JIRA issue successfully")
                 open_key = close_key = None
-                for node in response['transitions']:
-                    if node['to']['statusCategory']['name'] == 'To Do':
-                        open_key = open_key or int(node['id'])
-                    if node['to']['statusCategory']['name'] == 'Done':
-                        close_key = close_key or int(node['id'])
+                for node in response["transitions"]:
+                    if node["to"]["statusCategory"]["name"] == "To Do":
+                        open_key = open_key or int(node["id"])
+                    if node["to"]["statusCategory"]["name"] == "Done":
+                        close_key = close_key or int(node["id"])
             except Exception as e:
                 logger.exception(e)  # already logged in jira_helper
                 messages.add_message(
                     request,
                     messages.ERROR,
-                    'Unable to find Open/Close ID\'s (invalid issue key specified?). They will need to be found manually',
-                    extra_tags='alert-danger')
+                    "Unable to find Open/Close IDs (invalid issue key specified?). They will need to be found manually",
+                    extra_tags="alert-danger")
                 fallback_form = self.get_fallback_form_class()(request.POST, instance=JIRA_Instance())
-                return render(request, self.get_fallback_template(), {'jform': fallback_form})
+                return render(request, self.get_fallback_template(), {"jform": fallback_form})
             # Get the epic id name
             try:
-                epic_name = get_custom_field(jira, 'Epic Name')
+                epic_name = get_custom_field(jira, "Epic Name")
             except Exception as e:
                 logger.exception(e)  # already logged in jira_helper
                 messages.add_message(
                     request,
                     messages.ERROR,
-                    'Unable to find Epic Name. It will need to be found manually',
-                    extra_tags='alert-danger')
+                    "Unable to find Epic Name. It will need to be found manually",
+                    extra_tags="alert-danger")
                 fallback_form = self.get_fallback_form_class()(request.POST, instance=JIRA_Instance())
-                return render(request, self.get_fallback_template(), {'jform': fallback_form})
+                return render(request, self.get_fallback_template(), {"jform": fallback_form})
             jira_instance = JIRA_Instance(
                 username=jira_username,
                 password=jira_password,
                 url=jira_server,
-                configuration_name=jform.cleaned_data.get('configuration_name'),
-                info_mapping_severity='Lowest',
-                low_mapping_severity='Low',
-                medium_mapping_severity='Medium',
-                high_mapping_severity='High',
-                critical_mapping_severity='Highest',
+                configuration_name=jform.cleaned_data.get("configuration_name"),
+                info_mapping_severity="Lowest",
+                low_mapping_severity="Low",
+                medium_mapping_severity="Medium",
+                high_mapping_severity="High",
+                critical_mapping_severity="Highest",
                 epic_name_id=epic_name,
                 open_status_key=open_key,
                 close_status_key=close_key,
-                finding_text='',
-                default_issue_type=jform.cleaned_data.get('default_issue_type'),
-                finding_jira_sync=jform.cleaned_data.get('finding_jira_sync'))
+                finding_text="",
+                default_issue_type=jform.cleaned_data.get("default_issue_type"),
+                finding_jira_sync=jform.cleaned_data.get("finding_jira_sync"))
             jira_instance.save()
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                'JIRA Configuration Successfully Created.',
-                extra_tags='alert-success')
+                "JIRA Configuration Successfully Created.",
+                extra_tags="alert-success")
             create_notification(
-                event='jira_config_added',
+                event="jira_config_added",
                 title=f"New addition of JIRA: {jform.cleaned_data.get('configuration_name')}",
                 description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
-                url=request.build_absolute_uri(reverse('jira')))
+                url=request.build_absolute_uri(reverse("jira")))
-            return HttpResponseRedirect(reverse('jira'))
-        return render(request, self.get_template(), {'jform': jform})
+            return HttpResponseRedirect(reverse("jira"))
+        return render(request, self.get_template(), {"jform": jform})
 class NewJiraView(View):
     def get_template(self):
-        return 'dojo/new_jira.html'
+        return "dojo/new_jira.html"
     def get_form_class(self):
         return JIRAForm
     def get(self, request):
-        if not user_has_configuration_permission(request.user, 'dojo.add_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.add_jira_instance"):
             raise PermissionDenied
         jform = self.get_form_class()()
         add_breadcrumb(title="New Jira Configuration", top_level=False, request=request)
-        return render(request, self.get_template(), {'jform': jform})
+        return render(request, self.get_template(), {"jform": jform})
     def post(self, request):
-        if not user_has_configuration_permission(request.user, 'dojo.add_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.add_jira_instance"):
             raise PermissionDenied
         jform = self.get_form_class()(request.POST, instance=JIRA_Instance())
         if jform.is_valid():
-            jira_server = jform.cleaned_data.get('url').rstrip('/')
-            jira_username = jform.cleaned_data.get('username')
-            jira_password = jform.cleaned_data.get('password')
+            jira_server = jform.cleaned_data.get("url").rstrip("/")
+            jira_username = jform.cleaned_data.get("username")
+            jira_password = jform.cleaned_data.get("password")
-            logger.debug('calling get_jira_connection_raw')
+            logger.debug("calling get_jira_connection_raw")
             # Make sure the connection can be completed
             jira_helper.get_jira_connection_raw(jira_server, jira_username, jira_password)
@@ -422,47 +422,47 @@ def post(self, request):
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                'JIRA Configuration Successfully Created.',
-                extra_tags='alert-success')
+                "JIRA Configuration Successfully Created.",
+                extra_tags="alert-success")
             create_notification(
-                event='jira_config_added',
+                event="jira_config_added",
                 title=f"New addition of JIRA: {jform.cleaned_data.get('configuration_name')}",
                 description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
-                url=request.build_absolute_uri(reverse('jira')))
+                url=request.build_absolute_uri(reverse("jira")))
-            return HttpResponseRedirect(reverse('jira'))
+            return HttpResponseRedirect(reverse("jira"))
         else:
-            logger.error('jform.errors: %s', jform.errors)
-        return render(request, self.get_template(), {'jform': jform})
+            logger.error("jform.errors: %s", jform.errors)
+        return render(request, self.get_template(), {"jform": jform})
 class EditJiraView(View):
     def get_template(self):
-        return 'dojo/edit_jira.html'
+        return "dojo/edit_jira.html"
     def get_form_class(self):
         return JIRAForm
     def get(self, request, jid=None):
-        if not user_has_configuration_permission(request.user, 'dojo.change_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.change_jira_instance"):
             raise PermissionDenied
         jira = JIRA_Instance.objects.get(pk=jid)
         jform = self.get_form_class()(instance=jira)
         add_breadcrumb(title="Edit JIRA Configuration", top_level=False, request=request)
-        return render(request, self.get_template(), {'jform': jform})
+        return render(request, self.get_template(), {"jform": jform})
     def post(self, request, jid=None):
-        if not user_has_configuration_permission(request.user, 'dojo.change_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.change_jira_instance"):
             raise PermissionDenied
         jira = JIRA_Instance.objects.get(pk=jid)
         jira_password_from_db = jira.password
         jform = self.get_form_class()(request.POST, instance=jira)
         if jform.is_valid():
-            jira_server = jform.cleaned_data.get('url').rstrip('/')
-            jira_username = jform.cleaned_data.get('username')
+            jira_server = jform.cleaned_data.get("url").rstrip("/")
+            jira_username = jform.cleaned_data.get("username")
-            if jform.cleaned_data.get('password'):
-                jira_password = jform.cleaned_data.get('password')
+            if jform.cleaned_data.get("password"):
+                jira_password = jform.cleaned_data.get("password")
             else:
                 # on edit the password is optional
                 jira_password = jira_password_from_db
@@ -477,41 +477,41 @@ def post(self, request, jid=None):
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                'JIRA Configuration Successfully Saved.',
-                extra_tags='alert-success')
+                "JIRA Configuration Successfully Saved.",
+                extra_tags="alert-success")
             create_notification(
-                event='jira_config_edited',
+                event="jira_config_edited",
                 title=f"Edit of JIRA: {jform.cleaned_data.get('configuration_name')}",
                 description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was edited by {request.user}",
-                url=request.build_absolute_uri(reverse('jira')))
+                url=request.build_absolute_uri(reverse("jira")))
-            return HttpResponseRedirect(reverse('jira'))
+            return HttpResponseRedirect(reverse("jira"))
-        return render(request, self.get_template(), {'jform': jform})
+        return render(request, self.get_template(), {"jform": jform})
 class ListJiraView(View):
     def get_template(self):
-        return 'dojo/jira.html'
+        return "dojo/jira.html"
     def get(self, request):
-        if not user_has_configuration_permission(request.user, 'dojo.view_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.view_jira_instance"):
             raise PermissionDenied
         jira_instances = JIRA_Instance.objects.all()
-        context = {'jira_instances': jira_instances}
+        context = {"jira_instances": jira_instances}
         add_breadcrumb(title="JIRA List", top_level=not len(request.GET), request=request)
         return render(request, self.get_template(), context)
 class DeleteJiraView(View):
     def get_template(self):
-        return 'dojo/delete_jira.html'
+        return "dojo/delete_jira.html"
     def get_form_class(self):
         return DeleteJIRAInstanceForm
     def get(self, request, tid=None):
-        if not user_has_configuration_permission(request.user, 'dojo.delete_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.delete_jira_instance"):
             raise PermissionDenied
         jira_instance = get_object_or_404(JIRA_Instance, pk=tid)
         form = self.get_form_class()(instance=jira_instance)
@@ -521,17 +521,17 @@ def get(self, request, tid=None):
         add_breadcrumb(title="Delete", top_level=False, request=request)
         return render(request, self.get_template(), {
-            'inst': jira_instance,
-            'form': form,
-            'rels': rels,
-            'deletable_objects': rels,
+            "inst": jira_instance,
+            "form": form,
+            "rels": rels,
+            "deletable_objects": rels,
         })
     def post(self, request, tid=None):
-        if not user_has_configuration_permission(request.user, 'dojo.delete_jira_instance'):
+        if not user_has_configuration_permission(request.user, "dojo.delete_jira_instance"):
             raise PermissionDenied
         jira_instance = get_object_or_404(JIRA_Instance, pk=tid)
-        if 'id' in request.POST and str(jira_instance.id) == request.POST['id']:
+        if "id" in request.POST and str(jira_instance.id) == request.POST["id"]:
             form = self.get_form_class()(request.POST, instance=jira_instance)
             if form.is_valid():
                 try:
@@ -539,24 +539,24 @@ def post(self, request, tid=None):
                     messages.add_message(
                         request,
                         messages.SUCCESS,
-                        'JIRA Conf and relationships removed.',
-                        extra_tags='alert-success')
+                        "JIRA Conf and relationships removed.",
+                        extra_tags="alert-success")
                     create_notification(
-                        event='jira_config_deleted',
-                        title=_('Deletion of JIRA: %s') % jira_instance.configuration_name,
+                        event="jira_config_deleted",
+                        title=_("Deletion of JIRA: %s") % jira_instance.configuration_name,
                         description=f'JIRA "{jira_instance.configuration_name}" was deleted by {request.user}',
-                        url=request.build_absolute_uri(reverse('jira')))
-                    return HttpResponseRedirect(reverse('jira'))
+                        url=request.build_absolute_uri(reverse("jira")))
+                    return HttpResponseRedirect(reverse("jira"))
                 except Exception as e:
-                    add_error_message_to_response(f'Unable to delete JIRA Instance, probably because it is used by JIRA Issues: {str(e)}')
+                    add_error_message_to_response(f"Unable to delete JIRA Instance, probably because it is used by JIRA Issues: {str(e)}")
             collector = NestedObjects(using=DEFAULT_DB_ALIAS)
             collector.collect([jira_instance])
             rels = collector.nested()
         add_breadcrumb(title="Delete", top_level=False, request=request)
         return render(request, self.get_template(), {
-            'inst': jira_instance,
-            'form': form,
-            'rels': rels,
-            'deletable_objects': rels,
+            "inst": jira_instance,
+            "form": form,
+            "rels": rels,
+            "deletable_objects": rels,
         })
diff --git a/dojo/management/commands/clear_alerts.py b/dojo/management/commands/clear_alerts.py
index 5ae54ae6bb..4607be2a54 100644
--- a/dojo/management/commands/clear_alerts.py
+++ b/dojo/management/commands/clear_alerts.py
@@ -12,17 +12,17 @@
 class Command(BaseCommand):
-    help = 'Remove alerts from the database'
+    help = "Remove alerts from the database"
     def add_arguments(self, parser):
-        parser.add_argument('-a', '--all', action='store_true', help='Remove all alerts from the database')
-        parser.add_argument('-s', '--system', action='store_true', help='Remove alerts wihtout a user')
-        parser.add_argument('-u', '--users', nargs='+', type=str, help='Removes alerts from users')
+        parser.add_argument("-a", "--all", action="store_true", help="Remove all alerts from the database")
+        parser.add_argument("-s", "--system", action="store_true", help="Remove alerts without a user")
+        parser.add_argument("-u", "--users", nargs="+", type=str, help="Removes alerts from users")
     def handle(self, *args, **options):
-        alls = options['all']
-        users = options['users']
-        system = options['system']
+        alls = options["all"]
+        users = options["users"]
+        system = options["system"]
         if users:
             for user_name in users:
diff --git a/dojo/management/commands/csv_findings_export.py b/dojo/management/commands/csv_findings_export.py
index 80c2e2b591..03521b82f7 100644
--- a/dojo/management/commands/csv_findings_export.py
+++ b/dojo/management/commands/csv_findings_export.py
@@ -6,7 +6,7 @@
 from dojo.models import Finding
 from dojo.utils import get_system_setting
-locale = timezone(get_system_setting('time_zone'))
+locale = timezone(get_system_setting("time_zone"))
 """
 Author: Aaron Weaver
@@ -15,18 +15,18 @@
 class Command(BaseCommand):
-    help = 'Input: Filepath and name'
+    help = "Input: Filepath and name"
     def add_arguments(self, parser):
-        parser.add_argument('file_path')
+        parser.add_argument("file_path")
     def handle(self, *args, **options):
-        file_path = options['file_path']
+        file_path = options["file_path"]
         findings = Finding.objects.filter(verified=True, active=True).select_related(
             "test__engagement__product")
-        writer = csv.writer(open(file_path, 'w'))
+        writer = csv.writer(open(file_path, "w"))
         headers = []
         headers.append("product_name")
@@ -48,7 +48,7 @@ def handle(self, *args, **options):
                 if field != "product_name":
                     value = getattr(obj, field)
                     if isinstance(value, str):
-                        value = value.encode('utf-8').strip()
+                        value = value.encode("utf-8").strip()
                     row.append(value)
             writer.writerow(row)
diff --git a/dojo/management/commands/dedupe.py b/dojo/management/commands/dedupe.py
index 995d258f15..1e4ae0ec90 100644
--- a/dojo/management/commands/dedupe.py
+++ b/dojo/management/commands/dedupe.py
@@ -12,7 +12,7 @@
     mass_model_updater,
 )
-locale = timezone(get_system_setting('time_zone'))
+locale = timezone(get_system_setting("time_zone"))
 logger = logging.getLogger(__name__)
 deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
@@ -22,7 +22,7 @@ def generate_hash_code(f):
     old_hash_code = f.hash_code
     f.hash_code = f.compute_hash_code()
     if f.hash_code != old_hash_code:
-        logger.debug('%d: hash_code changed from %s to %s', f.id, old_hash_code, f.hash_code)
+        logger.debug("%d: hash_code changed from %s to %s", f.id, old_hash_code, f.hash_code)
     return f
@@ -35,21 +35,21 @@ class Command(BaseCommand):
     def add_arguments(self, parser):
         parser.add_argument(
-            '--parser',
-            dest='parser',
-            action='append',
+            "--parser",
+            dest="parser",
+            action="append",
             help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)""",
         )
-        parser.add_argument('--hash_code_only', action='store_true', help='Only compute hash codes')
-        parser.add_argument('--dedupe_only', action='store_true', help='Only run deduplication')
-        parser.add_argument('--dedupe_sync', action='store_true', help='Run dedupe in the foreground, default false')
+        parser.add_argument("--hash_code_only", action="store_true", help="Only compute hash codes")
+        parser.add_argument("--dedupe_only", action="store_true", help="Only run deduplication")
+        parser.add_argument("--dedupe_sync", action="store_true", help="Run dedupe in the foreground, default false")
     def handle(self, *args, **options):
-        restrict_to_parsers = options['parser']
-        hash_code_only = options['hash_code_only']
-        dedupe_only = options['dedupe_only']
-        dedupe_sync = options['dedupe_sync']
+        restrict_to_parsers = options["parser"]
+        hash_code_only = options["hash_code_only"]
+        dedupe_only = options["dedupe_only"]
+        dedupe_sync = options["dedupe_sync"]
         if restrict_to_parsers is not None:
             findings = Finding.objects.filter(test__test_type__name__in=restrict_to_parsers)
@@ -64,26 +64,26 @@ def handle(self, *args, **options):
                 logger.info("######## Start Updating Hashcodes (foreground) ########")
                 # only prefetch here for hash_code calculation
-                finds = findings.prefetch_related('endpoints', 'test__test_type')
-                mass_model_updater(Finding, finds, lambda f: generate_hash_code(f), fields=['hash_code'], order='asc', log_prefix='hash_code computation ')
+                finds = findings.prefetch_related("endpoints", "test__test_type")
+                mass_model_updater(Finding, finds, lambda f: generate_hash_code(f), fields=["hash_code"], order="asc", log_prefix="hash_code computation ")
                 logger.info("######## Done Updating Hashcodes########")
         # Phase 2: deduplicate synchronously
         if not hash_code_only:
-            if get_system_setting('enable_deduplication'):
-                logger.info("######## Start deduplicating (%s) ########", ('foreground' if dedupe_sync else 'background'))
+            if get_system_setting("enable_deduplication"):
+                logger.info("######## Start deduplicating (%s) ########", ("foreground" if dedupe_sync else "background"))
                 if dedupe_sync:
-                    mass_model_updater(Finding, findings, lambda f: do_dedupe_finding(f), fields=None, order='desc', page_size=100, log_prefix='deduplicating ')
+                    mass_model_updater(Finding, findings, lambda f: do_dedupe_finding(f), fields=None, order="desc", page_size=100, log_prefix="deduplicating ")
                 else:
                     # async tasks only need the id
-                    mass_model_updater(Finding, findings.only('id'), lambda f: do_dedupe_finding_task(f.id), fields=None, order='desc', log_prefix='deduplicating ')
+                    mass_model_updater(Finding, findings.only("id"), lambda f: do_dedupe_finding_task(f.id), fields=None, order="desc", log_prefix="deduplicating ")
                 # update the grading (if enabled)
-                logger.debug('Updating grades for products...')
+                logger.debug("Updating grades for products...")
                 for product in Product.objects.all():
                     calculate_grade(product)
-                logger.info("######## Done deduplicating (%s) ########", ('foreground' if dedupe_sync else 'tasks submitted to celery'))
+                logger.info("######## Done deduplicating (%s) ########", ("foreground" if dedupe_sync else "tasks submitted to celery"))
             else:
                 logger.debug("skipping dedupe because it's disabled in system settings")
diff --git a/dojo/management/commands/dupecheck.py b/dojo/management/commands/dupecheck.py
index 2fc252c7ae..5ea3da9b11 100644
--- a/dojo/management/commands/dupecheck.py
+++ b/dojo/management/commands/dupecheck.py
@@ -10,23 +10,23 @@
 class Command(BaseCommand):
-    help = 'No input commands for dedupe findings.'
+    help = "No input commands for dedupe findings."
def count_the_duplicates(self, model, column): print("===================================") print(" Table:" + str(model) + " Column: " + column) print("===================================") - duplicates = model.objects.values(column).annotate(Count('id')).order_by().filter(id__count__gt=1) - kwargs = {'{}__{}'.format(column, 'in'): [item[column] for item in duplicates]} + duplicates = model.objects.values(column).annotate(Count("id")).order_by().filter(id__count__gt=1) + kwargs = {"{}__{}".format(column, "in"): [item[column] for item in duplicates]} duplicates = model.objects.filter(**kwargs) if not duplicates: print("No duplicates found") for dupe in duplicates: - print(f'{dupe.id}, Duplicate value: {getattr(dupe, column)}, Object: {dupe}') + print(f"{dupe.id}, Duplicate value: {getattr(dupe, column)}, Object: {dupe}") def handle(self, *args, **options): - self.count_the_duplicates(Product, 'name') - self.count_the_duplicates(Product_Type, 'name') - self.count_the_duplicates(Tool_Type, 'name') - self.count_the_duplicates(JIRA_Issue, 'jira_id') + self.count_the_duplicates(Product, "name") + self.count_the_duplicates(Product_Type, "name") + self.count_the_duplicates(Tool_Type, "name") + self.count_the_duplicates(JIRA_Issue, "jira_id") diff --git a/dojo/management/commands/endpoint_migration.py b/dojo/management/commands/endpoint_migration.py index b04893b4d4..feae9dd59a 100644 --- a/dojo/management/commands/endpoint_migration.py +++ b/dojo/management/commands/endpoint_migration.py @@ -10,14 +10,14 @@ class Command(BaseCommand): - help = 'Usage: manage.py endpoint_migration.py [--dry-run]' + help = "Usage: manage.py endpoint_migration.py [--dry-run]" def add_arguments(self, parser): parser.add_argument( - '--dry-run', - action='store_true', - help='Just look for broken endpoints', + "--dry-run", + action="store_true", + help="Just look for broken endpoints", ) def handle(self, *args, **options): - clean_hosts_run(apps=apps, change=bool(options.get('dry_run'))) + clean_hosts_run(apps=apps, change=bool(options.get("dry_run"))) diff --git a/dojo/management/commands/fix_0120.py b/dojo/management/commands/fix_0120.py index 127503fa97..4abdce6bb9 100644 --- a/dojo/management/commands/fix_0120.py +++ b/dojo/management/commands/fix_0120.py @@ -12,26 +12,26 @@ class Command(BaseCommand): - help = 'Usage: manage.py fix_0120' + help = "Usage: manage.py fix_0120" def handle(self, *args, **options): connection = connections[DEFAULT_DB_ALIAS] connection.prepare_database() executor = MigrationExecutor(connection) - if not (executor.migration_plan([('dojo', '0119_default_group_is_staff')])): + if not (executor.migration_plan([("dojo", "0119_default_group_is_staff")])): # this means that '0119_default_group_is_staff' was last successful migration logger.warning('This command will remove field "sonarqube_config" in model "Test" to be able to finish migration 0120_sonarqube_test_and_clean') try: with connection.schema_editor() as schema_editor: schema_editor.remove_field( model=Test, - field=Test._meta.get_field('sonarqube_config'), + field=Test._meta.get_field("sonarqube_config"), ) except OperationalError: # We expact exception like: # django.db.utils.OperationalError: (1091, "Can't DROP 'sonarqube_config_id'; check that column/key exists") - logger.info('There was nothing to fix') + logger.info("There was nothing to fix") else: - logger.info('Database fixed') + logger.info("Database fixed") else: - logger.error('Only migrations stacked in front of 0120 can be fixed by this command') + logger.error("Only 
migrations stacked in front of 0120 can be fixed by this command") diff --git a/dojo/management/commands/fix_broken_endpoint_status.py b/dojo/management/commands/fix_broken_endpoint_status.py index 7924b60321..65982d99ba 100644 --- a/dojo/management/commands/fix_broken_endpoint_status.py +++ b/dojo/management/commands/fix_broken_endpoint_status.py @@ -10,7 +10,7 @@ class Command(BaseCommand): - help = 'Usage: manage.py remove_broken_endpoint_statuses.py' + help = "Usage: manage.py remove_broken_endpoint_statuses.py" def handle(self, *args, **options): remove_broken_endpoint_statuses(apps=apps) diff --git a/dojo/management/commands/fix_loop_duplicates.py b/dojo/management/commands/fix_loop_duplicates.py index 7f69b23002..d46e43fbb5 100644 --- a/dojo/management/commands/fix_loop_duplicates.py +++ b/dojo/management/commands/fix_loop_duplicates.py @@ -15,7 +15,7 @@ class Command(BaseCommand): - help = 'No input commands for fixing Loop findings.' + help = "No input commands for fixing Loop findings." def handle(self, *args, **options): fix_loop_duplicates() diff --git a/dojo/management/commands/import_github_languages.py b/dojo/management/commands/import_github_languages.py index a21e57cd9d..be83506a3e 100644 --- a/dojo/management/commands/import_github_languages.py +++ b/dojo/management/commands/import_github_languages.py @@ -14,13 +14,13 @@ class Command(BaseCommand): GitHub maintains a wide range of languages with colors. The project https://github.com/ozh/github-colors converts them regularly in a json file, which we can use to update Language_Types """ - help = 'Usage: manage.py migraimport_github_languages' + help = "Usage: manage.py migraimport_github_languages" def handle(self, *args, **options): - logger.info('Started importing languages from GitHub ...') + logger.info("Started importing languages from GitHub ...") try: - deserialized = json.loads(requests.get('https://raw.githubusercontent.com/ozh/github-colors/master/colors.json').text) + deserialized = json.loads(requests.get("https://raw.githubusercontent.com/ozh/github-colors/master/colors.json").text) except: msg = "Invalid format" raise Exception(msg) @@ -29,19 +29,19 @@ def handle(self, *args, **options): for name in deserialized: element = deserialized[name] - color = element.get('color', None) + color = element.get("color", None) if color is not None: try: language_type, created = Language_Type.objects.get_or_create(language=name) except Language_Type.MultipleObjectsReturned: - logger.warning(f'Language_Type {name} exists multiple times') + logger.warning(f"Language_Type {name} exists multiple times") continue if created: new_language_types += 1 - language_type.color = element.get('color', 0) + language_type.color = element.get("color", 0) language_type.save() - logger.info(f'Finished importing languages from GitHub, added {new_language_types} Language_Types') + logger.info(f"Finished importing languages from GitHub, added {new_language_types} Language_Types") diff --git a/dojo/management/commands/import_surveys.py b/dojo/management/commands/import_surveys.py index 902ba3180a..0eec21a81c 100644 --- a/dojo/management/commands/import_surveys.py +++ b/dojo/management/commands/import_surveys.py @@ -7,7 +7,7 @@ from dojo.models import TextQuestion from dojo.utils import get_system_setting -locale = timezone(get_system_setting('time_zone')) +locale = timezone(get_system_setting("time_zone")) """ Author: Cody Maffucci @@ -16,11 +16,11 @@ class Command(BaseCommand): - help = 'Import surverys from dojo/fixtures/initial_surveys.py' + 
help = "Import surverys from dojo/fixtures/initial_surveys.py" def handle(self, *args, **options): # First create a temp question to pull the polymorphic_ctype_id from - created_question = TextQuestion.objects.create(optional=False, order=1, text='What is love?') + created_question = TextQuestion.objects.create(optional=False, order=1, text="What is love?") # Get the ID used in this system with connection.cursor() as cursor: cursor.execute("select polymorphic_ctype_id from dojo_question;") @@ -28,14 +28,14 @@ def handle(self, *args, **options): ctype_id = row[0] # Find the current id in the surveys file path = os.path.dirname(os.path.abspath(__file__)) - path = path[:-19] + 'fixtures/initial_surveys.json' + path = path[:-19] + "fixtures/initial_surveys.json" contents = open(path).readlines() for line in contents: if '"polymorphic_ctype": ' in line: matchedLine = line break # Create the new id line - old_id = ''.join(c for c in matchedLine if c.isdigit()) + old_id = "".join(c for c in matchedLine if c.isdigit()) new_line = matchedLine.replace(old_id, str(ctype_id)) # Replace the all lines in the file with open(path, "w") as fout: diff --git a/dojo/management/commands/initialize_permissions.py b/dojo/management/commands/initialize_permissions.py index 9e14ecdb89..81ac5d7eb6 100644 --- a/dojo/management/commands/initialize_permissions.py +++ b/dojo/management/commands/initialize_permissions.py @@ -11,22 +11,22 @@ class Command(BaseCommand): """ This management command creates non-standard Django permissions """ - help = 'Usage: manage.py initialize_permissions' + help = "Usage: manage.py initialize_permissions" def handle(self, *args, **options): try: - content_type_system_settings = ContentType.objects.get(app_label='dojo', model='system_settings') + content_type_system_settings = ContentType.objects.get(app_label="dojo", model="system_settings") google_permission = Permission.objects.filter(content_type=content_type_system_settings, - codename='change_google_sheet').count() + codename="change_google_sheet").count() if google_permission == 0: Permission.objects.create( - name='Can change Google Sheet', + name="Can change Google Sheet", content_type=content_type_system_settings, - codename='change_google_sheet', + codename="change_google_sheet", ) - logger.info('Non-standard permissions have been created') + logger.info("Non-standard permissions have been created") except ContentType.DoesNotExist: - logger.warning('No content type found for dojo.system_settings') + logger.warning("No content type found for dojo.system_settings") except ContentType.MultipleObjectsReturned: - logger.warning('Multiple content types found for dojo.system_settings') + logger.warning("Multiple content types found for dojo.system_settings") diff --git a/dojo/management/commands/initialize_test_types.py b/dojo/management/commands/initialize_test_types.py index f4ccc00bbe..58e6ba01b2 100644 --- a/dojo/management/commands/initialize_test_types.py +++ b/dojo/management/commands/initialize_test_types.py @@ -5,14 +5,14 @@ class Command(BaseCommand): - help = 'Initializes Test_Types' + help = "Initializes Test_Types" def handle(self, *args, **options): # called by the initializer to fill the table with test_types for scan_type in PARSERS: Test_Type.objects.get_or_create(name=scan_type) parser = PARSERS[scan_type] - if hasattr(parser, 'requires_tool_type'): + if hasattr(parser, "requires_tool_type"): tool_type = parser.requires_tool_type(scan_type) if tool_type: Tool_Type.objects.get_or_create(name=tool_type) diff --git 
a/dojo/management/commands/jira_async_updates.py b/dojo/management/commands/jira_async_updates.py index e85afdc177..7b688e8da9 100644 --- a/dojo/management/commands/jira_async_updates.py +++ b/dojo/management/commands/jira_async_updates.py @@ -12,13 +12,13 @@ class Command(BaseCommand): - help = 'No input commands for JIRA bulk update.' + help = "No input commands for JIRA bulk update." def handle(self, *args, **options): findings = Finding.objects.exclude(jira_issue__isnull=True) findings = findings.filter(verified=True, active=True) - findings = findings.prefetch_related('jira_issue') + findings = findings.prefetch_related("jira_issue") # finding = Finding.objects.get(id=1) for finding in findings: # try: @@ -44,12 +44,12 @@ def handle(self, *args, **options): new_note.entry = "Please Review Jira Request: " + str( issue) + ". Review status has changed to " + str( issue.fields.resolution) + "." - new_note.author = User.objects.get(username='JIRA') + new_note.author = User.objects.get(username="JIRA") new_note.date = now new_note.save() finding.notes.add(new_note) finding.under_defect_review = True - dojo_user = Dojo_User.objects.get(username='JIRA') + dojo_user = Dojo_User.objects.get(username="JIRA") finding.defect_review_requested_by = dojo_user # Create alert to notify user diff --git a/dojo/management/commands/jira_refactor_data_migration.py b/dojo/management/commands/jira_refactor_data_migration.py index bbd79a76cf..16086ee3cd 100644 --- a/dojo/management/commands/jira_refactor_data_migration.py +++ b/dojo/management/commands/jira_refactor_data_migration.py @@ -10,51 +10,51 @@ class Command(BaseCommand): - help = 'Command to move data from some tables to other tables as part of https://github.com/DefectDojo/django-DefectDojo/pull/3200' + \ - 'Should normally be handled by the migration in that PR, but if that causes errors, this command can help to get the data migrated anyway.' + help = "Command to move data from some tables to other tables as part of https://github.com/DefectDojo/django-DefectDojo/pull/3200. " + \ + "Should normally be handled by the migration in that PR, but if that causes errors, this command can help to get the data migrated anyway."
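The help text above describes a manual fallback for the PR 3200 data migration. A minimal sketch of invoking it from code rather than the shell, assuming a configured Django settings module (call_command is standard Django; the command name comes from the file path in this diff):

from django.core.management import call_command

# Re-run the data move by hand if the PR 3200 migration raised errors.
call_command("jira_refactor_data_migration")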
def move_jira_creation_changed(self): - logger.info('migrating finding.jira_creation and jira_change fields to JIRA_Issue model') - for jira_issue in JIRA_Issue.objects.all().select_related('finding'): + logger.info("migrating finding.jira_creation and jira_change fields to JIRA_Issue model") + for jira_issue in JIRA_Issue.objects.all().select_related("finding"): # try: if jira_issue.finding: - logger.debug('populating jira_issue: %s', jira_issue.jira_key) + logger.debug("populating jira_issue: %s", jira_issue.jira_key) jira_issue.jira_creation = jira_issue.finding.jira_creation jira_issue.jira_change = jira_issue.finding.jira_change jira_issue.save() else: - logger.debug('no finding: skipping jira_issue: %s', jira_issue.jira_key) + logger.debug("no finding: skipping jira_issue: %s", jira_issue.jira_key) def populate_jira_project(self): - logger.info('populating jira_issue.jira_project to point to jira configuration of the product in defect dojo') - for jira_issue in JIRA_Issue.objects.all().select_related('jira_project').prefetch_related('finding__test__engagement__product'): + logger.info("populating jira_issue.jira_project to point to jira configuration of the product in defect dojo") + for jira_issue in JIRA_Issue.objects.all().select_related("jira_project").prefetch_related("finding__test__engagement__product"): # try: if not jira_issue.jira_project and jira_issue.finding: - logger.info('populating jira_issue from finding: %s', jira_issue.jira_key) + logger.info("populating jira_issue from finding: %s", jira_issue.jira_key) jira_project = jira_helper.get_jira_project(jira_issue.finding) # jira_project = jira_issue.finding.test.engagement.product.jira_project_set.all()[0] - logger.debug('jira_project: %s', jira_project) + logger.debug("jira_project: %s", jira_project) jira_issue.jira_project = jira_project jira_issue.save() elif not jira_issue.jira_project and jira_issue.engagement: - logger.debug('populating jira_issue from engagement: %s', jira_issue.jira_key) + logger.debug("populating jira_issue from engagement: %s", jira_issue.jira_key) jira_project = jira_helper.get_jira_project(jira_issue.finding) # jira_project = jira_issue.engagement.product.jira_project_set.all()[0] - logger.debug('jira_project: %s', jira_project) + logger.debug("jira_project: %s", jira_project) jira_issue.jira_project = jira_project jira_issue.save() elif not jira_issue.jira_project: - logger.info('skipping %s as there is no finding or engagment', jira_issue.jira_key) + logger.info("skipping %s as there is no finding or engagement", jira_issue.jira_key) def populate_jira_instance_name_if_empty(self): - logger.info('populating JIRA_Instance.configuration_name with url if empty') + logger.info("populating JIRA_Instance.configuration_name with url if empty") for jira_instance in JIRA_Instance.objects.all(): # try: if not jira_instance.configuration_name: jira_instance.configuration_name = jira_instance.url jira_instance.save() else: - logger.debug('configuration_name already set for %i %s', jira_instance.id, jira_instance.url) + logger.debug("configuration_name already set for %i %s", jira_instance.id, jira_instance.url) def handle(self, *args, **options): @@ -62,5 +62,5 @@ def handle(self, *args, **options): self.populate_jira_project() self.populate_jira_instance_name_if_empty() - logger.info('now this script is completed, you can run the migration 0063_jira_refactor_populate as normal.
it will skip over the data because it has already been populated') - logger.info('if it still fails, comment out all the runpython parts, but leave the operations on the database fields in place') + logger.info("now this script is completed, you can run the migration 0063_jira_refactor_populate as normal. it will skip over the data because it has already been populated") + logger.info("if it still fails, comment out all the runpython parts, but leave the operations on the database fields in place") diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py index db1337fda6..500db1428d 100644 --- a/dojo/management/commands/jira_status_reconciliation.py +++ b/dojo/management/commands/jira_status_reconciliation.py @@ -13,20 +13,20 @@ def jira_status_reconciliation(*args, **kwargs): - mode = kwargs['mode'] - product = kwargs['product'] - engagement = kwargs['engagement'] - daysback = kwargs['daysback'] - dryrun = kwargs['dryrun'] + mode = kwargs["mode"] + product = kwargs["product"] + engagement = kwargs["engagement"] + daysback = kwargs["daysback"] + dryrun = kwargs["dryrun"] - logger.debug('mode: %s product:%s engagement: %s dryrun: %s', mode, product, engagement, dryrun) + logger.debug("mode: %s product:%s engagement: %s dryrun: %s", mode, product, engagement, dryrun) - if mode and mode not in ('push_status_to_jira', 'import_status_from_jira', 'reconcile'): - print('mode must be one of reconcile, push_status_to_jira or import_status_from_jira') + if mode and mode not in ("push_status_to_jira", "import_status_from_jira", "reconcile"): + print("mode must be one of reconcile, push_status_to_jira or import_status_from_jira") return False if not mode: - mode = 'reconcile' + mode = "reconcile" findings = Finding.objects.all() if product: @@ -44,29 +44,29 @@ def jira_status_reconciliation(*args, **kwargs): findings = findings.exclude(jira_issue__isnull=True) # order by product, engagement to increase the cance of being able to reuse jira_instance + jira connection - findings = findings.order_by('test__engagement__product__id', 'test__engagement__id') + findings = findings.order_by("test__engagement__product__id", "test__engagement__id") - findings = findings.prefetch_related('jira_issue__jira_project__jira_instance') - findings = findings.prefetch_related('test__engagement__jira_project__jira_instance') - findings = findings.prefetch_related('test__engagement__product__jira_project_set__jira_instance') + findings = findings.prefetch_related("jira_issue__jira_project__jira_instance") + findings = findings.prefetch_related("test__engagement__jira_project__jira_instance") + findings = findings.prefetch_related("test__engagement__product__jira_project_set__jira_instance") logger.debug(findings.query) - messages = ['jira_key;finding_url;resolution_or_status;find.jira_issue.jira_change;issue_from_jira.fields.updated;find.last_status_update;issue_from_jira.fields.updated;find.last_reviewed;issue_from_jira.fields.updated;flag1;flag2;flag3;action;change_made'] + messages = ["jira_key;finding_url;resolution_or_status;find.jira_issue.jira_change;issue_from_jira.fields.updated;find.last_status_update;issue_from_jira.fields.updated;find.last_reviewed;issue_from_jira.fields.updated;flag1;flag2;flag3;action;change_made"] for find in findings: - logger.debug('jira status reconciliation for: %i:%s', find.id, find) + logger.debug("jira status reconciliation for: %i:%s", find.id, find) issue_from_jira = jira_helper.get_jira_issue_from_jira(find) if 
not issue_from_jira: - message = '%s;%s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;unable to retrieve JIRA Issue;%s' % \ + message = "%s;%s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;unable to retrieve JIRA Issue;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), None, None, None, None, - find.jira_issue.jira_change, None, find.last_status_update, None, find.last_reviewed, None, 'error') + find.jira_issue.jira_change, None, find.last_status_update, None, find.last_reviewed, None, "error") messages.append(message) logger.info(message) continue - assignee = issue_from_jira.fields.assignee if hasattr(issue_from_jira.fields, 'assignee') else None + assignee = issue_from_jira.fields.assignee if hasattr(issue_from_jira.fields, "assignee") else None assignee_name = assignee.displayName if assignee else None resolution = issue_from_jira.fields.resolution if issue_from_jira.fields.resolution and issue_from_jira.fields.resolution != "None" else None resolution_id = resolution.id if resolution else None @@ -79,35 +79,35 @@ def jira_status_reconciliation(*args, **kwargs): flag1, flag2, flag3 = None, None, None - if mode == 'reconcile' and not find.last_status_update: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;skipping finding with no last_status_update;%s' % \ + if mode == "reconcile" and not find.last_status_update: + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;skipping finding with no last_status_update;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), None, None, None, None, - find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, 'skipped') + find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "skipped") messages.append(message) logger.info(message) continue elif find.risk_accepted: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%sskipping risk accepted findings;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;skipping risk accepted findings;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, None, None, None, - find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, 'skipped') + find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "skipped") messages.append(message) logger.info(message) elif jira_helper.issue_from_jira_is_active(issue_from_jira) and find.active: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no action both sides are active/open;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no action both sides are active/open;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, None, None, None, - find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, 'equal') + find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "equal") messages.append(message)
logger.info(message) elif not jira_helper.issue_from_jira_is_active(issue_from_jira) and not find.active: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no action both sides are inactive/closed;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no action both sides are inactive/closed;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, None, None, None, - find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, 'equal') + find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "equal") messages.append(message) logger.info(message) else: # statuses are different - if mode in ('push_status_to_jira', 'import_status_from_jira'): + if mode in ("push_status_to_jira", "import_status_from_jira"): action = mode else: # reconcile @@ -120,10 +120,10 @@ def jira_status_reconciliation(*args, **kwargs): flag2 = not find.last_status_update or (find.last_status_update < issue_from_jira.fields.updated) flag3 = (not find.last_reviewed or (find.last_reviewed < issue_from_jira.fields.updated)) - logger.debug('%s,%s,%s,%s', resolution_name, flag1, flag2, flag3) + logger.debug("%s,%s,%s,%s", resolution_name, flag1, flag2, flag3) if flag1 and flag2 and flag3: - action = 'import_status_from_jira' + action = "import_status_from_jira" else: # Status is DOJO is newer if: @@ -136,64 +136,64 @@ def jira_status_reconciliation(*args, **kwargs): flag2 = find.last_status_update > issue_from_jira.fields.updated flag3 = find.is_mitigated and find.mitigated and find.jira_issue.jira_change and find.mitigated > find.jira_issue.jira_change - logger.debug('%s,%s,%s,%s', resolution_name, flag1, flag2, flag3) + logger.debug("%s,%s,%s,%s", resolution_name, flag1, flag2, flag3) if flag1 or flag2 or flag3: - action = 'push_status_to_jira' + action = "push_status_to_jira" prev_jira_instance, jira = None, None - if action == 'import_status_from_jira': - message_action = 'deactivating' if find.active else 'reactivating' + if action == "import_status_from_jira": + message_action = "deactivating" if find.active else "reactivating" - status_changed = jira_helper.process_resolution_from_jira(find, resolution_id, resolution_name, assignee_name, issue_from_jira.fields.updated, find.jira_issue) if not dryrun else 'dryrun' + status_changed = jira_helper.process_resolution_from_jira(find, resolution_id, resolution_name, assignee_name, issue_from_jira.fields.updated, find.jira_issue) if not dryrun else "dryrun" if status_changed: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s finding in defectdojo;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s finding in defectdojo;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, flag1, flag2, flag3, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, message_action, status_changed) messages.append(message) logger.info(message) else: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no changes made from jira resolution;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no changes made from jira resolution;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, 
find.status(), resolution_name, flag1, flag2, flag3, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, status_changed) messages.append(message) logger.info(message) - elif action == 'push_status_to_jira': + elif action == "push_status_to_jira": jira_instance = jira_helper.get_jira_instance(find) if not prev_jira_instance or (jira_instance.id != prev_jira_instance.id): # only reconnect to jira if the instance if different from the previous finding jira = jira_helper.get_jira_connection(jira_instance) - message_action = 'reopening' if find.active else 'closing' + message_action = "reopening" if find.active else "closing" - status_changed = jira_helper.push_status_to_jira(find, jira_instance, jira, issue_from_jira, save=True) if not dryrun else 'dryrun' + status_changed = jira_helper.push_status_to_jira(find, jira_instance, jira, issue_from_jira, save=True) if not dryrun else "dryrun" if status_changed: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s jira issue;%s;' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s jira issue;%s;" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, flag1, flag2, flag3, message_action, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, status_changed) messages.append(message) logger.info(message) else: if status_changed is None: - status_changed = 'Error' - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no changes made while pushing status to jira;%s' % \ + status_changed = "Error" + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;no changes made while pushing status to jira;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, flag1, flag2, flag3, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, status_changed) messages.append(message) logger.info(message) else: - message = '%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;unable to determine source of truth;%s' % \ + message = "%s; %s/finding/%d;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;%s;unable to determine source of truth;%s" % \ (find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, flag1, flag2, flag3, find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, status_changed) messages.append(message) logger.info(message) - logger.info('results (semicolon seperated)') + logger.info("results (semicolon separated)") for message in messages: print(message) @@ -209,20 +209,20 @@ class Command(BaseCommand): - sync_from_jira: overwrite status in Defect Dojo with status from JIRA """ - help = 'Reconcile finding status with JIRA issue status, stdout will contain semicolon seperated CSV results. \ - Risk Accepted findings are skipped. Findings created before 1.14.0 are skipped.' + help = "Reconcile finding status with JIRA issue status, stdout will contain semicolon separated CSV results. \ + Risk Accepted findings are skipped. Findings created before 1.14.0 are skipped."
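The three modes are easiest to try with --dryrun first, which only prints the semicolon-separated results. A minimal sketch, assuming the option names defined in add_arguments below; "Internal CRM" is a hypothetical product name:

from django.core.management import call_command

# Print the reconciliation actions for one product without modifying anything.
call_command(
    "jira_status_reconciliation",
    mode="reconcile",        # default: the newest status change wins
    product="Internal CRM",  # hypothetical product name
    dryrun=True,
)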
mode_help = \ - '- reconcile: (default)reconcile any differences in status between Defect Dojo and JIRA, will look at the latest status change timestamp in both systems to determine which one is the correct status' \ - '- push_status_to_jira: update JIRA status for all JIRA issues connected to a Defect Dojo finding (will not push summary/description, only status)' \ - '- import_status_from_jira: update Defect Dojo finding status from JIRA' + "- reconcile: (default) reconcile any differences in status between Defect Dojo and JIRA, will look at the latest status change timestamp in both systems to determine which one is the correct status " \ + "- push_status_to_jira: update JIRA status for all JIRA issues connected to a Defect Dojo finding (will not push summary/description, only status) " \ + "- import_status_from_jira: update Defect Dojo finding status from JIRA" def add_arguments(self, parser): - parser.add_argument('--mode', help=self.mode_help) - parser.add_argument('--product', help='Only process findings in this product (name)') - parser.add_argument('--engagement', help='Only process findings in this product (name)') - parser.add_argument('--daysback', type=int, help='Only process findings created in the last \'daysback\' days') - parser.add_argument('--dryrun', action='store_true', help='Only print actions to be performed, but make no modifications.') + parser.add_argument("--mode", help=self.mode_help) + parser.add_argument("--product", help="Only process findings in this product (name)") + parser.add_argument("--engagement", help="Only process findings in this engagement (name)") + parser.add_argument("--daysback", type=int, help="Only process findings created in the last 'daysback' days") + parser.add_argument("--dryrun", action="store_true", help="Only print actions to be performed, but make no modifications.") def handle(self, *args, **options): # mode = options['mode'] diff --git a/dojo/management/commands/migrate_authorization_v2.py b/dojo/management/commands/migrate_authorization_v2.py index 35fa8bbd97..cfcde52a2c 100644 --- a/dojo/management/commands/migrate_authorization_v2.py +++ b/dojo/management/commands/migrate_authorization_v2.py @@ -12,7 +12,7 @@ class Command(BaseCommand): authorization is removed from the code now, this management command is empty. It cannot be removed because it is called in a db migration. """ - help = 'Usage: manage.py migration_authorization_v2' + help = "Usage: manage.py migrate_authorization_v2" def handle(self, *args, **options): pass diff --git a/dojo/management/commands/migrate_staff_users.py b/dojo/management/commands/migrate_staff_users.py index 3015a2f44a..d7a08eeb1e 100644 --- a/dojo/management/commands/migrate_staff_users.py +++ b/dojo/management/commands/migrate_staff_users.py @@ -14,23 +14,23 @@ class Command(BaseCommand): This management command creates a group for staff users with all configuration permissions staff users had in previous releases.
""" - help = 'Usage: manage.py migrate_staff_users' + help = "Usage: manage.py migrate_staff_users" def handle(self, *args, **options): # If group already exists, then the migration has been running before - group_name = 'Staff users' + group_name = "Staff users" groups = Dojo_Group.objects.filter(name=group_name).count() if groups > 0: - sys.exit(f'Group {group_name} already exists, migration aborted') + sys.exit(f"Group {group_name} already exists, migration aborted") # The superuser with the lowest id will be set as the owner of the group - users = Dojo_User.objects.filter(is_superuser=True).order_by('id') + users = Dojo_User.objects.filter(is_superuser=True).order_by("id") if len(users) == 0: - sys.exit('No superuser found, migration aborted') + sys.exit("No superuser found, migration aborted") user = users[0] - group = Dojo_Group(name=group_name, description='Migrated staff users') + group = Dojo_Group(name=group_name, description="Migrated staff users") group.save() owner_role = Role.objects.get(is_owner=True) @@ -43,7 +43,7 @@ def handle(self, *args, **options): owner.save() # All staff users are made to members of the group - reader_role = Role.objects.get(name='Reader') + reader_role = Role.objects.get(name="Reader") staff_users = Dojo_User.objects.filter(is_staff=True) for staff_user in staff_users: if staff_user != owner.user: @@ -62,31 +62,31 @@ def handle(self, *args, **options): # Set the same configuration permissions, staff users had in previous releases auth_group = group.auth_group if not auth_group: - sys.exit('Group has no auth_group, migration aborted') - - auth_group.permissions.add(permissions['view_group']) - auth_group.permissions.add(permissions['add_group']) - auth_group.permissions.add(permissions['view_development_environment']) - auth_group.permissions.add(permissions['add_development_environment']) - auth_group.permissions.add(permissions['change_development_environment']) - auth_group.permissions.add(permissions['delete_development_environment']) - auth_group.permissions.add(permissions['view_finding_template']) - auth_group.permissions.add(permissions['add_finding_template']) - auth_group.permissions.add(permissions['change_finding_template']) - auth_group.permissions.add(permissions['delete_finding_template']) - auth_group.permissions.add(permissions['view_engagement_survey']) - auth_group.permissions.add(permissions['add_engagement_survey']) - auth_group.permissions.add(permissions['change_engagement_survey']) - auth_group.permissions.add(permissions['delete_engagement_survey']) - auth_group.permissions.add(permissions['view_question']) - auth_group.permissions.add(permissions['add_question']) - auth_group.permissions.add(permissions['change_question']) - auth_group.permissions.add(permissions['delete_question']) - auth_group.permissions.add(permissions['view_test_type']) - auth_group.permissions.add(permissions['add_test_type']) - auth_group.permissions.add(permissions['change_test_type']) - auth_group.permissions.add(permissions['delete_test_type']) - auth_group.permissions.add(permissions['view_user']) - auth_group.permissions.add(permissions['add_product_type']) - - logger.info(f'Migrated {len(staff_users)} staff users') + sys.exit("Group has no auth_group, migration aborted") + + auth_group.permissions.add(permissions["view_group"]) + auth_group.permissions.add(permissions["add_group"]) + auth_group.permissions.add(permissions["view_development_environment"]) + auth_group.permissions.add(permissions["add_development_environment"]) + 
auth_group.permissions.add(permissions["change_development_environment"]) + auth_group.permissions.add(permissions["delete_development_environment"]) + auth_group.permissions.add(permissions["view_finding_template"]) + auth_group.permissions.add(permissions["add_finding_template"]) + auth_group.permissions.add(permissions["change_finding_template"]) + auth_group.permissions.add(permissions["delete_finding_template"]) + auth_group.permissions.add(permissions["view_engagement_survey"]) + auth_group.permissions.add(permissions["add_engagement_survey"]) + auth_group.permissions.add(permissions["change_engagement_survey"]) + auth_group.permissions.add(permissions["delete_engagement_survey"]) + auth_group.permissions.add(permissions["view_question"]) + auth_group.permissions.add(permissions["add_question"]) + auth_group.permissions.add(permissions["change_question"]) + auth_group.permissions.add(permissions["delete_question"]) + auth_group.permissions.add(permissions["view_test_type"]) + auth_group.permissions.add(permissions["add_test_type"]) + auth_group.permissions.add(permissions["change_test_type"]) + auth_group.permissions.add(permissions["delete_test_type"]) + auth_group.permissions.add(permissions["view_user"]) + auth_group.permissions.add(permissions["add_product_type"]) + + logger.info(f"Migrated {len(staff_users)} staff users") diff --git a/dojo/management/commands/migrate_surveys.py b/dojo/management/commands/migrate_surveys.py index 98cea012ab..e90ad41bd5 100644 --- a/dojo/management/commands/migrate_surveys.py +++ b/dojo/management/commands/migrate_surveys.py @@ -12,7 +12,7 @@ class Command(BaseCommand): - help = 'import survey data from defectDojo_engagement_survey tables to dojo tables' + help = "import survey data from defectDojo_engagement_survey tables to dojo tables" def handle(self, *args, **options): # Get a connection to the db @@ -20,45 +20,45 @@ def handle(self, *args, **options): # Check if there are any tables to migrate # Has to be specially ordered for parental reasons table_list = [ - 'defectDojo_engagement_survey_question', - 'defectDojo_engagement_survey_choice', - 'defectDojo_engagement_survey_choicequestion', - 'defectDojo_engagement_survey_engagement_survey', - 'defectDojo_engagement_survey_answered_survey', - 'defectDojo_engagement_survey_general_survey', - 'defectDojo_engagement_survey_answer', - 'defectDojo_engagement_survey_textanswer', - 'defectDojo_engagement_survey_choiceanswer', - 'defectDojo_engagement_survey_choiceanswer_answer', - 'defectDojo_engagement_survey_choicequestion_choices', - 'defectDojo_engagement_survey_engagement_survey_questions', - 'defectDojo_engagement_survey_textquestion', + "defectDojo_engagement_survey_question", + "defectDojo_engagement_survey_choice", + "defectDojo_engagement_survey_choicequestion", + "defectDojo_engagement_survey_engagement_survey", + "defectDojo_engagement_survey_answered_survey", + "defectDojo_engagement_survey_general_survey", + "defectDojo_engagement_survey_answer", + "defectDojo_engagement_survey_textanswer", + "defectDojo_engagement_survey_choiceanswer", + "defectDojo_engagement_survey_choiceanswer_answer", + "defectDojo_engagement_survey_choicequestion_choices", + "defectDojo_engagement_survey_engagement_survey_questions", + "defectDojo_engagement_survey_textquestion", ] - survey_tables = [table for table in table_list if table.split('_')[0] == 'defectDojo'] + survey_tables = [table for table in table_list if table.split("_")[0] == "defectDojo"] if len(survey_tables) == 0: - sys.exit('There are no 
defectDojo_enagagement_survey tables to migrate.') + sys.exit("There are no defectDojo_engagement_survey tables to migrate.") # Get unique ploymorphic id for the system ctype_id = 0 # First create a temp question to pull the polymorphic_ctype_id from - TextQuestion.objects.create(optional=False, order=1, text='What is love?') + TextQuestion.objects.create(optional=False, order=1, text="What is love?") # Get the ID used in this system cursor.execute("select polymorphic_ctype_id from dojo_question;") row = cursor.fetchone() ctype_id = row[0] # Copy the tables over for table in survey_tables: - new_table_name = 'dojo' + table[28:] + new_table_name = "dojo" + table[28:] # Take all contents from ddse table and insert into dojo table - copy_string = 'INSERT INTO `' + new_table_name + '` SELECT * FROM `' + table + '`;' + copy_string = "INSERT INTO `" + new_table_name + "` SELECT * FROM `" + table + "`;" cursor.execute(str(copy_string)) # Update polymorphic id on some tables - if new_table_name == 'dojo_question' or new_table_name == 'dojo_answer': - update_string = 'UPDATE `' + new_table_name + '` SET polymorphic_ctype_id = ' + str(ctype_id) + ';' + if new_table_name == "dojo_question" or new_table_name == "dojo_answer": + update_string = "UPDATE `" + new_table_name + "` SET polymorphic_ctype_id = " + str(ctype_id) + ";" cursor.execute(str(update_string)) # Drop the ddse table - print('All defectDojo_engagement_sruvey tables migrated to dojo tables') + print("All defectDojo_engagement_survey tables migrated to dojo tables") # Delete the old tables in reverse order to drop the children first for table in reversed(table_list): - cursor.execute('DROP TABLE `' + table + '`;') - print('All defectDojo_engagement_sruvey tables removed') + cursor.execute("DROP TABLE `" + table + "`;") + print("All defectDojo_engagement_survey tables removed") diff --git a/dojo/management/commands/migrate_textquestions.py b/dojo/management/commands/migrate_textquestions.py index d62836c4d3..5b559e2a1c 100644 --- a/dojo/management/commands/migrate_textquestions.py +++ b/dojo/management/commands/migrate_textquestions.py @@ -11,10 +11,10 @@ class Command(BaseCommand): Textquestions for surveys need to be modified after loading the fixture as they contain an instance dependant polymorphic content id """ - help = 'Usage: manage.py migration_textquestions' + help = "Usage: manage.py migrate_textquestions" def handle(self, *args, **options): - logger.info('Started migrating textquestions ...') + logger.info("Started migrating textquestions ...") update_textquestions = """UPDATE dojo_question SET polymorphic_ctype_id = ( @@ -29,4 +29,4 @@ def handle(self, *args, **options): with connection.cursor() as cursor: cursor.execute(update_textquestions) - logger.info('Finished migrating textquestions') + logger.info("Finished migrating textquestions") diff --git a/dojo/management/commands/print_settings.py b/dojo/management/commands/print_settings.py index 518e22aa6e..66dbc7a6f5 100644 --- a/dojo/management/commands/print_settings.py +++ b/dojo/management/commands/print_settings.py @@ -6,11 +6,11 @@ class Command(BaseCommand): - help = 'Display all the currently loaded settings in the project' + help = "Display all the currently loaded settings in the project" def handle(self, *args, **options): - os.environ['DJANGO_SETTINGS_MODULE'] = 'my_django_project.settings' + os.environ["DJANGO_SETTINGS_MODULE"] = "my_django_project.settings" a_dict = {} diff --git a/dojo/management/commands/push_to_jira_update.py
b/dojo/management/commands/push_to_jira_update.py index 2b3c10b0bb..8e1d480b35 100644 --- a/dojo/management/commands/push_to_jira_update.py +++ b/dojo/management/commands/push_to_jira_update.py @@ -5,7 +5,7 @@ from dojo.models import Finding from dojo.utils import get_system_setting -locale = timezone(get_system_setting('time_zone')) +locale = timezone(get_system_setting("time_zone")) """ Author: Aaron Weaver @@ -14,7 +14,7 @@ class Command(BaseCommand): - help = 'No input commands for Jira bulk update.' + help = "No input commands for Jira bulk update." def handle(self, *args, **options): diff --git a/dojo/management/commands/rename_mend_findings.py b/dojo/management/commands/rename_mend_findings.py index 336988e64c..4b8b6de5c9 100644 --- a/dojo/management/commands/rename_mend_findings.py +++ b/dojo/management/commands/rename_mend_findings.py @@ -3,7 +3,7 @@ from dojo.celery import app -locale = timezone(get_system_setting('time_zone')) +locale = timezone(get_system_setting("time_zone")) """ Author: Aaron Weaver @@ -12,29 +12,29 @@ class Command(BaseCommand): - help = 'No input commands for dedupe findings.' + help = "No input commands for dedupe findings." def handle(self, *args, **options): rename_mend_finding() -@app.task(name='rename_mend_finding_task') +@app.task(name="rename_mend_finding_task") def rename_mend_finding(): mend_id = Test_Type.objects.get(name="Mend Scan").id findings = Finding.objects.filter(found_by=mend_id) - findings = findings.order_by('-pk') + findings = findings.order_by("-pk") logger.info("######## Updating Hashcodes - deduplication is done in the background upon finding save ########") for finding in findings: logger.info("Updating Mend Finding with id: %d" % finding.id) - lib_name_begin = re.search('\\*\\*Library Filename\\*\\* : ', finding.description).span(0)[1] - lib_name_end = re.search('\\*\\*Library Description\\*\\*', finding.description).span(0)[0] + lib_name_begin = re.search("\\*\\*Library Filename\\*\\* : ", finding.description).span(0)[1] + lib_name_end = re.search("\\*\\*Library Description\\*\\*", finding.description).span(0)[0] lib_name = finding.description[lib_name_begin:lib_name_end - 1] if finding.cve is None: finding.title = "CVE-None | " + lib_name else: finding.title = finding.cve + " | " + lib_name if not finding.cwe: - logger.debug('Set cwe for finding %d to 1035 if not an cwe Number is set' % finding.id) + logger.debug("Set cwe for finding %d to 1035 if no cwe number is set" % finding.id) finding.cwe = 1035 finding.title = finding.title.rstrip() # delete \n at the end of the title finding.hash_code = finding.compute_hash_code() diff --git a/dojo/management/commands/risk_acceptance_handle_expiration.py b/dojo/management/commands/risk_acceptance_handle_expiration.py index a627c1161c..b78e706be4 100644 --- a/dojo/management/commands/risk_acceptance_handle_expiration.py +++ b/dojo/management/commands/risk_acceptance_handle_expiration.py @@ -6,9 +6,9 @@ class Command(BaseCommand): - help = 'Handle any risk acceptances that are expired (and not handled yet). Also posts expiration heads alerts / jira comments if configured' + help = "Handle any risk acceptances that are expired (and not handled yet). Also posts expiration heads-up alerts / jira comments if configured" def handle(self, *args, **options): # use admin user to make sure we have access to its properties i.e.
to determine wants_async - with impersonate(Dojo_User.objects.get(username='admin')): + with impersonate(Dojo_User.objects.get(username="admin")): ra_helper.expiration_handler() diff --git a/dojo/management/commands/sla_notifications.py b/dojo/management/commands/sla_notifications.py index 395891742a..467d71652c 100644 --- a/dojo/management/commands/sla_notifications.py +++ b/dojo/management/commands/sla_notifications.py @@ -8,7 +8,7 @@ class Command(BaseCommand): - help = 'Launch with no argument.' + help = "Launch with no argument." def handle(self, *args, **options): sla_compute_and_notify() diff --git a/dojo/management/commands/stamp_finding_last_reviewed.py b/dojo/management/commands/stamp_finding_last_reviewed.py index 3401b75f05..b89cbc1ccc 100644 --- a/dojo/management/commands/stamp_finding_last_reviewed.py +++ b/dojo/management/commands/stamp_finding_last_reviewed.py @@ -6,7 +6,7 @@ from dojo.models import Finding from dojo.utils import get_system_setting -locale = timezone(get_system_setting('time_zone')) +locale = timezone(get_system_setting("time_zone")) """ Authors: Jay Paz @@ -24,15 +24,15 @@ class Command(BaseCommand): - help = 'A new field last_reviewed has been added to the Finding model \n' \ - 'This script will update all findings with a last_reviewed date of the most current date from: \n' \ - '1. Finding Date if no other evidence of activity is found \n' \ - '2. Last note added date if a note is found \n' \ - '3. Mitigation Date if finding is mitigated \n' \ - '4. Last action_log entry date if Finding has been updated \n' + help = "A new field last_reviewed has been added to the Finding model \n" \ + "This script will update all findings with a last_reviewed date of the most current date from: \n" \ + "1. Finding Date if no other evidence of activity is found \n" \ + "2. Last note added date if a note is found \n" \ + "3. Mitigation Date if finding is mitigated \n" \ + "4. 
Last action_log entry date if Finding has been updated \n" def handle(self, *args, **options): - findings = Finding.objects.all().order_by('id') + findings = Finding.objects.all().order_by("id") for finding in findings: save = False if not finding.last_reviewed: @@ -40,7 +40,7 @@ def handle(self, *args, **options): last_note_date = finding.date if finding.notes.all(): - last_note_date = finding.notes.order_by('-date')[ + last_note_date = finding.notes.order_by("-date")[ 0].date.date() mitigation_date = finding.date @@ -56,7 +56,7 @@ def handle(self, *args, **options): obj = ct.get_object_for_this_type(pk=finding.id) log_entries = LogEntry.objects.filter(content_type=ct, object_pk=obj.id).order_by( - '-timestamp') + "-timestamp") if log_entries: last_action_date = log_entries[0].timestamp.date() except KeyError: diff --git a/dojo/management/commands/system_settings.py b/dojo/management/commands/system_settings.py index 6731154451..eace6a7e2b 100644 --- a/dojo/management/commands/system_settings.py +++ b/dojo/management/commands/system_settings.py @@ -4,7 +4,7 @@ class Command(BaseCommand): - help = 'Updates product grade calculation' + help = "Updates product grade calculation" def handle(self, *args, **options): code = """def grade_product(crit, high, med, low): diff --git a/dojo/management/commands/test_celery_decorator.py b/dojo/management/commands/test_celery_decorator.py index 6fe77e0234..1592aa1c79 100644 --- a/dojo/management/commands/test_celery_decorator.py +++ b/dojo/management/commands/test_celery_decorator.py @@ -58,7 +58,7 @@ def wrapper(*args, **kwargs): func(*args, **kwargs) print("outside after") - if getattr(func, 'delay', None): + if getattr(func, "delay", None): wrapper.delay = my_decorator_outside(func.delay) return wrapper @@ -77,7 +77,7 @@ def wrapper(*args, **kwargs): @app.task @my_decorator_inside def my_test_task(new_finding, *args, **kwargs): - print('oh la la what a nice task') + print("oh la la what a nice task") # example working with multiple parameters... 
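The getattr(func, "delay", None) guard in the hunk above is the heart of this test module: when the decorated function is already a Celery task, its .delay entry point must be wrapped too, or the decoration is lost on asynchronous dispatch. A standalone sketch of the pattern, runnable without Celery (my_decorator_outside stands in for any wrapping decorator):

import functools

def my_decorator_outside(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("outside before")
        result = func(*args, **kwargs)
        print("outside after")
        return result
    # A Celery task exposes .delay; wrap it as well so the decoration
    # also applies when the task is queued instead of called directly.
    if getattr(func, "delay", None):
        wrapper.delay = my_decorator_outside(func.delay)
    return wrapper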
@@ -88,6 +88,6 @@ def my_test_task(new_finding, *args, **kwargs): @dojo_model_from_id(model=Notes, parameter=1) @dojo_model_from_id def test_valentijn_task(new_finding, note, **kwargs): - logger.debug('test_valentijn:') + logger.debug("test_valentijn:") logger.debug(new_finding) logger.debug(note) diff --git a/dojo/metrics/urls.py b/dojo/metrics/urls.py index 7b2683cf6f..a121403cc1 100644 --- a/dojo/metrics/urls.py +++ b/dojo/metrics/urls.py @@ -4,24 +4,24 @@ urlpatterns = [ # metrics - re_path(r'^metrics$', views.metrics, {'mtype': 'All'}, - name='metrics'), - re_path(r'^critical_product_metrics$', views.critical_product_metrics, {'mtype': 'All'}, - name='critical_product_metrics'), - re_path(r'^metrics/all$', views.metrics, {'mtype': 'All'}, - name='metrics_all'), - re_path(r'^metrics/product/type$', views.metrics, {'mtype': 'All'}, - name='metrics_product_type'), - re_path(r'^metrics/simple$', views.simple_metrics, - name='simple_metrics'), - re_path(r'^metrics/product/type/(?P<mtype>\d+)$', - views.metrics, name='product_type_metrics'), - re_path(r'^metrics/product/type/counts$', - views.product_type_counts, name='product_type_counts'), - re_path(r'^metrics/product/tag/counts$', - views.product_tag_counts, name='product_tag_counts'), - re_path(r'^metrics/engineer$', views.engineer_metrics, - name='engineer_metrics'), - re_path(r'^metrics/engineer/(?P<eid>\d+)$', views.view_engineer, - name='view_engineer'), + re_path(r"^metrics$", views.metrics, {"mtype": "All"}, + name="metrics"), + re_path(r"^critical_product_metrics$", views.critical_product_metrics, {"mtype": "All"}, + name="critical_product_metrics"), + re_path(r"^metrics/all$", views.metrics, {"mtype": "All"}, + name="metrics_all"), + re_path(r"^metrics/product/type$", views.metrics, {"mtype": "All"}, + name="metrics_product_type"), + re_path(r"^metrics/simple$", views.simple_metrics, + name="simple_metrics"), + re_path(r"^metrics/product/type/(?P<mtype>\d+)$", + views.metrics, name="product_type_metrics"), + re_path(r"^metrics/product/type/counts$", + views.product_type_counts, name="product_type_counts"), + re_path(r"^metrics/product/tag/counts$", + views.product_tag_counts, name="product_tag_counts"), + re_path(r"^metrics/engineer$", views.engineer_metrics, + name="engineer_metrics"), + re_path(r"^metrics/engineer/(?P<eid>\d+)$", views.view_engineer, + name="view_engineer"), ] diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py index 6de04ee72b..ef283593d4 100644 --- a/dojo/metrics/utils.py +++ b/dojo/metrics/utils.py @@ -43,14 +43,14 @@ def finding_queries( Permissions.Finding_View, user=request.user, ).select_related( - 'reporter', - 'test', - 'test__engagement__product', - 'test__engagement__product__prod_type', + "reporter", + "test", + "test__engagement__product", + "test__engagement__product__prod_type", ).prefetch_related( - 'risk_acceptance_set', - 'test__engagement__risk_acceptance', - 'test__test_type', + "risk_acceptance_set", + "test__engagement__risk_acceptance", + "test__test_type", ) filter_string_matching = get_system_setting("filter_string_matching", False) @@ -65,8 +65,8 @@ def finding_queries( messages.add_message( request, messages.ERROR, - _('All objects have been filtered away. Displaying all objects'), - extra_tags='alert-danger') + _("All objects have been filtered away. 
Displaying all objects"), + extra_tags="alert-danger") start_date, end_date = get_date_range(findings_qs) @@ -85,7 +85,7 @@ def finding_queries( active_findings = active_findings.filter(test__engagement__product__prod_type__in=prod_type) # Get the severity counts of risk accepted findings - accepted_findings_counts = severity_count(accepted_findings, 'aggregate', 'severity') + accepted_findings_counts = severity_count(accepted_findings, "aggregate", "severity") weeks_between, months_between = period_deltas(start_date, end_date) @@ -106,27 +106,27 @@ def finding_queries( engagement__test__finding__duplicate=False, engagement__test__finding__out_of_scope=False, engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=('Critical', 'High', 'Medium', 'Low'), + engagement__test__finding__severity__in=("Critical", "High", "Medium", "Low"), prod_type__in=prod_type) top_ten = severity_count( - top_ten, 'annotate', 'engagement__test__finding__severity', + top_ten, "annotate", "engagement__test__finding__severity", ).order_by( - '-critical', '-high', '-medium', '-low', + "-critical", "-high", "-medium", "-low", )[:10] return { - 'all': findings_query, - 'closed': findings_closed, - 'accepted': accepted_findings, - 'accepted_count': accepted_findings_counts, - 'top_ten': top_ten, - 'monthly_counts': monthly_counts, - 'weekly_counts': weekly_counts, - 'weeks_between': weeks_between, - 'start_date': start_date, - 'end_date': end_date, - 'form': form, + "all": findings_query, + "closed": findings_closed, + "accepted": accepted_findings, + "accepted_count": accepted_findings_counts, + "top_ten": top_ten, + "monthly_counts": monthly_counts, + "weekly_counts": weekly_counts, + "weeks_between": weeks_between, + "start_date": start_date, + "end_date": end_date, + "form": form, } @@ -136,13 +136,13 @@ def endpoint_queries( ) -> dict[str, Any]: endpoints_query = Endpoint_Status.objects.filter( mitigated=False, - finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info'), + finding__severity__in=("Critical", "High", "Medium", "Low", "Info"), ).prefetch_related( - 'finding__test__engagement__product', - 'finding__test__engagement__product__prod_type', - 'finding__test__engagement__risk_acceptance', - 'finding__risk_acceptance_set', - 'finding__reporter', + "finding__test__engagement__product", + "finding__test__engagement__product__prod_type", + "finding__test__engagement__risk_acceptance", + "finding__risk_acceptance_set", + "finding__reporter", ) endpoints_query = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_query, request.user) @@ -158,8 +158,8 @@ def endpoint_queries( messages.add_message( request, messages.ERROR, - _('All objects have been filtered away. Displaying all objects'), - extra_tags='alert-danger') + _("All objects have been filtered away. 
Displaying all objects"), + extra_tags="alert-danger") start_date, end_date = get_date_range(endpoints_qs) @@ -168,7 +168,7 @@ def endpoint_queries( mitigated_time__range=[start_date, end_date], finding__test__engagement__product__prod_type__in=prod_type, ).prefetch_related( - 'finding__test__engagement__product', + "finding__test__engagement__product", ) # capture the accepted findings in period accepted_endpoints = Endpoint_Status.objects.filter( @@ -176,25 +176,25 @@ def endpoint_queries( risk_accepted=True, finding__test__engagement__product__prod_type__in=prod_type, ).prefetch_related( - 'finding__test__engagement__product', + "finding__test__engagement__product", ) else: endpoints_closed = Endpoint_Status.objects.filter( mitigated_time__range=[start_date, end_date], ).prefetch_related( - 'finding__test__engagement__product', + "finding__test__engagement__product", ) # capture the accepted findings in period accepted_endpoints = Endpoint_Status.objects.filter( date__range=[start_date, end_date], risk_accepted=True, ).prefetch_related( - 'finding__test__engagement__product', + "finding__test__engagement__product", ) endpoints_closed = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_closed, request.user) accepted_endpoints = get_authorized_endpoint_status(Permissions.Endpoint_View, accepted_endpoints, request.user) - accepted_endpoints_counts = severity_count(accepted_endpoints, 'aggregate', 'finding__severity') + accepted_endpoints_counts = severity_count(accepted_endpoints, "aggregate", "finding__severity") weeks_between, months_between = period_deltas(start_date, end_date) @@ -214,32 +214,32 @@ def endpoint_queries( engagement__test__finding__status_finding__false_positive=False, engagement__test__finding__status_finding__out_of_scope=False, engagement__test__finding__status_finding__risk_accepted=False, - engagement__test__finding__severity__in=('Critical', 'High', 'Medium', 'Low'), + engagement__test__finding__severity__in=("Critical", "High", "Medium", "Low"), prod_type__in=prod_type) top_ten = severity_count( - top_ten, 'annotate', 'engagement__test__finding__severity', + top_ten, "annotate", "engagement__test__finding__severity", ).order_by( - '-critical', '-high', '-medium', '-low', + "-critical", "-high", "-medium", "-low", )[:10] return { - 'all': endpoints, - 'closed': endpoints_closed, - 'accepted': accepted_endpoints, - 'accepted_count': accepted_endpoints_counts, - 'top_ten': top_ten, - 'monthly_counts': monthly_counts, - 'weekly_counts': weekly_counts, - 'weeks_between': weeks_between, - 'start_date': start_date, - 'end_date': end_date, - 'form': form, + "all": endpoints, + "closed": endpoints_closed, + "accepted": accepted_endpoints, + "accepted_count": accepted_endpoints_counts, + "top_ten": top_ten, + "monthly_counts": monthly_counts, + "weekly_counts": weekly_counts, + "weeks_between": weeks_between, + "start_date": start_date, + "end_date": end_date, + "form": form, } # For type-hinting methods that take querysets we can perform metrics over -MetricsQuerySet = TypeVar('MetricsQuerySet', QuerySet[Finding], QuerySet[Endpoint_Status]) +MetricsQuerySet = TypeVar("MetricsQuerySet", QuerySet[Finding], QuerySet[Endpoint_Status]) class _MetricsPeriodEntry(NamedTuple): @@ -255,8 +255,8 @@ class MetricsPeriod(_MetricsPeriodEntry, Enum): """ Enum for the two metrics periods supported: by week and month """ - WEEK = ('weeks', TruncWeek) - MONTH = ('months', TruncMonth) + WEEK = ("weeks", TruncWeek) + MONTH = ("months", TruncMonth) class 
_MetricsTypeEntry(NamedTuple): @@ -272,8 +272,8 @@ class MetricsType(_MetricsTypeEntry, Enum): """ Enum for the two metrics types supported: by Findings and by Endpoints (Endpoint_Status) """ - FINDING = ('severity', 'is_mitigated') - ENDPOINT = ('finding__severity', 'mitigated') + FINDING = ("severity", "is_mitigated") + ENDPOINT = ("finding__severity", "mitigated") def query_counts( @@ -300,9 +300,9 @@ def _aggregate_data(qs: MetricsQuerySet, include_closed: bool = False) -> list[d agg_qs = partial(aggregate_counts_by_period, period=period, metrics_type=metrics_type) return chart_data(agg_qs(qs, include_closed=include_closed), include_closed=include_closed) return { - 'opened_per_period': _aggregate_data(open_qs, True), - 'active_per_period': _aggregate_data(active_qs), - 'accepted_per_period': _aggregate_data(accepted_qs), + "opened_per_period": _aggregate_data(open_qs, True), + "active_per_period": _aggregate_data(active_qs), + "accepted_per_period": _aggregate_data(accepted_qs), } return _aggregates_for_period @@ -320,10 +320,10 @@ def get_date_range( try: tz = timezone.get_current_timezone() - start_date = qs.earliest('date').date + start_date = qs.earliest("date").date start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=tz) - end_date = qs.latest('date').date + end_date = qs.latest("date").date end_date = datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz) except: start_date = end_date = timezone.now() @@ -344,14 +344,14 @@ def severity_count( :param expression: The lookup expression for severity, relative to the queryset model type :return: A queryset containing aggregated counts of severities """ - total_expression = expression + '__in' + total_expression = expression + "__in" return getattr(queryset, method)( - total=Count('id', filter=Q(**{total_expression: ('Critical', 'High', 'Medium', 'Low', 'Info')})), - critical=Count('id', filter=Q(**{expression: 'Critical'})), - high=Count('id', filter=Q(**{expression: 'High'})), - medium=Count('id', filter=Q(**{expression: 'Medium'})), - low=Count('id', filter=Q(**{expression: 'Low'})), - info=Count('id', filter=Q(**{expression: 'Info'})), + total=Count("id", filter=Q(**{total_expression: ("Critical", "High", "Medium", "Low", "Info")})), + critical=Count("id", filter=Q(**{expression: "Critical"})), + high=Count("id", filter=Q(**{expression: "High"})), + medium=Count("id", filter=Q(**{expression: "Medium"})), + low=Count("id", filter=Q(**{expression: "Low"})), + info=Count("id", filter=Q(**{expression: "Info"})), ) @@ -365,20 +365,20 @@ def identify_view( :return: A string, either 'Endpoint' or 'Finding,' that represents the requested metrics view """ get_data = request.GET - view = get_data.get('type', None) + view = get_data.get("type", None) if view: return view - finding_severity = get_data.get('finding__severity', None) - false_positive = get_data.get('false_positive', None) + finding_severity = get_data.get("finding__severity", None) + false_positive = get_data.get("false_positive", None) - referer = request.META.get('HTTP_REFERER', None) - endpoint_in_referer = referer and referer.find('type=Endpoint') > -1 + referer = request.META.get("HTTP_REFERER", None) + endpoint_in_referer = referer and referer.find("type=Endpoint") > -1 if finding_severity or false_positive or endpoint_in_referer: - return 'Endpoint' + return "Endpoint" - return 'Finding' + return "Finding" def js_epoch( @@ -427,20 +427,20 @@ def get_charting_data( # Arrange all our data by epoch date for easy lookup in the loop 
below. # At the same time, add the epoch date to each entry as the charts will rely on that. - by_date = {e: {'epoch': e, **q} for q in qs if (e := js_epoch(q['grouped_date'])) is not None} + by_date = {e: {"epoch": e, **q} for q in qs if (e := js_epoch(q["grouped_date"])) is not None} # Iterate over our period of time, adding zero-element data entries for dates not represented for x in range(-1, period_count): cur_date = start_date + relativedelta(**{period.datetime_name: x}) if (e := js_epoch(cur_date)) not in by_date: by_date[e] = { - 'epoch': e, 'grouped_date': cur_date.date(), - 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0} + "epoch": e, "grouped_date": cur_date.date(), + "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0} if include_closed: - by_date[e]['closed'] = 0 + by_date[e]["closed"] = 0 # Return, sorting by date - return sorted(by_date.values(), key=lambda m: m['grouped_date']) + return sorted(by_date.values(), key=lambda m: m["grouped_date"]) def period_deltas(start_date, end_date): @@ -479,12 +479,12 @@ def aggregate_counts_by_period( :return: A queryset with aggregate severity counts grouped by period """ - desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total') + desired_values = ("grouped_date", "critical", "high", "medium", "low", "info", "total") severities_by_period = severity_count( # Group by desired period - qs.annotate(grouped_date=period.db_method('date')).values('grouped_date'), - 'annotate', + qs.annotate(grouped_date=period.db_method("date")).values("grouped_date"), + "annotate", metrics_type.severity_lookup, ) if include_closed: @@ -495,7 +495,7 @@ def aggregate_counts_by_period( output_field=IntegerField(), default=0), ), ) - desired_values += ('closed',) + desired_values += ("closed",) return severities_by_period.values(*desired_values) @@ -509,8 +509,8 @@ def findings_by_product( :param findings: A queryset of Findings :return: A queryset of Findings grouped by product (name/ID) """ - return findings.values(product_name=F('test__engagement__product__name'), - product_id=F('test__engagement__product__id')) + return findings.values(product_name=F("test__engagement__product__name"), + product_id=F("test__engagement__product__id")) def get_in_period_details( @@ -523,20 +523,20 @@ def get_in_period_details( :return: A tuple of (a queryset of severity aggregates, a queryset of severity aggregates by product, a dict of Findings by age) """ - in_period_counts = severity_count(findings, 'aggregate', 'severity') + in_period_counts = severity_count(findings, "aggregate", "severity") in_period_details = severity_count( - findings_by_product(findings), 'annotate', 'severity', - ).order_by('product_name') + findings_by_product(findings), "annotate", "severity", + ).order_by("product_name") # Approach to age determination is db-engine dependent - if 'postgresql' in connection.settings_dict['ENGINE']: - age_detail = findings.annotate(age=ExtractDay(Coalesce('mitigated', Now()) - F('date'))) - elif 'mysql' in connection.settings_dict['ENGINE']: + if "postgresql" in connection.settings_dict["ENGINE"]: + age_detail = findings.annotate(age=ExtractDay(Coalesce("mitigated", Now()) - F("date"))) + elif "mysql" in connection.settings_dict["ENGINE"]: # MySQL doesn't support durations natively and using an expression with subtraction yields unwanted results, # so datediff() it is. 
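The PostgreSQL branch above and the MySQL branch that follows compute the same value: the finding's age in days, from its date to its mitigation date (or now). Condensed into one helper for clarity, this is a sketch only; annotate_age is a hypothetical name and the ORM calls mirror the surrounding diff:

from django.db import connection
from django.db.models import F
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce, ExtractDay, Now

def annotate_age(findings):
    engine = connection.settings_dict["ENGINE"]
    if "postgresql" in engine:
        # Subtracting dates yields an interval; ExtractDay pulls out the day count.
        return findings.annotate(age=ExtractDay(Coalesce("mitigated", Now()) - F("date")))
    if "mysql" in engine:
        # MySQL needs DATEDIFF; intervals from plain subtraction misbehave.
        table = findings.model._meta.db_table
        return findings.annotate(
            age=RawSQL(f"DATEDIFF(COALESCE({table}.mitigated, CURRENT_TIMESTAMP), {table}.date)", []),
        )
    raise ValueError  # other engines are unsupported here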
finding_table = Finding.objects.model._meta.db_table age_detail = findings.annotate( - age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', []), + age=RawSQL(f"DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)", []), ) else: raise ValueError @@ -561,8 +561,8 @@ def get_accepted_in_period_details( :return: A queryset of severity aggregates for Findings grouped by product (name/ID) """ return severity_count( - findings_by_product(findings), 'annotate', 'severity', - ).order_by('product_name') + findings_by_product(findings), "annotate", "severity", + ).order_by("product_name") def get_closed_in_period_details( @@ -576,10 +576,10 @@ def get_closed_in_period_details( product) """ return ( - severity_count(findings, 'aggregate', 'severity'), + severity_count(findings, "aggregate", "severity"), severity_count( - findings_by_product(findings), 'annotate', 'severity', - ).order_by('product_name'), + findings_by_product(findings), "annotate", "severity", + ).order_by("product_name"), ) diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py index a15d9979fc..c29761f585 100644 --- a/dojo/metrics/views.py +++ b/dojo/metrics/views.py @@ -60,94 +60,94 @@ def critical_product_metrics(request, mtype): - template = 'dojo/metrics.html' - page_name = _('Critical Product Metrics') + template = "dojo/metrics.html" + page_name = _("Critical Product Metrics") critical_products = get_authorized_product_types(Permissions.Product_Type_View) critical_products = critical_products.filter(critical_product=True) add_breadcrumb(title=page_name, top_level=not len(request.GET), request=request) return render(request, template, { - 'name': page_name, - 'critical_prods': critical_products, - 'url_prefix': get_system_setting('url_prefix'), + "name": page_name, + "critical_prods": critical_products, + "url_prefix": get_system_setting("url_prefix"), }) # @cache_page(60 * 5) # cache for 5 minutes @vary_on_cookie def metrics(request, mtype): - template = 'dojo/metrics.html' + template = "dojo/metrics.html" show_pt_filter = True view = identify_view(request) - page_name = _('Metrics') + page_name = _("Metrics") - if mtype != 'All': + if mtype != "All": pt = Product_Type.objects.filter(id=mtype) request.GET._mutable = True - request.GET.appendlist('test__engagement__product__prod_type', mtype) + request.GET.appendlist("test__engagement__product__prod_type", mtype) request.GET._mutable = False show_pt_filter = False - page_name = _('%(product_type)s Metrics') % {'product_type': mtype} + page_name = _("%(product_type)s Metrics") % {"product_type": mtype} prod_type = pt - elif 'test__engagement__product__prod_type' in request.GET: - prod_type = Product_Type.objects.filter(id__in=request.GET.getlist('test__engagement__product__prod_type', [])) + elif "test__engagement__product__prod_type" in request.GET: + prod_type = Product_Type.objects.filter(id__in=request.GET.getlist("test__engagement__product__prod_type", [])) else: prod_type = get_authorized_product_types(Permissions.Product_Type_View) # legacy code calls has 'prod_type' as 'related_name' for product.... 
so weird looking prefetch - prod_type = prod_type.prefetch_related('prod_type') + prod_type = prod_type.prefetch_related("prod_type") filters = {} - if view == 'Finding': - page_name = _('Product Type Metrics by Findings') + if view == "Finding": + page_name = _("Product Type Metrics by Findings") filters = finding_queries(prod_type, request) - elif view == 'Endpoint': - page_name = _('Product Type Metrics by Affected Endpoints') + elif view == "Endpoint": + page_name = _("Product Type Metrics by Affected Endpoints") filters = endpoint_queries(prod_type, request) - all_findings = findings_queryset(queryset_check(filters['all'])) + all_findings = findings_queryset(queryset_check(filters["all"])) in_period_counts, in_period_details, age_detail = get_in_period_details(all_findings) accepted_in_period_details = get_accepted_in_period_details( - findings_queryset(filters['accepted']), + findings_queryset(filters["accepted"]), ) closed_in_period_counts, closed_in_period_details = get_closed_in_period_details( - findings_queryset(filters['closed']), + findings_queryset(filters["closed"]), ) punchcard = [] ticks = [] - if 'view' in request.GET and 'dashboard' == request.GET['view']: - punchcard, ticks = get_punchcard_data(all_findings, filters['start_date'], filters['weeks_between'], view) - page_name = _('%(team_name)s Metrics') % {'team_name': get_system_setting('team_name')} - template = 'dojo/dashboard-metrics.html' + if "view" in request.GET and "dashboard" == request.GET["view"]: + punchcard, ticks = get_punchcard_data(all_findings, filters["start_date"], filters["weeks_between"], view) + page_name = _("%(team_name)s Metrics") % {"team_name": get_system_setting("team_name")} + template = "dojo/dashboard-metrics.html" add_breadcrumb(title=page_name, top_level=not len(request.GET), request=request) return render(request, template, { - 'name': page_name, - 'start_date': filters['start_date'], - 'end_date': filters['end_date'], - 'findings': all_findings, - 'max_findings_details': 50, - 'opened_per_month': filters['monthly_counts']['opened_per_period'], - 'active_per_month': filters['monthly_counts']['active_per_period'], - 'opened_per_week': filters['weekly_counts']['opened_per_period'], - 'accepted_per_month': filters['monthly_counts']['accepted_per_period'], - 'accepted_per_week': filters['weekly_counts']['accepted_per_period'], - 'top_ten_products': filters['top_ten'], - 'age_detail': age_detail, - 'in_period_counts': in_period_counts, - 'in_period_details': in_period_details, - 'accepted_in_period_counts': filters['accepted_count'], - 'accepted_in_period_details': accepted_in_period_details, - 'closed_in_period_counts': closed_in_period_counts, - 'closed_in_period_details': closed_in_period_details, - 'punchcard': punchcard, - 'ticks': ticks, - 'form': filters.get('form', None), - 'show_pt_filter': show_pt_filter, + "name": page_name, + "start_date": filters["start_date"], + "end_date": filters["end_date"], + "findings": all_findings, + "max_findings_details": 50, + "opened_per_month": filters["monthly_counts"]["opened_per_period"], + "active_per_month": filters["monthly_counts"]["active_per_period"], + "opened_per_week": filters["weekly_counts"]["opened_per_period"], + "accepted_per_month": filters["monthly_counts"]["accepted_per_period"], + "accepted_per_week": filters["weekly_counts"]["accepted_per_period"], + "top_ten_products": filters["top_ten"], + "age_detail": age_detail, + "in_period_counts": in_period_counts, + "in_period_details": in_period_details, + "accepted_in_period_counts": 
filters["accepted_count"], + "accepted_in_period_details": accepted_in_period_details, + "closed_in_period_counts": closed_in_period_counts, + "closed_in_period_details": closed_in_period_details, + "punchcard": punchcard, + "ticks": ticks, + "form": filters.get("form", None), + "show_pt_filter": show_pt_filter, }) @@ -161,16 +161,16 @@ def metrics(request, mtype): @cache_page(60 * 5) # cache for 5 minutes @vary_on_cookie def simple_metrics(request): - page_name = _('Simple Metrics') + page_name = _("Simple Metrics") now = timezone.now() - if request.method == 'POST': + if request.method == "POST": form = SimpleMetricsForm(request.POST) if form.is_valid(): - now = form.cleaned_data['date'] - form = SimpleMetricsForm({'date': now}) + now = form.cleaned_data["date"] + form = SimpleMetricsForm({"date": now}) else: - form = SimpleMetricsForm({'date': now}) + form = SimpleMetricsForm({"date": now}) findings_by_product_type = collections.OrderedDict() @@ -178,7 +178,7 @@ def simple_metrics(request): # count the S0, S1, S2 and S3 # legacy code calls has 'prod_type' as 'related_name' for product.... so weird looking prefetch product_types = get_authorized_product_types(Permissions.Product_Type_View) - product_types = product_types.prefetch_related('prod_type') + product_types = product_types.prefetch_related("prod_type") for pt in product_types: total_critical = [] total_high = [] @@ -201,11 +201,11 @@ def simple_metrics(request): for f in total: if f.severity == "Critical": total_critical.append(f) - elif f.severity == 'High': + elif f.severity == "High": total_high.append(f) - elif f.severity == 'Medium': + elif f.severity == "Medium": total_medium.append(f) - elif f.severity == 'Low': + elif f.severity == "Low": total_low.append(f) else: total_info.append(f) @@ -216,26 +216,26 @@ def simple_metrics(request): if f.date.year == now.year and f.date.month == now.month: total_opened.append(f) - findings_broken_out['Total'] = len(total) - findings_broken_out['S0'] = len(total_critical) - findings_broken_out['S1'] = len(total_high) - findings_broken_out['S2'] = len(total_medium) - findings_broken_out['S3'] = len(total_low) - findings_broken_out['S4'] = len(total_info) + findings_broken_out["Total"] = len(total) + findings_broken_out["S0"] = len(total_critical) + findings_broken_out["S1"] = len(total_high) + findings_broken_out["S2"] = len(total_medium) + findings_broken_out["S3"] = len(total_low) + findings_broken_out["S4"] = len(total_info) - findings_broken_out['Opened'] = len(total_opened) - findings_broken_out['Closed'] = len(total_closed) + findings_broken_out["Opened"] = len(total_opened) + findings_broken_out["Closed"] = len(total_closed) findings_by_product_type[pt] = findings_broken_out add_breadcrumb(title=page_name, top_level=True, request=request) - return render(request, 'dojo/simple_metrics.html', { - 'findings': findings_by_product_type, - 'name': page_name, - 'metric': True, - 'user': request.user, - 'form': form, + return render(request, "dojo/simple_metrics.html", { + "findings": findings_by_product_type, + "name": page_name, + "metric": True, + "user": request.user, + "form": form, }) @@ -258,13 +258,13 @@ def product_type_counts(request): start_date = first_of_month end_date = end_of_month - if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET and 'product_type' in request.GET: + if request.method == "GET" and "month" in request.GET and "year" in request.GET and "product_type" in request.GET: form = ProductTypeCountsForm(request.GET) if form.is_valid(): - 
pt = form.cleaned_data['product_type'] + pt = form.cleaned_data["product_type"] user_has_permission_or_403(request.user, pt, Permissions.Product_Type_View) - month = int(form.cleaned_data['month']) - year = int(form.cleaned_data['year']) + month = int(form.cleaned_data["month"]) + year = int(form.cleaned_data["year"]) first_of_month = first_of_month.replace(month=month, year=year) month_requested = datetime(year, month, 1) @@ -292,17 +292,17 @@ def product_type_counts(request): closed_in_period = Finding.objects.filter(mitigated__date__range=[start_date, end_date], test__engagement__product__prod_type=pt, - severity__in=('Critical', 'High', 'Medium', 'Low')).values( - 'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity') + severity__in=("Critical", "High", "Medium", "Low")).values( + "numerical_severity").annotate(Count("numerical_severity")).order_by("numerical_severity") total_closed_in_period = Finding.objects.filter(mitigated__date__range=[start_date, end_date], test__engagement__product__prod_type=pt, severity__in=( - 'Critical', 'High', 'Medium', 'Low')).aggregate( + "Critical", "High", "Medium", "Low")).aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'] + output_field=IntegerField())))["total"] overall_in_pt = Finding.objects.filter(date__lt=end_date, verified=True, @@ -311,8 +311,8 @@ def product_type_counts(request): out_of_scope=False, mitigated__isnull=True, test__engagement__product__prod_type=pt, - severity__in=('Critical', 'High', 'Medium', 'Low')).values( - 'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity') + severity__in=("Critical", "High", "Medium", "Low")).values( + "numerical_severity").annotate(Count("numerical_severity")).order_by("numerical_severity") total_overall_in_pt = Finding.objects.filter(date__lte=end_date, verified=True, @@ -321,11 +321,11 @@ def product_type_counts(request): out_of_scope=False, mitigated__isnull=True, test__engagement__product__prod_type=pt, - severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate( + severity__in=("Critical", "High", "Medium", "Low")).aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'] + output_field=IntegerField())))["total"] all_current_in_pt = Finding.objects.filter(date__lte=end_date, verified=True, @@ -335,12 +335,12 @@ def product_type_counts(request): mitigated__isnull=True, test__engagement__product__prod_type=pt, severity__in=( - 'Critical', 'High', 'Medium', 'Low')).prefetch_related( - 'test__engagement__product', - 'test__engagement__product__prod_type', - 'test__engagement__risk_acceptance', - 'reporter').order_by( - 'numerical_severity') + "Critical", "High", "Medium", "Low")).prefetch_related( + "test__engagement__product", + "test__engagement__product__prod_type", + "test__engagement__risk_acceptance", + "reporter").order_by( + "numerical_severity") top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date, engagement__test__finding__verified=True, @@ -349,45 +349,45 @@ def product_type_counts(request): engagement__test__finding__out_of_scope=False, engagement__test__finding__mitigated__isnull=True, engagement__test__finding__severity__in=( - 'Critical', 'High', 'Medium', 'Low'), + "Critical", 
"High", "Medium", "Low"), prod_type=pt) - top_ten = severity_count(top_ten, 'annotate', 'engagement__test__finding__severity').order_by('-critical', '-high', '-medium', '-low')[:10] + top_ten = severity_count(top_ten, "annotate", "engagement__test__finding__severity").order_by("-critical", "-high", "-medium", "-low")[:10] - cip = {'S0': 0, - 'S1': 0, - 'S2': 0, - 'S3': 0, - 'Total': total_closed_in_period} + cip = {"S0": 0, + "S1": 0, + "S2": 0, + "S3": 0, + "Total": total_closed_in_period} - aip = {'S0': 0, - 'S1': 0, - 'S2': 0, - 'S3': 0, - 'Total': total_overall_in_pt} + aip = {"S0": 0, + "S1": 0, + "S2": 0, + "S3": 0, + "Total": total_overall_in_pt} for o in closed_in_period: - cip[o['numerical_severity']] = o['numerical_severity__count'] + cip[o["numerical_severity"]] = o["numerical_severity__count"] for o in overall_in_pt: - aip[o['numerical_severity']] = o['numerical_severity__count'] + aip[o["numerical_severity"]] = o["numerical_severity__count"] else: messages.add_message(request, messages.ERROR, _("Please choose month and year and the Product Type."), - extra_tags='alert-danger') + extra_tags="alert-danger") add_breadcrumb(title=_("Bi-Weekly Metrics"), top_level=True, request=request) return render(request, - 'dojo/pt_counts.html', - {'form': form, - 'start_date': start_date, - 'end_date': end_date, - 'opened_in_period': oip, - 'trending_opened': opened_in_period_list, - 'closed_in_period': cip, - 'overall_in_pt': aip, - 'all_current_in_pt': all_current_in_pt, - 'top_ten': top_ten, - 'pt': pt}, + "dojo/pt_counts.html", + {"form": form, + "start_date": start_date, + "end_date": end_date, + "opened_in_period": oip, + "trending_opened": opened_in_period_list, + "closed_in_period": cip, + "overall_in_pt": aip, + "all_current_in_pt": all_current_in_pt, + "top_ten": top_ten, + "pt": pt}, ) @@ -408,14 +408,14 @@ def product_tag_counts(request): start_date = first_of_month end_date = end_of_month - if request.method == 'GET' and 'month' in request.GET and 'year' in request.GET and 'product_tag' in request.GET: + if request.method == "GET" and "month" in request.GET and "year" in request.GET and "product_tag" in request.GET: form = ProductTagCountsForm(request.GET) if form.is_valid(): prods = get_authorized_products(Permissions.Product_View) - pt = form.cleaned_data['product_tag'] - month = int(form.cleaned_data['month']) - year = int(form.cleaned_data['year']) + pt = form.cleaned_data["product_tag"] + month = int(form.cleaned_data["month"]) + year = int(form.cleaned_data["year"]) first_of_month = first_of_month.replace(month=month, year=year) month_requested = datetime(year, month, 1) @@ -446,18 +446,18 @@ def product_tag_counts(request): closed_in_period = Finding.objects.filter(mitigated__date__range=[start_date, end_date], test__engagement__product__tags__name=pt, test__engagement__product__in=prods, - severity__in=('Critical', 'High', 'Medium', 'Low')).values( - 'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity') + severity__in=("Critical", "High", "Medium", "Low")).values( + "numerical_severity").annotate(Count("numerical_severity")).order_by("numerical_severity") total_closed_in_period = Finding.objects.filter(mitigated__date__range=[start_date, end_date], test__engagement__product__tags__name=pt, test__engagement__product__in=prods, severity__in=( - 'Critical', 'High', 'Medium', 'Low')).aggregate( + "Critical", "High", "Medium", "Low")).aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + 
Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'] + output_field=IntegerField())))["total"] overall_in_pt = Finding.objects.filter(date__lt=end_date, verified=True, @@ -467,8 +467,8 @@ def product_tag_counts(request): mitigated__isnull=True, test__engagement__product__tags__name=pt, test__engagement__product__in=prods, - severity__in=('Critical', 'High', 'Medium', 'Low')).values( - 'numerical_severity').annotate(Count('numerical_severity')).order_by('numerical_severity') + severity__in=("Critical", "High", "Medium", "Low")).values( + "numerical_severity").annotate(Count("numerical_severity")).order_by("numerical_severity") total_overall_in_pt = Finding.objects.filter(date__lte=end_date, verified=True, @@ -478,11 +478,11 @@ def product_tag_counts(request): mitigated__isnull=True, test__engagement__product__tags__name=pt, test__engagement__product__in=prods, - severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate( + severity__in=("Critical", "High", "Medium", "Low")).aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'] + output_field=IntegerField())))["total"] all_current_in_pt = Finding.objects.filter(date__lte=end_date, verified=True, @@ -493,12 +493,12 @@ def product_tag_counts(request): test__engagement__product__tags__name=pt, test__engagement__product__in=prods, severity__in=( - 'Critical', 'High', 'Medium', 'Low')).prefetch_related( - 'test__engagement__product', - 'test__engagement__product__prod_type', - 'test__engagement__risk_acceptance', - 'reporter').order_by( - 'numerical_severity') + "Critical", "High", "Medium", "Low")).prefetch_related( + "test__engagement__product", + "test__engagement__product__prod_type", + "test__engagement__risk_acceptance", + "reporter").order_by( + "numerical_severity") top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date, engagement__test__finding__verified=True, @@ -507,54 +507,54 @@ def product_tag_counts(request): engagement__test__finding__out_of_scope=False, engagement__test__finding__mitigated__isnull=True, engagement__test__finding__severity__in=( - 'Critical', 'High', 'Medium', 'Low'), + "Critical", "High", "Medium", "Low"), tags__name=pt, engagement__product__in=prods) - top_ten = severity_count(top_ten, 'annotate', 'engagement__test__finding__severity').order_by('-critical', '-high', '-medium', '-low')[:10] + top_ten = severity_count(top_ten, "annotate", "engagement__test__finding__severity").order_by("-critical", "-high", "-medium", "-low")[:10] - cip = {'S0': 0, - 'S1': 0, - 'S2': 0, - 'S3': 0, - 'Total': total_closed_in_period} + cip = {"S0": 0, + "S1": 0, + "S2": 0, + "S3": 0, + "Total": total_closed_in_period} - aip = {'S0': 0, - 'S1': 0, - 'S2': 0, - 'S3': 0, - 'Total': total_overall_in_pt} + aip = {"S0": 0, + "S1": 0, + "S2": 0, + "S3": 0, + "Total": total_overall_in_pt} for o in closed_in_period: - cip[o['numerical_severity']] = o['numerical_severity__count'] + cip[o["numerical_severity"]] = o["numerical_severity__count"] for o in overall_in_pt: - aip[o['numerical_severity']] = o['numerical_severity__count'] + aip[o["numerical_severity"]] = o["numerical_severity__count"] else: messages.add_message(request, messages.ERROR, _("Please choose month and year and the Product Tag."), - extra_tags='alert-danger') + extra_tags="alert-danger") add_breadcrumb(title=_("Bi-Weekly Metrics"), 
top_level=True, request=request) return render(request, - 'dojo/pt_counts.html', - {'form': form, - 'start_date': start_date, - 'end_date': end_date, - 'opened_in_period': oip, - 'trending_opened': opened_in_period_list, - 'closed_in_period': cip, - 'overall_in_pt': aip, - 'all_current_in_pt': all_current_in_pt, - 'top_ten': top_ten, - 'pt': pt}, + "dojo/pt_counts.html", + {"form": form, + "start_date": start_date, + "end_date": end_date, + "opened_in_period": oip, + "trending_opened": opened_in_period_list, + "closed_in_period": cip, + "overall_in_pt": aip, + "all_current_in_pt": all_current_in_pt, + "top_ten": top_ten, + "pt": pt}, ) def engineer_metrics(request): # only superusers can select other users to view if request.user.is_superuser: - users = Dojo_User.objects.all().order_by('username') + users = Dojo_User.objects.all().order_by("username") else: - return HttpResponseRedirect(reverse('view_engineer', args=(request.user.id,))) + return HttpResponseRedirect(reverse("view_engineer", args=(request.user.id,))) users = UserFilter(request.GET, queryset=users) paged_users = get_page_items(request, users.qs, 25) @@ -562,8 +562,8 @@ def engineer_metrics(request): add_breadcrumb(title=_("Engineer Metrics"), top_level=True, request=request) return render(request, - 'dojo/engineer_metrics.html', - {'users': paged_users, + "dojo/engineer_metrics.html", + {"users": paged_users, "filtered": users, }) @@ -653,13 +653,13 @@ def view_engineer(request, eid): for finding in [finding for ra in Risk_Acceptance.objects.filter( created__range=[month_start, month_end], owner=user) for finding in ra.accepted_findings.all()]: - if finding.severity == 'Critical': + if finding.severity == "Critical": month[1] += 1 - if finding.severity == 'High': + if finding.severity == "High": month[2] += 1 - if finding.severity == 'Medium': + if finding.severity == "Medium": month[3] += 1 - if finding.severity == 'Low': + if finding.severity == "Low": month[4] += 1 month[5] = sum(month[1:]) @@ -671,7 +671,7 @@ def view_engineer(request, eid): # findings_this_period no longer fits the need for accepted findings # however will use its week finding output to use here for week in week_a_stuff: - wk_range = week[0].split('-') + wk_range = week[0].split("-") week_start = datetime.strptime( wk_range[0].strip() + " " + str(now.year), "%b %d %Y") week_end = datetime.strptime( @@ -680,13 +680,13 @@ def view_engineer(request, eid): for finding in [finding for ra in Risk_Acceptance.objects.filter( created__range=[week_start, week_end], owner=user) for finding in ra.accepted_findings.all()]: - if finding.severity == 'Critical': + if finding.severity == "Critical": week[1] += 1 - if finding.severity == 'High': + if finding.severity == "High": week[2] += 1 - if finding.severity == 'Medium': + if finding.severity == "Medium": week[3] += 1 - if finding.severity == 'Low': + if finding.severity == "Low": week[4] += 1 week[5] = sum(week[1:]) @@ -722,26 +722,26 @@ def view_engineer(request, eid): z_count += findings.filter( test=test, mitigated__isnull=True, - severity='Critical', + severity="Critical", ).count() o_count += findings.filter( test=test, mitigated__isnull=True, - severity='High', + severity="High", ).count() t_count += findings.filter( test=test, mitigated__isnull=True, - severity='Medium', + severity="Medium", ).count() h_count += findings.filter( test=test, mitigated__isnull=True, - severity='Low', + severity="Low", ).count() prod = Product.objects.get(id=product) all_findings_link = "{}".format( - 
reverse('product_open_findings', args=(prod.id,)), escape(prod.name)) + reverse("product_open_findings", args=(prod.id,)), escape(prod.name)) update.append([all_findings_link, z_count, o_count, t_count, h_count, z_count + o_count + t_count + h_count]) total_update = [] @@ -759,22 +759,22 @@ def view_engineer(request, eid): z_count += findings.filter( test=test, mitigated__isnull=True, - severity='Critical').count() + severity="Critical").count() o_count += findings.filter( test=test, mitigated__isnull=True, - severity='High').count() + severity="High").count() t_count += findings.filter( test=test, mitigated__isnull=True, - severity='Medium').count() + severity="Medium").count() h_count += findings.filter( test=test, mitigated__isnull=True, - severity='Low').count() + severity="Low").count() prod = Product.objects.get(id=product) all_findings_link = "{}".format( - reverse('product_open_findings', args=(prod.id,)), escape(prod.name)) + reverse("product_open_findings", args=(prod.id,)), escape(prod.name)) total_update.append([all_findings_link, z_count, o_count, t_count, h_count, z_count + o_count + t_count + h_count]) @@ -798,20 +798,20 @@ def view_engineer(request, eid): more_nine += 1 # Data for the monthly charts - chart_data = [['Date', 'S0', 'S1', 'S2', 'S3', 'Total']] + chart_data = [["Date", "S0", "S1", "S2", "S3", "Total"]] for thing in o_stuff: chart_data.insert(1, thing) - a_chart_data = [['Date', 'S0', 'S1', 'S2', 'S3', 'Total']] + a_chart_data = [["Date", "S0", "S1", "S2", "S3", "Total"]] for thing in a_stuff: a_chart_data.insert(1, thing) # Data for the weekly charts - week_chart_data = [['Date', 'S0', 'S1', 'S2', 'S3', 'Total']] + week_chart_data = [["Date", "S0", "S1", "S2", "S3", "Total"]] for thing in week_o_stuff: week_chart_data.insert(1, thing) - week_a_chart_data = [['Date', 'S0', 'S1', 'S2', 'S3', 'Total']] + week_a_chart_data = [["Date", "S0", "S1", "S2", "S3", "Total"]] for thing in week_a_stuff: week_a_chart_data.insert(1, thing) @@ -823,9 +823,9 @@ def view_engineer(request, eid): description = find.title life = date.today() - find.date life = life.days - status = 'Active' + status = "Active" if find.risk_accepted: - status = 'Accepted' + status = "Accepted" detail = [team, name, severity, description, life, status, find.reporter] details.append(detail) @@ -833,51 +833,51 @@ def view_engineer(request, eid): add_breadcrumb(title=f"{user.get_full_name()} Metrics", top_level=False, request=request) - return render(request, 'dojo/view_engineer.html', { - 'open_month': open_month, - 'a_month': accepted_month, - 'low_a_month': accepted_count["low"], - 'medium_a_month': accepted_count["med"], - 'high_a_month': accepted_count["high"], - 'critical_a_month': accepted_count["crit"], - 'closed_month': closed_month, - 'low_open_month': open_count["low"], - 'medium_open_month': open_count["med"], - 'high_open_month': open_count["high"], - 'critical_open_month': open_count["crit"], - 'low_c_month': closed_count["low"], - 'medium_c_month': closed_count["med"], - 'high_c_month': closed_count["high"], - 'critical_c_month': closed_count["crit"], - 'week_stuff': week_stuff, - 'week_a_stuff': week_a_stuff, - 'a_total': a_stuff, - 'total': stuff, - 'sub': neg_length, - 'update': update, - 'lt': less_thirty, - 'ls': less_sixty, - 'ln': less_nine, - 'mn': more_nine, - 'chart_data': chart_data, - 'a_chart_data': a_chart_data, - 'week_chart_data': week_chart_data, - 'week_a_chart_data': week_a_chart_data, - 'name': f'{user.get_full_name()} Metrics', - 'metric': True, - 'total_update': 
total_update, - 'details': details, - 'open_week': open_week, - 'closed_week': closed_week, - 'accepted_week': accepted_week, - 'a_dict': a_dict, - 'o_dict': o_dict, - 'c_dict': c_dict, - 'o_week_dict': o_week_dict, - 'a_week_dict': a_week_dict, - 'c_week_dict': c_week_dict, - 'open_week_count': open_week_count, - 'accepted_week_count': accepted_week_count, - 'closed_week_count': closed_week_count, - 'user': request.user, + return render(request, "dojo/view_engineer.html", { + "open_month": open_month, + "a_month": accepted_month, + "low_a_month": accepted_count["low"], + "medium_a_month": accepted_count["med"], + "high_a_month": accepted_count["high"], + "critical_a_month": accepted_count["crit"], + "closed_month": closed_month, + "low_open_month": open_count["low"], + "medium_open_month": open_count["med"], + "high_open_month": open_count["high"], + "critical_open_month": open_count["crit"], + "low_c_month": closed_count["low"], + "medium_c_month": closed_count["med"], + "high_c_month": closed_count["high"], + "critical_c_month": closed_count["crit"], + "week_stuff": week_stuff, + "week_a_stuff": week_a_stuff, + "a_total": a_stuff, + "total": stuff, + "sub": neg_length, + "update": update, + "lt": less_thirty, + "ls": less_sixty, + "ln": less_nine, + "mn": more_nine, + "chart_data": chart_data, + "a_chart_data": a_chart_data, + "week_chart_data": week_chart_data, + "week_a_chart_data": week_a_chart_data, + "name": f"{user.get_full_name()} Metrics", + "metric": True, + "total_update": total_update, + "details": details, + "open_week": open_week, + "closed_week": closed_week, + "accepted_week": accepted_week, + "a_dict": a_dict, + "o_dict": o_dict, + "c_dict": c_dict, + "o_week_dict": o_week_dict, + "a_week_dict": a_week_dict, + "c_week_dict": c_week_dict, + "open_week_count": open_week_count, + "accepted_week_count": accepted_week_count, + "closed_week_count": closed_week_count, + "user": request.user, }) diff --git a/dojo/middleware.py b/dojo/middleware.py index 3b8e641646..277dac7d65 100644 --- a/dojo/middleware.py +++ b/dojo/middleware.py @@ -13,8 +13,8 @@ logger = logging.getLogger(__name__) -EXEMPT_URLS = [compile(settings.LOGIN_URL.lstrip('/'))] -if hasattr(settings, 'LOGIN_EXEMPT_URLS'): +EXEMPT_URLS = [compile(settings.LOGIN_URL.lstrip("/"))] +if hasattr(settings, "LOGIN_EXEMPT_URLS"): EXEMPT_URLS += [compile(expr) for expr in settings.LOGIN_EXEMPT_URLS] @@ -34,16 +34,16 @@ def __init__(self, get_response): self.get_response = get_response def __call__(self, request): - assert hasattr(request, 'user'), "The Login Required middleware\ + assert hasattr(request, "user"), "The Login Required middleware\ requires authentication middleware to be installed. Edit your\ MIDDLEWARE_CLASSES setting to insert\ 'django.contrib.auth.middleware.AuthenticationMiddleware'. If that doesn't\ work, ensure your TEMPLATE_CONTEXT_PROCESSORS setting includes\ 'django.core.context_processors.auth'." 
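[Editor's note] The redirect logic below hinges on EXEMPT_URLS, built at the top of this middleware module from settings.LOGIN_URL and settings.LOGIN_EXEMPT_URLS. A small self-contained sketch of that check, with hypothetical settings values standing in for the real configuration:

import re

# Hypothetical stand-ins for settings.LOGIN_URL / settings.LOGIN_EXEMPT_URLS.
LOGIN_URL = "/login"
LOGIN_EXEMPT_URLS = [r"^api/v2/", r"^static/"]

EXEMPT_URLS = [re.compile(LOGIN_URL.lstrip("/"))]
EXEMPT_URLS += [re.compile(expr) for expr in LOGIN_EXEMPT_URLS]


def requires_login(path_info: str) -> bool:
    # Mirrors the middleware: strip the leading slash, then enforce login
    # only when no exempt pattern matches the request path.
    path = path_info.lstrip("/")
    return not any(pattern.match(path) for pattern in EXEMPT_URLS)


assert requires_login("/finding/1")             # normal page: login enforced
assert not requires_login("/api/v2/findings/")  # exempt pattern: no redirect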
if not request.user.is_authenticated: - path = request.path_info.lstrip('/') + path = request.path_info.lstrip("/") if not any(m.match(path) for m in EXEMPT_URLS): - if path == 'logout': + if path == "logout": fullURL = f"{settings.LOGIN_URL}?next=/" else: fullURL = f"{settings.LOGIN_URL}?next={quote(request.get_full_path())}" @@ -52,16 +52,16 @@ def __call__(self, request): if request.user.is_authenticated: logger.debug("Authenticated user: %s", str(request.user)) try: - uwsgi = __import__('uwsgi', globals(), locals(), ['set_logvar'], 0) + uwsgi = __import__("uwsgi", globals(), locals(), ["set_logvar"], 0) # this populates dd_user log var, so can appear in the uwsgi logs - uwsgi.set_logvar('dd_user', str(request.user)) + uwsgi.set_logvar("dd_user", str(request.user)) except: # to avoid unittests to fail pass - path = request.path_info.lstrip('/') + path = request.path_info.lstrip("/") from dojo.models import Dojo_User - if Dojo_User.force_password_reset(request.user) and path != 'change_password': - return HttpResponseRedirect(reverse('change_password')) + if Dojo_User.force_password_reset(request.user) and path != "change_password": + return HttpResponseRedirect(reverse("change_password")) return self.get_response(request) @@ -86,14 +86,14 @@ def process_exception(self, request, exception): @classmethod def get_system_settings(cls): - if hasattr(cls._thread_local, 'system_settings'): + if hasattr(cls._thread_local, "system_settings"): return cls._thread_local.system_settings return None @classmethod def cleanup(cls, *args, **kwargs): - if hasattr(cls._thread_local, 'system_settings'): + if hasattr(cls._thread_local, "system_settings"): del cls._thread_local.system_settings @classmethod @@ -145,9 +145,9 @@ def __init__(self, get_response): def __call__(self, request): response = self.get_response(request) - path = request.path_info.lstrip('/') - if request.method == 'POST' and 'api/v2/' in path and path[-1] != '/' and response.status_code == 400: - response.data = {'message': 'Please add a trailing slash to your request.'} + path = request.path_info.lstrip("/") + if request.method == "POST" and "api/v2/" in path and path[-1] != "/" and response.status_code == 400: + response.data = {"message": "Please add a trailing slash to your request."} # you need to change private attribute `_is_render` # to call render second time response._is_rendered = False diff --git a/dojo/models.py b/dojo/models.py index e29c0641db..3dbe66043f 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -45,46 +45,46 @@ logger = logging.getLogger(__name__) deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") -SEVERITY_CHOICES = (('Info', 'Info'), ('Low', 'Low'), ('Medium', 'Medium'), - ('High', 'High'), ('Critical', 'Critical')) +SEVERITY_CHOICES = (("Info", "Info"), ("Low", "Low"), ("Medium", "Medium"), + ("High", "High"), ("Critical", "Critical")) SEVERITIES = [s[0] for s in SEVERITY_CHOICES] -EFFORT_FOR_FIXING_CHOICES = (('', ''), ('Low', 'Low'), ('Medium', 'Medium'), ('High', 'High')) +EFFORT_FOR_FIXING_CHOICES = (("", ""), ("Low", "Low"), ("Medium", "Medium"), ("High", "High")) # fields returned in statistics, typically all status fields -STATS_FIELDS = ['active', 'verified', 'duplicate', 'false_p', 'out_of_scope', 'is_mitigated', 'risk_accepted', 'total'] +STATS_FIELDS = ["active", "verified", "duplicate", "false_p", "out_of_scope", "is_mitigated", "risk_accepted", "total"] # default template with all values set to 0 DEFAULT_STATS = {sev.lower(): {stat_field: 0 for stat_field in 
STATS_FIELDS} for sev in SEVERITIES} -IMPORT_CREATED_FINDING = 'N' -IMPORT_CLOSED_FINDING = 'C' -IMPORT_REACTIVATED_FINDING = 'R' -IMPORT_UNTOUCHED_FINDING = 'U' +IMPORT_CREATED_FINDING = "N" +IMPORT_CLOSED_FINDING = "C" +IMPORT_REACTIVATED_FINDING = "R" +IMPORT_UNTOUCHED_FINDING = "U" IMPORT_ACTIONS = [ - (IMPORT_CREATED_FINDING, 'created'), - (IMPORT_CLOSED_FINDING, 'closed'), - (IMPORT_REACTIVATED_FINDING, 'reactivated'), - (IMPORT_UNTOUCHED_FINDING, 'left untouched'), + (IMPORT_CREATED_FINDING, "created"), + (IMPORT_CLOSED_FINDING, "closed"), + (IMPORT_REACTIVATED_FINDING, "reactivated"), + (IMPORT_UNTOUCHED_FINDING, "left untouched"), ] def _get_annotations_for_statistics(): - annotations = {stats_field.lower(): Count(Case(When(**{stats_field: True}, then=1))) for stats_field in STATS_FIELDS if stats_field != 'total'} + annotations = {stats_field.lower(): Count(Case(When(**{stats_field: True}, then=1))) for stats_field in STATS_FIELDS if stats_field != "total"} # add total - annotations['total'] = Count('id') + annotations["total"] = Count("id") return annotations def _get_statistics_for_queryset(qs, annotation_factory): # order by to get rid of default ordering that would mess with group_by # group by severity (lowercase) - values = qs.annotate(sev=Lower('severity')).values('sev').order_by() + values = qs.annotate(sev=Lower("severity")).values("sev").order_by() # add annotation for each status field values = values.annotate(**annotation_factory()) # make sure sev and total are included - stat_fields = ['sev', 'total'] + STATS_FIELDS + stat_fields = ["sev", "total"] + STATS_FIELDS # go for it values = values.values(*stat_fields) @@ -92,12 +92,12 @@ def _get_statistics_for_queryset(qs, annotation_factory): # need to copy the DEFAULT_STATS otherwise it gets overwritten stats = copy.copy(DEFAULT_STATS) for row in values: - sev = row.pop('sev') + sev = row.pop("sev") stats[sev] = row values_total = qs.values() values_total = values_total.aggregate(**annotation_factory()) - stats['total'] = values_total + stats["total"] = values_total return stats @@ -150,33 +150,33 @@ def __call__(self, model_instance, filename): class Regulation(models.Model): - PRIVACY_CATEGORY = 'privacy' - FINANCE_CATEGORY = 'finance' - EDUCATION_CATEGORY = 'education' - MEDICAL_CATEGORY = 'medical' - CORPORATE_CATEGORY = 'corporate' - OTHER_CATEGORY = 'other' + PRIVACY_CATEGORY = "privacy" + FINANCE_CATEGORY = "finance" + EDUCATION_CATEGORY = "education" + MEDICAL_CATEGORY = "medical" + CORPORATE_CATEGORY = "corporate" + OTHER_CATEGORY = "other" CATEGORY_CHOICES = ( - (PRIVACY_CATEGORY, _('Privacy')), - (FINANCE_CATEGORY, _('Finance')), - (EDUCATION_CATEGORY, _('Education')), - (MEDICAL_CATEGORY, _('Medical')), - (CORPORATE_CATEGORY, _('Corporate')), - (OTHER_CATEGORY, _('Other')), + (PRIVACY_CATEGORY, _("Privacy")), + (FINANCE_CATEGORY, _("Finance")), + (EDUCATION_CATEGORY, _("Education")), + (MEDICAL_CATEGORY, _("Medical")), + (CORPORATE_CATEGORY, _("Corporate")), + (OTHER_CATEGORY, _("Other")), ) - name = models.CharField(max_length=128, unique=True, help_text=_('The name of the regulation.')) - acronym = models.CharField(max_length=20, unique=True, help_text=_('A shortened representation of the name.')) - category = models.CharField(max_length=9, choices=CATEGORY_CHOICES, help_text=_('The subject of the regulation.')) - jurisdiction = models.CharField(max_length=64, help_text=_('The territory over which the regulation applies.')) - description = models.TextField(blank=True, help_text=_('Information about the 
regulation\'s purpose.')) - reference = models.URLField(blank=True, help_text=_('An external URL for more information.')) + name = models.CharField(max_length=128, unique=True, help_text=_("The name of the regulation.")) + acronym = models.CharField(max_length=20, unique=True, help_text=_("A shortened representation of the name.")) + category = models.CharField(max_length=9, choices=CATEGORY_CHOICES, help_text=_("The subject of the regulation.")) + jurisdiction = models.CharField(max_length=64, help_text=_("The territory over which the regulation applies.")) + description = models.TextField(blank=True, help_text=_("Information about the regulation's purpose.")) + reference = models.URLField(blank=True, help_text=_("An external URL for more information.")) class Meta: - ordering = ['name'] + ordering = ["name"] def __str__(self): - return self.acronym + ' (' + self.jurisdiction + ')' + return self.acronym + " (" + self.jurisdiction + ")" User = get_user_model() @@ -186,7 +186,7 @@ def __str__(self): class Dojo_User(User): class Meta: proxy = True - ordering = ['first_name'] + ordering = ["first_name"] def get_full_name(self): return Dojo_User.generate_full_name(self) @@ -197,19 +197,19 @@ def __str__(self): @staticmethod def wants_block_execution(user): # this return False if there is no user, i.e. in celery processes, unittests, etc. - return hasattr(user, 'usercontactinfo') and user.usercontactinfo.block_execution + return hasattr(user, "usercontactinfo") and user.usercontactinfo.block_execution @staticmethod def force_password_reset(user): - return hasattr(user, 'usercontactinfo') and user.usercontactinfo.force_password_reset + return hasattr(user, "usercontactinfo") and user.usercontactinfo.force_password_reset def disable_force_password_reset(user): - if hasattr(user, 'usercontactinfo'): + if hasattr(user, "usercontactinfo"): user.usercontactinfo.force_password_reset = False user.usercontactinfo.save() def enable_force_password_reset(user): - if hasattr(user, 'usercontactinfo'): + if hasattr(user, "usercontactinfo"): user.usercontactinfo.force_password_reset = True user.usercontactinfo.save() @@ -218,14 +218,14 @@ def generate_full_name(user): """ Returns the first_name plus the last_name, with a space in between. """ - full_name = f'{user.first_name} {user.last_name} ({user.username})' + full_name = f"{user.first_name} {user.last_name} ({user.username})" return full_name.strip() class UserContactInfo(models.Model): user = models.OneToOneField(Dojo_User, on_delete=models.CASCADE) title = models.CharField(blank=True, null=True, max_length=150) - phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', + phone_regex = RegexValidator(regex=r"^\+?1?\d{9,15}$", message=_("Phone number must be entered in the format: '+999999999'. 
" "Up to 15 digits allowed.")) phone_number = models.CharField(validators=[phone_regex], blank=True, @@ -238,24 +238,24 @@ class UserContactInfo(models.Model): "Up to 15 digits allowed.")) twitter_username = models.CharField(blank=True, null=True, max_length=150) github_username = models.CharField(blank=True, null=True, max_length=150) - slack_username = models.CharField(blank=True, null=True, max_length=150, help_text=_("Email address associated with your slack account"), verbose_name=_('Slack Email Address')) + slack_username = models.CharField(blank=True, null=True, max_length=150, help_text=_("Email address associated with your slack account"), verbose_name=_("Slack Email Address")) slack_user_id = models.CharField(blank=True, null=True, max_length=25) block_execution = models.BooleanField(default=False, help_text=_("Instead of async deduping a finding the findings will be deduped synchronously and will 'block' the user until completion.")) - force_password_reset = models.BooleanField(default=False, help_text=_('Forces this user to reset their password on next login.')) + force_password_reset = models.BooleanField(default=False, help_text=_("Forces this user to reset their password on next login.")) class Dojo_Group(models.Model): - AZURE = 'AzureAD' - REMOTE = 'Remote' + AZURE = "AzureAD" + REMOTE = "Remote" SOCIAL_CHOICES = ( - (AZURE, _('AzureAD')), - (REMOTE, _('Remote')), + (AZURE, _("AzureAD")), + (REMOTE, _("Remote")), ) name = models.CharField(max_length=255, unique=True) description = models.CharField(max_length=4000, null=True, blank=True) - users = models.ManyToManyField(Dojo_User, through='Dojo_Group_Member', related_name='users', blank=True) + users = models.ManyToManyField(Dojo_User, through="Dojo_Group_Member", related_name="users", blank=True) auth_group = models.ForeignKey(Group, null=True, blank=True, on_delete=models.CASCADE) - social_provider = models.CharField(max_length=10, choices=SOCIAL_CHOICES, blank=True, null=True, help_text=_('Group imported from a social provider.'), verbose_name=_('Social Authentication Provider')) + social_provider = models.CharField(max_length=10, choices=SOCIAL_CHOICES, blank=True, null=True, help_text=_("Group imported from a social provider."), verbose_name=_("Social Authentication Provider")) def __str__(self): return self.name @@ -266,7 +266,7 @@ class Role(models.Model): is_owner = models.BooleanField(default=False) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): return self.name @@ -276,7 +276,7 @@ class System_Settings(models.Model): enable_deduplication = models.BooleanField( default=False, blank=False, - verbose_name=_('Deduplicate findings'), + verbose_name=_("Deduplicate findings"), help_text=_("With this setting turned on, DefectDojo deduplicates findings by " "comparing endpoints, cwe fields, and titles. " "If two findings share a URL and have the same CWE or " @@ -285,72 +285,72 @@ class System_Settings(models.Model): "deduplicated findings is added to the engagement view.")) delete_duplicates = models.BooleanField(default=False, blank=False, help_text=_("Requires next setting: maximum number of duplicates to retain.")) max_dupes = models.IntegerField(blank=True, null=True, default=10, - verbose_name=_('Max Duplicates'), + verbose_name=_("Max Duplicates"), help_text=_("When enabled, if a single " "issue reaches the maximum " "number of duplicates, the " "oldest will be deleted. Duplicate will not be deleted when left empty. 
A value of 0 will remove all duplicates.")) - email_from = models.CharField(max_length=200, default='no-reply@example.com', blank=True) + email_from = models.CharField(max_length=200, default="no-reply@example.com", blank=True) enable_jira = models.BooleanField(default=False, - verbose_name=_('Enable JIRA integration'), + verbose_name=_("Enable JIRA integration"), blank=False) enable_jira_web_hook = models.BooleanField(default=False, - verbose_name=_('Enable JIRA web hook'), - help_text=_('Please note: It is strongly recommended to use a secret below and / or IP whitelist the JIRA server using a proxy such as Nginx.'), + verbose_name=_("Enable JIRA web hook"), + help_text=_("Please note: It is strongly recommended to use a secret below and / or IP whitelist the JIRA server using a proxy such as Nginx."), blank=False) disable_jira_webhook_secret = models.BooleanField(default=False, - verbose_name=_('Disable web hook secret'), - help_text=_('Allows incoming requests without a secret (discouraged legacy behaviour)'), + verbose_name=_("Disable web hook secret"), + help_text=_("Allows incoming requests without a secret (discouraged legacy behaviour)"), blank=False) # will be set to random / uuid by initializer so null needs to be True - jira_webhook_secret = models.CharField(max_length=64, blank=False, null=True, verbose_name=_('JIRA Webhook URL'), - help_text=_('Secret needed in URL for incoming JIRA Webhook')) - - jira_choices = (('Critical', 'Critical'), - ('High', 'High'), - ('Medium', 'Medium'), - ('Low', 'Low'), - ('Info', 'Info')) + jira_webhook_secret = models.CharField(max_length=64, blank=False, null=True, verbose_name=_("JIRA Webhook URL"), + help_text=_("Secret needed in URL for incoming JIRA Webhook")) + + jira_choices = (("Critical", "Critical"), + ("High", "High"), + ("Medium", "Medium"), + ("Low", "Low"), + ("Info", "Info")) jira_minimum_severity = models.CharField(max_length=20, blank=True, null=True, choices=jira_choices, - default='Low') + default="Low") jira_labels = models.CharField(max_length=200, blank=True, null=True, - help_text=_('JIRA issue labels space seperated')) + help_text=_("JIRA issue labels space seperated")) add_vulnerability_id_to_jira_label = models.BooleanField(default=False, - verbose_name=_('Add vulnerability Id as a JIRA label'), + verbose_name=_("Add vulnerability Id as a JIRA label"), blank=False) enable_github = models.BooleanField(default=False, - verbose_name=_('Enable GITHUB integration'), + verbose_name=_("Enable GITHUB integration"), blank=False) enable_slack_notifications = \ models.BooleanField(default=False, - verbose_name=_('Enable Slack notifications'), + verbose_name=_("Enable Slack notifications"), blank=False) - slack_channel = models.CharField(max_length=100, default='', blank=True, - help_text=_('Optional. Needed if you want to send global notifications.')) - slack_token = models.CharField(max_length=100, default='', blank=True, - help_text=_('Token required for interacting ' - 'with Slack. Get one at ' - 'https://api.slack.com/tokens')) - slack_username = models.CharField(max_length=100, default='', blank=True, - help_text=_('Optional. Will take your bot name otherwise.')) + slack_channel = models.CharField(max_length=100, default="", blank=True, + help_text=_("Optional. Needed if you want to send global notifications.")) + slack_token = models.CharField(max_length=100, default="", blank=True, + help_text=_("Token required for interacting " + "with Slack. 
Get one at " + "https://api.slack.com/tokens")) + slack_username = models.CharField(max_length=100, default="", blank=True, + help_text=_("Optional. Will take your bot name otherwise.")) enable_msteams_notifications = \ models.BooleanField(default=False, - verbose_name=_('Enable Microsoft Teams notifications'), + verbose_name=_("Enable Microsoft Teams notifications"), blank=False) - msteams_url = models.CharField(max_length=400, default='', blank=True, - help_text=_('The full URL of the ' - 'incoming webhook')) + msteams_url = models.CharField(max_length=400, default="", blank=True, + help_text=_("The full URL of the " + "incoming webhook")) enable_mail_notifications = models.BooleanField(default=False, blank=False) - mail_notifications_to = models.CharField(max_length=200, default='', + mail_notifications_to = models.CharField(max_length=200, default="", blank=True) false_positive_history = models.BooleanField( @@ -372,50 +372,50 @@ class System_Settings(models.Model): ), ) - url_prefix = models.CharField(max_length=300, default='', blank=True, help_text=_("URL prefix if DefectDojo is installed in it's own virtual subdirectory.")) - team_name = models.CharField(max_length=100, default='', blank=True) + url_prefix = models.CharField(max_length=300, default="", blank=True, help_text=_("URL prefix if DefectDojo is installed in it's own virtual subdirectory.")) + team_name = models.CharField(max_length=100, default="", blank=True) time_zone = models.CharField(max_length=50, choices=[(tz, tz) for tz in all_timezones], - default='UTC', blank=False) - enable_product_grade = models.BooleanField(default=False, verbose_name=_('Enable Product Grading'), help_text=_("Displays a grade letter next to a product to show the overall health.")) + default="UTC", blank=False) + enable_product_grade = models.BooleanField(default=False, verbose_name=_("Enable Product Grading"), help_text=_("Displays a grade letter next to a product to show the overall health.")) product_grade = models.CharField(max_length=800, blank=True) product_grade_a = models.IntegerField(default=90, - verbose_name=_('Grade A'), + verbose_name=_("Grade A"), help_text=_("Percentage score for an " "'A' >=")) product_grade_b = models.IntegerField(default=80, - verbose_name=_('Grade B'), + verbose_name=_("Grade B"), help_text=_("Percentage score for a " "'B' >=")) product_grade_c = models.IntegerField(default=70, - verbose_name=_('Grade C'), + verbose_name=_("Grade C"), help_text=_("Percentage score for a " "'C' >=")) product_grade_d = models.IntegerField(default=60, - verbose_name=_('Grade D'), + verbose_name=_("Grade D"), help_text=_("Percentage score for a " "'D' >=")) product_grade_f = models.IntegerField(default=59, - verbose_name=_('Grade F'), + verbose_name=_("Grade F"), help_text=_("Percentage score for an " "'F' <=")) enable_product_tag_inheritance = models.BooleanField( default=False, blank=False, - verbose_name=_('Enable Product Tag Inheritance'), + verbose_name=_("Enable Product Tag Inheritance"), help_text=_("Enables product tag inheritance globally for all products. 
Any tags added on a product will automatically be added to all Engagements, Tests, and Findings")) enable_benchmark = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable Benchmarks'), + verbose_name=_("Enable Benchmarks"), help_text=_("Enables Benchmarks such as the OWASP ASVS " "(Application Security Verification Standard)")) enable_template_match = models.BooleanField( default=False, blank=False, - verbose_name=_('Enable Remediation Advice'), + verbose_name=_("Enable Remediation Advice"), help_text=_("Enables global remediation advice and matching on CWE and Title. The text will be replaced for mitigation, impact and references on a finding. Useful for providing consistent impact and remediation advice regardless of the scanner.")) enable_similar_findings = models.BooleanField( @@ -428,7 +428,7 @@ class System_Settings(models.Model): default=False, blank=False, verbose_name=_("Enable Engagement Auto-Close"), - help_text=_('Closes an engagement after 3 days (default) past due date including last update.')) + help_text=_("Closes an engagement after 3 days (default) past due date including last update.")) engagement_auto_close_days = models.IntegerField( default=3, @@ -469,55 +469,55 @@ class System_Settings(models.Model): allow_anonymous_survey_repsonse = models.BooleanField( default=False, blank=False, - verbose_name=_('Allow Anonymous Survey Responses'), + verbose_name=_("Allow Anonymous Survey Responses"), help_text=_("Enable anyone with a link to the survey to answer a survey"), ) credentials = models.TextField(max_length=3000, blank=True) - disclaimer = models.TextField(max_length=3000, default='', blank=True, - verbose_name=_('Custom Disclaimer'), + disclaimer = models.TextField(max_length=3000, default="", blank=True, + verbose_name=_("Custom Disclaimer"), help_text=_("Include this custom disclaimer on all notifications and generated reports")) risk_acceptance_form_default_days = models.IntegerField(null=True, blank=True, default=180, help_text=_("Default expiry period for risk acceptance form.")) risk_acceptance_notify_before_expiration = models.IntegerField(null=True, blank=True, default=10, - verbose_name=_('Risk acceptance expiration heads up days'), help_text=_("Notify X days before risk acceptance expires. Leave empty to disable.")) + verbose_name=_("Risk acceptance expiration heads up days"), help_text=_("Notify X days before risk acceptance expires. 
Leave empty to disable.")) enable_credentials = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable credentials'), + verbose_name=_("Enable credentials"), help_text=_("With this setting turned off, credentials will be disabled in the user interface.")) enable_questionnaires = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable questionnaires'), + verbose_name=_("Enable questionnaires"), help_text=_("With this setting turned off, questionnaires will be disabled in the user interface.")) enable_checklists = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable checklists'), + verbose_name=_("Enable checklists"), help_text=_("With this setting turned off, checklists will be disabled in the user interface.")) enable_endpoint_metadata_import = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable Endpoint Metadata Import'), + verbose_name=_("Enable Endpoint Metadata Import"), help_text=_("With this setting turned off, endpoint metadata import will be disabled in the user interface.")) enable_user_profile_editable = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable user profile for writing'), + verbose_name=_("Enable user profile for writing"), help_text=_("When turned on users can edit their profiles")) enable_product_tracking_files = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable Product Tracking Files'), + verbose_name=_("Enable Product Tracking Files"), help_text=_("With this setting turned off, the product tracking files will be disabled in the user interface.")) enable_finding_groups = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable Finding Groups'), + verbose_name=_("Enable Finding Groups"), help_text=_("With this setting turned off, the Finding Groups will be disabled.")) enable_calendar = models.BooleanField( default=True, blank=False, - verbose_name=_('Enable Calendar'), + verbose_name=_("Enable Calendar"), help_text=_("With this setting turned off, the Calendar will be disabled in the user interface.")) default_group = models.ForeignKey( Dojo_Group, @@ -533,16 +533,16 @@ class System_Settings(models.Model): on_delete=models.RESTRICT) default_group_email_pattern = models.CharField( max_length=200, - default='', + default="", blank=True, help_text=_("New users will only be assigned to the default group, when their email address matches this regex pattern. 
This is optional condition.")) minimum_password_length = models.IntegerField( default=9, - verbose_name=_('Minimum password length'), + verbose_name=_("Minimum password length"), help_text=_("Requires user to set passwords greater than minimum length.")) maximum_password_length = models.IntegerField( default=48, - verbose_name=_('Maximum password length'), + verbose_name=_("Maximum password length"), help_text=_("Requires user to set passwords less than maximum length.")) number_character_required = models.BooleanField( default=True, @@ -592,12 +592,12 @@ class SystemSettingsFormAdmin(forms.ModelForm): class Meta: model = System_Settings - fields = ['product_grade'] + fields = ["product_grade"] class System_SettingsAdmin(admin.ModelAdmin): form = SystemSettingsFormAdmin - fields = ('product_grade',) + fields = ("product_grade",) def get_current_date(): @@ -611,13 +611,13 @@ def get_current_datetime(): class Dojo_Group_Member(models.Model): group = models.ForeignKey(Dojo_Group, on_delete=models.CASCADE) user = models.ForeignKey(Dojo_User, on_delete=models.CASCADE) - role = models.ForeignKey(Role, on_delete=models.CASCADE, help_text=_("This role determines the permissions of the user to manage the group."), verbose_name=_('Group role')) + role = models.ForeignKey(Role, on_delete=models.CASCADE, help_text=_("This role determines the permissions of the user to manage the group."), verbose_name=_("Group role")) class Global_Role(models.Model): user = models.OneToOneField(Dojo_User, null=True, blank=True, on_delete=models.CASCADE) group = models.OneToOneField(Dojo_Group, null=True, blank=True, on_delete=models.CASCADE) - role = models.ForeignKey(Role, on_delete=models.CASCADE, null=True, blank=True, help_text=_("The global role will be applied to all product types and products."), verbose_name=_('Global role')) + role = models.ForeignKey(Role, on_delete=models.CASCADE, null=True, blank=True, help_text=_("The global role will be applied to all product types and products."), verbose_name=_("Global role")) class Contact(models.Model): @@ -656,21 +656,21 @@ def copy(self): class Notes(models.Model): - note_type = models.ForeignKey(Note_Type, related_name='note_type', null=True, blank=True, on_delete=models.CASCADE) + note_type = models.ForeignKey(Note_Type, related_name="note_type", null=True, blank=True, on_delete=models.CASCADE) entry = models.TextField() date = models.DateTimeField(null=False, editable=False, default=get_current_datetime) - author = models.ForeignKey(Dojo_User, related_name='editor_notes_set', editable=False, on_delete=models.CASCADE) + author = models.ForeignKey(Dojo_User, related_name="editor_notes_set", editable=False, on_delete=models.CASCADE) private = models.BooleanField(default=False) edited = models.BooleanField(default=False) - editor = models.ForeignKey(Dojo_User, related_name='author_notes_set', editable=False, null=True, on_delete=models.CASCADE) + editor = models.ForeignKey(Dojo_User, related_name="author_notes_set", editable=False, null=True, on_delete=models.CASCADE) edit_time = models.DateTimeField(null=True, editable=False, default=get_current_datetime) history = models.ManyToManyField(NoteHistory, blank=True, editable=False) class Meta: - ordering = ['-date'] + ordering = ["-date"] def __str__(self): return self.entry @@ -693,7 +693,7 @@ def copy(self): class FileUpload(models.Model): title = models.CharField(max_length=100, unique=True) - file = models.FileField(upload_to=UniqueUploadNameProvider('uploaded_files')) + file = 
models.FileField(upload_to=UniqueUploadNameProvider("uploaded_files")) def copy(self): copy = self @@ -701,12 +701,12 @@ def copy(self): copy.pk = None copy.id = None # Add unique modifier to file name - copy.title = f'{self.title} - clone-{str(uuid4())[:8]}' + copy.title = f"{self.title} - clone-{str(uuid4())[:8]}" # Create new unique file name current_url = self.file.url - _, current_full_filename = current_url.rsplit('/', 1) - _, extension = current_full_filename.split('.', 1) - new_file = ContentFile(self.file.read(), name=f'{uuid4()}.{extension}') + _, current_full_filename = current_url.rsplit("/", 1) + _, extension = current_full_filename.split(".", 1) + new_file = ContentFile(self.file.read(), name=f"{uuid4()}.{extension}") copy.file = new_file copy.save() @@ -714,13 +714,13 @@ def copy(self): def get_accessible_url(self, obj, obj_id): if isinstance(obj, Engagement): - obj_type = 'Engagement' + obj_type = "Engagement" elif isinstance(obj, Test): - obj_type = 'Test' + obj_type = "Test" elif isinstance(obj, Finding): - obj_type = 'Finding' + obj_type = "Finding" - return f'access_file/{self.id}/{obj_id}/{obj_type}' + return f"access_file/{self.id}/{obj_id}/{obj_type}" class Product_Type(models.Model): @@ -738,44 +738,44 @@ class Product_Type(models.Model): key_product = models.BooleanField(default=False) updated = models.DateTimeField(auto_now=True, null=True) created = models.DateTimeField(auto_now_add=True, null=True) - members = models.ManyToManyField(Dojo_User, through='Product_Type_Member', related_name='prod_type_members', blank=True) - authorization_groups = models.ManyToManyField(Dojo_Group, through='Product_Type_Group', related_name='product_type_groups', blank=True) + members = models.ManyToManyField(Dojo_User, through="Product_Type_Member", related_name="prod_type_members", blank=True) + authorization_groups = models.ManyToManyField(Dojo_Group, through="Product_Type_Group", related_name="product_type_groups", blank=True) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): return self.name def get_absolute_url(self): from django.urls import reverse - return reverse('product_type', args=[str(self.id)]) + return reverse("product_type", args=[str(self.id)]) def get_breadcrumbs(self): - bc = [{'title': str(self), - 'url': reverse('edit_product_type', args=(self.id,))}] + bc = [{"title": str(self), + "url": reverse("edit_product_type", args=(self.id,))}] return bc @cached_property def critical_present(self): c_findings = Finding.objects.filter( - test__engagement__product__prod_type=self, severity='Critical') + test__engagement__product__prod_type=self, severity="Critical") if c_findings.count() > 0: return True @cached_property def high_present(self): c_findings = Finding.objects.filter( - test__engagement__product__prod_type=self, severity='High') + test__engagement__product__prod_type=self, severity="High") if c_findings.count() > 0: return True @cached_property def calc_health(self): h_findings = Finding.objects.filter( - test__engagement__product__prod_type=self, severity='High') + test__engagement__product__prod_type=self, severity="High") c_findings = Finding.objects.filter( - test__engagement__product__prod_type=self, severity='Critical') + test__engagement__product__prod_type=self, severity="Critical") health = 100 if c_findings.count() > 0: health = 40 @@ -814,40 +814,40 @@ class Test_Type(models.Model): active = models.BooleanField(default=True) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): return self.name def 
get_breadcrumbs(self): - bc = [{'title': str(self), - 'url': None}] + bc = [{"title": str(self), + "url": None}] return bc class DojoMeta(models.Model): name = models.CharField(max_length=120) value = models.CharField(max_length=300) - product = models.ForeignKey('Product', + product = models.ForeignKey("Product", on_delete=models.CASCADE, null=True, editable=False, - related_name='product_meta') - endpoint = models.ForeignKey('Endpoint', + related_name="product_meta") + endpoint = models.ForeignKey("Endpoint", on_delete=models.CASCADE, null=True, editable=False, - related_name='endpoint_meta') - finding = models.ForeignKey('Finding', + related_name="endpoint_meta") + finding = models.ForeignKey("Finding", on_delete=models.CASCADE, null=True, editable=False, - related_name='finding_meta') + related_name="finding_meta") class Meta: - unique_together = (('product', 'name'), - ('endpoint', 'name'), - ('finding', 'name')) + unique_together = (("product", "name"), + ("endpoint", "name"), + ("finding", "name")) def __str__(self): return f"{self.name}: {self.value}" @@ -867,58 +867,58 @@ def clean(self): ids_count += 1 if ids_count == 0: - msg = 'Metadata entries need either a product, an endpoint or a finding' + msg = "Metadata entries need either a product, an endpoint or a finding" raise ValidationError(msg) if ids_count > 1: - msg = 'Metadata entries may not have more than one relation, either a product, an endpoint either or a finding' + msg = "Metadata entries may not have more than one relation, either a product, an endpoint, or a finding" raise ValidationError(msg) class SLA_Configuration(models.Model): - name = models.CharField(max_length=128, unique=True, blank=False, verbose_name=_('Custom SLA Name'), - help_text=_('A unique name for the set of SLAs.')) + name = models.CharField(max_length=128, unique=True, blank=False, verbose_name=_("Custom SLA Name"), + help_text=_("A unique name for the set of SLAs.")) description = models.CharField( max_length=512, null=True, blank=True) critical = models.IntegerField( default=7, - verbose_name=_('Critical Finding SLA Days'), - help_text=_('The number of days to remediate a critical finding.')) + verbose_name=_("Critical Finding SLA Days"), + help_text=_("The number of days to remediate a critical finding.")) enforce_critical = models.BooleanField( default=True, - verbose_name=_('Enforce Critical Finding SLA Days'), - help_text=_('When enabled, critical findings will be assigned an SLA expiration date based on the critical finding SLA days within this SLA configuration.')) + verbose_name=_("Enforce Critical Finding SLA Days"), + help_text=_("When enabled, critical findings will be assigned an SLA expiration date based on the critical finding SLA days within this SLA configuration.")) high = models.IntegerField( default=30, - verbose_name=_('High Finding SLA Days'), - help_text=_('The number of days to remediate a high finding.')) + verbose_name=_("High Finding SLA Days"), + help_text=_("The number of days to remediate a high finding.")) enforce_high = models.BooleanField( default=True, - verbose_name=_('Enforce High Finding SLA Days'), - help_text=_('When enabled, high findings will be assigned an SLA expiration date based on the high finding SLA days within this SLA configuration.')) + verbose_name=_("Enforce High Finding SLA Days"), + help_text=_("When enabled, high findings will be assigned an SLA expiration date based on the high finding SLA days within this SLA configuration.")) medium = models.IntegerField( default=90, -
verbose_name=_('Medium Finding SLA Days'), - help_text=_('The number of days to remediate a medium finding.')) + verbose_name=_("Medium Finding SLA Days"), + help_text=_("The number of days to remediate a medium finding.")) enforce_medium = models.BooleanField( default=True, - verbose_name=_('Enforce Medium Finding SLA Days'), - help_text=_('When enabled, medium findings will be assigned an SLA expiration date based on the medium finding SLA days within this SLA configuration.')) + verbose_name=_("Enforce Medium Finding SLA Days"), + help_text=_("When enabled, medium findings will be assigned an SLA expiration date based on the medium finding SLA days within this SLA configuration.")) low = models.IntegerField( default=120, - verbose_name=_('Low Finding SLA Days'), - help_text=_('The number of days to remediate a low finding.')) + verbose_name=_("Low Finding SLA Days"), + help_text=_("The number of days to remediate a low finding.")) enforce_low = models.BooleanField( default=True, - verbose_name=_('Enforce Low Finding SLA Days'), - help_text=_('When enabled, low findings will be assigned an SLA expiration date based on the low finding SLA days within this SLA configuration.')) + verbose_name=_("Enforce Low Finding SLA Days"), + help_text=_("When enabled, low findings will be assigned an SLA expiration date based on the low finding SLA days within this SLA configuration.")) async_updating = models.BooleanField( default=False, - help_text=_('Findings under this SLA configuration are asynchronously being updated')) + help_text=_("Findings under this SLA configuration are asynchronously being updated")) class Meta: - ordering = ['name'] + ordering = ["name"] def __str__(self): return self.name @@ -946,13 +946,13 @@ def save(self, *args, **kwargs): # check which sla days fields changed based on severity severities = [] if (initial_sla_config.critical != self.critical) or (initial_sla_config.enforce_critical != self.enforce_critical): - severities.append('Critical') + severities.append("Critical") if (initial_sla_config.high != self.high) or (initial_sla_config.enforce_high != self.enforce_high): - severities.append('High') + severities.append("High") if (initial_sla_config.medium != self.medium) or (initial_sla_config.enforce_medium != self.enforce_medium): - severities.append('Medium') + severities.append("Medium") if (initial_sla_config.low != self.low) or (initial_sla_config.enforce_low != self.enforce_low): - severities.append('Low') + severities.append("Low") # if severities have changed, update finding sla expiration dates with those severities if len(severities): # set the async updating flag to true for this sla config @@ -972,11 +972,11 @@ def clean(self): for sla_day in sla_days: if sla_day < 1: - msg = 'SLA Days must be at least 1' + msg = "SLA Days must be at least 1" raise ValidationError(msg) def delete(self, *args, **kwargs): - logger.debug('%d sla configuration delete', self.id) + logger.debug("%d sla configuration delete", self.id) if self.id != 1: super().delete(*args, **kwargs) @@ -985,85 +985,85 @@ def delete(self, *args, **kwargs): raise ValidationError(msg) def get_summary(self): - return f'{self.name} - Critical: {self.critical}, High: {self.high}, Medium: {self.medium}, Low: {self.low}' + return f"{self.name} - Critical: {self.critical}, High: {self.high}, Medium: {self.medium}, Low: {self.low}" class Product(models.Model): - WEB_PLATFORM = 'web' - IOT = 'iot' - DESKTOP_PLATFORM = 'desktop' - MOBILE_PLATFORM = 'mobile' - WEB_SERVICE_PLATFORM = 'web service' + 
WEB_PLATFORM = "web" + IOT = "iot" + DESKTOP_PLATFORM = "desktop" + MOBILE_PLATFORM = "mobile" + WEB_SERVICE_PLATFORM = "web service" PLATFORM_CHOICES = ( - (WEB_SERVICE_PLATFORM, _('API')), - (DESKTOP_PLATFORM, _('Desktop')), - (IOT, _('Internet of Things')), - (MOBILE_PLATFORM, _('Mobile')), - (WEB_PLATFORM, _('Web')), + (WEB_SERVICE_PLATFORM, _("API")), + (DESKTOP_PLATFORM, _("Desktop")), + (IOT, _("Internet of Things")), + (MOBILE_PLATFORM, _("Mobile")), + (WEB_PLATFORM, _("Web")), ) - CONSTRUCTION = 'construction' - PRODUCTION = 'production' - RETIREMENT = 'retirement' + CONSTRUCTION = "construction" + PRODUCTION = "production" + RETIREMENT = "retirement" LIFECYCLE_CHOICES = ( - (CONSTRUCTION, _('Construction')), - (PRODUCTION, _('Production')), - (RETIREMENT, _('Retirement')), + (CONSTRUCTION, _("Construction")), + (PRODUCTION, _("Production")), + (RETIREMENT, _("Retirement")), ) - THIRD_PARTY_LIBRARY_ORIGIN = 'third party library' - PURCHASED_ORIGIN = 'purchased' - CONTRACTOR_ORIGIN = 'contractor' - INTERNALLY_DEVELOPED_ORIGIN = 'internal' - OPEN_SOURCE_ORIGIN = 'open source' - OUTSOURCED_ORIGIN = 'outsourced' + THIRD_PARTY_LIBRARY_ORIGIN = "third party library" + PURCHASED_ORIGIN = "purchased" + CONTRACTOR_ORIGIN = "contractor" + INTERNALLY_DEVELOPED_ORIGIN = "internal" + OPEN_SOURCE_ORIGIN = "open source" + OUTSOURCED_ORIGIN = "outsourced" ORIGIN_CHOICES = ( - (THIRD_PARTY_LIBRARY_ORIGIN, _('Third Party Library')), - (PURCHASED_ORIGIN, _('Purchased')), - (CONTRACTOR_ORIGIN, _('Contractor Developed')), - (INTERNALLY_DEVELOPED_ORIGIN, _('Internally Developed')), - (OPEN_SOURCE_ORIGIN, _('Open Source')), - (OUTSOURCED_ORIGIN, _('Outsourced')), + (THIRD_PARTY_LIBRARY_ORIGIN, _("Third Party Library")), + (PURCHASED_ORIGIN, _("Purchased")), + (CONTRACTOR_ORIGIN, _("Contractor Developed")), + (INTERNALLY_DEVELOPED_ORIGIN, _("Internally Developed")), + (OPEN_SOURCE_ORIGIN, _("Open Source")), + (OUTSOURCED_ORIGIN, _("Outsourced")), ) - VERY_HIGH_CRITICALITY = 'very high' - HIGH_CRITICALITY = 'high' - MEDIUM_CRITICALITY = 'medium' - LOW_CRITICALITY = 'low' - VERY_LOW_CRITICALITY = 'very low' - NONE_CRITICALITY = 'none' + VERY_HIGH_CRITICALITY = "very high" + HIGH_CRITICALITY = "high" + MEDIUM_CRITICALITY = "medium" + LOW_CRITICALITY = "low" + VERY_LOW_CRITICALITY = "very low" + NONE_CRITICALITY = "none" BUSINESS_CRITICALITY_CHOICES = ( - (VERY_HIGH_CRITICALITY, _('Very High')), - (HIGH_CRITICALITY, _('High')), - (MEDIUM_CRITICALITY, _('Medium')), - (LOW_CRITICALITY, _('Low')), - (VERY_LOW_CRITICALITY, _('Very Low')), - (NONE_CRITICALITY, _('None')), + (VERY_HIGH_CRITICALITY, _("Very High")), + (HIGH_CRITICALITY, _("High")), + (MEDIUM_CRITICALITY, _("Medium")), + (LOW_CRITICALITY, _("Low")), + (VERY_LOW_CRITICALITY, _("Very Low")), + (NONE_CRITICALITY, _("None")), ) name = models.CharField(max_length=255, unique=True) description = models.CharField(max_length=4000) product_manager = models.ForeignKey(Dojo_User, null=True, blank=True, - related_name='product_manager', on_delete=models.RESTRICT) + related_name="product_manager", on_delete=models.RESTRICT) technical_contact = models.ForeignKey(Dojo_User, null=True, blank=True, - related_name='technical_contact', on_delete=models.RESTRICT) + related_name="technical_contact", on_delete=models.RESTRICT) team_manager = models.ForeignKey(Dojo_User, null=True, blank=True, - related_name='team_manager', on_delete=models.RESTRICT) + related_name="team_manager", on_delete=models.RESTRICT) created = models.DateTimeField(auto_now_add=True, null=True) - 
prod_type = models.ForeignKey(Product_Type, related_name='prod_type', + prod_type = models.ForeignKey(Product_Type, related_name="prod_type", null=False, blank=False, on_delete=models.CASCADE) updated = models.DateTimeField(auto_now=True, null=True) sla_configuration = models.ForeignKey(SLA_Configuration, - related_name='sla_config', + related_name="sla_config", null=False, blank=False, default=1, on_delete=models.RESTRICT) tid = models.IntegerField(default=0, editable=False) - members = models.ManyToManyField(Dojo_User, through='Product_Member', related_name='product_members', blank=True) - authorization_groups = models.ManyToManyField(Dojo_Group, through='Product_Group', related_name='product_groups', blank=True) + members = models.ManyToManyField(Dojo_User, through="Product_Member", related_name="product_members", blank=True) + authorization_groups = models.ManyToManyField(Dojo_Group, through="Product_Group", related_name="product_groups", blank=True) prod_numeric_grade = models.IntegerField(null=True, blank=True) # Metadata @@ -1071,20 +1071,20 @@ class Product(models.Model): platform = models.CharField(max_length=11, choices=PLATFORM_CHOICES, blank=True, null=True) lifecycle = models.CharField(max_length=12, choices=LIFECYCLE_CHOICES, blank=True, null=True) origin = models.CharField(max_length=19, choices=ORIGIN_CHOICES, blank=True, null=True) - user_records = models.PositiveIntegerField(blank=True, null=True, help_text=_('Estimate the number of user records within the application.')) - revenue = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True, help_text=_('Estimate the application\'s revenue.')) - external_audience = models.BooleanField(default=False, help_text=_('Specify if the application is used by people outside the organization.')) - internet_accessible = models.BooleanField(default=False, help_text=_('Specify if the application is accessible from the public internet.')) + user_records = models.PositiveIntegerField(blank=True, null=True, help_text=_("Estimate the number of user records within the application.")) + revenue = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True, help_text=_("Estimate the application's revenue.")) + external_audience = models.BooleanField(default=False, help_text=_("Specify if the application is used by people outside the organization.")) + internet_accessible = models.BooleanField(default=False, help_text=_("Specify if the application is accessible from the public internet.")) regulations = models.ManyToManyField(Regulation, blank=True) tags = TagField(blank=True, force_lowercase=True, help_text=_("Add tags that help describe this product. Choose from the list or add new tags. Press Enter key to add.")) enable_product_tag_inheritance = models.BooleanField( default=False, blank=False, - verbose_name=_('Enable Product Tag Inheritance'), + verbose_name=_("Enable Product Tag Inheritance"), help_text=_("Enables product tag inheritance. 
Any tags added on a product will automatically be added to all Engagements, Tests, and Findings")) - enable_simple_risk_acceptance = models.BooleanField(default=False, help_text=_('Allows simple risk acceptance by checking/unchecking a checkbox.')) - enable_full_risk_acceptance = models.BooleanField(default=True, help_text=_('Allows full risk acceptance using a risk acceptance form, expiration date, uploaded proof, etc.')) + enable_simple_risk_acceptance = models.BooleanField(default=False, help_text=_("Allows simple risk acceptance by checking/unchecking a checkbox.")) + enable_full_risk_acceptance = models.BooleanField(default=True, help_text=_("Allows full risk acceptance using a risk acceptance form, expiration date, uploaded proof, etc.")) disable_sla_breach_notifications = models.BooleanField( default=False, @@ -1092,10 +1092,10 @@ class Product(models.Model): verbose_name=_("Disable SLA breach notifications"), help_text=_("Disable SLA breach notifications if configured in the global settings")) async_updating = models.BooleanField(default=False, - help_text=_('Findings under this Product or SLA configuration are asynchronously being updated')) + help_text=_("Findings under this Product or SLA configuration are asynchronously being updated")) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): return self.name @@ -1104,7 +1104,7 @@ def save(self, *args, **kwargs): # get the product's sla config before saving (if this is an existing product) initial_sla_config = None if self.pk is not None: - initial_sla_config = getattr(Product.objects.get(pk=self.pk), 'sla_configuration', None) + initial_sla_config = getattr(Product.objects.get(pk=self.pk), "sla_configuration", None) # if initial sla config exists and async finding update is already running, revert sla config before saving if initial_sla_config and self.async_updating: self.sla_configuration = initial_sla_config @@ -1114,14 +1114,14 @@ def save(self, *args, **kwargs): # if the initial sla config exists and async finding update is not running if initial_sla_config is not None and not self.async_updating: # get the new sla config from the saved product - new_sla_config = getattr(self, 'sla_configuration', None) + new_sla_config = getattr(self, "sla_configuration", None) # if the sla config has changed, update finding sla expiration dates within this product if new_sla_config and (initial_sla_config != new_sla_config): # set the async updating flag to true for this product self.async_updating = True super().save(*args, **kwargs) # set the async updating flag to true for the sla config assigned to this product - sla_config = getattr(self, 'sla_configuration', None) + sla_config = getattr(self, "sla_configuration", None) if sla_config: sla_config.async_updating = True super(SLA_Configuration, sla_config).save() @@ -1131,7 +1131,7 @@ def save(self, *args, **kwargs): def get_absolute_url(self): from django.urls import reverse - return reverse('view_product', args=[str(self.id)]) + return reverse("view_product", args=[str(self.id)]) @cached_property def findings_count(self): @@ -1159,7 +1159,7 @@ def findings_active_verified_count(self): @cached_property def endpoint_host_count(self): # active_endpoints is (should be) prefetched - endpoints = getattr(self, 'active_endpoints', None) + endpoints = getattr(self, "active_endpoints", None) hosts = [] for e in endpoints: @@ -1173,7 +1173,7 @@ def endpoint_host_count(self): @cached_property def endpoint_count(self): # active_endpoints is (should be) prefetched - endpoints 
= getattr(self, 'active_endpoints', None) + endpoints = getattr(self, "active_endpoints", None) if endpoints: return len(self.active_endpoints) return 0 @@ -1218,20 +1218,20 @@ def open_findings(self, start_date=None, end_date=None): severity="Low", date__range=[start_date, end_date]).count() - return {'Critical': critical, - 'High': high, - 'Medium': medium, - 'Low': low, - 'Total': (critical + high + medium + low)} + return {"Critical": critical, + "High": high, + "Medium": medium, + "Low": low, + "Total": (critical + high + medium + low)} def get_breadcrumbs(self): - bc = [{'title': str(self), - 'url': reverse('view_product', args=(self.id,))}] + bc = [{"title": str(self), + "url": reverse("view_product", args=(self.id,))}] return bc @property def get_product_type(self): - return self.prod_type if self.prod_type is not None else 'unknown' + return self.prod_type if self.prod_type is not None else "unknown" # only used in APIv2 serializers.py, query should be aligned with findings_count @cached_property @@ -1284,7 +1284,7 @@ class Tool_Type(models.Model): description = models.CharField(max_length=2000, null=True, blank=True) class Meta: - ordering = ['name'] + ordering = ["name"] def __str__(self): return self.name @@ -1294,13 +1294,13 @@ class Tool_Configuration(models.Model): name = models.CharField(max_length=200, null=False) description = models.CharField(max_length=2000, null=True, blank=True) url = models.CharField(max_length=2000, null=True, blank=True) - tool_type = models.ForeignKey(Tool_Type, related_name='tool_type', on_delete=models.CASCADE) + tool_type = models.ForeignKey(Tool_Type, related_name="tool_type", on_delete=models.CASCADE) authentication_type = models.CharField(max_length=15, choices=( - ('API', 'API Key'), - ('Password', - 'Username/Password'), - ('SSH', 'SSH')), + ("API", "API Key"), + ("Password", + "Username/Password"), + ("SSH", "SSH")), null=True, blank=True) extras = models.CharField(max_length=255, null=True, blank=True, help_text=_("Additional definitions that will be " "consumed by scanner")) @@ -1310,10 +1310,10 @@ class Tool_Configuration(models.Model): verbose_name=_("Title for SSH/API Key")) ssh = models.CharField(max_length=6000, null=True, blank=True) api_key = models.CharField(max_length=600, null=True, blank=True, - verbose_name=_('API Key')) + verbose_name=_("API Key")) class Meta: - ordering = ['name'] + ordering = ["name"] def __str__(self): return self.name @@ -1329,18 +1329,18 @@ class Product_API_Scan_Configuration(models.Model): def __str__(self): name = self.tool_configuration.name if self.service_key_1 or self.service_key_2 or self.service_key_3: - name += f' ({self.details})' + name += f" ({self.details})" return name @property def details(self): - details = '' + details = "" if self.service_key_1: - details += f'{self.service_key_1}' + details += f"{self.service_key_1}" if self.service_key_2: - details += f' | {self.service_key_2}' + details += f" | {self.service_key_2}" if self.service_key_3: - details += f' | {self.service_key_3}' + details += f" | {self.service_key_3}" return details @@ -1365,10 +1365,10 @@ def __init__(self, *args, **kwargs): def clean(self): cleaned_data = super().clean() - if not cleaned_data['password'] and not cleaned_data['ssh'] and not cleaned_data['api_key']: - cleaned_data['password'] = self.password_from_db - cleaned_data['ssh'] = self.ssh_from_db - cleaned_data['api_key'] = self.api_key_from_db + if not cleaned_data["password"] and not cleaned_data["ssh"] and not cleaned_data["api_key"]: + 
cleaned_data["password"] = self.password_from_db + cleaned_data["ssh"] = self.ssh_from_db + cleaned_data["api_key"] = self.api_key_from_db return cleaned_data @@ -1394,19 +1394,19 @@ class Engagement_Presets(models.Model): created = models.DateTimeField(auto_now_add=True, null=False) class Meta: - ordering = ['title'] + ordering = ["title"] def __str__(self): return self.title -ENGAGEMENT_STATUS_CHOICES = (('Not Started', 'Not Started'), - ('Blocked', 'Blocked'), - ('Cancelled', 'Cancelled'), - ('Completed', 'Completed'), - ('In Progress', 'In Progress'), - ('On Hold', 'On Hold'), - ('Waiting for Resource', 'Waiting for Resource')) +ENGAGEMENT_STATUS_CHOICES = (("Not Started", "Not Started"), + ("Blocked", "Blocked"), + ("Cancelled", "Cancelled"), + ("Completed", "Completed"), + ("In Progress", "In Progress"), + ("On Hold", "On Hold"), + ("Waiting for Resource", "Waiting for Resource")) class Engagement(models.Model): @@ -1433,51 +1433,51 @@ class Engagement(models.Model): check_list = models.BooleanField(default=True) notes = models.ManyToManyField(Notes, blank=True, editable=False) files = models.ManyToManyField(FileUpload, blank=True, editable=False) - status = models.CharField(editable=True, max_length=2000, default='', + status = models.CharField(editable=True, max_length=2000, default="", null=True, choices=ENGAGEMENT_STATUS_CHOICES) progress = models.CharField(max_length=100, - default='threat_model', editable=False) - tmodel_path = models.CharField(max_length=1000, default='none', + default="threat_model", editable=False) + tmodel_path = models.CharField(max_length=1000, default="none", editable=False, blank=True, null=True) risk_acceptance = models.ManyToManyField("Risk_Acceptance", default=None, editable=False, blank=True) done_testing = models.BooleanField(default=False, editable=False) - engagement_type = models.CharField(editable=True, max_length=30, default='Interactive', + engagement_type = models.CharField(editable=True, max_length=30, default="Interactive", null=True, - choices=(('Interactive', 'Interactive'), - ('CI/CD', 'CI/CD'))) + choices=(("Interactive", "Interactive"), + ("CI/CD", "CI/CD"))) build_id = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Build ID of the product the engagement tested."), verbose_name=_('Build ID')) + null=True, blank=True, help_text=_("Build ID of the product the engagement tested."), verbose_name=_("Build ID")) commit_hash = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Commit hash from repo"), verbose_name=_('Commit Hash')) + null=True, blank=True, help_text=_("Commit hash from repo"), verbose_name=_("Commit Hash")) branch_tag = models.CharField(editable=True, max_length=150, null=True, blank=True, help_text=_("Tag or branch of the product the engagement tested."), verbose_name=_("Branch/Tag")) - build_server = models.ForeignKey(Tool_Configuration, verbose_name=_('Build Server'), help_text=_("Build server responsible for CI/CD test"), null=True, blank=True, related_name='build_server', on_delete=models.CASCADE) - source_code_management_server = models.ForeignKey(Tool_Configuration, null=True, blank=True, verbose_name=_('SCM Server'), help_text=_("Source code server for CI/CD test"), related_name='source_code_management_server', on_delete=models.CASCADE) - source_code_management_uri = models.URLField(max_length=600, null=True, blank=True, editable=True, verbose_name=_('Repo'), help_text=_("Resource link to source code")) - orchestration_engine = 
models.ForeignKey(Tool_Configuration, verbose_name=_('Orchestration Engine'), help_text=_("Orchestration service responsible for CI/CD test"), null=True, blank=True, related_name='orchestration', on_delete=models.CASCADE) - deduplication_on_engagement = models.BooleanField(default=False, verbose_name=_('Deduplication within this engagement only'), help_text=_("If enabled deduplication will only mark a finding in this engagement as duplicate of another finding if both findings are in this engagement. If disabled, deduplication is on the product level.")) + build_server = models.ForeignKey(Tool_Configuration, verbose_name=_("Build Server"), help_text=_("Build server responsible for CI/CD test"), null=True, blank=True, related_name="build_server", on_delete=models.CASCADE) + source_code_management_server = models.ForeignKey(Tool_Configuration, null=True, blank=True, verbose_name=_("SCM Server"), help_text=_("Source code server for CI/CD test"), related_name="source_code_management_server", on_delete=models.CASCADE) + source_code_management_uri = models.URLField(max_length=600, null=True, blank=True, editable=True, verbose_name=_("Repo"), help_text=_("Resource link to source code")) + orchestration_engine = models.ForeignKey(Tool_Configuration, verbose_name=_("Orchestration Engine"), help_text=_("Orchestration service responsible for CI/CD test"), null=True, blank=True, related_name="orchestration", on_delete=models.CASCADE) + deduplication_on_engagement = models.BooleanField(default=False, verbose_name=_("Deduplication within this engagement only"), help_text=_("If enabled, deduplication will only mark a finding in this engagement as duplicate of another finding if both findings are in this engagement. If disabled, deduplication is on the product level.")) tags = TagField(blank=True, force_lowercase=True, help_text=_("Add tags that help describe this engagement. Choose from the list or add new tags. Press Enter key to add.")) inherited_tags = TagField(blank=True, force_lowercase=True, help_text=_("Internal use tags specifically for maintaining parity with product.
This field will be present as a subset in the tags field")) class Meta: - ordering = ['-target_start'] + ordering = ["-target_start"] indexes = [ - models.Index(fields=['product', 'active']), + models.Index(fields=["product", "active"]), ] def __str__(self): - return "Engagement %i: %s (%s)" % (self.id if id else 0, self.name if self.name else '', + return "Engagement %i: %s (%s)" % (self.id if id else 0, self.name if self.name else "", self.target_start.strftime( "%b %d, %Y")) def get_absolute_url(self): from django.urls import reverse - return reverse('view_engagement', args=[str(self.id)]) + return reverse("view_engagement", args=[str(self.id)]) def copy(self): copy = self @@ -1510,7 +1510,7 @@ def copy(self): return copy def is_overdue(self): - if self.engagement_type == 'CI/CD': + if self.engagement_type == "CI/CD": overdue_grace_days = 10 else: overdue_grace_days = 0 @@ -1524,8 +1524,8 @@ def is_overdue(self): def get_breadcrumbs(self): bc = self.product.get_breadcrumbs() - bc += [{'title': str(self), - 'url': reverse('view_engagement', args=(self.id,))}] + bc += [{"title": str(self), + "url": reverse("view_engagement", args=(self.id,))}] return bc # only used by bulk risk acceptance api @@ -1546,7 +1546,7 @@ def is_ci_cd(self): return self.engagement_type == "CI/CD" def delete(self, *args, **kwargs): - logger.debug('%d engagement delete', self.id) + logger.debug("%d engagement delete", self.id) import dojo.finding.helper as helper helper.prepare_duplicates_for_delete(engagement=self) super().delete(*args, **kwargs) @@ -1567,8 +1567,8 @@ class CWE(models.Model): class Endpoint_Params(models.Model): param = models.CharField(max_length=150) value = models.CharField(max_length=150) - method_type = (('GET', 'GET'), - ('POST', 'POST')) + method_type = (("GET", "GET"), + ("POST", "POST")) method = models.CharField(max_length=20, blank=False, null=True, choices=method_type) @@ -1581,16 +1581,16 @@ class Endpoint_Status(models.Model): false_positive = models.BooleanField(default=False, blank=True) out_of_scope = models.BooleanField(default=False, blank=True) risk_accepted = models.BooleanField(default=False, blank=True) - endpoint = models.ForeignKey('Endpoint', null=False, blank=False, on_delete=models.CASCADE, related_name='status_endpoint') - finding = models.ForeignKey('Finding', null=False, blank=False, on_delete=models.CASCADE, related_name='status_finding') + endpoint = models.ForeignKey("Endpoint", null=False, blank=False, on_delete=models.CASCADE, related_name="status_endpoint") + finding = models.ForeignKey("Finding", null=False, blank=False, on_delete=models.CASCADE, related_name="status_finding") class Meta: indexes = [ - models.Index(fields=['finding', 'mitigated']), - models.Index(fields=['endpoint', 'mitigated']), + models.Index(fields=["finding", "mitigated"]), + models.Index(fields=["endpoint", "mitigated"]), ] constraints = [ - models.UniqueConstraint(fields=['finding', 'endpoint'], name='endpoint-finding relation'), + models.UniqueConstraint(fields=["finding", "endpoint"], name="endpoint-finding relation"), ] def __str__(self): @@ -1642,28 +1642,28 @@ class Endpoint(models.Model): endpoint_params = models.ManyToManyField(Endpoint_Params, blank=True, editable=False) findings = models.ManyToManyField("Finding", blank=True, - verbose_name=_('Findings'), + verbose_name=_("Findings"), through=Endpoint_Status) tags = TagField(blank=True, force_lowercase=True, help_text=_("Add tags that help describe this endpoint. Choose from the list or add new tags. 
Press Enter key to add.")) inherited_tags = TagField(blank=True, force_lowercase=True, help_text=_("Internal use tags specifically for maintaining parity with product. This field will be present as a subset in the tags field")) class Meta: - ordering = ['product', 'host', 'protocol', 'port', 'userinfo', 'path', 'query', 'fragment'] + ordering = ["product", "host", "protocol", "port", "userinfo", "path", "query", "fragment"] indexes = [ - models.Index(fields=['product']), + models.Index(fields=["product"]), ] def __str__(self): try: if self.host: - dummy_scheme = 'dummy-scheme' # workaround for https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L988 + dummy_scheme = "dummy-scheme" # workaround for https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L988 url = hyperlink.EncodedURL( scheme=self.protocol if self.protocol else dummy_scheme, - userinfo=self.userinfo or '', + userinfo=self.userinfo or "", host=self.host, port=self.port, - path=tuple(self.path.split('/')) if self.path else (), + path=tuple(self.path.split("/")) if self.path else (), query=tuple( ( qe.split("=", 1) @@ -1672,68 +1672,68 @@ def __str__(self): ) for qe in self.query.split("&") ) if self.query else (), # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1427 - fragment=self.fragment or '', + fragment=self.fragment or "", ) # Return a normalized version of the URL to avoid differences where there shouldn't be any difference. # Example: https://google.com and https://google.com:443 normalize_path = self.path # it used to add '/' at the end of host clean_url = url.normalize(scheme=True, host=True, path=normalize_path, query=True, fragment=True, userinfo=True, percents=True).to_uri().to_text() if not self.protocol: - if clean_url[:len(dummy_scheme) + 3] == (dummy_scheme + '://'): + if clean_url[:len(dummy_scheme) + 3] == (dummy_scheme + "://"): clean_url = clean_url[len(dummy_scheme) + 3:] else: - msg = 'hyperlink lib did not create URL as was expected' + msg = "hyperlink lib did not create URL as was expected" raise ValueError(msg) return clean_url else: - msg = 'Missing host' + msg = "Missing host" raise ValueError(msg) except: - url = '' + url = "" if self.protocol: - url += f'{self.protocol}://' + url += f"{self.protocol}://" if self.userinfo: - url += f'{self.userinfo}@' + url += f"{self.userinfo}@" if self.host: url += self.host if self.port: - url += f':{self.port}' + url += f":{self.port}" if self.path: - url += '{}{}'.format('/' if self.path[0] != '/' else '', self.path) + url += "{}{}".format("/" if self.path[0] != "/" else "", self.path) if self.query: - url += f'?{self.query}' + url += f"?{self.query}" if self.fragment: - url += f'#{self.fragment}' + url += f"#{self.fragment}" return url def get_absolute_url(self): from django.urls import reverse - return reverse('view_endpoint', args=[str(self.id)]) + return reverse("view_endpoint", args=[str(self.id)]) def clean(self): errors = [] null_char_list = ["0x00", "\x00"] db_type = connection.vendor - if self.protocol or self.protocol == '': - if not re.match(r'^[A-Za-z][A-Za-z0-9\.\-\+]+$', self.protocol): # https://tools.ietf.org/html/rfc3986#section-3.1 + if self.protocol or self.protocol == "": + if not re.match(r"^[A-Za-z][A-Za-z0-9\.\-\+]+$", self.protocol): # https://tools.ietf.org/html/rfc3986#section-3.1 errors.append(ValidationError(f'Protocol "{self.protocol}" has
invalid format')) - if self.protocol == '': + if self.protocol == "": self.protocol = None - if self.userinfo or self.userinfo == '': - if not re.match(r'^[A-Za-z0-9\.\-_~%\!\$&\'\(\)\*\+,;=:]+$', self.userinfo): # https://tools.ietf.org/html/rfc3986#section-3.2.1 + if self.userinfo or self.userinfo == "": + if not re.match(r"^[A-Za-z0-9\.\-_~%\!\$&\'\(\)\*\+,;=:]+$", self.userinfo): # https://tools.ietf.org/html/rfc3986#section-3.2.1 errors.append(ValidationError(f'Userinfo "{self.userinfo}" has invalid format')) - if self.userinfo == '': + if self.userinfo == "": self.userinfo = None if self.host: - if not re.match(r'^[A-Za-z0-9_\-\+][A-Za-z0-9_\.\-\+]+$', self.host): + if not re.match(r"^[A-Za-z0-9_\-\+][A-Za-z0-9_\.\-\+]+$", self.host): try: validate_ipv46_address(self.host) except ValidationError: errors.append(ValidationError(f'Host "{self.host}" has invalid format')) else: - errors.append(ValidationError('Host must not be empty')) + errors.append(ValidationError("Host must not be empty")) if self.port or self.port == 0: try: @@ -1744,43 +1744,43 @@ def clean(self): except ValueError: errors.append(ValidationError(f'Port "{self.port}" has invalid format - it is not a number')) - if self.path or self.path == '': + if self.path or self.path == "": while len(self.path) > 0 and self.path[0] == "/": # Endpoint store "root-less" path self.path = self.path[1:] if any(null_char in self.path for null_char in null_char_list): old_value = self.path - if 'postgres' in db_type: - action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' + if "postgres" in db_type: + action_string = "Postgres does not accept NULL character. Attempting to replace with %00..." for remove_str in null_char_list: - self.path = self.path.replace(remove_str, '%00') + self.path = self.path.replace(remove_str, "%00") logging.error(f'Path "{old_value}" has invalid format - It contains the NULL character. The following action was taken: {action_string}') - if self.path == '': + if self.path == "": self.path = None - if self.query or self.query == '': + if self.query or self.query == "": if len(self.query) > 0 and self.query[0] == "?": self.query = self.query[1:] if any(null_char in self.query for null_char in null_char_list): old_value = self.query - if 'postgres' in db_type: - action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' + if "postgres" in db_type: + action_string = "Postgres does not accept NULL character. Attempting to replace with %00..." for remove_str in null_char_list: - self.query = self.query.replace(remove_str, '%00') + self.query = self.query.replace(remove_str, "%00") logging.error(f'Query "{old_value}" has invalid format - It contains the NULL character. The following action was taken: {action_string}') - if self.query == '': + if self.query == "": self.query = None - if self.fragment or self.fragment == '': + if self.fragment or self.fragment == "": if len(self.fragment) > 0 and self.fragment[0] == "#": self.fragment = self.fragment[1:] if any(null_char in self.fragment for null_char in null_char_list): old_value = self.fragment - if 'postgres' in db_type: - action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' + if "postgres" in db_type: + action_string = "Postgres does not accept NULL character. Attempting to replace with %00..." 
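# The same NULL-character scrubbing is applied to path, query, and fragment
# throughout this clean() method. A minimal standalone sketch of that
# replacement loop, assuming a Postgres backend (the helper name below is
# illustrative, not part of dojo.models):
def scrub_null_chars(value: str) -> str:
    # Postgres text columns reject the NULL character, so each occurrence
    # (the literal "\x00" byte or the textual "0x00") is percent-encoded
    # as "%00" before the value is saved.
    for null_char in ["0x00", "\x00"]:
        value = value.replace(null_char, "%00")
    return value

assert scrub_null_chars("login\x00.php") == "login%00.php"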
for remove_str in null_char_list: - self.fragment = self.fragment.replace(remove_str, '%00') + self.fragment = self.fragment.replace(remove_str, "%00") logging.error(f'Fragment "{old_value}" has invalid format - It contains the NULL character. The following action was taken: {action_string}') - if self.fragment == '': + if self.fragment == "": self.fragment = None if errors: @@ -1845,7 +1845,7 @@ def active_findings(self): status_finding__false_positive=False, status_finding__out_of_scope=False, status_finding__risk_accepted=False, - ).order_by('numerical_severity') + ).order_by("numerical_severity") return findings def active_verified_findings(self): @@ -1859,7 +1859,7 @@ def active_verified_findings(self): status_finding__false_positive=False, status_finding__out_of_scope=False, status_finding__risk_accepted=False, - ).order_by('numerical_severity') + ).order_by("numerical_severity") return findings @property @@ -1914,7 +1914,7 @@ def host_active_findings(self): status_finding__out_of_scope=False, status_finding__risk_accepted=False, endpoints__in=self.host_endpoints(), - ).order_by('numerical_severity') + ).order_by("numerical_severity") return findings def host_active_verified_findings(self): @@ -1929,7 +1929,7 @@ def host_active_verified_findings(self): status_finding__out_of_scope=False, status_finding__risk_accepted=False, endpoints__in=self.host_endpoints(), - ).order_by('numerical_severity') + ).order_by("numerical_severity") return findings @property @@ -1942,8 +1942,8 @@ def host_active_verified_findings_count(self): def get_breadcrumbs(self): bc = self.product.get_breadcrumbs() - bc += [{'title': self.host, - 'url': reverse('view_endpoint', args=(self.id,))}] + bc += [{"title": self.host, + "url": reverse("view_endpoint", args=(self.id,))}] return bc @staticmethod @@ -1954,7 +1954,7 @@ def from_uri(uri): from urllib.parse import urlparse url = hyperlink.parse(url="//" + urlparse(uri).netloc) except hyperlink.URLParseError as e: - msg = f'Invalid URL format: {e}' + msg = f"Invalid URL format: {e}" raise ValidationError(msg) query_parts = [] # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1768 @@ -1965,13 +1965,13 @@ def from_uri(uri): query_parts.append(f"{k}={v}") query_string = "&".join(query_parts) - protocol = url.scheme if url.scheme != '' else None - userinfo = ':'.join(url.userinfo) if url.userinfo not in [(), ('',)] else None - host = url.host if url.host != '' else None + protocol = url.scheme if url.scheme != "" else None + userinfo = ":".join(url.userinfo) if url.userinfo not in [(), ("",)] else None + host = url.host if url.host != "" else None port = url.port - path = '/'.join(url.path)[:500] if url.path not in [None, (), ('',)] else None - query = query_string[:1000] if query_string is not None and query_string != '' else None - fragment = url.fragment[:500] if url.fragment is not None and url.fragment != '' else None + path = "/".join(url.path)[:500] if url.path not in [None, (), ("",)] else None + query = query_string[:1000] if query_string is not None and query_string != "" else None + fragment = url.fragment[:500] if url.fragment is not None and url.fragment != "" else None return Endpoint( protocol=protocol, @@ -2017,7 +2017,7 @@ class Sonarqube_Issue_Transition(models.Model): transitions = models.CharField(max_length=100) class Meta: - ordering = ('-created', ) + ordering = ("-created", ) class Test(models.Model): @@ -2048,16 +2048,16 @@ class Test(models.Model): version = 
models.CharField(max_length=100, null=True, blank=True) build_id = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Build ID that was tested, a reimport may update this field."), verbose_name=_('Build ID')) + null=True, blank=True, help_text=_("Build ID that was tested, a reimport may update this field."), verbose_name=_("Build ID")) commit_hash = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Commit hash tested, a reimport may update this field."), verbose_name=_('Commit Hash')) + null=True, blank=True, help_text=_("Commit hash tested, a reimport may update this field."), verbose_name=_("Commit Hash")) branch_tag = models.CharField(editable=True, max_length=150, null=True, blank=True, help_text=_("Tag or branch that was tested, a reimport may update this field."), verbose_name=_("Branch/Tag")) - api_scan_configuration = models.ForeignKey(Product_API_Scan_Configuration, null=True, editable=True, blank=True, on_delete=models.CASCADE, verbose_name=_('API Scan Configuration')) + api_scan_configuration = models.ForeignKey(Product_API_Scan_Configuration, null=True, editable=True, blank=True, on_delete=models.CASCADE, verbose_name=_("API Scan Configuration")) class Meta: indexes = [ - models.Index(fields=['engagement', 'test_type']), + models.Index(fields=["engagement", "test_type"]), ] def __str__(self): @@ -2067,15 +2067,15 @@ def __str__(self): def get_absolute_url(self): from django.urls import reverse - return reverse('view_test', args=[str(self.id)]) + return reverse("view_test", args=[str(self.id)]) def test_type_name(self) -> str: return self.test_type.name def get_breadcrumbs(self): bc = self.engagement.get_breadcrumbs() - bc += [{'title': str(self), - 'url': reverse('view_test', args=(self.id,))}] + bc += [{"title": str(self), + "url": reverse("view_test", args=(self.id,))}] return bc def copy(self, engagement=None): @@ -2118,55 +2118,55 @@ def accept_risks(self, accepted_risks): def deduplication_algorithm(self): deduplicationAlgorithm = settings.DEDUPE_ALGO_LEGACY - if hasattr(settings, 'DEDUPLICATION_ALGORITHM_PER_PARSER'): + if hasattr(settings, "DEDUPLICATION_ALGORITHM_PER_PARSER"): if (self.test_type.name in settings.DEDUPLICATION_ALGORITHM_PER_PARSER): - deduplicationLogger.debug(f'using DEDUPLICATION_ALGORITHM_PER_PARSER for test_type.name: {self.test_type.name}') + deduplicationLogger.debug(f"using DEDUPLICATION_ALGORITHM_PER_PARSER for test_type.name: {self.test_type.name}") deduplicationAlgorithm = settings.DEDUPLICATION_ALGORITHM_PER_PARSER[self.test_type.name] elif (self.scan_type in settings.DEDUPLICATION_ALGORITHM_PER_PARSER): - deduplicationLogger.debug(f'using DEDUPLICATION_ALGORITHM_PER_PARSER for scan_type: {self.scan_type}') + deduplicationLogger.debug(f"using DEDUPLICATION_ALGORITHM_PER_PARSER for scan_type: {self.scan_type}") deduplicationAlgorithm = settings.DEDUPLICATION_ALGORITHM_PER_PARSER[self.scan_type] else: - deduplicationLogger.debug('Section DEDUPLICATION_ALGORITHM_PER_PARSER not found in settings.dist.py') + deduplicationLogger.debug("Section DEDUPLICATION_ALGORITHM_PER_PARSER not found in settings.dist.py") - deduplicationLogger.debug(f'DEDUPLICATION_ALGORITHM_PER_PARSER is: {deduplicationAlgorithm}') + deduplicationLogger.debug(f"DEDUPLICATION_ALGORITHM_PER_PARSER is: {deduplicationAlgorithm}") return deduplicationAlgorithm @property def hash_code_fields(self): hashCodeFields = None - if hasattr(settings, 'HASHCODE_FIELDS_PER_SCANNER'): + if hasattr(settings, 
"HASHCODE_FIELDS_PER_SCANNER"): if (self.test_type.name in settings.HASHCODE_FIELDS_PER_SCANNER): - deduplicationLogger.debug(f'using HASHCODE_FIELDS_PER_SCANNER for test_type.name: {self.test_type.name}') + deduplicationLogger.debug(f"using HASHCODE_FIELDS_PER_SCANNER for test_type.name: {self.test_type.name}") hashCodeFields = settings.HASHCODE_FIELDS_PER_SCANNER[self.test_type.name] elif (self.scan_type in settings.HASHCODE_FIELDS_PER_SCANNER): - deduplicationLogger.debug(f'using HASHCODE_FIELDS_PER_SCANNER for scan_type: {self.scan_type}') + deduplicationLogger.debug(f"using HASHCODE_FIELDS_PER_SCANNER for scan_type: {self.scan_type}") hashCodeFields = settings.HASHCODE_FIELDS_PER_SCANNER[self.scan_type] else: - deduplicationLogger.debug('Section HASHCODE_FIELDS_PER_SCANNER not found in settings.dist.py') + deduplicationLogger.debug("Section HASHCODE_FIELDS_PER_SCANNER not found in settings.dist.py") - deduplicationLogger.debug(f'HASHCODE_FIELDS_PER_SCANNER is: {hashCodeFields}') + deduplicationLogger.debug(f"HASHCODE_FIELDS_PER_SCANNER is: {hashCodeFields}") return hashCodeFields @property def hash_code_allows_null_cwe(self): hashCodeAllowsNullCwe = True - if hasattr(settings, 'HASHCODE_ALLOWS_NULL_CWE'): + if hasattr(settings, "HASHCODE_ALLOWS_NULL_CWE"): if (self.test_type.name in settings.HASHCODE_ALLOWS_NULL_CWE): - deduplicationLogger.debug(f'using HASHCODE_ALLOWS_NULL_CWE for test_type.name: {self.test_type.name}') + deduplicationLogger.debug(f"using HASHCODE_ALLOWS_NULL_CWE for test_type.name: {self.test_type.name}") hashCodeAllowsNullCwe = settings.HASHCODE_ALLOWS_NULL_CWE[self.test_type.name] elif (self.scan_type in settings.HASHCODE_ALLOWS_NULL_CWE): - deduplicationLogger.debug(f'using HASHCODE_ALLOWS_NULL_CWE for scan_type: {self.scan_type}') + deduplicationLogger.debug(f"using HASHCODE_ALLOWS_NULL_CWE for scan_type: {self.scan_type}") hashCodeAllowsNullCwe = settings.HASHCODE_ALLOWS_NULL_CWE[self.scan_type] else: - deduplicationLogger.debug('Section HASHCODE_ALLOWS_NULL_CWE not found in settings.dist.py') + deduplicationLogger.debug("Section HASHCODE_ALLOWS_NULL_CWE not found in settings.dist.py") - deduplicationLogger.debug(f'HASHCODE_ALLOWS_NULL_CWE is: {hashCodeAllowsNullCwe}') + deduplicationLogger.debug(f"HASHCODE_ALLOWS_NULL_CWE is: {hashCodeAllowsNullCwe}") return hashCodeAllowsNullCwe def delete(self, *args, **kwargs): - logger.debug('%d test delete', self.id) + logger.debug("%d test delete", self.id) super().delete(*args, **kwargs) calculate_grade(self.engagement.product) @@ -2183,35 +2183,35 @@ def inherit_tags(self, potentially_existing_tags): class Test_Import(TimeStampedModel): - IMPORT_TYPE = 'import' - REIMPORT_TYPE = 'reimport' + IMPORT_TYPE = "import" + REIMPORT_TYPE = "reimport" test = models.ForeignKey(Test, editable=False, null=False, blank=False, on_delete=models.CASCADE) - findings_affected = models.ManyToManyField('Finding', through='Test_Import_Finding_Action') + findings_affected = models.ManyToManyField("Finding", through="Test_Import_Finding_Action") import_settings = JSONField(null=True) - type = models.CharField(max_length=64, null=False, blank=False, default='unknown') + type = models.CharField(max_length=64, null=False, blank=False, default="unknown") version = models.CharField(max_length=100, null=True, blank=True) build_id = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Build ID that was tested, a reimport may update this field."), verbose_name=_('Build ID')) + null=True, blank=True, 
help_text=_("Build ID that was tested, a reimport may update this field."), verbose_name=_("Build ID")) commit_hash = models.CharField(editable=True, max_length=150, - null=True, blank=True, help_text=_("Commit hash tested, a reimport may update this field."), verbose_name=_('Commit Hash')) + null=True, blank=True, help_text=_("Commit hash tested, a reimport may update this field."), verbose_name=_("Commit Hash")) branch_tag = models.CharField(editable=True, max_length=150, null=True, blank=True, help_text=_("Tag or branch that was tested, a reimport may update this field."), verbose_name=_("Branch/Tag")) def get_queryset(self): - logger.debug('prefetch test_import counts') + logger.debug("prefetch test_import counts") super_query = super().get_queryset() - super_query = super_query.annotate(created_findings_count=Count('findings', filter=Q(test_import_finding_action__action=IMPORT_CREATED_FINDING))) - super_query = super_query.annotate(closed_findings_count=Count('findings', filter=Q(test_import_finding_action__action=IMPORT_CLOSED_FINDING))) - super_query = super_query.annotate(reactivated_findings_count=Count('findings', filter=Q(test_import_finding_action__action=IMPORT_REACTIVATED_FINDING))) - super_query = super_query.annotate(untouched_findings_count=Count('findings', filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING))) + super_query = super_query.annotate(created_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CREATED_FINDING))) + super_query = super_query.annotate(closed_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CLOSED_FINDING))) + super_query = super_query.annotate(reactivated_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_REACTIVATED_FINDING))) + super_query = super_query.annotate(untouched_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING))) return super_query class Meta: - ordering = ('-id',) + ordering = ("-id",) indexes = [ - models.Index(fields=['created', 'test', 'type']), + models.Index(fields=["created", "test", "type"]), ] def __str__(self): @@ -2228,36 +2228,36 @@ def statistics(self): class Test_Import_Finding_Action(TimeStampedModel): test_import = models.ForeignKey(Test_Import, editable=False, null=False, blank=False, on_delete=models.CASCADE) - finding = models.ForeignKey('Finding', editable=False, null=False, blank=False, on_delete=models.CASCADE) + finding = models.ForeignKey("Finding", editable=False, null=False, blank=False, on_delete=models.CASCADE) action = models.CharField(max_length=100, null=True, blank=True, choices=IMPORT_ACTIONS) class Meta: indexes = [ - models.Index(fields=['finding', 'action', 'test_import']), + models.Index(fields=["finding", "action", "test_import"]), ] - unique_together = (('test_import', 'finding')) - ordering = ('test_import', 'action', 'finding') + unique_together = (("test_import", "finding")) + ordering = ("test_import", "action", "finding") def __str__(self): - return '%i: %s' % (self.finding.id, self.action) + return "%i: %s" % (self.finding.id, self.action) class Finding(models.Model): title = models.CharField(max_length=511, - verbose_name=_('Title'), + verbose_name=_("Title"), help_text=_("A short description of the flaw.")) date = models.DateField(default=get_current_date, - verbose_name=_('Date'), + verbose_name=_("Date"), help_text=_("The date the flaw was discovered.")) sla_start_date = models.DateField( blank=True, null=True, - 
verbose_name=_('SLA Start Date'), + verbose_name=_("SLA Start Date"), help_text=_("(readonly) The date used as start date for SLA calculation. Set by expiring risk acceptances. Empty by default, causing a fallback to 'date'.")) sla_expiration_date = models.DateField( blank=True, null=True, - verbose_name=_('SLA Expiration Date'), + verbose_name=_("SLA Expiration Date"), help_text=_("(readonly) The date SLA expires for this finding. Empty by default, causing a fallback to 'date'.")) cwe = models.IntegerField(default=0, null=True, blank=True, verbose_name=_("CWE"), @@ -2275,335 +2275,335 @@ class Finding(models.Model): verbose_name=_("EPSS percentile"), help_text=_("EPSS percentile for the CVE. Describes how many CVEs are scored at or below this one."), validators=[MinValueValidator(0.0), MaxValueValidator(1.0)]) - cvssv3_regex = RegexValidator(regex=r'^AV:[NALP]|AC:[LH]|PR:[UNLH]|UI:[NR]|S:[UC]|[CIA]:[NLH]', message="CVSS must be entered in format: 'AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H'") + cvssv3_regex = RegexValidator(regex=r"^AV:[NALP]|AC:[LH]|PR:[UNLH]|UI:[NR]|S:[UC]|[CIA]:[NLH]", message="CVSS must be entered in format: 'AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H'") cvssv3 = models.TextField(validators=[cvssv3_regex], max_length=117, null=True, - verbose_name=_('CVSS v3'), - help_text=_('Common Vulnerability Scoring System version 3 (CVSSv3) score associated with this flaw.')) + verbose_name=_("CVSS v3"), + help_text=_("Common Vulnerability Scoring System version 3 (CVSSv3) score associated with this flaw.")) cvssv3_score = models.FloatField(null=True, blank=True, - verbose_name=_('CVSSv3 score'), + verbose_name=_("CVSSv3 score"), help_text=_("Numerical CVSSv3 score for the vulnerability. If the vector is given, the score is updated while saving the finding. The value must be between 0-10."), validators=[MinValueValidator(0.0), MaxValueValidator(10.0)]) url = models.TextField(null=True, blank=True, editable=False, - verbose_name=_('URL'), + verbose_name=_("URL"), help_text=_("External reference that provides more information about this flaw.")) # not displayed and pretty much the same as references. To remove?
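# A minimal sketch of how the cvssv3_regex validator above behaves, checked
# with re.search (which Django's RegexValidator uses internally); the sample
# vector strings are illustrative:
import re

cvssv3_pattern = r"^AV:[NALP]|AC:[LH]|PR:[UNLH]|UI:[NR]|S:[UC]|[CIA]:[NLH]"
assert re.search(cvssv3_pattern, "AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H")  # accepted
assert re.search(cvssv3_pattern, "not a CVSS vector") is None  # rejected
# Note that only the first alternative carries the ^ anchor, so any input
# containing a later alternative anywhere (e.g. "C:H") also passes.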
severity = models.CharField(max_length=200, - verbose_name=_('Severity'), - help_text=_('The severity level of this flaw (Critical, High, Medium, Low, Info).')) - description = models.TextField(verbose_name=_('Description'), + verbose_name=_("Severity"), + help_text=_("The severity level of this flaw (Critical, High, Medium, Low, Info).")) + description = models.TextField(verbose_name=_("Description"), help_text=_("Longer more descriptive information about the flaw.")) - mitigation = models.TextField(verbose_name=_('Mitigation'), + mitigation = models.TextField(verbose_name=_("Mitigation"), null=True, blank=True, help_text=_("Text describing how to best fix the flaw.")) - impact = models.TextField(verbose_name=_('Impact'), + impact = models.TextField(verbose_name=_("Impact"), null=True, blank=True, help_text=_("Text describing the impact this flaw has on systems, products, enterprise, etc.")) steps_to_reproduce = models.TextField(null=True, blank=True, - verbose_name=_('Steps to Reproduce'), + verbose_name=_("Steps to Reproduce"), help_text=_("Text describing the steps that must be followed in order to reproduce the flaw / bug.")) severity_justification = models.TextField(null=True, blank=True, - verbose_name=_('Severity Justification'), + verbose_name=_("Severity Justification"), help_text=_("Text describing why a certain severity was associated with this flaw.")) endpoints = models.ManyToManyField(Endpoint, blank=True, - verbose_name=_('Endpoints'), + verbose_name=_("Endpoints"), help_text=_("The hosts within the product that are susceptible to this flaw. + The status of the endpoint associated with this flaw (Vulnerable, Mitigated, ...)."), through=Endpoint_Status) references = models.TextField(null=True, blank=True, db_column="refs", - verbose_name=_('References'), + verbose_name=_("References"), help_text=_("The external documentation available for this flaw.")) test = models.ForeignKey(Test, editable=False, on_delete=models.CASCADE, - verbose_name=_('Test'), + verbose_name=_("Test"), help_text=_("The test that is associated with this flaw.")) active = models.BooleanField(default=True, - verbose_name=_('Active'), + verbose_name=_("Active"), help_text=_("Denotes if this flaw is active or not.")) # note that false positive findings cannot be verified # in defectdojo verified means: "we have verified the finding and it turns out that it's not a false positive" verified = models.BooleanField(default=False, - verbose_name=_('Verified'), + verbose_name=_("Verified"), help_text=_("Denotes if this flaw has been manually verified by the tester.")) false_p = models.BooleanField(default=False, - verbose_name=_('False Positive'), + verbose_name=_("False Positive"), help_text=_("Denotes if this flaw has been deemed a false positive by the tester.")) duplicate = models.BooleanField(default=False, - verbose_name=_('Duplicate'), + verbose_name=_("Duplicate"), help_text=_("Denotes if this flaw is a duplicate of other flaws reported.")) - duplicate_finding = models.ForeignKey('self', + duplicate_finding = models.ForeignKey("self", editable=False, null=True, - related_name='original_finding', + related_name="original_finding", blank=True, on_delete=models.DO_NOTHING, - verbose_name=_('Duplicate Finding'), + verbose_name=_("Duplicate Finding"), help_text=_("Link to the original finding if this finding is a duplicate.")) out_of_scope = models.BooleanField(default=False, - verbose_name=_('Out Of Scope'), + verbose_name=_("Out Of Scope"), help_text=_("Denotes if this flaw falls outside the scope of the test 
risk_accepted = models.BooleanField(default=False,
- verbose_name=_('Risk Accepted'),
+ verbose_name=_("Risk Accepted"),
help_text=_("Denotes if this finding has been marked as an accepted risk."))
under_review = models.BooleanField(default=False,
- verbose_name=_('Under Review'),
+ verbose_name=_("Under Review"),
help_text=_("Denotes if this flaw is currently being reviewed."))
last_status_update = models.DateTimeField(editable=False, null=True, blank=True, auto_now_add=True,
- verbose_name=_('Last Status Update'),
- help_text=_('Timestamp of latest status update (change in status related fields).'))
+ verbose_name=_("Last Status Update"),
+ help_text=_("Timestamp of latest status update (change in status related fields)."))
review_requested_by = models.ForeignKey(Dojo_User, null=True, blank=True,
- related_name='review_requested_by',
+ related_name="review_requested_by",
on_delete=models.RESTRICT,
- verbose_name=_('Review Requested By'),
+ verbose_name=_("Review Requested By"),
help_text=_("Documents who requested a review for this finding."))
reviewers = models.ManyToManyField(Dojo_User, blank=True,
- verbose_name=_('Reviewers'),
+ verbose_name=_("Reviewers"),
help_text=_("Documents who reviewed the flaw."))

# Defect Tracking Review
under_defect_review = models.BooleanField(default=False,
- verbose_name=_('Under Defect Review'),
+ verbose_name=_("Under Defect Review"),
help_text=_("Denotes if this finding is under defect review."))
defect_review_requested_by = models.ForeignKey(Dojo_User, null=True, blank=True,
- related_name='defect_review_requested_by',
+ related_name="defect_review_requested_by",
on_delete=models.RESTRICT,
- verbose_name=_('Defect Review Requested By'),
+ verbose_name=_("Defect Review Requested By"),
help_text=_("Documents who requested a defect review for this flaw."))
is_mitigated = models.BooleanField(default=False,
- verbose_name=_('Is Mitigated'),
+ verbose_name=_("Is Mitigated"),
help_text=_("Denotes if this flaw has been fixed."))
thread_id = models.IntegerField(default=0, editable=False,
- verbose_name=_('Thread ID'))
+ verbose_name=_("Thread ID"))
mitigated = models.DateTimeField(editable=False, null=True, blank=True,
- verbose_name=_('Mitigated'),
+ verbose_name=_("Mitigated"),
help_text=_("Denotes if this flaw has been fixed by storing the date it was fixed."))
mitigated_by = models.ForeignKey(Dojo_User, null=True, editable=False, related_name="mitigated_by", on_delete=models.RESTRICT,
- verbose_name=_('Mitigated By'),
+ verbose_name=_("Mitigated By"),
help_text=_("Documents who has marked this flaw as fixed."))
reporter = models.ForeignKey(Dojo_User, editable=False, default=1,
- related_name='reporter',
+ related_name="reporter",
on_delete=models.RESTRICT,
- verbose_name=_('Reporter'),
+ verbose_name=_("Reporter"),
help_text=_("Documents who reported the flaw."))
notes = models.ManyToManyField(Notes, blank=True, editable=False,
- verbose_name=_('Notes'),
+ verbose_name=_("Notes"),
help_text=_("Stores information pertinent to the flaw or the mitigation."))
numerical_severity = models.CharField(max_length=4,
- verbose_name=_('Numerical Severity'),
- help_text=_('The numerical representation of the severity (S0, S1, S2, S3, S4).'))
+ verbose_name=_("Numerical Severity"),
+ help_text=_("The numerical representation of the severity (S0, S1, S2, S3, S4)."))
last_reviewed = models.DateTimeField(null=True, editable=False,
- verbose_name=_('Last Reviewed'),
+ verbose_name=_("Last Reviewed"),
help_text=_("Provides the date the flaw was last 'touched' by a tester."))
last_reviewed_by = models.ForeignKey(Dojo_User, null=True, editable=False,
- related_name='last_reviewed_by',
+ related_name="last_reviewed_by",
on_delete=models.RESTRICT,
- verbose_name=_('Last Reviewed By'),
+ verbose_name=_("Last Reviewed By"),
help_text=_("Provides the person who last reviewed the flaw."))
files = models.ManyToManyField(FileUpload, blank=True, editable=False,
- verbose_name=_('Files'),
- help_text=_('Files(s) related to the flaw.'))
+ verbose_name=_("Files"),
+ help_text=_("File(s) related to the flaw."))
param = models.TextField(null=True, blank=True, editable=False,
- verbose_name=_('Parameter'),
- help_text=_('Parameter used to trigger the issue (DAST).'))
+ verbose_name=_("Parameter"),
+ help_text=_("Parameter used to trigger the issue (DAST)."))
payload = models.TextField(null=True, blank=True, editable=False,
- verbose_name=_('Payload'),
+ verbose_name=_("Payload"),
help_text=_("Payload used to attack the service / application and trigger the bug / problem."))
hash_code = models.CharField(null=True, blank=True, editable=False, max_length=64,
- verbose_name=_('Hash Code'),
+ verbose_name=_("Hash Code"),
help_text=_("A hash over a configurable set of fields that is used for findings deduplication."))
line = models.IntegerField(null=True, blank=True,
- verbose_name=_('Line number'),
+ verbose_name=_("Line number"),
help_text=_("Source line number of the attack vector."))
file_path = models.CharField(null=True, blank=True, max_length=4000,
- verbose_name=_('File path'),
- help_text=_('Identified file(s) containing the flaw.'))
+ verbose_name=_("File path"),
+ help_text=_("Identified file(s) containing the flaw."))
component_name = models.CharField(null=True, blank=True, max_length=500,
- verbose_name=_('Component name'),
- help_text=_('Name of the affected component (library name, part of a system, ...).'))
+ verbose_name=_("Component name"),
+ help_text=_("Name of the affected component (library name, part of a system, ...)."))
component_version = models.CharField(null=True, blank=True, max_length=100,
- verbose_name=_('Component version'),
+ verbose_name=_("Component version"),
help_text=_("Version of the affected component."))
found_by = models.ManyToManyField(Test_Type, editable=False,
- verbose_name=_('Found by'),
+ verbose_name=_("Found by"),
help_text=_("The name of the scanner that identified the flaw."))
static_finding = models.BooleanField(default=False, verbose_name=_("Static finding (SAST)"),
- help_text=_('Flaw has been detected from a Static Application Security Testing tool (SAST).'))
+ help_text=_("Flaw has been detected from a Static Application Security Testing tool (SAST)."))
dynamic_finding = models.BooleanField(default=True, verbose_name=_("Dynamic finding (DAST)"),
- help_text=_('Flaw has been detected from a Dynamic Application Security Testing tool (DAST).'))
+ help_text=_("Flaw has been detected from a Dynamic Application Security Testing tool (DAST)."))
created = models.DateTimeField(auto_now_add=True, null=True,
- verbose_name=_('Created'),
+ verbose_name=_("Created"),
help_text=_("The date the finding was created inside DefectDojo."))
scanner_confidence = models.IntegerField(null=True, blank=True, default=None, editable=False,
- verbose_name=_('Scanner confidence'),
+ verbose_name=_("Scanner confidence"),
help_text=_("Confidence level of vulnerability which is supplied by the scanner."))
sonarqube_issue = models.ForeignKey(Sonarqube_Issue, null=True, blank=True, help_text=_("The SonarQube issue associated with this finding."),
- verbose_name=_('SonarQube issue'),
+ verbose_name=_("SonarQube issue"),
on_delete=models.CASCADE)
unique_id_from_tool = models.CharField(null=True, blank=True, max_length=500,
- verbose_name=_('Unique ID from tool'),
+ verbose_name=_("Unique ID from tool"),
help_text=_("Vulnerability technical id from the source tool. Allows to track unique vulnerabilities."))
vuln_id_from_tool = models.CharField(null=True, blank=True, max_length=500,
- verbose_name=_('Vulnerability ID from tool'),
- help_text=_('Non-unique technical id from the source tool associated with the vulnerability type.'))
+ verbose_name=_("Vulnerability ID from tool"),
+ help_text=_("Non-unique technical id from the source tool associated with the vulnerability type."))
sast_source_object = models.CharField(null=True, blank=True, max_length=500,
- verbose_name=_('SAST Source Object'),
- help_text=_('Source object (variable, function...) of the attack vector.'))
+ verbose_name=_("SAST Source Object"),
+ help_text=_("Source object (variable, function...) of the attack vector."))
sast_sink_object = models.CharField(null=True, blank=True, max_length=500,
- verbose_name=_('SAST Sink Object'),
- help_text=_('Sink object (variable, function...) of the attack vector.'))
+ verbose_name=_("SAST Sink Object"),
+ help_text=_("Sink object (variable, function...) of the attack vector."))
sast_source_line = models.IntegerField(null=True, blank=True,
- verbose_name=_('SAST Source Line number'),
+ verbose_name=_("SAST Source Line number"),
help_text=_("Source line number of the attack vector."))
sast_source_file_path = models.CharField(null=True, blank=True, max_length=4000,
- verbose_name=_('SAST Source File Path'),
+ verbose_name=_("SAST Source File Path"),
help_text=_("Source file path of the attack vector."))
nb_occurences = models.IntegerField(null=True, blank=True,
- verbose_name=_('Number of occurences'),
+ verbose_name=_("Number of occurrences"),
help_text=_("Number of occurrences in the source tool when several vulnerabilities were found and aggregated by the scanner."))

# this is useful for vulnerabilities on dependencies : helps answer the question "Did I add this vulnerability or was it discovered recently?"
publish_date = models.DateField(null=True, blank=True,
- verbose_name=_('Publish date'),
+ verbose_name=_("Publish date"),
help_text=_("Date when this vulnerability was made publicly available."))

# The service is used to generate the hash_code, so that it gets part of the deduplication of findings.
service = models.CharField(null=True, blank=True, max_length=200,
- verbose_name=_('Service'),
- help_text=_('A service is a self-contained piece of functionality within a Product. This is an optional field which is used in deduplication of findings when set.'))
+ verbose_name=_("Service"),
+ help_text=_("A service is a self-contained piece of functionality within a Product. This is an optional field which is used in deduplication of findings when set."))
planned_remediation_date = models.DateField(null=True, editable=True,
- verbose_name=_('Planned Remediation Date'),
+ verbose_name=_("Planned Remediation Date"),
help_text=_("The date the flaw is expected to be remediated."))
planned_remediation_version = models.CharField(null=True, blank=True, max_length=99,
- verbose_name=_('Planned remediation version'),
- help_text=_('The target version when the vulnerability should be fixed / remediated'))
+ verbose_name=_("Planned remediation version"),
+ help_text=_("The target version when the vulnerability should be fixed / remediated"))
effort_for_fixing = models.CharField(null=True, blank=True, max_length=99,
- verbose_name=_('Effort for fixing'),
- help_text=_('Effort for fixing / remediating the vulnerability (Low, Medium, High)'))
+ verbose_name=_("Effort for fixing"),
+ help_text=_("Effort for fixing / remediating the vulnerability (Low, Medium, High)"))
tags = TagField(blank=True, force_lowercase=True, help_text=_("Add tags that help describe this finding. Choose from the list or add new tags. Press Enter key to add."))
inherited_tags = TagField(blank=True, force_lowercase=True, help_text=_("Internal use tags specifically for maintaining parity with product. This field will be present as a subset in the tags field"))

- SEVERITIES = {'Info': 4, 'Low': 3, 'Medium': 2,
- 'High': 1, 'Critical': 0}
+ SEVERITIES = {"Info": 4, "Low": 3, "Medium": 2,
+ "High": 1, "Critical": 0}

class Meta:
- ordering = ('numerical_severity', '-date', 'title', 'epss_score', 'epss_percentile')
+ ordering = ("numerical_severity", "-date", "title", "epss_score", "epss_percentile")
indexes = [
- models.Index(fields=['test', 'active', 'verified']),
-
- models.Index(fields=['test', 'is_mitigated']),
- models.Index(fields=['test', 'duplicate']),
- models.Index(fields=['test', 'out_of_scope']),
- models.Index(fields=['test', 'false_p']),
-
- models.Index(fields=['test', 'unique_id_from_tool', 'duplicate']),
- models.Index(fields=['test', 'hash_code', 'duplicate']),
-
- models.Index(fields=['test', 'component_name']),
-
- models.Index(fields=['cve']),
- models.Index(fields=['epss_score']),
- models.Index(fields=['epss_percentile']),
- models.Index(fields=['cwe']),
- models.Index(fields=['out_of_scope']),
- models.Index(fields=['false_p']),
- models.Index(fields=['verified']),
- models.Index(fields=['mitigated']),
- models.Index(fields=['active']),
- models.Index(fields=['numerical_severity']),
- models.Index(fields=['date']),
- models.Index(fields=['title']),
- models.Index(fields=['hash_code']),
- models.Index(fields=['unique_id_from_tool']),
+ models.Index(fields=["test", "active", "verified"]),
+
+ models.Index(fields=["test", "is_mitigated"]),
+ models.Index(fields=["test", "duplicate"]),
+ models.Index(fields=["test", "out_of_scope"]),
+ models.Index(fields=["test", "false_p"]),
+
+ models.Index(fields=["test", "unique_id_from_tool", "duplicate"]),
+ models.Index(fields=["test", "hash_code", "duplicate"]),
+
+ models.Index(fields=["test", "component_name"]),
+
+ models.Index(fields=["cve"]),
+ models.Index(fields=["epss_score"]),
+ models.Index(fields=["epss_percentile"]),
+ models.Index(fields=["cwe"]),
+ models.Index(fields=["out_of_scope"]),
+ models.Index(fields=["false_p"]),
+ models.Index(fields=["verified"]),
+ models.Index(fields=["mitigated"]),
+ models.Index(fields=["active"]),
+ models.Index(fields=["numerical_severity"]),
+ models.Index(fields=["date"]),
+
models.Index(fields=["title"]), + models.Index(fields=["hash_code"]), + models.Index(fields=["unique_id_from_tool"]), # models.Index(fields=['file_path']), # can't add index because the field has max length 4000. - models.Index(fields=['line']), - models.Index(fields=['component_name']), - models.Index(fields=['duplicate']), - models.Index(fields=['is_mitigated']), - models.Index(fields=['duplicate_finding', 'id']), + models.Index(fields=["line"]), + models.Index(fields=["component_name"]), + models.Index(fields=["duplicate"]), + models.Index(fields=["is_mitigated"]), + models.Index(fields=["duplicate_finding", "id"]), ] def __str__(self): @@ -2657,7 +2657,7 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru # because we have reduced the number of (super()).save() calls, the helper is no longer called for new findings # so we call it manually - finding_helper.update_finding_status(self, user, changed_fields={'id': (None, None)}) + finding_helper.update_finding_status(self, user, changed_fields={"id": (None, None)}) else: # logger.debug('setting static / dynamic in save') @@ -2681,11 +2681,11 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru finding_helper.post_process_finding_save(self, dedupe_option=dedupe_option, rules_option=rules_option, product_grading_option=product_grading_option, issue_updater_option=issue_updater_option, push_to_jira=push_to_jira, user=user, *args, **kwargs) else: - logger.debug('no options selected that require finding post processing') + logger.debug("no options selected that require finding post processing") def get_absolute_url(self): from django.urls import reverse - return reverse('view_finding', args=[str(self.id)]) + return reverse("view_finding", args=[str(self.id)]) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -2732,7 +2732,7 @@ def copy(self, test=None): return copy def delete(self, *args, **kwargs): - logger.debug('%d finding delete', self.id) + logger.debug("%d finding delete", self.id) import dojo.finding.helper as helper helper.finding_delete(self) super().delete(*args, **kwargs) @@ -2754,7 +2754,7 @@ def risk_acceptance(self): def compute_hash_code(self): # Check if all needed settings are defined - if not hasattr(settings, 'HASHCODE_FIELDS_PER_SCANNER') or not hasattr(settings, 'HASHCODE_ALLOWS_NULL_CWE') or not hasattr(settings, 'HASHCODE_ALLOWED_FIELDS'): + if not hasattr(settings, "HASHCODE_FIELDS_PER_SCANNER") or not hasattr(settings, "HASHCODE_ALLOWS_NULL_CWE") or not hasattr(settings, "HASHCODE_ALLOWED_FIELDS"): deduplicationLogger.debug("no or incomplete configuration per hash_code found; using legacy algorithm") return self.compute_hash_code_legacy() @@ -2763,7 +2763,7 @@ def compute_hash_code(self): # Check if hash_code fields are found in the settings if not hash_code_fields: deduplicationLogger.debug( - "No configuration for hash_code computation found; using default fields for " + ('dynamic' if self.dynamic_finding else 'static') + ' scanners') + "No configuration for hash_code computation found; using default fields for " + ("dynamic" if self.dynamic_finding else "static") + " scanners") return self.compute_hash_code_legacy() # Check if all elements of HASHCODE_FIELDS_PER_SCANNER are in HASHCODE_ALLOWED_FIELDS @@ -2780,24 +2780,24 @@ def compute_hash_code(self): + "'. 
Fallback to legacy mode for this finding.") return self.compute_hash_code_legacy() - deduplicationLogger.debug("computing hash_code for finding id " + str(self.id) + " based on: " + ', '.join(hash_code_fields)) + deduplicationLogger.debug("computing hash_code for finding id " + str(self.id) + " based on: " + ", ".join(hash_code_fields)) - fields_to_hash = '' + fields_to_hash = "" for hashcodeField in hash_code_fields: - if hashcodeField == 'endpoints': + if hashcodeField == "endpoints": # For endpoints, need to compute the field myEndpoints = self.get_endpoints() fields_to_hash = fields_to_hash + myEndpoints - deduplicationLogger.debug(hashcodeField + ' : ' + myEndpoints) - elif hashcodeField == 'vulnerability_ids': + deduplicationLogger.debug(hashcodeField + " : " + myEndpoints) + elif hashcodeField == "vulnerability_ids": # For vulnerability_ids, need to compute the field my_vulnerability_ids = self.get_vulnerability_ids() fields_to_hash = fields_to_hash + my_vulnerability_ids - deduplicationLogger.debug(hashcodeField + ' : ' + my_vulnerability_ids) + deduplicationLogger.debug(hashcodeField + " : " + my_vulnerability_ids) else: # Generically use the finding attribute having the same name, converts to str in case it's integer fields_to_hash = fields_to_hash + str(getattr(self, hashcodeField)) - deduplicationLogger.debug(hashcodeField + ' : ' + str(getattr(self, hashcodeField))) + deduplicationLogger.debug(hashcodeField + " : " + str(getattr(self, hashcodeField))) deduplicationLogger.debug("compute_hash_code - fields_to_hash = " + fields_to_hash) return self.hash_fields(fields_to_hash) @@ -2808,14 +2808,14 @@ def compute_hash_code_legacy(self): # Get vulnerability_ids to use for hash_code computation def get_vulnerability_ids(self): - vulnerability_id_str = '' + vulnerability_id_str = "" if self.id is None: if self.unsaved_vulnerability_ids: deduplicationLogger.debug("get_vulnerability_ids before the finding was saved") # convert list of unsaved vulnerability_ids to the list of their canonical representation vulnerability_id_str_list = [str(vulnerability_id) for vulnerability_id in self.unsaved_vulnerability_ids] # deduplicate (usually done upon saving finding) and sort endpoints - vulnerability_id_str = ''.join(sorted(dict.fromkeys(vulnerability_id_str_list))) + vulnerability_id_str = "".join(sorted(dict.fromkeys(vulnerability_id_str_list))) else: deduplicationLogger.debug("finding has no unsaved vulnerability references") else: @@ -2824,20 +2824,20 @@ def get_vulnerability_ids(self): # convert list of vulnerability_ids to the list of their canonical representation vulnerability_id_str_list = [str(vulnerability_id) for vulnerability_id in vulnerability_ids.all()] # sort vulnerability_ids strings - vulnerability_id_str = ''.join(sorted(vulnerability_id_str_list)) + vulnerability_id_str = "".join(sorted(vulnerability_id_str_list)) return vulnerability_id_str # Get endpoints to use for hash_code computation # (This sometimes reports "None") def get_endpoints(self): - endpoint_str = '' + endpoint_str = "" if (self.id is None): if len(self.unsaved_endpoints) > 0: deduplicationLogger.debug("get_endpoints before the finding was saved") # convert list of unsaved endpoints to the list of their canonical representation endpoint_str_list = [str(endpoint) for endpoint in self.unsaved_endpoints] # deduplicate (usually done upon saving finding) and sort endpoints - endpoint_str = ''.join( + endpoint_str = "".join( sorted( dict.fromkeys(endpoint_str_list))) else: @@ -2850,7 +2850,7 @@ def 
get_endpoints(self): # convert list of endpoints to the list of their canonical representation endpoint_str_list = [str(endpoint) for endpoint in self.endpoints.all()] # sort endpoints strings - endpoint_str = ''.join( + endpoint_str = "".join( sorted( endpoint_str_list, )) @@ -2858,25 +2858,25 @@ def get_endpoints(self): # Compute the hash_code from the fields to hash def hash_fields(self, fields_to_hash): - if hasattr(settings, 'HASH_CODE_FIELDS_ALWAYS'): + if hasattr(settings, "HASH_CODE_FIELDS_ALWAYS"): for field in settings.HASH_CODE_FIELDS_ALWAYS: if getattr(self, field): fields_to_hash += str(getattr(self, field)) - logger.debug('fields_to_hash : %s', fields_to_hash) - logger.debug('fields_to_hash lower: %s', fields_to_hash.lower()) - return hashlib.sha256(fields_to_hash.casefold().encode('utf-8').strip()).hexdigest() + logger.debug("fields_to_hash : %s", fields_to_hash) + logger.debug("fields_to_hash lower: %s", fields_to_hash.lower()) + return hashlib.sha256(fields_to_hash.casefold().encode("utf-8").strip()).hexdigest() def duplicate_finding_set(self): if self.duplicate: if self.duplicate_finding is not None: originals = Finding.objects.get( - id=self.duplicate_finding.id).original_finding.all().order_by('title') + id=self.duplicate_finding.id).original_finding.all().order_by("title") return originals # we need to add the duplicate_finding here as well else: return [] else: - return self.original_finding.all().order_by('title') + return self.original_finding.all().order_by("title") def get_scanner_confidence_text(self): if self.scanner_confidence and isinstance(self.scanner_confidence, int): @@ -2890,37 +2890,37 @@ def get_scanner_confidence_text(self): @staticmethod def get_numerical_severity(severity): - if severity == 'Critical': - return 'S0' - elif severity == 'High': - return 'S1' - elif severity == 'Medium': - return 'S2' - elif severity == 'Low': - return 'S3' - elif severity == 'Info': - return 'S4' + if severity == "Critical": + return "S0" + elif severity == "High": + return "S1" + elif severity == "Medium": + return "S2" + elif severity == "Low": + return "S3" + elif severity == "Info": + return "S4" else: - return 'S5' + return "S5" @staticmethod def get_number_severity(severity): - if severity == 'Critical': + if severity == "Critical": return 4 - elif severity == 'High': + elif severity == "High": return 3 - elif severity == 'Medium': + elif severity == "Medium": return 2 - elif severity == 'Low': + elif severity == "Low": return 1 - elif severity == 'Info': + elif severity == "Info": return 0 else: return 5 @staticmethod def get_severity(num_severity): - severities = {0: 'Info', 1: 'Low', 2: 'Medium', 3: 'High', 4: 'Critical'} + severities = {0: "Info", 1: "Low", 2: "Medium", 3: "High", 4: "Critical"} if num_severity in severities.keys(): return severities[num_severity] @@ -2929,25 +2929,25 @@ def get_severity(num_severity): def status(self): status = [] if self.under_review: - status += ['Under Review'] + status += ["Under Review"] if self.active: - status += ['Active'] + status += ["Active"] else: - status += ['Inactive'] + status += ["Inactive"] if self.verified: - status += ['Verified'] + status += ["Verified"] if self.mitigated or self.is_mitigated: - status += ['Mitigated'] + status += ["Mitigated"] if self.false_p: - status += ['False Positive'] + status += ["False Positive"] if self.out_of_scope: - status += ['Out Of Scope'] + status += ["Out Of Scope"] if self.duplicate: - status += ['Duplicate'] + status += ["Duplicate"] if self.risk_accepted: - status += 
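As a standalone illustration of the deduplication hashing shown above: hash_fields() concatenates the configured field values, casefolds them for case-insensitive matching, and returns a SHA-256 hex digest. A minimal sketch (the concatenated field values below are hypothetical; the real field list comes from settings.HASHCODE_FIELDS_PER_SCANNER):

    import hashlib

    def hash_fields(fields_to_hash):
        # Same transformation as Finding.hash_fields above: casefold,
        # encode, strip, then SHA-256 hex digest.
        return hashlib.sha256(fields_to_hash.casefold().encode("utf-8").strip()).hexdigest()

    # e.g. a concatenation of title, cwe and severity (hypothetical field list)
    print(hash_fields("SQL Injection" + "89" + "High"))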
['Risk Accepted'] + status += ["Risk Accepted"] if not len(status): - status += ['Initial'] + status += ["Initial"] return ", ".join([str(s) for s in status]) @@ -2996,7 +2996,7 @@ def get_sla_start_date(self): def get_sla_period(self): sla_configuration = SLA_Configuration.objects.filter(id=self.test.engagement.product.sla_configuration_id).first() sla_period = getattr(sla_configuration, self.severity.lower(), None) - enforce_period = getattr(sla_configuration, str('enforce_' + self.severity.lower()), None) + enforce_period = getattr(sla_configuration, str("enforce_" + self.severity.lower()), None) return sla_period, enforce_period def set_sla_expiration_date(self): @@ -3114,8 +3114,8 @@ def severity_display(self): def get_breadcrumbs(self): bc = self.test.get_breadcrumbs() - bc += [{'title': str(self), - 'url': reverse('view_finding', args=(self.id,))}] + bc += [{"title": str(self), + "url": reverse("view_finding", args=(self.id,))}] return bc def get_valid_request_response_pairs(self): @@ -3155,15 +3155,15 @@ def get_response(self): reqres = request_response_pairs.first() res = base64.b64decode(reqres.burpResponseBase64) # Removes all blank lines - res = re.sub(r'\n\s*\n', '\n', res) + res = re.sub(r"\n\s*\n", "\n", res) return res def latest_note(self): if self.notes.all(): note = self.notes.all()[0] - return note.date.strftime("%Y-%m-%d %H:%M:%S") + ': ' + note.author.get_full_name() + ' : ' + note.entry + return note.date.strftime("%Y-%m-%d %H:%M:%S") + ": " + note.author.get_full_name() + " : " + note.entry - return '' + return "" def get_sast_source_file_path_with_link(self): from dojo.utils import create_bleached_link @@ -3171,9 +3171,9 @@ def get_sast_source_file_path_with_link(self): return None if self.test.engagement.source_code_management_uri is None: return escape(self.sast_source_file_path) - link = self.test.engagement.source_code_management_uri + '/' + self.sast_source_file_path + link = self.test.engagement.source_code_management_uri + "/" + self.sast_source_file_path if self.sast_source_line: - link = link + '#L' + str(self.sast_source_line) + link = link + "#L" + str(self.sast_source_line) return create_bleached_link(link, self.sast_source_file_path) def get_file_path_with_link(self): @@ -3188,13 +3188,13 @@ def get_file_path_with_link(self): def get_scm_type(self): # extract scm type from product custom field 'scm-type' - if hasattr(self.test.engagement, 'product'): - dojo_meta = DojoMeta.objects.filter(product=self.test.engagement.product, name='scm-type').first() + if hasattr(self.test.engagement, "product"): + dojo_meta = DojoMeta.objects.filter(product=self.test.engagement.product, name="scm-type").first() if dojo_meta: st = dojo_meta.value.strip() if st: return st.lower() - return '' + return "" def scm_public_prepare_base_link(self, uri): # scm public (https://scm-domain.org) url template for browse is: @@ -3203,25 +3203,25 @@ def scm_public_prepare_base_link(self, uri): # https://scm-domain.org//.git # so to create browser url - git url should be recomposed like below: - parts_uri = uri.split('.git') + parts_uri = uri.split(".git") return parts_uri[0] def git_public_prepare_scm_link(self, uri, scm_type): # if commit hash or branch/tag is set for engagement/test - # hash or branch/tag should be appended to base browser link - intermediate_path = '/blob/' if scm_type in ['github', 'gitlab'] else '/src/' + intermediate_path = "/blob/" if scm_type in ["github", "gitlab"] else "/src/" link = self.scm_public_prepare_base_link(uri) if self.test.commit_hash: - link += 
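intermediate_path + self.test.commit_hash + '/' + self.file_path

A simplified standalone sketch of the public-SCM browse-link construction in this hunk: strip the ".git" suffix, pick the per-SCM path segment ("/blob/" for GitHub/GitLab, "/src/" otherwise), then append the ref and file path (the repository URL and file path below are made up):

    def public_git_browse_link(uri, scm_type, ref, file_path):
        # Mirrors scm_public_prepare_base_link + git_public_prepare_scm_link above.
        base = uri.split(".git")[0]
        intermediate = "/blob/" if scm_type in ["github", "gitlab"] else "/src/"
        return base + intermediate + ref + "/" + file_path

    # public_git_browse_link("https://github.com/org/repo.git", "github", "master", "dojo/models.py")
    # -> "https://github.com/org/repo/blob/master/dojo/models.py"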
+ link += intermediate_path + self.test.commit_hash + "/" + self.file_path
elif self.test.engagement.commit_hash:
- link += intermediate_path + self.test.engagement.commit_hash + '/' + self.file_path
+ link += intermediate_path + self.test.engagement.commit_hash + "/" + self.file_path
elif self.test.branch_tag:
- link += intermediate_path + self.test.branch_tag + '/' + self.file_path
+ link += intermediate_path + self.test.branch_tag + "/" + self.file_path
elif self.test.engagement.branch_tag:
- link += intermediate_path + self.test.engagement.branch_tag + '/' + self.file_path
+ link += intermediate_path + self.test.engagement.branch_tag + "/" + self.file_path
else:
- link += intermediate_path + 'master/' + self.file_path
+ link += intermediate_path + "master/" + self.file_path
return link

@@ -3236,14 +3236,14 @@ def bitbucket_standalone_prepare_scm_base_link(self, uri):
# https://bb.example.com/scm//.git (username often could be prefixed with ~)
# so to create browser url - git url should be recomposed like below:
- parts_uri = uri.split('.git')
- parts_scm = parts_uri[0].split('/scm/')
- parts_project = parts_scm[1].split('/')
+ parts_uri = uri.split(".git")
+ parts_scm = parts_uri[0].split("/scm/")
+ parts_project = parts_scm[1].split("/")
project = parts_project[0]
- if project.startswith('~'):
- return parts_scm[0] + '/users/' + parts_project[0][1:] + '/repos/' + parts_project[1] + '/browse'
+ if project.startswith("~"):
+ return parts_scm[0] + "/users/" + parts_project[0][1:] + "/repos/" + parts_project[1] + "/browse"
else:
- return parts_scm[0] + '/projects/' + parts_project[0] + '/repos/' + parts_project[1] + '/browse'
+ return parts_scm[0] + "/projects/" + parts_project[0] + "/repos/" + parts_project[1] + "/browse"

def bitbucket_standalone_prepare_scm_link(self, uri):
# if commit hash or branch/tag is set for engagement/test -
@@ -3251,15 +3251,15 @@ def bitbucket_standalone_prepare_scm_link(self, uri):
link = self.bitbucket_standalone_prepare_scm_base_link(uri)
if self.test.commit_hash:
- link += '/' + self.file_path + '?at=' + self.test.commit_hash
+ link += "/" + self.file_path + "?at=" + self.test.commit_hash
elif self.test.engagement.commit_hash:
- link += '/' + self.file_path + '?at=' + self.test.engagement.commit_hash
+ link += "/" + self.file_path + "?at=" + self.test.engagement.commit_hash
elif self.test.branch_tag:
- link += '/' + self.file_path + '?at=' + self.test.branch_tag
+ link += "/" + self.file_path + "?at=" + self.test.branch_tag
elif self.test.engagement.branch_tag:
- link += '/' + self.file_path + '?at=' + self.test.engagement.branch_tag
+ link += "/" + self.file_path + "?at=" + self.test.engagement.branch_tag
else:
- link += '/' + self.file_path
+ link += "/" + self.file_path
return link

@@ -3270,25 +3270,25 @@ def get_file_path_with_raw_link(self):
link = self.test.engagement.source_code_management_uri
scm_type = self.get_scm_type()
if (self.test.engagement.source_code_management_uri is not None):
- if scm_type == 'bitbucket-standalone':
+ if scm_type == "bitbucket-standalone":
link = self.bitbucket_standalone_prepare_scm_link(link)
- elif scm_type in ['github', 'gitlab', 'gitea', 'codeberg', 'bitbucket']:
+ elif scm_type in ["github", "gitlab", "gitea", "codeberg", "bitbucket"]:
link = self.git_public_prepare_scm_link(link, scm_type)
- elif 'https://github.com/' in self.test.engagement.source_code_management_uri:
- link = self.git_public_prepare_scm_link(link, 'github')
+ elif
"https://github.com/" in self.test.engagement.source_code_management_uri: + link = self.git_public_prepare_scm_link(link, "github") else: - link += '/' + self.file_path + link += "/" + self.file_path else: - link += '/' + self.file_path + link += "/" + self.file_path # than - add line part to browser url if self.line: - if scm_type in ['github', 'gitlab', 'gitea', 'codeberg'] or 'https://github.com/' in self.test.engagement.source_code_management_uri: - link = link + '#L' + str(self.line) - elif scm_type == 'bitbucket-standalone': - link = link + '#' + str(self.line) - elif scm_type == 'bitbucket': - link = link + '#lines-' + str(self.line) + if scm_type in ["github", "gitlab", "gitea", "codeberg"] or "https://github.com/" in self.test.engagement.source_code_management_uri: + link = link + "#L" + str(self.line) + elif scm_type == "bitbucket-standalone": + link = link + "#" + str(self.line) + elif scm_type == "bitbucket": + link = link + "#lines-" + str(self.line) return link def get_references_with_links(self): @@ -3297,13 +3297,13 @@ def get_references_with_links(self): from dojo.utils import create_bleached_link if self.references is None: return None - matches = re.findall(r'([\(|\[]?(https?):((//)|(\\\\))+([\w\d:#@%/;$~_?\+-=\\\.&](#!)?)*[\)|\]]?)', self.references) + matches = re.findall(r"([\(|\[]?(https?):((//)|(\\\\))+([\w\d:#@%/;$~_?\+-=\\\.&](#!)?)*[\)|\]]?)", self.references) processed_matches = [] for match in matches: # Check if match isn't already a markdown link # Only replace the same matches one time, otherwise the links will be corrupted - if not (match[0].startswith('[') or match[0].startswith('(')) and match[0] not in processed_matches: + if not (match[0].startswith("[") or match[0].startswith("(")) and match[0] not in processed_matches: self.references = self.references.replace(match[0], create_bleached_link(match[0], match[0]), 1) processed_matches.append(match[0]) @@ -3346,7 +3346,7 @@ class FindingAdmin(admin.ModelAdmin): # For efficiency with large databases, display many-to-many fields with raw # IDs rather than multi-select raw_id_fields = ( - 'endpoints', + "endpoints", ) @@ -3359,7 +3359,7 @@ def __str__(self): def get_absolute_url(self): from django.urls import reverse - return reverse('view_finding', args=[str(self.finding.id)]) + return reverse("view_finding", args=[str(self.finding.id)]) class Stub_Finding(models.Model): @@ -3371,24 +3371,24 @@ class Stub_Finding(models.Model): reporter = models.ForeignKey(Dojo_User, editable=False, default=1, on_delete=models.RESTRICT) class Meta: - ordering = ('-date', 'title') + ordering = ("-date", "title") def __str__(self): return self.title def get_breadcrumbs(self): bc = self.test.get_breadcrumbs() - bc += [{'title': "Potential Finding: " + str(self), - 'url': reverse('view_potential_finding', args=(self.id,))}] + bc += [{"title": "Potential Finding: " + str(self), + "url": reverse("view_potential_finding", args=(self.id,))}] return bc class Finding_Group(TimeStampedModel): - GROUP_BY_OPTIONS = [('component_name', 'Component Name'), - ('component_name+component_version', 'Component Name + Version'), - ('file_path', 'File path'), - ('finding_title', 'Finding Title')] + GROUP_BY_OPTIONS = [("component_name", "Component Name"), + ("component_name+component_version", "Component Name + Version"), + ("file_path", "File path"), + ("finding_title", "Finding Title")] name = models.CharField(max_length=255, blank=False, null=False) test = models.ForeignKey(Test, on_delete=models.CASCADE) @@ -3446,12 +3446,12 @@ def 
status(self): return None if any(find.active for find in self.findings.all()): - return 'Active' + return "Active" if all(find.is_mitigated for find in self.findings.all()): - return 'Mitigated' + return "Mitigated" - return 'Inactive' + return "Inactive" @cached_property def mitigated(self): @@ -3462,10 +3462,10 @@ def get_sla_start_date(self): def get_absolute_url(self): from django.urls import reverse - return reverse('view_test', args=[str(self.test.id)]) + return reverse("view_test", args=[str(self.test.id)]) class Meta: - ordering = ['id'] + ordering = ["id"] class Finding_Template(models.Model): @@ -3476,7 +3476,7 @@ class Finding_Template(models.Model): blank=False, verbose_name="Vulnerability Id", help_text="An id of a vulnerability in a security advisory associated with this finding. Can be a Common Vulnerabilities and Exposures (CVE) or from other sources.") - cvssv3_regex = RegexValidator(regex=r'^AV:[NALP]|AC:[LH]|PR:[UNLH]|UI:[NR]|S:[UC]|[CIA]:[NLH]', message="CVSS must be entered in format: 'AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H'") + cvssv3_regex = RegexValidator(regex=r"^AV:[NALP]|AC:[LH]|PR:[UNLH]|UI:[NR]|S:[UC]|[CIA]:[NLH]", message="CVSS must be entered in format: 'AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H'") cvssv3 = models.TextField(validators=[cvssv3_regex], max_length=117, null=True) severity = models.CharField(max_length=200, null=True, blank=True) description = models.TextField(null=True, blank=True) @@ -3485,27 +3485,27 @@ class Finding_Template(models.Model): references = models.TextField(null=True, blank=True, db_column="refs") last_used = models.DateTimeField(null=True, editable=False) numerical_severity = models.CharField(max_length=4, null=True, blank=True, editable=False) - template_match = models.BooleanField(default=False, verbose_name=_('Template Match Enabled'), help_text=_("Enables this template for matching remediation advice. Match will be applied to all active, verified findings by CWE.")) - template_match_title = models.BooleanField(default=False, verbose_name=_('Match Template by Title and CWE'), help_text=_('Matches by title text (contains search) and CWE.')) + template_match = models.BooleanField(default=False, verbose_name=_("Template Match Enabled"), help_text=_("Enables this template for matching remediation advice. Match will be applied to all active, verified findings by CWE.")) + template_match_title = models.BooleanField(default=False, verbose_name=_("Match Template by Title and CWE"), help_text=_("Matches by title text (contains search) and CWE.")) tags = TagField(blank=True, force_lowercase=True, help_text=_("Add tags that help describe this finding template. Choose from the list or add new tags. 
Press Enter key to add.")) - SEVERITIES = {'Info': 4, 'Low': 3, 'Medium': 2, - 'High': 1, 'Critical': 0} + SEVERITIES = {"Info": 4, "Low": 3, "Medium": 2, + "High": 1, "Critical": 0} class Meta: - ordering = ['-cwe'] + ordering = ["-cwe"] def __str__(self): return self.title def get_absolute_url(self): from django.urls import reverse - return reverse('edit_template', args=[str(self.id)]) + return reverse("edit_template", args=[str(self.id)]) def get_breadcrumbs(self): - bc = [{'title': str(self), - 'url': reverse('view_template', args=(self.id,))}] + bc = [{"title": str(self), + "url": reverse("view_template", args=(self.id,))}] return bc @cached_property @@ -3538,54 +3538,54 @@ class Vulnerability_Id_Template(models.Model): class Check_List(models.Model): - session_management = models.CharField(max_length=50, default='none') + session_management = models.CharField(max_length=50, default="none") session_issues = models.ManyToManyField(Finding, - related_name='session_issues', + related_name="session_issues", blank=True) - encryption_crypto = models.CharField(max_length=50, default='none') + encryption_crypto = models.CharField(max_length=50, default="none") crypto_issues = models.ManyToManyField(Finding, - related_name='crypto_issues', + related_name="crypto_issues", blank=True) - configuration_management = models.CharField(max_length=50, default='') + configuration_management = models.CharField(max_length=50, default="") config_issues = models.ManyToManyField(Finding, - related_name='config_issues', + related_name="config_issues", blank=True) - authentication = models.CharField(max_length=50, default='none') + authentication = models.CharField(max_length=50, default="none") auth_issues = models.ManyToManyField(Finding, - related_name='auth_issues', + related_name="auth_issues", blank=True) authorization_and_access_control = models.CharField(max_length=50, - default='none') + default="none") author_issues = models.ManyToManyField(Finding, - related_name='author_issues', + related_name="author_issues", blank=True) data_input_sanitization_validation = models.CharField(max_length=50, - default='none') - data_issues = models.ManyToManyField(Finding, related_name='data_issues', + default="none") + data_issues = models.ManyToManyField(Finding, related_name="data_issues", blank=True) - sensitive_data = models.CharField(max_length=50, default='none') + sensitive_data = models.CharField(max_length=50, default="none") sensitive_issues = models.ManyToManyField(Finding, - related_name='sensitive_issues', + related_name="sensitive_issues", blank=True) - other = models.CharField(max_length=50, default='none') - other_issues = models.ManyToManyField(Finding, related_name='other_issues', + other = models.CharField(max_length=50, default="none") + other_issues = models.ManyToManyField(Finding, related_name="other_issues", blank=True) engagement = models.ForeignKey(Engagement, editable=False, - related_name='eng_for_check', on_delete=models.CASCADE) + related_name="eng_for_check", on_delete=models.CASCADE) @staticmethod def get_status(pass_fail): - if pass_fail == 'Pass': - return 'success' - elif pass_fail == 'Fail': - return 'danger' + if pass_fail == "Pass": + return "success" + elif pass_fail == "Fail": + return "danger" else: - return 'warning' + return "warning" def get_breadcrumb(self): bc = self.engagement.get_breadcrumb() - bc += [{'title': "Check List", - 'url': reverse('complete_checklist', + bc += [{"title": "Check List", + "url": reverse("complete_checklist", args=(self.engagement.id,))}] 
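A dict-based sketch equivalent to Check_List.get_status() above, mapping a pass/fail value to the Bootstrap context class used for display:

    def get_status(pass_fail):
        # "Pass" -> green, "Fail" -> red, anything else -> yellow.
        return {"Pass": "success", "Fail": "danger"}.get(pass_fail, "warning")

    assert get_status("Pass") == "success"
    assert get_status("Unknown") == "warning"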
return bc @@ -3596,28 +3596,28 @@ class BurpRawRequestResponse(models.Model): burpResponseBase64 = models.BinaryField() def get_request(self): - return str(base64.b64decode(self.burpRequestBase64), errors='ignore') + return str(base64.b64decode(self.burpRequestBase64), errors="ignore") def get_response(self): - res = str(base64.b64decode(self.burpResponseBase64), errors='ignore') + res = str(base64.b64decode(self.burpResponseBase64), errors="ignore") # Removes all blank lines - res = re.sub(r'\n\s*\n', '\n', res) + res = re.sub(r"\n\s*\n", "\n", res) return res class Risk_Acceptance(models.Model): - TREATMENT_ACCEPT = 'A' - TREATMENT_AVOID = 'V' - TREATMENT_MITIGATE = 'M' - TREATMENT_FIX = 'F' - TREATMENT_TRANSFER = 'T' + TREATMENT_ACCEPT = "A" + TREATMENT_AVOID = "V" + TREATMENT_MITIGATE = "M" + TREATMENT_FIX = "F" + TREATMENT_TRANSFER = "T" TREATMENT_TRANSLATIONS = { - TREATMENT_ACCEPT: _('Accept (The risk is acknowledged, yet remains)'), - TREATMENT_AVOID: _('Avoid (Do not engage with whatever creates the risk)'), - TREATMENT_MITIGATE: _('Mitigate (The risk still exists, yet compensating controls make it less of a threat)'), - TREATMENT_FIX: _('Fix (The risk is eradicated)'), - TREATMENT_TRANSFER: _('Transfer (The risk is transferred to a 3rd party)'), + TREATMENT_ACCEPT: _("Accept (The risk is acknowledged, yet remains)"), + TREATMENT_AVOID: _("Avoid (Do not engage with whatever creates the risk)"), + TREATMENT_MITIGATE: _("Mitigate (The risk still exists, yet compensating controls make it less of a threat)"), + TREATMENT_FIX: _("Fix (The risk is eradicated)"), + TREATMENT_TRANSFER: _("Transfer (The risk is transferred to a 3rd party)"), } TREATMENT_CHOICES = [ @@ -3632,26 +3632,26 @@ class Risk_Acceptance(models.Model): accepted_findings = models.ManyToManyField(Finding) - recommendation = models.CharField(choices=TREATMENT_CHOICES, max_length=2, null=False, default=TREATMENT_FIX, help_text=_("Recommendation from the security team."), verbose_name=_('Security Recommendation')) + recommendation = models.CharField(choices=TREATMENT_CHOICES, max_length=2, null=False, default=TREATMENT_FIX, help_text=_("Recommendation from the security team."), verbose_name=_("Security Recommendation")) recommendation_details = models.TextField(null=True, blank=True, - help_text=_("Explanation of security recommendation"), verbose_name=_('Security Recommendation Details')) + help_text=_("Explanation of security recommendation"), verbose_name=_("Security Recommendation Details")) decision = models.CharField(choices=TREATMENT_CHOICES, max_length=2, null=False, default=TREATMENT_ACCEPT, help_text=_("Risk treatment decision by risk owner")) - decision_details = models.TextField(default=None, blank=True, null=True, help_text=_('If a compensating control exists to mitigate the finding or reduce risk, then list the compensating control(s).')) + decision_details = models.TextField(default=None, blank=True, null=True, help_text=_("If a compensating control exists to mitigate the finding or reduce risk, then list the compensating control(s).")) - accepted_by = models.CharField(max_length=200, default=None, null=True, blank=True, verbose_name=_('Accepted By'), help_text=_("The person that accepts the risk, can be outside of DefectDojo.")) - path = models.FileField(upload_to='risk/%Y/%m/%d', + accepted_by = models.CharField(max_length=200, default=None, null=True, blank=True, verbose_name=_("Accepted By"), help_text=_("The person that accepts the risk, can be outside of DefectDojo.")) + path = 
models.FileField(upload_to="risk/%Y/%m/%d", editable=True, null=True, - blank=True, verbose_name=_('Proof')) + blank=True, verbose_name=_("Proof")) owner = models.ForeignKey(Dojo_User, editable=True, on_delete=models.RESTRICT, help_text=_("User in DefectDojo owning this acceptance. Only the owner and staff users can edit the risk acceptance.")) - expiration_date = models.DateTimeField(default=None, null=True, blank=True, help_text=_('When the risk acceptance expires, the findings will be reactivated (unless disabled below).')) - expiration_date_warned = models.DateTimeField(default=None, null=True, blank=True, help_text=_('(readonly) Date at which notice about the risk acceptance expiration was sent.')) - expiration_date_handled = models.DateTimeField(default=None, null=True, blank=True, help_text=_('(readonly) When the risk acceptance expiration was handled (manually or by the daily job).')) - reactivate_expired = models.BooleanField(null=False, blank=False, default=True, verbose_name=_('Reactivate findings on expiration'), help_text=_('Reactivate findings when risk acceptance expires?')) - restart_sla_expired = models.BooleanField(default=False, null=False, verbose_name=_('Restart SLA on expiration'), help_text=_("When enabled, the SLA for findings is restarted when the risk acceptance expires.")) + expiration_date = models.DateTimeField(default=None, null=True, blank=True, help_text=_("When the risk acceptance expires, the findings will be reactivated (unless disabled below).")) + expiration_date_warned = models.DateTimeField(default=None, null=True, blank=True, help_text=_("(readonly) Date at which notice about the risk acceptance expiration was sent.")) + expiration_date_handled = models.DateTimeField(default=None, null=True, blank=True, help_text=_("(readonly) When the risk acceptance expiration was handled (manually or by the daily job).")) + reactivate_expired = models.BooleanField(null=False, blank=False, default=True, verbose_name=_("Reactivate findings on expiration"), help_text=_("Reactivate findings when risk acceptance expires?")) + restart_sla_expired = models.BooleanField(default=False, null=False, verbose_name=_("Restart SLA on expiration"), help_text=_("When enabled, the SLA for findings is restarted when the risk acceptance expires.")) notes = models.ManyToManyField(Notes, editable=False) created = models.DateTimeField(auto_now_add=True, null=False) @@ -3668,12 +3668,12 @@ def filename(self): @property def name_and_expiration_info(self): - return str(self.name) + (' (expired ' if self.is_expired else ' (expires ') + (timezone.localtime(self.expiration_date).strftime("%b %d, %Y") if self.expiration_date else 'Never') + ')' + return str(self.name) + (" (expired " if self.is_expired else " (expires ") + (timezone.localtime(self.expiration_date).strftime("%b %d, %Y") if self.expiration_date else "Never") + ")" def get_breadcrumbs(self): bc = self.engagement_set.first().get_breadcrumbs() - bc += [{'title': str(self), - 'url': reverse('view_risk_acceptance', args=( + bc += [{"title": str(self), + "url": reverse("view_risk_acceptance", args=( self.engagement_set.first().product.id, self.id))}] return bc @@ -3719,12 +3719,12 @@ class FileAccessToken(models.Model): token = models.CharField(max_length=255) size = models.CharField(max_length=9, choices=( - ('small', 'Small'), - ('medium', 'Medium'), - ('large', 'Large'), - ('thumbnail', 'Thumbnail'), - ('original', 'Original')), - default='medium') + ("small", "Small"), + ("medium", "Medium"), + ("large", "Large"), + ("thumbnail", 
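A standalone rendition of the name_and_expiration_info property above (same label formatting and "Never" fallback; the sample name is made up):

    from django.utils import timezone

    def name_and_expiration_info(name, expiration_date, is_expired):
        # "Never" when no expiration date is set, otherwise the localized date.
        when = (timezone.localtime(expiration_date).strftime("%b %d, %Y")
                if expiration_date else "Never")
        return str(name) + (" (expired " if is_expired else " (expires ") + when + ")"

    # name_and_expiration_info("Accept SQLi backlog", None, False)
    # -> "Accept SQLi backlog (expires Never)"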
"Thumbnail"), + ("original", "Original")), + default="medium") def save(self, *args, **kwargs): if not self.token: @@ -3733,40 +3733,40 @@ def save(self, *args, **kwargs): ANNOUNCEMENT_STYLE_CHOICES = ( - ('info', 'Info'), - ('success', 'Success'), - ('warning', 'Warning'), - ('danger', 'Danger'), + ("info", "Info"), + ("success", "Success"), + ("warning", "Warning"), + ("danger", "Danger"), ) class Announcement(models.Model): message = models.CharField(max_length=500, help_text=_("This dismissable message will be displayed on all pages for authenticated users. It can contain basic html tags, for example https://example.com"), - default='') - style = models.CharField(max_length=64, choices=ANNOUNCEMENT_STYLE_CHOICES, default='info', + default="") + style = models.CharField(max_length=64, choices=ANNOUNCEMENT_STYLE_CHOICES, default="info", help_text=_("The style of banner to display. (info, success, warning, danger)")) dismissable = models.BooleanField(default=False, null=False, blank=True, - verbose_name=_('Dismissable?'), - help_text=_('Ticking this box allows users to dismiss the current announcement'), + verbose_name=_("Dismissable?"), + help_text=_("Ticking this box allows users to dismiss the current announcement"), ) class UserAnnouncement(models.Model): - announcement = models.ForeignKey(Announcement, null=True, editable=False, on_delete=models.CASCADE, related_name='user_announcement') + announcement = models.ForeignKey(Announcement, null=True, editable=False, on_delete=models.CASCADE, related_name="user_announcement") user = models.ForeignKey(Dojo_User, null=True, editable=False, on_delete=models.CASCADE) class BannerConf(models.Model): banner_enable = models.BooleanField(default=False, null=True, blank=True) - banner_message = models.CharField(max_length=500, help_text=_("This message will be displayed on the login page. It can contain basic html tags, for example https://example.com"), default='') + banner_message = models.CharField(max_length=500, help_text=_("This message will be displayed on the login page. It can contain basic html tags, for example https://example.com"), default="") class GITHUB_Conf(models.Model): - configuration_name = models.CharField(max_length=2000, help_text=_("Enter a name to give to this configuration"), default='') - api_key = models.CharField(max_length=2000, help_text=_("Enter your Github API Key"), default='') + configuration_name = models.CharField(max_length=2000, help_text=_("Enter a name to give to this configuration"), default="") + api_key = models.CharField(max_length=2000, help_text=_("Enter your Github API Key"), default="") def __str__(self): return self.configuration_name @@ -3774,11 +3774,11 @@ def __str__(self): class GITHUB_Issue(models.Model): issue_id = models.CharField(max_length=200) - issue_url = models.URLField(max_length=2000, verbose_name=_('GitHub issue URL')) + issue_url = models.URLField(max_length=2000, verbose_name=_("GitHub issue URL")) finding = models.OneToOneField(Finding, null=True, blank=True, on_delete=models.CASCADE) def __str__(self): - return str(self.issue_id) + '| GitHub Issue URL: ' + str(self.issue_url) + return str(self.issue_id) + "| GitHub Issue URL: " + str(self.issue_url) class GITHUB_Clone(models.Model): @@ -3796,8 +3796,8 @@ class GITHUB_Details_Cache(models.Model): class GITHUB_PKey(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) - git_project = models.CharField(max_length=200, blank=True, verbose_name=_('Github project'), help_text=_('Specify your project location. 
(:user/:repo)')) - git_conf = models.ForeignKey(GITHUB_Conf, verbose_name=_('Github Configuration'), + git_project = models.CharField(max_length=200, blank=True, verbose_name=_("Github project"), help_text=_("Specify your project location. (:user/:repo)")) + git_conf = models.ForeignKey(GITHUB_Conf, verbose_name=_("Github Configuration"), null=True, blank=True, on_delete=models.CASCADE) git_push_notes = models.BooleanField(default=False, blank=True, help_text=_("Notes added to findings will be automatically added to the corresponding github issue")) @@ -3806,41 +3806,41 @@ def __str__(self): class JIRA_Instance(models.Model): - configuration_name = models.CharField(max_length=2000, help_text=_("Enter a name to give to this configuration"), default='') - url = models.URLField(max_length=2000, verbose_name=_('JIRA URL'), help_text=_("For more information how to configure Jira, read the DefectDojo documentation.")) + configuration_name = models.CharField(max_length=2000, help_text=_("Enter a name to give to this configuration"), default="") + url = models.URLField(max_length=2000, verbose_name=_("JIRA URL"), help_text=_("For more information how to configure Jira, read the DefectDojo documentation.")) username = models.CharField(max_length=2000) password = models.CharField(max_length=2000) - if hasattr(settings, 'JIRA_ISSUE_TYPE_CHOICES_CONFIG'): + if hasattr(settings, "JIRA_ISSUE_TYPE_CHOICES_CONFIG"): default_issue_type_choices = settings.JIRA_ISSUE_TYPE_CHOICES_CONFIG else: default_issue_type_choices = ( - ('Task', 'Task'), - ('Story', 'Story'), - ('Epic', 'Epic'), - ('Spike', 'Spike'), - ('Bug', 'Bug'), - ('Security', 'Security'), + ("Task", "Task"), + ("Story", "Story"), + ("Epic", "Epic"), + ("Spike", "Spike"), + ("Bug", "Bug"), + ("Security", "Security"), ) default_issue_type = models.CharField(max_length=255, choices=default_issue_type_choices, - default='Bug', - help_text=_('You can define extra issue types in settings.py')) + default="Bug", + help_text=_("You can define extra issue types in settings.py")) issue_template_dir = models.CharField(max_length=255, null=True, blank=True, help_text=_("Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.")) epic_name_id = models.IntegerField(help_text=_("To obtain the 'Epic name id' visit https:///rest/api/2/field and search for Epic Name. 
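The hasattr() check above means the default issue-type choices can be overridden from settings.py; a minimal sketch of such an override (the extra "Vulnerability" entry is hypothetical):

    # settings.py
    JIRA_ISSUE_TYPE_CHOICES_CONFIG = (
        ("Task", "Task"),
        ("Bug", "Bug"),
        ("Vulnerability", "Vulnerability"),
    )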
Copy the number out of cf[number] and paste it here.")) - open_status_key = models.IntegerField(verbose_name=_('Reopen Transition ID'), help_text=_("Transition ID to Re-Open JIRA issues, visit https:///rest/api/latest/issue//transitions?expand=transitions.fields to find the ID for your JIRA instance")) - close_status_key = models.IntegerField(verbose_name=_('Close Transition ID'), help_text=_("Transition ID to Close JIRA issues, visit https:///rest/api/latest/issue//transitions?expand=transitions.fields to find the ID for your JIRA instance")) + open_status_key = models.IntegerField(verbose_name=_("Reopen Transition ID"), help_text=_("Transition ID to Re-Open JIRA issues, visit https:///rest/api/latest/issue//transitions?expand=transitions.fields to find the ID for your JIRA instance")) + close_status_key = models.IntegerField(verbose_name=_("Close Transition ID"), help_text=_("Transition ID to Close JIRA issues, visit https:///rest/api/latest/issue//transitions?expand=transitions.fields to find the ID for your JIRA instance")) info_mapping_severity = models.CharField(max_length=200, help_text=_("Maps to the 'Priority' field in Jira. For example: Info")) low_mapping_severity = models.CharField(max_length=200, help_text=_("Maps to the 'Priority' field in Jira. For example: Low")) medium_mapping_severity = models.CharField(max_length=200, help_text=_("Maps to the 'Priority' field in Jira. For example: Medium")) high_mapping_severity = models.CharField(max_length=200, help_text=_("Maps to the 'Priority' field in Jira. For example: High")) critical_mapping_severity = models.CharField(max_length=200, help_text=_("Maps to the 'Priority' field in Jira. For example: Critical")) finding_text = models.TextField(null=True, blank=True, help_text=_("Additional text that will be added to the finding in Jira. 
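For example including how the finding was created or who to contact for more information."))

The accepted_mapping_resolution and false_positive_mapping_resolution fields just below hold comma-separated JIRA resolution names; the accepted_resolutions and false_positive_resolutions properties split them as in this standalone sketch (sample values made up):

    def parse_resolutions(csv_value):
        # Mirrors the properties below: tolerate None, split on commas, trim whitespace.
        return [m.strip() for m in (csv_value or "").split(",")]

    # parse_resolutions("Won't Fix, Risk Accepted") -> ["Won't Fix", "Risk Accepted"]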
For example including how the finding was created or who to contact for more information.")) - accepted_mapping_resolution = models.CharField(null=True, blank=True, max_length=300, help_text=_('JIRA resolution names (comma-separated values) that maps to an Accepted Finding')) - false_positive_mapping_resolution = models.CharField(null=True, blank=True, max_length=300, help_text=_('JIRA resolution names (comma-separated values) that maps to a False Positive Finding')) + accepted_mapping_resolution = models.CharField(null=True, blank=True, max_length=300, help_text=_("JIRA resolution names (comma-separated values) that maps to an Accepted Finding")) + false_positive_mapping_resolution = models.CharField(null=True, blank=True, max_length=300, help_text=_("JIRA resolution names (comma-separated values) that maps to a False Positive Finding")) global_jira_sla_notification = models.BooleanField(default=True, blank=False, verbose_name=_("Globally send SLA notifications as comment?"), help_text=_("This setting can be overidden at the Product level")) finding_jira_sync = models.BooleanField(default=False, blank=False, verbose_name=_("Automatically sync Findings with JIRA?"), help_text=_("If enabled, this will sync changes to a Finding automatically to JIRA")) @@ -3849,25 +3849,25 @@ def __str__(self): @property def accepted_resolutions(self): - return [m.strip() for m in (self.accepted_mapping_resolution or '').split(',')] + return [m.strip() for m in (self.accepted_mapping_resolution or "").split(",")] @property def false_positive_resolutions(self): - return [m.strip() for m in (self.false_positive_mapping_resolution or '').split(',')] + return [m.strip() for m in (self.false_positive_mapping_resolution or "").split(",")] def get_priority(self, status): - if status == 'Info': + if status == "Info": return self.info_mapping_severity - elif status == 'Low': + elif status == "Low": return self.low_mapping_severity - elif status == 'Medium': + elif status == "Medium": return self.medium_mapping_severity - elif status == 'High': + elif status == "High": return self.high_mapping_severity - elif status == 'Critical': + elif status == "Critical": return self.critical_mapping_severity else: - return 'N/A' + return "N/A" # declare form here as we can't import forms.py due to circular imports not even locally @@ -3882,12 +3882,12 @@ def __init__(self, *args, **kwargs): if self.instance: # keep password from db to use if the user entered no password self.password_from_db = self.instance.password - self.fields['password'].required = False + self.fields["password"].required = False def clean(self): cleaned_data = super().clean() - if not cleaned_data['password']: - cleaned_data['password'] = self.password_from_db + if not cleaned_data["password"]: + cleaned_data["password"] = self.password_from_db return cleaned_data @@ -3897,7 +3897,7 @@ class JIRA_Instance_Admin(admin.ModelAdmin): class JIRA_Project(models.Model): - jira_instance = models.ForeignKey(JIRA_Instance, verbose_name=_('JIRA Instance'), + jira_instance = models.ForeignKey(JIRA_Instance, verbose_name=_("JIRA Instance"), null=True, blank=True, on_delete=models.PROTECT) project_key = models.CharField(max_length=200, blank=True) product = models.ForeignKey(Product, on_delete=models.CASCADE, null=True) @@ -3912,9 +3912,9 @@ class JIRA_Project(models.Model): default_assignee = models.CharField(max_length=200, blank=True, null=True, help_text=_("JIRA default assignee (name). 
If left blank then it defaults to whatever is configured in JIRA.")) jira_labels = models.CharField(max_length=200, blank=True, null=True, - help_text=_('JIRA issue labels space separated')) + help_text=_("JIRA issue labels space separated")) add_vulnerability_id_to_jira_label = models.BooleanField(default=False, - verbose_name=_('Add vulnerability Id as a JIRA label'), + verbose_name=_("Add vulnerability Id as a JIRA label"), blank=False) push_all_issues = models.BooleanField(default=False, blank=True, help_text=_("Automatically maintain parity with JIRA. Always create and update JIRA tickets for findings in this Product.")) @@ -3926,11 +3926,11 @@ class JIRA_Project(models.Model): risk_acceptance_expiration_notification = models.BooleanField(default=False, blank=True, verbose_name=_("Send Risk Acceptance expiration notifications as comment?")) def __str__(self): - return ('%s: ' + self.project_key + '(%s)') % (str(self.id), str(self.jira_instance.url) if self.jira_instance else 'None') + return ("%s: " + self.project_key + "(%s)") % (str(self.id), str(self.jira_instance.url) if self.jira_instance else "None") def clean(self): if not self.jira_instance: - msg = 'Cannot save JIRA Project Configuration without JIRA Instance' + msg = "Cannot save JIRA Project Configuration without JIRA Instance" raise ValidationError(msg) @@ -3946,12 +3946,12 @@ def __init__(self, *args, **kwargs): if self.instance: # keep password from db to use if the user entered no password self.password_from_db = self.instance.password - self.fields['password'].required = False + self.fields["password"].required = False def clean(self): cleaned_data = super().clean() - if not cleaned_data['password']: - cleaned_data['password'] = self.password_from_db + if not cleaned_data["password"]: + cleaned_data["password"] = self.password_from_db return cleaned_data @@ -3970,11 +3970,11 @@ class JIRA_Issue(models.Model): jira_creation = models.DateTimeField(editable=True, null=True, - verbose_name=_('Jira creation'), + verbose_name=_("Jira creation"), help_text=_("The date a Jira issue was created from this finding.")) jira_change = models.DateTimeField(editable=True, null=True, - verbose_name=_('Jira last update'), + verbose_name=_("Jira last update"), help_text=_("The date the linked Jira issue was last modified.")) def __str__(self): @@ -3993,7 +3993,7 @@ def set_obj(self, obj): elif isinstance(obj, Engagement): self.engagement = obj else: - msg = f'unknown object type while creating JIRA_Issue: {to_str_typed(obj)}' + msg = f"unknown object type while creating JIRA_Issue: {to_str_typed(obj)}" raise TypeError(msg) @@ -4018,8 +4018,8 @@ class Notifications(models.Model): engagement_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True) test_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True) - scan_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, help_text=_('Triggered whenever an (re-)import has been done that created/updated/closed findings.')) - scan_added_empty = MultiSelectField(choices=NOTIFICATION_CHOICES, default=[], blank=True, help_text=_('Triggered whenever an (re-)import has been done (even if that created/updated/closed no findings).')) + scan_added = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, help_text=_("Triggered whenever an (re-)import has been done that created/updated/closed findings.")) + scan_added_empty =
MultiSelectField(choices=NOTIFICATION_CHOICES, default=[], blank=True, help_text=_("Triggered whenever an (re-)import has been done (even if that created/updated/closed no findings).")) jira_update = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, verbose_name=_("JIRA problems"), help_text=_("JIRA sync happens in the background, errors will be shown as notifications/alerts so make sure to subscribe")) upcoming_engagement = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True) stale_engagement = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True) @@ -4033,21 +4033,21 @@ class Notifications(models.Model): product = models.ForeignKey(Product, default=None, null=True, editable=False, on_delete=models.CASCADE) template = models.BooleanField(default=False) sla_breach = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, - verbose_name=_('SLA breach'), - help_text=_('Get notified of (upcoming) SLA breaches')) + verbose_name=_("SLA breach"), + help_text=_("Get notified of (upcoming) SLA breaches")) risk_acceptance_expiration = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, - verbose_name=_('Risk Acceptance Expiration'), - help_text=_('Get notified of (upcoming) Risk Acceptance expiries')) + verbose_name=_("Risk Acceptance Expiration"), + help_text=_("Get notified of (upcoming) Risk Acceptance expiries")) sla_breach_combined = MultiSelectField(choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION, blank=True, - verbose_name=_('SLA breach (combined)'), - help_text=_('Get notified of (upcoming) SLA breaches (a message per project)')) + verbose_name=_("SLA breach (combined)"), + help_text=_("Get notified of (upcoming) SLA breaches (a message per project)")) class Meta: constraints = [ - models.UniqueConstraint(fields=['user', 'product'], name="notifications_user_product"), + models.UniqueConstraint(fields=["user", "product"], name="notifications_user_product"), ] indexes = [ - models.Index(fields=['user', 'product']), + models.Index(fields=["user", "product"]), ] def __str__(self): @@ -4086,10 +4086,10 @@ def merge_notifications_list(cls, notifications_list): class NotificationsAdmin(admin.ModelAdmin): - list_filter = ('user', 'product') + list_filter = ("user", "product") def get_list_display(self, request): - list_fields = ['user', 'product'] + list_fields = ["user", "product"] list_fields += [field.name for field in self.model._meta.fields if field.name not in list_fields] return list_fields @@ -4100,33 +4100,33 @@ class Tool_Product_Settings(models.Model): url = models.CharField(max_length=2000, null=True, blank=True) product = models.ForeignKey(Product, default=1, editable=False, on_delete=models.CASCADE) tool_configuration = models.ForeignKey(Tool_Configuration, null=False, - related_name='tool_configuration', on_delete=models.CASCADE) + related_name="tool_configuration", on_delete=models.CASCADE) tool_project_id = models.CharField(max_length=200, null=True, blank=True) notes = models.ManyToManyField(Notes, blank=True, editable=False) class Meta: - ordering = ['name'] + ordering = ["name"] class Tool_Product_History(models.Model): product = models.ForeignKey(Tool_Product_Settings, editable=False, on_delete=models.CASCADE) last_scan = models.DateTimeField(null=False, editable=False, default=now) - succesfull = models.BooleanField(default=True, verbose_name=_('Succesfully')) + succesfull = 
models.BooleanField(default=True, verbose_name=_("Succesfully")) configuration_details = models.CharField(max_length=2000, null=True, blank=True) class Alerts(models.Model): - title = models.CharField(max_length=250, default='', null=False) + title = models.CharField(max_length=250, default="", null=False) description = models.CharField(max_length=2000, null=True, blank=True) url = models.URLField(max_length=2000, null=True, blank=True) - source = models.CharField(max_length=100, default='Generic') - icon = models.CharField(max_length=25, default='icon-user-check') + source = models.CharField(max_length=100, default="Generic") + icon = models.CharField(max_length=25, default="icon-user-check") user_id = models.ForeignKey(Dojo_User, null=True, editable=False, on_delete=models.CASCADE) created = models.DateTimeField(auto_now_add=True, null=False) class Meta: - ordering = ['-created'] + ordering = ["-created"] class Cred_User(models.Model): @@ -4136,13 +4136,13 @@ class Cred_User(models.Model): role = models.CharField(max_length=200, null=False) authentication = models.CharField(max_length=15, choices=( - ('Form', 'Form Authentication'), - ('SSO', 'SSO Redirect')), - default='Form') + ("Form", "Form Authentication"), + ("SSO", "SSO Redirect")), + default="Form") http_authentication = models.CharField(max_length=15, choices=( - ('Basic', 'Basic'), - ('NTLM', 'NTLM')), + ("Basic", "Basic"), + ("NTLM", "NTLM")), null=True, blank=True) description = models.CharField(max_length=2000, null=True, blank=True) url = models.URLField(max_length=2000, null=False) @@ -4150,10 +4150,10 @@ class Cred_User(models.Model): login_regex = models.CharField(max_length=200, null=True, blank=True) logout_regex = models.CharField(max_length=200, null=True, blank=True) notes = models.ManyToManyField(Notes, blank=True, editable=False) - is_valid = models.BooleanField(default=True, verbose_name=_('Login is valid')) + is_valid = models.BooleanField(default=True, verbose_name=_("Login is valid")) class Meta: - ordering = ['name'] + ordering = ["name"] def __str__(self): return self.name + " (" + self.role + ")" @@ -4162,7 +4162,7 @@ def __str__(self): class Cred_Mapping(models.Model): cred_id = models.ForeignKey(Cred_User, null=False, related_name="cred_user", - verbose_name=_('Credential'), on_delete=models.CASCADE) + verbose_name=_("Credential"), on_delete=models.CASCADE) product = models.ForeignKey(Product, null=True, blank=True, related_name="product", on_delete=models.CASCADE) finding = models.ForeignKey(Finding, null=True, blank=True, @@ -4171,7 +4171,7 @@ class Cred_Mapping(models.Model): related_name="engagement", on_delete=models.CASCADE) test = models.ForeignKey(Test, null=True, blank=True, related_name="test", on_delete=models.CASCADE) is_authn_provider = models.BooleanField(default=False, - verbose_name=_('Authentication Provider')) + verbose_name=_("Authentication Provider")) url = models.URLField(max_length=2000, null=True, blank=True) def __str__(self): @@ -4180,7 +4180,7 @@ def __str__(self): class Language_Type(models.Model): language = models.CharField(max_length=100, null=False) - color = models.CharField(max_length=7, null=True, blank=True, verbose_name=_('HTML color')) + color = models.CharField(max_length=7, null=True, blank=True, verbose_name=_("HTML color")) def __str__(self): return self.language @@ -4190,14 +4190,14 @@ class Languages(models.Model): language = models.ForeignKey(Language_Type, on_delete=models.CASCADE) product = models.ForeignKey(Product, on_delete=models.CASCADE) user = 
models.ForeignKey(Dojo_User, editable=True, blank=True, null=True, on_delete=models.RESTRICT) - files = models.IntegerField(blank=True, null=True, verbose_name=_('Number of files')) - blank = models.IntegerField(blank=True, null=True, verbose_name=_('Number of blank lines')) - comment = models.IntegerField(blank=True, null=True, verbose_name=_('Number of comment lines')) - code = models.IntegerField(blank=True, null=True, verbose_name=_('Number of code lines')) + files = models.IntegerField(blank=True, null=True, verbose_name=_("Number of files")) + blank = models.IntegerField(blank=True, null=True, verbose_name=_("Number of blank lines")) + comment = models.IntegerField(blank=True, null=True, verbose_name=_("Number of comment lines")) + code = models.IntegerField(blank=True, null=True, verbose_name=_("Number of code lines")) created = models.DateTimeField(auto_now_add=True, null=False) class Meta: - unique_together = [('language', 'product')] + unique_together = [("language", "product")] def __str__(self): return self.language.language @@ -4207,8 +4207,8 @@ class App_Analysis(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) name = models.CharField(max_length=200, null=False) user = models.ForeignKey(Dojo_User, editable=True, on_delete=models.RESTRICT) - confidence = models.IntegerField(blank=True, null=True, verbose_name=_('Confidence level')) - version = models.CharField(max_length=200, null=True, blank=True, verbose_name=_('Version Number')) + confidence = models.IntegerField(blank=True, null=True, verbose_name=_("Confidence level")) + version = models.CharField(max_length=200, null=True, blank=True, verbose_name=_("Version Number")) icon = models.CharField(max_length=200, null=True, blank=True) website = models.URLField(max_length=400, null=True, blank=True) website_found = models.URLField(max_length=400, null=True, blank=True) @@ -4231,11 +4231,11 @@ def __str__(self): class Objects_Product(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) name = models.CharField(max_length=100, null=True, blank=True) - path = models.CharField(max_length=600, verbose_name=_('Full file path'), + path = models.CharField(max_length=600, verbose_name=_("Full file path"), null=True, blank=True) - folder = models.CharField(max_length=400, verbose_name=_('Folder'), + folder = models.CharField(max_length=400, verbose_name=_("Folder"), null=True, blank=True) - artifact = models.CharField(max_length=400, verbose_name=_('Artifact'), + artifact = models.CharField(max_length=400, verbose_name=_("Artifact"), null=True, blank=True) review_status = models.ForeignKey(Objects_Review, on_delete=models.CASCADE) created = models.DateTimeField(auto_now_add=True, null=False) @@ -4260,7 +4260,7 @@ class Testing_Guide_Category(models.Model): updated = models.DateTimeField(auto_now=True) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): return self.name @@ -4278,18 +4278,18 @@ class Testing_Guide(models.Model): updated = models.DateTimeField(auto_now=True) def __str__(self): - return self.testing_guide_category.name + ': ' + self.name + return self.testing_guide_category.name + ": " + self.name class Benchmark_Type(models.Model): name = models.CharField(max_length=300) version = models.CharField(max_length=15) - source = (('PCI', 'PCI'), - ('OWASP ASVS', 'OWASP ASVS'), - ('OWASP Mobile ASVS', 'OWASP Mobile ASVS')) + source = (("PCI", "PCI"), + ("OWASP ASVS", "OWASP ASVS"), + ("OWASP Mobile ASVS", "OWASP Mobile ASVS")) benchmark_source = 
models.CharField(max_length=20, blank=False, null=True, choices=source, - default='OWASP ASVS') + default="OWASP ASVS") created = models.DateTimeField(auto_now_add=True, null=False) updated = models.DateTimeField(auto_now=True) enabled = models.BooleanField(default=True) @@ -4299,7 +4299,7 @@ def __str__(self): class Benchmark_Category(models.Model): - type = models.ForeignKey(Benchmark_Type, verbose_name=_('Benchmark Type'), on_delete=models.CASCADE) + type = models.ForeignKey(Benchmark_Type, verbose_name=_("Benchmark Type"), on_delete=models.CASCADE) name = models.CharField(max_length=300) objective = models.TextField() references = models.TextField(blank=True, null=True) @@ -4308,10 +4308,10 @@ class Benchmark_Category(models.Model): updated = models.DateTimeField(auto_now=True) class Meta: - ordering = ('name',) + ordering = ("name",) def __str__(self): - return self.name + ': ' + self.type.name + return self.name + ": " + self.type.name class Benchmark_Requirement(models.Model): @@ -4329,54 +4329,54 @@ class Benchmark_Requirement(models.Model): updated = models.DateTimeField(auto_now=True) def __str__(self): - return str(self.objective_number) + ': ' + self.category.name + return str(self.objective_number) + ": " + self.category.name class Benchmark_Product(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) control = models.ForeignKey(Benchmark_Requirement, on_delete=models.CASCADE) - pass_fail = models.BooleanField(default=False, verbose_name=_('Pass'), - help_text=_('Does the product meet the requirement?')) + pass_fail = models.BooleanField(default=False, verbose_name=_("Pass"), + help_text=_("Does the product meet the requirement?")) enabled = models.BooleanField(default=True, - help_text=_('Applicable for this specific product.')) + help_text=_("Applicable for this specific product.")) notes = models.ManyToManyField(Notes, blank=True, editable=False) created = models.DateTimeField(auto_now_add=True, null=False) updated = models.DateTimeField(auto_now=True) class Meta: - unique_together = [('product', 'control')] + unique_together = [("product", "control")] def __str__(self): - return self.product.name + ': ' + self.control.objective_number + ': ' + self.control.category.name + return self.product.name + ": " + self.control.objective_number + ": " + self.control.category.name class Benchmark_Product_Summary(models.Model): product = models.ForeignKey(Product, on_delete=models.CASCADE) benchmark_type = models.ForeignKey(Benchmark_Type, on_delete=models.CASCADE) - asvs_level = (('Level 1', 'Level 1'), - ('Level 2', 'Level 2'), - ('Level 3', 'Level 3')) + asvs_level = (("Level 1", "Level 1"), + ("Level 2", "Level 2"), + ("Level 3", "Level 3")) desired_level = models.CharField(max_length=15, null=False, choices=asvs_level, - default='Level 1') + default="Level 1") current_level = models.CharField(max_length=15, blank=True, null=True, choices=asvs_level, - default='None') + default="None") asvs_level_1_benchmark = models.IntegerField(null=False, default=0, help_text=_("Total number of active benchmarks for this application.")) asvs_level_1_score = models.IntegerField(null=False, default=0, help_text=_("ASVS Level 1 Score")) asvs_level_2_benchmark = models.IntegerField(null=False, default=0, help_text=_("Total number of active benchmarks for this application.")) asvs_level_2_score = models.IntegerField(null=False, default=0, help_text=_("ASVS Level 2 Score")) asvs_level_3_benchmark = models.IntegerField(null=False, default=0, help_text=_("Total number of active 
benchmarks for this application.")) asvs_level_3_score = models.IntegerField(null=False, default=0, help_text=_("ASVS Level 3 Score")) - publish = models.BooleanField(default=False, help_text=_('Publish score to Product.')) + publish = models.BooleanField(default=False, help_text=_("Publish score to Product.")) created = models.DateTimeField(auto_now_add=True, null=False) updated = models.DateTimeField(auto_now=True) class Meta: - unique_together = [('product', 'benchmark_type')] + unique_together = [("product", "benchmark_type")] def __str__(self): - return self.product.name + ': ' + self.benchmark_type.name + return self.product.name + ": " + self.benchmark_type.name # ========================== @@ -4389,16 +4389,16 @@ class Question(PolymorphicModel, TimeStampedModel): """ class Meta: - ordering = ['order'] + ordering = ["order"] order = models.PositiveIntegerField(default=1, - help_text=_('The render order')) + help_text=_("The render order")) optional = models.BooleanField( default=False, help_text=_("If selected, user doesn't have to answer this question")) - text = models.TextField(blank=False, help_text=_('The question text'), default='') + text = models.TextField(blank=False, help_text=_("The question text"), default="") objects = models.Manager() polymorphic = PolymorphicManager() @@ -4430,7 +4430,7 @@ class Choice(TimeStampedModel): label = models.TextField(default="") class Meta: - ordering = ['order'] + ordering = ["order"] def __str__(self): return self.label @@ -4459,15 +4459,15 @@ def get_form(self): # meant to be an abstract survey, identified by name for purpose class Engagement_Survey(models.Model): name = models.CharField(max_length=200, null=False, blank=False, - editable=True, default='') - description = models.TextField(editable=True, default='') + editable=True, default="") + description = models.TextField(editable=True, default="") questions = models.ManyToManyField(Question) active = models.BooleanField(default=True) class Meta: verbose_name = _("Engagement Survey") verbose_name_plural = "Engagement Surveys" - ordering = ('-active', 'name') + ordering = ("-active", "name") def __str__(self): return self.name @@ -4477,16 +4477,16 @@ def __str__(self): class Answered_Survey(models.Model): # tie this to a specific engagement - engagement = models.ForeignKey(Engagement, related_name='engagement+', + engagement = models.ForeignKey(Engagement, related_name="engagement+", null=True, blank=False, editable=True, on_delete=models.CASCADE) # what surveys have been answered survey = models.ForeignKey(Engagement_Survey, on_delete=models.CASCADE) - assignee = models.ForeignKey(Dojo_User, related_name='assignee', + assignee = models.ForeignKey(Dojo_User, related_name="assignee", null=True, blank=True, editable=True, default=None, on_delete=models.RESTRICT) # who answered it - responder = models.ForeignKey(Dojo_User, related_name='responder', + responder = models.ForeignKey(Dojo_User, related_name="responder", null=True, blank=True, editable=True, default=None, on_delete=models.RESTRICT) completed = models.BooleanField(default=False) @@ -4531,8 +4531,8 @@ class Answer(PolymorphicModel, TimeStampedModel): class TextAnswer(Answer): answer = models.TextField( blank=False, - help_text=_('The answer text'), - default='') + help_text=_("The answer text"), + default="") objects = PolymorphicManager() def __str__(self): @@ -4542,20 +4542,20 @@ def __str__(self): class ChoiceAnswer(Answer): answer = models.ManyToManyField( Choice, - help_text=_('The selected choices as the answer')) +
help_text=_("The selected choices as the answer")) objects = PolymorphicManager() def __str__(self): if len(self.answer.all()): return str(self.answer.all()[0]) else: - return 'No Response' + return "No Response" if settings.ENABLE_AUDITLOG: # Register for automatic logging to database - logger.info('enabling audit logging') - auditlog.register(Dojo_User, exclude_fields=['password']) + logger.info("enabling audit logging") + auditlog.register(Dojo_User, exclude_fields=["password"]) auditlog.register(Endpoint) auditlog.register(Engagement) auditlog.register(Finding) @@ -4565,7 +4565,7 @@ def __str__(self): auditlog.register(Test) auditlog.register(Risk_Acceptance) auditlog.register(Finding_Template) - auditlog.register(Cred_User, exclude_fields=['password']) + auditlog.register(Cred_User, exclude_fields=["password"]) from dojo.utils import calculate_grade, to_str_typed # noqa: E402 # there is issue due to a circular import diff --git a/dojo/note_type/urls.py b/dojo/note_type/urls.py index 6f991726d8..76e3c3a6a2 100644 --- a/dojo/note_type/urls.py +++ b/dojo/note_type/urls.py @@ -3,14 +3,14 @@ from dojo.note_type import views urlpatterns = [ - re_path(r'^note_type$', - views.note_type, name='note_type'), - re_path(r'^note/type/(?P\d+)/edit$', - views.edit_note_type, name='edit_note_type'), - re_path(r'^note/type/(?P\d+)/disable$', - views.disable_note_type, name='disable_note_type'), - re_path(r'^note/type/(?P\d+)/enable$', - views.enable_note_type, name='enable_note_type'), - re_path(r'^add_note_type$', - views.add_note_type, name='add_note_type'), + re_path(r"^note_type$", + views.note_type, name="note_type"), + re_path(r"^note/type/(?P\d+)/edit$", + views.edit_note_type, name="edit_note_type"), + re_path(r"^note/type/(?P\d+)/disable$", + views.disable_note_type, name="disable_note_type"), + re_path(r"^note/type/(?P\d+)/enable$", + views.enable_note_type, name="enable_note_type"), + re_path(r"^add_note_type$", + views.add_note_type, name="add_note_type"), ] diff --git a/dojo/note_type/views.py b/dojo/note_type/views.py index 0535d67b5a..c02c92cb82 100644 --- a/dojo/note_type/views.py +++ b/dojo/note_type/views.py @@ -14,49 +14,49 @@ logger = logging.getLogger(__name__) -@user_is_configuration_authorized('dojo.view_note_type') +@user_is_configuration_authorized("dojo.view_note_type") def note_type(request): - initial_queryset = Note_Type.objects.all().order_by('name') - name_words = initial_queryset.values_list('name', flat=True) + initial_queryset = Note_Type.objects.all().order_by("name") + name_words = initial_queryset.values_list("name", flat=True) ntl = NoteTypesFilter(request.GET, queryset=initial_queryset) nts = get_page_items(request, ntl.qs, 25) add_breadcrumb(title="Note Type List", top_level=True, request=request) - return render(request, 'dojo/note_type.html', { - 'name': 'Note Type List', - 'metric': False, - 'user': request.user, - 'nts': nts, - 'ntl': ntl, - 'name_words': name_words}) + return render(request, "dojo/note_type.html", { + "name": "Note Type List", + "metric": False, + "user": request.user, + "nts": nts, + "ntl": ntl, + "name_words": name_words}) -@user_is_configuration_authorized('dojo.change_note_type') +@user_is_configuration_authorized("dojo.change_note_type") def edit_note_type(request, ntid): nt = get_object_or_404(Note_Type, pk=ntid) is_single = nt.is_single nt_form = EditNoteTypeForm(instance=nt, is_single=is_single) - if request.method == "POST" and request.POST.get('edit_note_type'): + if request.method == "POST" and request.POST.get("edit_note_type"): 
nt_form = EditNoteTypeForm(request.POST, instance=nt, is_single=is_single) if nt_form.is_valid(): nt = nt_form.save() messages.add_message( request, messages.SUCCESS, - 'Note type updated successfully.', + "Note type updated successfully.", extra_tags="alert-success", ) return HttpResponseRedirect(reverse("note_type")) add_breadcrumb(title="Edit Note Type", top_level=False, request=request) - return render(request, 'dojo/edit_note_type.html', { - 'name': 'Edit Note Type', - 'metric': False, - 'user': request.user, - 'nt_form': nt_form, - 'nt': nt}) + return render(request, "dojo/edit_note_type.html", { + "name": "Edit Note Type", + "metric": False, + "user": request.user, + "nt_form": nt_form, + "nt": nt}) -@user_is_configuration_authorized('dojo.change_note_type') +@user_is_configuration_authorized("dojo.change_note_type") def disable_note_type(request, ntid): nt = get_object_or_404(Note_Type, pk=ntid) nt_form = DisableOrEnableNoteTypeForm(instance=nt) @@ -67,21 +67,21 @@ def disable_note_type(request, ntid): messages.add_message( request, messages.SUCCESS, - 'Note type Disabled successfully.', + "Note type Disabled successfully.", extra_tags="alert-success", ) return HttpResponseRedirect(reverse("note_type")) add_breadcrumb(title="Disable Note Type", top_level=False, request=request) - return render(request, 'dojo/disable_note_type.html', { - 'name': 'Disable Note Type', - 'metric': False, - 'user': request.user, - 'nt_form': nt_form, - 'nt': nt}) + return render(request, "dojo/disable_note_type.html", { + "name": "Disable Note Type", + "metric": False, + "user": request.user, + "nt_form": nt_form, + "nt": nt}) -@user_is_configuration_authorized('dojo.change_note_type') +@user_is_configuration_authorized("dojo.change_note_type") def enable_note_type(request, ntid): nt = get_object_or_404(Note_Type, pk=ntid) nt_form = DisableOrEnableNoteTypeForm(instance=nt) @@ -97,30 +97,30 @@ def enable_note_type(request, ntid): ) return HttpResponseRedirect(reverse("note_type")) add_breadcrumb(title="Enable Note Type", top_level=False, request=request) - return render(request, 'dojo/enable_note_type.html', { - 'name': 'Enable Note Type', - 'metric': False, - 'user': request.user, - 'nt_form': nt_form, - 'nt': nt}) + return render(request, "dojo/enable_note_type.html", { + "name": "Enable Note Type", + "metric": False, + "user": request.user, + "nt_form": nt_form, + "nt": nt}) -@user_is_configuration_authorized('dojo.add_note_type') +@user_is_configuration_authorized("dojo.add_note_type") def add_note_type(request): form = NoteTypeForm() - if request.method == 'POST': + if request.method == "POST": form = NoteTypeForm(request.POST) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - 'Note Type added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('note_type')) + "Note Type added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("note_type")) add_breadcrumb(title="Add Note Type", top_level=False, request=request) - return render(request, 'dojo/add_note_type.html', { - 'name': 'Add Note Type', - 'metric': False, - 'user': request.user, - 'form': form, + return render(request, "dojo/add_note_type.html", { + "name": "Add Note Type", + "metric": False, + "user": request.user, + "form": form, }) diff --git a/dojo/notes/urls.py b/dojo/notes/urls.py index ee8861ce2b..9384225eb0 100644 --- a/dojo/notes/urls.py +++ b/dojo/notes/urls.py @@ -3,7 +3,7 @@ from . 
import views urlpatterns = [ - re_path(r'^notes/(?P<id>\d+)/delete/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.delete_note, name='delete_note'), - re_path(r'^notes/(?P<id>\d+)/edit/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.edit_note, name='edit_note'), - re_path(r'^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.note_history, name='note_history'), + re_path(r"^notes/(?P<id>\d+)/delete/(?P<page>[\w-]+)/(?P<objid>\d+)$", views.delete_note, name="delete_note"), + re_path(r"^notes/(?P<id>\d+)/edit/(?P<page>[\w-]+)/(?P<objid>\d+)$", views.edit_note, name="edit_note"), + re_path(r"^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$", views.note_history, name="note_history"), ] diff --git a/dojo/notes/views.py b/dojo/notes/views.py index 3ccb8ae1f8..3bf06a824e 100644 --- a/dojo/notes/views.py +++ b/dojo/notes/views.py @@ -48,13 +48,13 @@ def delete_note(request, id, page, objid): note.delete() messages.add_message(request, messages.SUCCESS, - _('Note deleted.'), - extra_tags='alert-success') + _("Note deleted."), + extra_tags="alert-success") else: messages.add_message(request, messages.SUCCESS, - _('Note was not successfully deleted.'), - extra_tags='alert-danger') + _("Note was not successfully deleted."), + extra_tags="alert-danger") return HttpResponseRedirect(reverse(reverse_url, args=(object_id, ))) @@ -87,7 +87,7 @@ def edit_note(request, id, page, objid): if note_type_activation: available_note_types = find_available_notetypes(object, note) - if request.method == 'POST': + if request.method == "POST": if note_type_activation: form = TypedNoteForm(request.POST, available_note_types=available_note_types, instance=note) else: @@ -115,14 +115,14 @@ form = NoteForm() messages.add_message(request, messages.SUCCESS, - _('Note edited.'), - extra_tags='alert-success') + _("Note edited."), + extra_tags="alert-success") return HttpResponseRedirect(reverse(reverse_url, args=(object_id, ))) else: messages.add_message(request, messages.SUCCESS, - _('Note was not successfully edited.'), - extra_tags='alert-danger') + _("Note was not successfully edited."), + extra_tags="alert-danger") else: if note_type_activation: form = TypedNoteForm(available_note_types=available_note_types, instance=note) @@ -130,11 +130,11 @@ form = NoteForm(instance=note) return render( - request, 'dojo/edit_note.html', { - 'note': note, - 'form': form, - 'page': page, - 'objid': objid, + request, "dojo/edit_note.html", { + "note": note, + "form": form, + "page": page, + "objid": objid, }) @@ -163,22 +163,22 @@ def note_history(request, id, page, objid): history = note.history.all() - if request.method == 'POST': + if request.method == "POST": return HttpResponseRedirect(reverse(reverse_url, args=(object_id, ))) return render( - request, 'dojo/view_note_history.html', { - 'history': history, - 'note': note, - 'page': page, - 'objid': objid, + request, "dojo/view_note_history.html", { + "history": history, + "note": note, + "page": page, + "objid": objid, }) def find_available_notetypes(finding, editing_note): notes = finding.notes.all() - single_note_types = Note_Type.objects.filter(is_single=True, is_active=True).values_list('id', flat=True) - multiple_note_types = Note_Type.objects.filter(is_single=False, is_active=True).values_list('id', flat=True) + single_note_types = Note_Type.objects.filter(is_single=True, is_active=True).values_list("id", flat=True) + multiple_note_types = Note_Type.objects.filter(is_single=False, is_active=True).values_list("id", flat=True) available_note_types = [] for note_type_id in multiple_note_types:
available_note_types.append(note_type_id) @@ -190,5 +190,5 @@ def find_available_notetypes(finding, editing_note): available_note_types.append(note_type_id) available_note_types.append(editing_note.note_type_id) available_note_types = list(set(available_note_types)) - queryset = Note_Type.objects.filter(id__in=available_note_types).order_by('-id') + queryset = Note_Type.objects.filter(id__in=available_note_types).order_by("-id") return queryset diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index b09bf1bea6..5b0d333f92 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -28,52 +28,52 @@ def create_notification(event=None, **kwargs): except Exception: system_notifications = Notifications() - if 'recipients' in kwargs: + if "recipients" in kwargs: # mimic existing code so that when recipients is specified, no other system or personal notifications are sent. - logger.debug('creating notifications for recipients: %s', kwargs['recipients']) - for recipient_notifications in Notifications.objects.filter(user__username__in=kwargs['recipients'], user__is_active=True, product=None): + logger.debug("creating notifications for recipients: %s", kwargs["recipients"]) + for recipient_notifications in Notifications.objects.filter(user__username__in=kwargs["recipients"], user__is_active=True, product=None): if event in settings.NOTIFICATIONS_SYSTEM_LEVEL_TRUMP: # merge the system level notifications with the personal level # this allows for system to trump the personal merged_notifications = Notifications.merge_notifications_list([system_notifications, recipient_notifications]) merged_notifications.user = recipient_notifications.user - logger.debug('Sent notification to %s', merged_notifications.user) + logger.debug("Sent notification to %s", merged_notifications.user) process_notifications(event, merged_notifications, **kwargs) else: # Do not trump user preferences and send notifications as usual - logger.debug('Sent notification to %s', recipient_notifications.user) + logger.debug("Sent notification to %s", recipient_notifications.user) process_notifications(event, recipient_notifications, **kwargs) else: - logger.debug('creating system notifications for event: %s', event) + logger.debug("creating system notifications for event: %s", event) # send system notifications to all admin users # parse kwargs before converting them to dicts product_type = None - if 'product_type' in kwargs: - product_type = kwargs.get('product_type') + if "product_type" in kwargs: + product_type = kwargs.get("product_type") logger.debug("Defined product type %s", product_type) product = None - if 'product' in kwargs: - product = kwargs.get('product') + if "product" in kwargs: + product = kwargs.get("product") logger.debug("Defined product %s", product) - elif 'engagement' in kwargs: - product = kwargs['engagement'].product + elif "engagement" in kwargs: + product = kwargs["engagement"].product logger.debug("Defined product of engagement %s", product) - elif 'test' in kwargs: - product = kwargs['test'].engagement.product + elif "test" in kwargs: + product = kwargs["test"].engagement.product logger.debug("Defined product of test %s", product) - elif 'finding' in kwargs: - product = kwargs['finding'].test.engagement.product + elif "finding" in kwargs: + product = kwargs["finding"].test.engagement.product logger.debug("Defined product of finding %s", product) - elif 'obj' in kwargs: + elif "obj" in kwargs: from dojo.utils import get_product - product = get_product(kwargs['obj']) 
+ product = get_product(kwargs["obj"]) logger.debug("Defined product of obj %s", product) # System notifications are sent once with user=None, which will trigger email to configured system email, to global slack channel, etc. @@ -82,22 +82,22 @@ def create_notification(event=None, **kwargs): # All admins will also receive system notifications, but as part of the personal global notifications section below # This time user is set, so will trigger email to personal email, to personal slack channel (mention), etc. # only retrieve users which have at least one notification type enabled for this event type. - logger.debug('creating personal notifications for event: %s', event) + logger.debug("creating personal notifications for event: %s", event) # There are notifications like deleting a product type that shall not be sent to users. # These notifications will have the parameter no_users=True - if not ('no_users' in kwargs and kwargs['no_users'] is True): + if not ("no_users" in kwargs and kwargs["no_users"] is True): # get users with either global notifications, or a product specific notification # and all admin/superuser, they will always be notified users = Dojo_User.objects.filter(is_active=True).prefetch_related(Prefetch( "notifications_set", queryset=Notifications.objects.filter(Q(product_id=product) | Q(product__isnull=True)), to_attr="applicable_notifications", - )).annotate(applicable_notifications_count=Count('notifications__id', filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\ + )).annotate(applicable_notifications_count=Count("notifications__id", filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\ .filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True)) # only send to authorized users or admin/superusers - logger.debug('Filtering users for the product %s', product) + logger.debug("Filtering users for the product %s", product) if product: users = get_authorized_users_for_product_and_product_type(users, product, Permissions.Product_View) @@ -106,7 +106,7 @@ def create_notification(event=None, **kwargs): users = get_authorized_users_for_product_type(users, product_type, Permissions.Product_Type_View) else: # neither product_type nor product defined, we should not make noise and send only notifications to admins - logger.debug('Product is not specified, making it silent') + logger.debug("Product is not specified, making it silent") users = users.filter(is_superuser=True) for user in users: @@ -126,26 +126,26 @@ def create_description(event, *args, **kwargs): if "description" not in kwargs.keys(): - if event == 'product_added': - kwargs["description"] = _('Product %s has been created successfully.') % kwargs['title'] - elif event == 'product_type_added': - kwargs["description"] = _('Product Type %s has been created successfully.') % kwargs['title'] + if event == "product_added": + kwargs["description"] = _("Product %s has been created successfully.") % kwargs["title"] + elif event == "product_type_added": + kwargs["description"] = _("Product Type %s has been created successfully.") % kwargs["title"] else: - kwargs["description"] = _('Event %s has occurred.') % str(event) + kwargs["description"] = _("Event %s has occurred.") % str(event) return kwargs["description"] def create_notification_message(event, user, notification_type, *args, **kwargs): template = f"notifications/{notification_type}/{event.replace('/', '')}.tpl" - kwargs.update({'user': user}) +
kwargs.update({"user": user}) notification_message = None try: notification_message = render_to_string(template, kwargs) logger.debug("Rendering from the template %s", template) except TemplateDoesNotExist: - logger.debug('template not found or not implemented yet: %s', template) + logger.debug("template not found or not implemented yet: %s", template) except Exception as e: logger.error("error during rendering of template %s exception is %s", template, e) finally: @@ -153,38 +153,38 @@ def create_notification_message(event, user, notification_type, *args, **kwargs) kwargs["description"] = create_description(event, *args, **kwargs) notification_message = render_to_string(f"notifications/{notification_type}/other.tpl", kwargs) - return notification_message if notification_message else '' + return notification_message if notification_message else "" def process_notifications(event, notifications=None, **kwargs): from dojo.utils import get_system_setting if not notifications: - logger.warning('no notifications!') + logger.warning("no notifications!") return - logger.debug('sending notification ' + ('asynchronously' if we_want_async() else 'synchronously')) - logger.debug('process notifications for %s', notifications.user) - logger.debug('notifications: %s', vars(notifications)) + logger.debug("sending notification " + ("asynchronously" if we_want_async() else "synchronously")) + logger.debug("process notifications for %s", notifications.user) + logger.debug("notifications: %s", vars(notifications)) - slack_enabled = get_system_setting('enable_slack_notifications') - msteams_enabled = get_system_setting('enable_msteams_notifications') - mail_enabled = get_system_setting('enable_mail_notifications') + slack_enabled = get_system_setting("enable_slack_notifications") + msteams_enabled = get_system_setting("enable_msteams_notifications") + mail_enabled = get_system_setting("enable_mail_notifications") - if slack_enabled and 'slack' in getattr(notifications, event, getattr(notifications, 'other')): - logger.debug('Sending Slack Notification') + if slack_enabled and "slack" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug("Sending Slack Notification") send_slack_notification(event, notifications.user, **kwargs) - if msteams_enabled and 'msteams' in getattr(notifications, event, getattr(notifications, 'other')): - logger.debug('Sending MSTeams Notification') + if msteams_enabled and "msteams" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug("Sending MSTeams Notification") send_msteams_notification(event, notifications.user, **kwargs) - if mail_enabled and 'mail' in getattr(notifications, event, getattr(notifications, 'other')): - logger.debug('Sending Mail Notification') + if mail_enabled and "mail" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug("Sending Mail Notification") send_mail_notification(event, notifications.user, **kwargs) - if 'alert' in getattr(notifications, event, getattr(notifications, 'other')): - logger.debug(f'Sending Alert to {notifications.user}') + if "alert" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug(f"Sending Alert to {notifications.user}") send_alert_notification(event, notifications.user, **kwargs) @@ -195,26 +195,26 @@ def send_slack_notification(event, user=None, *args, **kwargs): def _post_slack_message(channel): res = requests.request( - method='POST', - url='https://slack.com/api/chat.postMessage', + method="POST", + 
url="https://slack.com/api/chat.postMessage", data={ - 'token': get_system_setting('slack_token'), - 'channel': channel, - 'username': get_system_setting('slack_username'), - 'text': create_notification_message(event, user, 'slack', *args, **kwargs), + "token": get_system_setting("slack_token"), + "channel": channel, + "username": get_system_setting("slack_username"), + "text": create_notification_message(event, user, "slack", *args, **kwargs), }) - if 'error' in res.text: + if "error" in res.text: logger.error("Slack is complaining. See raw text below.") logger.error(res.text) - raise RuntimeError('Error posting message to Slack: ' + res.text) + raise RuntimeError("Error posting message to Slack: " + res.text) try: # If the user has slack information on profile and chooses to receive slack notifications # Will receive a DM if user is not None: - logger.debug('personal notification to slack for user %s', user) - if hasattr(user, 'usercontactinfo') and user.usercontactinfo.slack_username is not None: + logger.debug("personal notification to slack for user %s", user) + if hasattr(user, "usercontactinfo") and user.usercontactinfo.slack_username is not None: slack_user_id = user.usercontactinfo.slack_user_id if not slack_user_id: # Lookup the slack userid the first time, then save it. @@ -228,22 +228,22 @@ def _post_slack_message(channel): # only send notification if we managed to find the slack_user_id if slack_user_id: - channel = f'@{slack_user_id}' + channel = f"@{slack_user_id}" _post_slack_message(channel) else: logger.info("The user %s does not have a email address informed for Slack in profile.", user) else: # System scope slack notifications, and not personal would still see this go through - if get_system_setting('slack_channel') is not None: - channel = get_system_setting('slack_channel') + if get_system_setting("slack_channel") is not None: + channel = get_system_setting("slack_channel") logger.info(f"Sending system notification to system channel {channel}.") _post_slack_message(channel) else: - logger.debug('slack_channel not configured: skipping system notification') + logger.debug("slack_channel not configured: skipping system notification") except Exception as e: logger.exception(e) - log_alert(e, 'Slack Notification', title=kwargs['title'], description=str(e), url=kwargs.get('url', None)) + log_alert(e, "Slack Notification", title=kwargs["title"], description=str(e), url=kwargs.get("url", None)) @dojo_async_task @@ -254,84 +254,84 @@ def send_msteams_notification(event, user=None, *args, **kwargs): try: # Microsoft Teams doesn't offer direct message functionality, so no MS Teams PM functionality here... 
if user is None: - if get_system_setting('msteams_url') is not None: - logger.debug('sending MSTeams message') + if get_system_setting("msteams_url") is not None: + logger.debug("sending MSTeams message") res = requests.request( - method='POST', - url=get_system_setting('msteams_url'), - data=create_notification_message(event, None, 'msteams', *args, **kwargs)) + method="POST", + url=get_system_setting("msteams_url"), + data=create_notification_message(event, None, "msteams", *args, **kwargs)) if res.status_code != 200: logger.error("Error when sending message to Microsoft Teams") logger.error(res.status_code) logger.error(res.text) - raise RuntimeError('Error posting message to Microsoft Teams: ' + res.text) + raise RuntimeError("Error posting message to Microsoft Teams: " + res.text) else: - logger.info('Webhook URL for Microsoft Teams not configured: skipping system notification') + logger.info("Webhook URL for Microsoft Teams not configured: skipping system notification") except Exception as e: logger.exception(e) - log_alert(e, "Microsoft Teams Notification", title=kwargs['title'], description=str(e), url=kwargs['url']) + log_alert(e, "Microsoft Teams Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) @dojo_async_task @app.task def send_mail_notification(event, user=None, *args, **kwargs): from dojo.utils import get_system_setting - email_from_address = get_system_setting('email_from') + email_from_address = get_system_setting("email_from") # Attempt to get the "to" address if "recipient" in kwargs: address = kwargs.get("recipient") elif user: address = user.email else: - address = get_system_setting('mail_notifications_to') + address = get_system_setting("mail_notifications_to") - logger.debug('notification email for user %s to %s', user, address) + logger.debug("notification email for user %s to %s", user, address) try: subject = f"{get_system_setting('team_name')} notification" - if 'title' in kwargs: + if "title" in kwargs: subject += f": {kwargs['title']}" email = EmailMessage( subject, - create_notification_message(event, user, 'mail', *args, **kwargs), + create_notification_message(event, user, "mail", *args, **kwargs), email_from_address, [address], headers={"From": f"{email_from_address}"}, ) - email.content_subtype = 'html' - logger.debug('sending email alert') + email.content_subtype = "html" + logger.debug("sending email alert") # logger.info(create_notification_message(event, user, 'mail', *args, **kwargs)) email.send(fail_silently=False) except Exception as e: logger.exception(e) - log_alert(e, "Email Notification", title=kwargs['title'], description=str(e), url=kwargs['url']) + log_alert(e, "Email Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) def send_alert_notification(event, user=None, *args, **kwargs): - logger.debug('sending alert notification to %s', user) + logger.debug("sending alert notification to %s", user) try: # no need to differentiate between user/no user - icon = kwargs.get('icon', 'info-circle') + icon = kwargs.get("icon", "info-circle") try: source = Notifications._meta.get_field(event).verbose_name.title()[:100] except FieldDoesNotExist: source = event.replace("_", " ").title()[:100] alert = Alerts( user_id=user, - title=kwargs.get('title')[:250], - description=create_notification_message(event, user, 'alert', *args, **kwargs)[:2000], - url=kwargs.get('url', reverse('alerts')), + title=kwargs.get("title")[:250], + description=create_notification_message(event, user, "alert", *args, 
**kwargs)[:2000], + url=kwargs.get("url", reverse("alerts")), icon=icon[:25], source=source, ) # relative urls will fail validation - alert.clean_fields(exclude=['url']) + alert.clean_fields(exclude=["url"]) alert.save() except Exception as e: logger.exception(e) - log_alert(e, "Alert Notification", title=kwargs['title'], description=str(e), url=kwargs['url']) + log_alert(e, "Alert Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) def get_slack_user_id(user_email): @@ -342,18 +342,18 @@ def get_slack_user_id(user_email): user_id = None res = requests.request( - method='POST', - url='https://slack.com/api/users.lookupByEmail', - data={'token': get_system_setting('slack_token'), 'email': user_email}) + method="POST", + url="https://slack.com/api/users.lookupByEmail", + data={"token": get_system_setting("slack_token"), "email": user_email}) user = json.loads(res.text) slack_user_is_found = False if user: - if 'error' in user: + if "error" in user: logger.error("Slack is complaining. See error message below.") logger.error(user) - raise RuntimeError('Error getting user list from Slack: ' + res.text) + raise RuntimeError("Error getting user list from Slack: " + res.text) else: if "email" in user["user"]["profile"]: if user_email == user["user"]["profile"]["email"]: @@ -377,20 +377,20 @@ def log_alert(e, notification_type=None, *args, **kwargs): for user in users: alert = Alerts( user_id=user, - url=kwargs.get('url', reverse('alerts')), - title=kwargs.get('title', 'Notification issue')[:250], - description=kwargs.get('description', str(e))[:2000], + url=kwargs.get("url", reverse("alerts")), + title=kwargs.get("title", "Notification issue")[:250], + description=kwargs.get("description", str(e))[:2000], icon="exclamation-triangle", - source=notification_type[:100] if notification_type else kwargs.get('source', 'unknown')[:100]) + source=notification_type[:100] if notification_type else kwargs.get("source", "unknown")[:100]) # relative urls will fail validation - alert.clean_fields(exclude=['url']) + alert.clean_fields(exclude=["url"]) alert.save() def notify_test_created(test): - title = 'Test created for ' + str(test.engagement.product) + ': ' + str(test.engagement.name) + ': ' + str(test) - create_notification(event='test_added', title=title, test=test, engagement=test.engagement, product=test.engagement.product, - url=reverse('view_test', args=(test.id,))) + title = "Test created for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) + create_notification(event="test_added", title=title, test=test, engagement=test.engagement, product=test.engagement.product, + url=reverse("view_test", args=(test.id,))) def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[], findings_reactivated=[], findings_untouched=[]): @@ -401,13 +401,13 @@ def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[ findings_reactivated = sorted(findings_reactivated, key=lambda x: x.numerical_severity) findings_untouched = sorted(findings_untouched, key=lambda x: x.numerical_severity) - title = 'Created/Updated ' + str(updated_count) + " findings for " + str(test.engagement.product) + ': ' + str(test.engagement.name) + ': ' + str(test) + title = "Created/Updated " + str(updated_count) + " findings for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) if updated_count == 0: - event = 'scan_added_empty' + event = "scan_added_empty" else: - event = 'scan_added' + event = 
"scan_added" create_notification(event=event, title=title, findings_new=new_findings, findings_mitigated=findings_mitigated, findings_reactivated=findings_reactivated, finding_count=updated_count, test=test, engagement=test.engagement, product=test.engagement.product, findings_untouched=findings_untouched, - url=reverse('view_test', args=(test.id,))) + url=reverse("view_test", args=(test.id,))) diff --git a/dojo/notifications/urls.py b/dojo/notifications/urls.py index b7171e3779..dc91f7a04e 100644 --- a/dojo/notifications/urls.py +++ b/dojo/notifications/urls.py @@ -3,8 +3,8 @@ from . import views urlpatterns = [ - re_path(r'^notifications$', views.PersonalNotificationsView.as_view(), name='notifications'), - re_path(r'^notifications/system$', views.SystemNotificationsView.as_view(), name='system_notifications'), - re_path(r'^notifications/personal$', views.PersonalNotificationsView.as_view(), name='personal_notifications'), - re_path(r'^notifications/template$', views.TemplateNotificationsView.as_view(), name='template_notifications'), + re_path(r"^notifications$", views.PersonalNotificationsView.as_view(), name="notifications"), + re_path(r"^notifications/system$", views.SystemNotificationsView.as_view(), name="system_notifications"), + re_path(r"^notifications/personal$", views.PersonalNotificationsView.as_view(), name="personal_notifications"), + re_path(r"^notifications/template$", views.TemplateNotificationsView.as_view(), name="template_notifications"), ] diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py index 2c102f59ef..8a94d2ad7c 100644 --- a/dojo/notifications/views.py +++ b/dojo/notifications/views.py @@ -42,10 +42,10 @@ def get_enabled_notifications(self): def get_initial_context(self, request: HttpRequest, notifications: Notifications, scope: str): return { - 'form': self.get_form(request, notifications), - 'scope': scope, - 'enabled_notifications': self.get_enabled_notifications(), - 'admin': request.user.is_superuser, + "form": self.get_form(request, notifications), + "scope": scope, + "enabled_notifications": self.get_enabled_notifications(), + "admin": request.user.is_superuser, } def set_breadcrumbs(self, request: HttpRequest): @@ -58,8 +58,8 @@ def process_form(self, request: HttpRequest, context: dict): messages.add_message( request, messages.SUCCESS, - _('Settings saved.'), - extra_tags='alert-success') + _("Settings saved."), + extra_tags="alert-success") return request, True return request, False diff --git a/dojo/object/urls.py b/dojo/object/urls.py index aa87010b32..b31e935064 100644 --- a/dojo/object/urls.py +++ b/dojo/object/urls.py @@ -3,9 +3,9 @@ from . 
import views urlpatterns = [ - re_path(r'^product/(?P<pid>\d+)/object/add$', views.new_object, name='new_object'), - re_path(r'^product/(?P<pid>\d+)/object/(?P<ttid>\d+)/edit$', views.edit_object, name='edit_object'), - re_path(r'^product/(?P<pid>\d+)/object/view$', views.view_objects, name='view_objects'), - re_path(r'^product/(?P<pid>\d+)/object/(?P<ttid>\d+)/delete$', views.delete_object, - name='delete_object'), + re_path(r"^product/(?P<pid>\d+)/object/add$", views.new_object, name="new_object"), + re_path(r"^product/(?P<pid>\d+)/object/(?P<ttid>\d+)/edit$", views.edit_object, name="edit_object"), + re_path(r"^product/(?P<pid>\d+)/object/view$", views.view_objects, name="view_objects"), + re_path(r"^product/(?P<pid>\d+)/object/(?P<ttid>\d+)/delete$", views.delete_object, + name="delete_object"), ] diff --git a/dojo/object/views.py b/dojo/object/views.py index cdaa60b05a..dfb4f59055 100644 --- a/dojo/object/views.py +++ b/dojo/object/views.py @@ -15,10 +15,10 @@ logger = logging.getLogger(__name__) -@user_is_authorized(Product, Permissions.Product_Tracking_Files_Add, 'pid') +@user_is_authorized(Product, Permissions.Product_Tracking_Files_Add, "pid") def new_object(request, pid): prod = get_object_or_404(Product, id=pid) - if request.method == 'POST': + if request.method == "POST": tform = ObjectSettingsForm(request.POST) if tform.is_valid(): new_prod = tform.save(commit=False) @@ -27,87 +27,87 @@ messages.add_message(request, messages.SUCCESS, - 'Added Tracked File to a Product', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_objects', args=(pid,))) + "Added Tracked File to a Product", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_objects", args=(pid,))) else: tform = ObjectSettingsForm() product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings") - return render(request, 'dojo/new_object.html', - {'tform': tform, - 'product_tab': product_tab, - 'pid': prod.id}) + return render(request, "dojo/new_object.html", + {"tform": tform, + "product_tab": product_tab, + "pid": prod.id}) -@user_is_authorized(Product, Permissions.Product_Tracking_Files_View, 'pid') +@user_is_authorized(Product, Permissions.Product_Tracking_Files_View, "pid") def view_objects(request, pid): product = get_object_or_404(Product, id=pid) - object_queryset = Objects_Product.objects.filter(product=pid).order_by('path', 'folder', 'artifact') + object_queryset = Objects_Product.objects.filter(product=pid).order_by("path", "folder", "artifact") product_tab = Product_Tab(product, title="Tracked Product Files, Paths and Artifacts", tab="settings") return render(request, - 'dojo/view_objects.html', + "dojo/view_objects.html", { - 'object_queryset': object_queryset, - 'product_tab': product_tab, - 'product': product, + "object_queryset": object_queryset, + "product_tab": product_tab, + "product": product, }) -@user_is_authorized(Product, Permissions.Product_Tracking_Files_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Tracking_Files_Edit, "pid") def edit_object(request, pid, ttid): object = Objects_Product.objects.get(pk=ttid) product = get_object_or_404(Product, id=pid) if object.product != product: - msg = f'Product {pid} does not fit to product of Object {object.product.id}' + msg = f"Product {pid} does not fit to product of Object {object.product.id}" raise BadRequest(msg) - if request.method == 'POST': + if request.method == "POST": tform = ObjectSettingsForm(request.POST, instance=object) if tform.is_valid(): tform.save() messages.add_message(request,
messages.SUCCESS, - 'Tool Product Configuration Successfully Updated.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_objects', args=(pid,))) + "Tool Product Configuration Successfully Updated.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_objects", args=(pid,))) else: tform = ObjectSettingsForm(instance=object) product_tab = Product_Tab(product, title="Edit Tracked Files", tab="settings") return render(request, - 'dojo/edit_object.html', + "dojo/edit_object.html", { - 'tform': tform, - 'product_tab': product_tab, + "tform": tform, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Product_Tracking_Files_Delete, 'pid') +@user_is_authorized(Product, Permissions.Product_Tracking_Files_Delete, "pid") def delete_object(request, pid, ttid): object = Objects_Product.objects.get(pk=ttid) product = get_object_or_404(Product, id=pid) if object.product != product: - msg = f'Product {pid} does not fit to product of Object {object.product.id}' + msg = f"Product {pid} does not fit to product of Object {object.product.id}" raise BadRequest(msg) - if request.method == 'POST': + if request.method == "POST": tform = ObjectSettingsForm(request.POST, instance=object) object.delete() messages.add_message(request, messages.SUCCESS, - 'Tracked Product Files Deleted.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_objects', args=(pid,))) + "Tracked Product Files Deleted.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_objects", args=(pid,))) else: tform = DeleteObjectsSettingsForm(instance=object) product_tab = Product_Tab(product, title="Delete Product Tool Configuration", tab="settings") return render(request, - 'dojo/delete_object.html', + "dojo/delete_object.html", { - 'tform': tform, - 'product_tab': product_tab, + "tform": tform, + "product_tab": product_tab, }) diff --git a/dojo/okta.py b/dojo/okta.py deleted file mode 100644 index cad1bc081c..0000000000 --- a/dojo/okta.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Author: selten -Taken from Pull Request #333 of -python-social-auth/socail-core - -""" -from jose import jwt -from jose.jwt import ExpiredSignatureError, JWTError -from six.moves.urllib.parse import urljoin -from social_core.backends.oauth import BaseOAuth2 -from social_core.backends.open_id_connect import OpenIdConnectAuth -from social_core.utils import append_slash - - -class OktaMixin: - def api_url(self): - return append_slash(self.setting('API_URL')) - - def authorization_url(self): - return self._url('v1/authorize') - - def access_token_url(self): - return self._url('v1/token') - - def _url(self, path): - return urljoin(append_slash(self.setting('API_URL')), path) - - def oidc_config(self): - return self.get_json(self._url('/.well-known/openid-configuration?client_id=' + self.setting('KEY'))) - - -class OktaOAuth2(OktaMixin, BaseOAuth2): - """Okta OAuth authentication backend""" - name = 'okta-oauth2' - REDIRECT_STATE = False - ACCESS_TOKEN_METHOD = 'POST' - SCOPE_SEPARATOR = ' ' - - DEFAULT_SCOPE = [ - 'openid', 'profile', - ] - EXTRA_DATA = [ - ('refresh_token', 'refresh_token', True), - ('expires_in', 'expires'), - ('token_type', 'token_type', True), - ] - - def get_user_details(self, response): - """Return user details from Okta account""" - return {'username': response.get('preferred_username'), - 'email': response.get('preferred_username') or '', - 'first_name': response.get('given_name'), - 'last_name': response.get('family_name')} - - def 
user_data(self, access_token, *args, **kwargs): - """Loads user data from Okta""" - return self.get_json( - self._url('v1/userinfo'), - headers={ - 'Authorization': f'Bearer {access_token}', - }, - ) - - -class OktaOpenIdConnect(OktaOAuth2, OpenIdConnectAuth): - """Okta OpenID-Connect authentication backend""" - name = 'okta-openidconnect' - REDIRECT_STATE = False - ACCESS_TOKEN_METHOD = 'POST' - RESPONSE_TYPE = 'code' - - def validate_and_return_id_token(self, id_token, access_token): - """ - Validates the id_token using Okta. - """ - client_id, _client_secret = self.get_key_and_secret() - claims = None - k = None - - for key in self.get_jwks_keys(): - try: - jwt.decode(id_token, key, audience=client_id, access_token=access_token) - k = key - break - except ExpiredSignatureError: - k = key - break - except JWTError: - if k is None and client_id == 'a-key': - k = self.get_jwks_keys()[0] - - claims = jwt.decode( - id_token, - k, - audience=client_id, - issuer=self.id_token_issuer(), - access_token=access_token, - ) - - self.validate_claims(claims) - - return claims diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 8f05d35d4c..ea020d2d92 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -19,47 +19,47 @@ def social_uid(backend, details, response, *args, **kwargs): if settings.AZUREAD_TENANT_OAUTH2_ENABLED and isinstance(backend, AzureADTenantOAuth2): """Return user details from Azure AD account""" fullname, first_name, last_name, upn = ( - response.get('name', ''), - response.get('given_name', ''), - response.get('family_name', ''), - response.get('upn'), + response.get("name", ""), + response.get("given_name", ""), + response.get("family_name", ""), + response.get("upn"), ) uid = backend.get_user_id(details, response) - return {'username': upn, - 'email': upn, - 'fullname': fullname, - 'first_name': first_name, - 'last_name': last_name, - 'uid': uid} + return {"username": upn, + "email": upn, + "fullname": fullname, + "first_name": first_name, + "last_name": last_name, + "uid": uid} elif settings.GOOGLE_OAUTH_ENABLED and isinstance(backend, GoogleOAuth2): """Return user details from Google account""" - if 'sub' in response: - google_uid = response['sub'] - elif 'email' in response: - google_uid = response['email'] + if "sub" in response: + google_uid = response["sub"] + elif "email" in response: + google_uid = response["email"] else: - google_uid = response['id'] + google_uid = response["id"] fullname, first_name, last_name, email = ( - response.get('fullname', ''), - response.get('first_name', ''), - response.get('last_name', ''), - response.get('email'), + response.get("fullname", ""), + response.get("first_name", ""), + response.get("last_name", ""), + response.get("email"), ) - return {'username': email, - 'email': email, - 'fullname': fullname, - 'first_name': first_name, - 'last_name': last_name, - 'uid': google_uid} + return {"username": email, + "email": email, + "fullname": fullname, + "first_name": first_name, + "last_name": last_name, + "uid": google_uid} else: uid = backend.get_user_id(details, response) # Used for most backends if uid: - return {'uid': uid} + return {"uid": uid} # Until OKTA PR in social-core is merged # This modified way needs to work else: - return {'uid': response.get('preferred_username')} + return {"uid": response.get("preferred_username")} def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): @@ -71,22 +71,22 @@ def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): # In some wild cases, there 
could be two social auth users # connected to the same DefectDojo user. Grab the newest one soc = user.social_auth.order_by("-created").first() - token = soc.extra_data['access_token'] + token = soc.extra_data["access_token"] group_names = [] - if 'groups' not in kwargs['response'] or kwargs['response']['groups'] == "": + if "groups" not in kwargs["response"] or kwargs["response"]["groups"] == "": logger.warning("No groups in response. Stopping to update groups of user based on azureAD") return - group_IDs = kwargs['response']['groups'] + group_IDs = kwargs["response"]["groups"] for group_from_response in group_IDs: try: logger.debug("Analysing Group_ID " + group_from_response) - request_headers = {'Authorization': 'Bearer ' + token} + request_headers = {"Authorization": "Bearer " + token} if is_group_id(group_from_response): logger.debug("detected " + group_from_response + " as groupID and will fetch the displayName from microsoft graph") - group_name_request = requests.get((str(soc.extra_data['resource']) + '/v1.0/groups/' + str(group_from_response) + '?$select=displayName'), headers=request_headers) + group_name_request = requests.get((str(soc.extra_data["resource"]) + "/v1.0/groups/" + str(group_from_response) + "?$select=displayName"), headers=request_headers) group_name_request.raise_for_status() group_name_request_json = group_name_request.json() - group_name = group_name_request_json['displayName'] + group_name = group_name_request_json["displayName"] else: logger.debug("detected " + group_from_response + " as group name and will not call microsoft graph") group_name = group_from_response @@ -105,7 +105,7 @@ def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): def is_group_id(group): - if re.search(r'^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$', group): + if re.search(r"^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$", group): return True else: return False @@ -117,13 +117,13 @@ def assign_user_to_groups(user, group_names, social_provider): if created_group: logger.debug("Group %s for social provider %s was created", str(group), social_provider) _group_member, is_member_created = Dojo_Group_Member.objects.get_or_create(group=group, user=user, defaults={ - 'role': Role.objects.get(id=Roles.Maintainer)}) + "role": Role.objects.get(id=Roles.Maintainer)}) if is_member_created: logger.debug("User %s become member of group %s (social provider: %s)", user, str(group), social_provider) def cleanup_old_groups_for_user(user, group_names): - for group_member in Dojo_Group_Member.objects.select_related('group').filter(user=user): + for group_member in Dojo_Group_Member.objects.select_related("group").filter(user=user): group = group_member.group if str(group) not in group_names: logger.debug("Deleting membership of user %s from %s group %s", user, group.social_provider, str(group)) @@ -136,14 +136,14 @@ def update_product_access(backend, uid, user=None, social=None, *args, **kwargs) user_product_names = [prod.name for prod in get_authorized_products(Permissions.Product_View, user)] # Get Gitlab access token soc = user.social_auth.get() - token = soc.extra_data['access_token'] + token = soc.extra_data["access_token"] # Get user's projects list on Gitlab gl = gitlab.Gitlab(settings.SOCIAL_AUTH_GITLAB_API_URL, oauth_token=token) # Get each project path_with_namespace as future product name projects = gl.projects.list(membership=True, min_access_level=settings.GITLAB_PROJECT_MIN_ACCESS_LEVEL, 
all=True) project_names = [project.path_with_namespace for project in projects] # Create product_type if necessary - product_type, _created = Product_Type.objects.get_or_create(name='Gitlab Import') + product_type, _created = Product_Type.objects.get_or_create(name="Gitlab Import") # For each project: create a new product or update product's authorized_users for project in projects: if project.path_with_namespace not in user_product_names: @@ -154,16 +154,16 @@ def update_product_access(backend, uid, user=None, social=None, *args, **kwargs) # If not, create a product with that name and the GitLab product type product = Product(name=project.path_with_namespace, prod_type=product_type) product.save() - _product_member, _created = Product_Member.objects.get_or_create(product=product, user=user, defaults={'role': Role.objects.get(id=Roles.Owner)}) + _product_member, _created = Product_Member.objects.get_or_create(product=product, user=user, defaults={"role": Role.objects.get(id=Roles.Owner)}) # Import tags and/orl URL if necessary if settings.GITLAB_PROJECT_IMPORT_TAGS: - if hasattr(project, 'topics'): + if hasattr(project, "topics"): if len(project.topics) > 0: product.tags = ",".join(project.topics) - elif hasattr(project, 'tag_list') and len(project.tag_list) > 0: + elif hasattr(project, "tag_list") and len(project.tag_list) > 0: product.tags = ",".join(project.tag_list) if settings.GITLAB_PROJECT_IMPORT_URL: - if hasattr(project, 'web_url') and len(project.web_url) > 0: + if hasattr(project, "web_url") and len(project.web_url) > 0: product.description = "[" + project.web_url + "](" + project.web_url + ")" if settings.GITLAB_PROJECT_IMPORT_TAGS or settings.GITLAB_PROJECT_IMPORT_URL: product.save() @@ -176,7 +176,7 @@ def update_product_access(backend, uid, user=None, social=None, *args, **kwargs) def sanitize_username(username): - allowed_chars_regex = re.compile(r'[\w@.+_-]') + allowed_chars_regex = re.compile(r"[\w@.+_-]") allowed_chars = filter(lambda char: allowed_chars_regex.match(char), list(username)) return "".join(allowed_chars) diff --git a/dojo/product/queries.py b/dojo/product/queries.py index 90307238e3..8d562c0f9a 100644 --- a/dojo/product/queries.py +++ b/dojo/product/queries.py @@ -32,33 +32,33 @@ def get_authorized_products(permission, user=None): return Product.objects.none() if user.is_superuser: - return Product.objects.all().order_by('name') + return Product.objects.all().order_by("name") if user_has_global_permission(user, permission): - return Product.objects.all().order_by('name') + return Product.objects.all().order_by("name") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('prod_type_id'), + product_type=OuterRef("prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('pk'), + product=OuterRef("pk"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('prod_type_id'), + product_type=OuterRef("prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('pk'), + product=OuterRef("pk"), group__users=user, role__in=roles) products = Product.objects.annotate( prod_type__member=Exists(authorized_product_type_roles), member=Exists(authorized_product_roles), prod_type__authorized_group=Exists(authorized_product_type_groups), - 
authorized_group=Exists(authorized_product_groups)).order_by('name') + authorized_group=Exists(authorized_product_groups)).order_by("name") products = products.filter( Q(prod_type__member=True) | Q(member=True) | Q(prod_type__authorized_group=True) | Q(authorized_group=True)) @@ -70,7 +70,7 @@ def get_authorized_members_for_product(product, permission): user = get_current_user() if user.is_superuser or user_has_permission(user, product, permission): - return Product_Member.objects.filter(product=product).order_by('user__first_name', 'user__last_name').select_related('role', 'user') + return Product_Member.objects.filter(product=product).order_by("user__first_name", "user__last_name").select_related("role", "user") else: return None @@ -80,7 +80,7 @@ def get_authorized_groups_for_product(product, permission): if user.is_superuser or user_has_permission(user, product, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) - return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by('group__name').select_related('role') + return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by("group__name").select_related("role") else: return None @@ -92,13 +92,13 @@ def get_authorized_product_members(permission): return Product_Member.objects.none() if user.is_superuser: - return Product_Member.objects.all().select_related('role') + return Product_Member.objects.all().order_by("id").select_related("role") if user_has_global_permission(user, permission): - return Product_Member.objects.all().select_related('role') + return Product_Member.objects.all().order_by("id").select_related("role") products = get_authorized_products(permission) - return Product_Member.objects.filter(product__in=products).select_related('role') + return Product_Member.objects.filter(product__in=products).order_by("id").select_related("role") def get_authorized_product_members_for_user(user, permission): @@ -108,13 +108,13 @@ def get_authorized_product_members_for_user(user, permission): return Product_Member.objects.none() if request_user.is_superuser: - return Product_Member.objects.filter(user=user).select_related('role', 'product') + return Product_Member.objects.filter(user=user).select_related("role", "product") - if hasattr(request_user, 'global_role') and request_user.global_role.role is not None and role_has_permission(request_user.global_role.role.id, permission): - return Product_Member.objects.filter(user=user).select_related('role', 'product') + if hasattr(request_user, "global_role") and request_user.global_role.role is not None and role_has_permission(request_user.global_role.role.id, permission): + return Product_Member.objects.filter(user=user).select_related("role", "product") products = get_authorized_products(permission) - return Product_Member.objects.filter(user=user, product__in=products).select_related('role', 'product') + return Product_Member.objects.filter(user=user, product__in=products).select_related("role", "product") def get_authorized_product_groups(permission): @@ -124,10 +124,10 @@ def get_authorized_product_groups(permission): return Product_Group.objects.none() if user.is_superuser: - return Product_Group.objects.all().select_related('role') + return Product_Group.objects.all().order_by("id").select_related("role") products = get_authorized_products(permission) - return Product_Group.objects.filter(product__in=products).select_related('role') + return 
Product_Group.objects.filter(product__in=products).order_by("id").select_related("role") def get_authorized_app_analysis(permission): @@ -137,33 +137,33 @@ def get_authorized_app_analysis(permission): return App_Analysis.objects.none() if user.is_superuser: - return App_Analysis.objects.all().order_by('name') + return App_Analysis.objects.all().order_by("id") if user_has_global_permission(user, permission): - return App_Analysis.objects.all().order_by('name') + return App_Analysis.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) app_analysis = App_Analysis.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)).order_by('name') + product__authorized_group=Exists(authorized_product_groups)).order_by("id") app_analysis = app_analysis.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) @@ -178,58 +178,58 @@ def get_authorized_dojo_meta(permission): return DojoMeta.objects.none() if user.is_superuser: - return DojoMeta.objects.all().order_by('name') + return DojoMeta.objects.all().order_by("id") if user_has_global_permission(user, permission): - return DojoMeta.objects.all().order_by('name') + return DojoMeta.objects.all().order_by("id") roles = get_roles_for_permission(permission) product_authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) product_authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) product_authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) product_authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) endpoint_authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('endpoint__product__prod_type_id'), + product_type=OuterRef("endpoint__product__prod_type_id"), user=user, role__in=roles) endpoint_authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('endpoint__product_id'), + product=OuterRef("endpoint__product_id"), user=user, role__in=roles) endpoint_authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('endpoint__product__prod_type_id'), + 
product_type=OuterRef("endpoint__product__prod_type_id"), group__users=user, role__in=roles) endpoint_authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('endpoint__product_id'), + product=OuterRef("endpoint__product_id"), group__users=user, role__in=roles) finding_authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('finding__test__engagement__product__prod_type_id'), + product_type=OuterRef("finding__test__engagement__product__prod_type_id"), user=user, role__in=roles) finding_authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('finding__test__engagement__product_id'), + product=OuterRef("finding__test__engagement__product_id"), user=user, role__in=roles) finding_authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('finding__test__engagement__product__prod_type_id'), + product_type=OuterRef("finding__test__engagement__product__prod_type_id"), group__users=user, role__in=roles) finding_authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('finding__test__engagement__product_id'), + product=OuterRef("finding__test__engagement__product_id"), group__users=user, role__in=roles) dojo_meta = DojoMeta.objects.annotate( @@ -245,7 +245,7 @@ def get_authorized_dojo_meta(permission): finding__test__engagement__product__member=Exists(finding_authorized_product_roles), finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups), finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups), - ).order_by('name') + ).order_by("id") dojo_meta = dojo_meta.filter( Q(product__prod_type__member=True) | Q(product__member=True) @@ -270,33 +270,33 @@ def get_authorized_languages(permission): return Languages.objects.none() if user.is_superuser: - return Languages.objects.all().order_by('language') + return Languages.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Languages.objects.all().order_by('language') + return Languages.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) languages = Languages.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)).order_by('language') + product__authorized_group=Exists(authorized_product_groups)).order_by("id") languages = languages.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) @@ -311,33 +311,33 @@ def get_authorized_engagement_presets(permission): return 
Engagement_Presets.objects.none() if user.is_superuser: - return Engagement_Presets.objects.all().order_by('title') + return Engagement_Presets.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Engagement_Presets.objects.all().order_by('title') + return Engagement_Presets.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) engagement_presets = Engagement_Presets.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)).order_by('title') + product__authorized_group=Exists(authorized_product_groups)).order_by("id") engagement_presets = engagement_presets.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) @@ -352,33 +352,33 @@ def get_authorized_product_api_scan_configurations(permission): return Product_API_Scan_Configuration.objects.none() if user.is_superuser: - return Product_API_Scan_Configuration.objects.all() + return Product_API_Scan_Configuration.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Product_API_Scan_Configuration.objects.all() + return Product_API_Scan_Configuration.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('product__prod_type_id'), + product_type=OuterRef("product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('product_id'), + product=OuterRef("product_id"), group__users=user, role__in=roles) product_api_scan_configurations = Product_API_Scan_Configuration.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)) + product__authorized_group=Exists(authorized_product_groups)).order_by("id") product_api_scan_configurations = product_api_scan_configurations.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True)) diff 
--git a/dojo/product/signals.py b/dojo/product/signals.py index 02f93cd582..6871f5490d 100644 --- a/dojo/product/signals.py +++ b/dojo/product/signals.py @@ -13,10 +13,10 @@ @receiver(post_save, sender=Product) def product_post_save(sender, instance, created, **kwargs): if created: - create_notification(event='product_added', + create_notification(event="product_added", title=instance.name, product=instance, - url=reverse('view_product', args=(instance.id,))) + url=reverse("view_product", args=(instance.id,))) @receiver(post_delete, sender=Product) @@ -24,15 +24,15 @@ def product_post_delete(sender, instance, **kwargs): if settings.ENABLE_AUDITLOG: le = LogEntry.objects.get( action=LogEntry.Action.DELETE, - content_type=ContentType.objects.get(app_label='dojo', model='product'), + content_type=ContentType.objects.get(app_label="dojo", model="product"), object_id=instance.id, ) description = _('The product "%(name)s" was deleted by %(user)s') % { - 'name': instance.name, 'user': le.actor} + "name": instance.name, "user": le.actor} else: - description = _('The product "%(name)s" was deleted') % {'name': instance.name} - create_notification(event='product_deleted', # template does not exists, it will default to "other" but this event name needs to stay because of unit testing - title=_('Deletion of %(name)s') % {'name': instance.name}, + description = _('The product "%(name)s" was deleted') % {"name": instance.name} + create_notification(event="product_deleted", # template does not exists, it will default to "other" but this event name needs to stay because of unit testing + title=_("Deletion of %(name)s") % {"name": instance.name}, description=description, - url=reverse('product'), + url=reverse("product"), icon="exclamation-triangle") diff --git a/dojo/product/urls.py b/dojo/product/urls.py index f2e05a613f..8e3568e590 100644 --- a/dojo/product/urls.py +++ b/dojo/product/urls.py @@ -5,72 +5,72 @@ urlpatterns = [ # product - re_path(r'^product$', views.product, name='product'), - re_path(r'^product/(?P<pid>\d+)$', views.view_product, - name='view_product'), - re_path(r'^product/(?P<pid>\d+)/components$', views.view_product_components, - name='view_product_components'), - re_path(r'^product/(?P<pid>\d+)/engagements$', views.view_engagements, - name='view_engagements'), + re_path(r"^product$", views.product, name="product"), + re_path(r"^product/(?P<pid>\d+)$", views.view_product, + name="view_product"), + re_path(r"^product/(?P<pid>\d+)/components$", views.view_product_components, + name="view_product_components"), + re_path(r"^product/(?P<pid>\d+)/engagements$", views.view_engagements, + name="view_engagements"), re_path( - r'^product/(?P<pid>\d+)/import_scan_results$', + r"^product/(?P<pid>\d+)/import_scan_results$", dojo_engagement_views.ImportScanResultsView.as_view(), - name='import_scan_results_prod'), - re_path(r'^product/(?P<pid>\d+)/metrics$', views.view_product_metrics, - name='view_product_metrics'), - re_path(r'^product/(?P<pid>\d+)/async_burndown_metrics$', views.async_burndown_metrics, - name='async_burndown_metrics'), - re_path(r'^product/(?P<pid>\d+)/edit$', views.edit_product, - name='edit_product'), - re_path(r'^product/(?P<pid>\d+)/delete$', views.delete_product, - name='delete_product'), - re_path(r'^product/add', views.new_product, name='new_product'), - re_path(r'^product/(?P<pid>\d+)/new_engagement$', views.new_eng_for_app, - name='new_eng_for_prod'), - re_path(r'^product/(?P<pid>\d+)/new_technology$', views.new_tech_for_prod, - name='new_tech_for_prod'), - re_path(r'^technology/(?P<tid>\d+)/edit$', views.edit_technology, -
name='edit_technology'), - re_path(r'^technology/(?P<tid>\d+)/delete$', views.delete_technology, - name='delete_technology'), - re_path(r'^product/(?P<pid>\d+)/new_engagement/cicd$', views.new_eng_for_app_cicd, - name='new_eng_for_prod_cicd'), - re_path(r'^product/(?P<pid>\d+)/add_meta_data$', views.add_meta_data, - name='add_meta_data'), - re_path(r'^product/(?P<pid>\d+)/edit_notifications$', views.edit_notifications, - name='edit_notifications'), - re_path(r'^product/(?P<pid>\d+)/edit_meta_data$', views.edit_meta_data, - name='edit_meta_data'), + name="import_scan_results_prod"), + re_path(r"^product/(?P<pid>\d+)/metrics$", views.view_product_metrics, + name="view_product_metrics"), + re_path(r"^product/(?P<pid>\d+)/async_burndown_metrics$", views.async_burndown_metrics, + name="async_burndown_metrics"), + re_path(r"^product/(?P<pid>\d+)/edit$", views.edit_product, + name="edit_product"), + re_path(r"^product/(?P<pid>\d+)/delete$", views.delete_product, + name="delete_product"), + re_path(r"^product/add", views.new_product, name="new_product"), + re_path(r"^product/(?P<pid>\d+)/new_engagement$", views.new_eng_for_app, + name="new_eng_for_prod"), + re_path(r"^product/(?P<pid>\d+)/new_technology$", views.new_tech_for_prod, + name="new_tech_for_prod"), + re_path(r"^technology/(?P<tid>\d+)/edit$", views.edit_technology, + name="edit_technology"), + re_path(r"^technology/(?P<tid>\d+)/delete$", views.delete_technology, + name="delete_technology"), + re_path(r"^product/(?P<pid>\d+)/new_engagement/cicd$", views.new_eng_for_app_cicd, + name="new_eng_for_prod_cicd"), + re_path(r"^product/(?P<pid>\d+)/add_meta_data$", views.add_meta_data, + name="add_meta_data"), + re_path(r"^product/(?P<pid>\d+)/edit_notifications$", views.edit_notifications, + name="edit_notifications"), + re_path(r"^product/(?P<pid>\d+)/edit_meta_data$", views.edit_meta_data, + name="edit_meta_data"), re_path( - r'^product/(?P<pid>\d+)/ad_hoc_finding$', + r"^product/(?P<pid>\d+)/ad_hoc_finding$", views.AdHocFindingView.as_view(), - name='ad_hoc_finding'), - re_path(r'^product/(?P<pid>\d+)/engagement_presets$', views.engagement_presets, - name='engagement_presets'), - re_path(r'^product/(?P<pid>\d+)/engagement_presets/(?P<eid>\d+)/edit$', views.edit_engagement_presets, - name='edit_engagement_presets'), - re_path(r'^product/(?P<pid>\d+)/engagement_presets/add$', views.add_engagement_presets, - name='add_engagement_presets'), - re_path(r'^product/(?P<pid>\d+)/engagement_presets/(?P<eid>\d+)/delete$', views.delete_engagement_presets, - name='delete_engagement_presets'), - re_path(r'^product/(?P<pid>\d+)/add_member$', views.add_product_member, - name='add_product_member'), - re_path(r'^product/member/(?P<memberid>\d+)/edit$', views.edit_product_member, - name='edit_product_member'), - re_path(r'^product/member/(?P<memberid>\d+)/delete$', views.delete_product_member, - name='delete_product_member'), - re_path(r'^product/(?P<pid>\d+)/add_api_scan_configuration$', views.add_api_scan_configuration, - name='add_api_scan_configuration'), - re_path(r'^product/(?P<pid>\d+)/view_api_scan_configurations$', views.view_api_scan_configurations, - name='view_api_scan_configurations'), - re_path(r'^product/(?P<pid>\d+)/edit_api_scan_configuration/(?P<pascid>\d+)$', views.edit_api_scan_configuration, - name='edit_api_scan_configuration'), - re_path(r'^product/(?P<pid>\d+)/delete_api_scan_configuration/(?P<pascid>\d+)$', views.delete_api_scan_configuration, - name='delete_api_scan_configuration'), - re_path(r'^product/(?P<pid>\d+)/add_group$', views.add_product_group, - name='add_product_group'), - re_path(r'^product/group/(?P<groupid>\d+)/edit$', views.edit_product_group, - name='edit_product_group'), -
re_path(r'^product/group/(?P<groupid>\d+)/delete$', views.delete_product_group, - name='delete_product_group'), + name="ad_hoc_finding"), + re_path(r"^product/(?P<pid>\d+)/engagement_presets$", views.engagement_presets, + name="engagement_presets"), + re_path(r"^product/(?P<pid>\d+)/engagement_presets/(?P<eid>\d+)/edit$", views.edit_engagement_presets, + name="edit_engagement_presets"), + re_path(r"^product/(?P<pid>\d+)/engagement_presets/add$", views.add_engagement_presets, + name="add_engagement_presets"), + re_path(r"^product/(?P<pid>\d+)/engagement_presets/(?P<eid>\d+)/delete$", views.delete_engagement_presets, + name="delete_engagement_presets"), + re_path(r"^product/(?P<pid>\d+)/add_member$", views.add_product_member, + name="add_product_member"), + re_path(r"^product/member/(?P<memberid>\d+)/edit$", views.edit_product_member, + name="edit_product_member"), + re_path(r"^product/member/(?P<memberid>\d+)/delete$", views.delete_product_member, + name="delete_product_member"), + re_path(r"^product/(?P<pid>\d+)/add_api_scan_configuration$", views.add_api_scan_configuration, + name="add_api_scan_configuration"), + re_path(r"^product/(?P<pid>\d+)/view_api_scan_configurations$", views.view_api_scan_configurations, + name="view_api_scan_configurations"), + re_path(r"^product/(?P<pid>\d+)/edit_api_scan_configuration/(?P<pascid>\d+)$", views.edit_api_scan_configuration, + name="edit_api_scan_configuration"), + re_path(r"^product/(?P<pid>\d+)/delete_api_scan_configuration/(?P<pascid>\d+)$", views.delete_api_scan_configuration, + name="delete_api_scan_configuration"), + re_path(r"^product/(?P<pid>\d+)/add_group$", views.add_product_group, + name="add_product_group"), + re_path(r"^product/group/(?P<groupid>\d+)/edit$", views.edit_product_group, + name="edit_product_group"), + re_path(r"^product/group/(?P<groupid>\d+)/delete$", views.delete_product_group, + name="delete_product_group"), ] diff --git a/dojo/product/views.py b/dojo/product/views.py index 95a133bc13..6d90f51518 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -132,9 +132,9 @@ def product(request): # perform all stuff for filtering and pagination first, before annotation/prefetching # otherwise the paginator will perform all the annotations/prefetching already only to count the total number of records # see https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375 - name_words = prods.values_list('name', flat=True) + name_words = prods.values_list("name", flat=True) prods = prods.annotate( - findings_count=Count('engagement__test__finding', filter=Q(engagement__test__finding__active=True)), + findings_count=Count("engagement__test__finding", filter=Q(engagement__test__finding__active=True)), ) filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = ProductFilterWithoutObjectLookups if filter_string_matching else ProductFilter @@ -148,11 +148,11 @@ def product(request): add_breadcrumb(title=_("Product List"), top_level=not len(request.GET), request=request) - return render(request, 'dojo/product.html', { - 'prod_list': prod_list, - 'prod_filter': prod_filter, - 'name_words': sorted(set(name_words)), - 'user': request.user}) + return render(request, "dojo/product.html", { + "prod_list": prod_list, + "prod_filter": prod_filter, + "name_words": sorted(set(name_words)), + "user": request.user}) def prefetch_for_product(prods): @@ -160,26 +160,26 @@ if isinstance(prods, QuerySet): # old code can arrive here with prods being a list because the query was already executed - prefetched_prods = prefetched_prods.prefetch_related('team_manager') -
prefetched_prods = prefetched_prods.prefetch_related('product_manager') - prefetched_prods = prefetched_prods.prefetch_related('technical_contact') + prefetched_prods = prefetched_prods.prefetch_related("team_manager") + prefetched_prods = prefetched_prods.prefetch_related("product_manager") + prefetched_prods = prefetched_prods.prefetch_related("technical_contact") prefetched_prods = prefetched_prods.annotate( - active_engagement_count=Count('engagement__id', filter=Q(engagement__active=True))) + active_engagement_count=Count("engagement__id", filter=Q(engagement__active=True))) prefetched_prods = prefetched_prods.annotate( - closed_engagement_count=Count('engagement__id', filter=Q(engagement__active=False))) - prefetched_prods = prefetched_prods.annotate(last_engagement_date=Max('engagement__target_start')) - prefetched_prods = prefetched_prods.annotate(active_finding_count=Count('engagement__test__finding__id', + closed_engagement_count=Count("engagement__id", filter=Q(engagement__active=False))) + prefetched_prods = prefetched_prods.annotate(last_engagement_date=Max("engagement__target_start")) + prefetched_prods = prefetched_prods.annotate(active_finding_count=Count("engagement__test__finding__id", filter=Q( engagement__test__finding__active=True))) prefetched_prods = prefetched_prods.annotate( - active_verified_finding_count=Count('engagement__test__finding__id', + active_verified_finding_count=Count("engagement__test__finding__id", filter=Q( engagement__test__finding__active=True, engagement__test__finding__verified=True))) - prefetched_prods = prefetched_prods.prefetch_related('jira_project_set__jira_instance') - prefetched_prods = prefetched_prods.prefetch_related('members') - prefetched_prods = prefetched_prods.prefetch_related('prod_type__members') + prefetched_prods = prefetched_prods.prefetch_related("jira_project_set__jira_instance") + prefetched_prods = prefetched_prods.prefetch_related("members") + prefetched_prods = prefetched_prods.prefetch_related("prod_type__members") active_endpoint_query = Endpoint.objects.filter( status_endpoint__mitigated=False, status_endpoint__false_positive=False, @@ -187,16 +187,16 @@ def prefetch_for_product(prods): status_endpoint__risk_accepted=False, ).distinct() prefetched_prods = prefetched_prods.prefetch_related( - Prefetch('endpoint_set', queryset=active_endpoint_query, to_attr='active_endpoints')) - prefetched_prods = prefetched_prods.prefetch_related('tags') + Prefetch("endpoint_set", queryset=active_endpoint_query, to_attr="active_endpoints")) + prefetched_prods = prefetched_prods.prefetch_related("tags") - if get_system_setting('enable_github'): + if get_system_setting("enable_github"): prefetched_prods = prefetched_prods.prefetch_related( - Prefetch('github_pkey_set', queryset=GITHUB_PKey.objects.all().select_related('git_conf'), - to_attr='github_confs')) + Prefetch("github_pkey_set", queryset=GITHUB_PKey.objects.all().select_related("git_conf"), + to_attr="github_confs")) else: - logger.debug('unable to prefetch because query was already executed') + logger.debug("unable to prefetch because query was already executed") return prefetched_prods @@ -207,11 +207,11 @@ def iso_to_gregorian(iso_year, iso_week, iso_day): return start + timedelta(weeks=iso_week - 1, days=iso_day - 1) -@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def view_product(request, pid): - prod_query = Product.objects.all().select_related('product_manager', 'technical_contact', 
'team_manager', 'sla_configuration') \ - .prefetch_related('members') \ - .prefetch_related('prod_type__members') + prod_query = Product.objects.all().select_related("product_manager", "technical_contact", "team_manager", "sla_configuration") \ + .prefetch_related("members") \ + .prefetch_related("prod_type__members") prod = get_object_or_404(prod_query, id=pid) product_members = get_authorized_members_for_product(prod, Permissions.Product_View) product_type_members = get_authorized_members_for_product_type(prod.prod_type, Permissions.Product_Type_View) @@ -219,11 +219,11 @@ def view_product(request, pid): product_type_groups = get_authorized_groups_for_product_type(prod.prod_type, Permissions.Product_Type_View) personal_notifications_form = ProductNotificationsForm( instance=Notifications.objects.filter(user=request.user).filter(product=prod).first()) - langSummary = Languages.objects.filter(product=prod).aggregate(Sum('files'), Sum('code'), Count('files')) - languages = Languages.objects.filter(product=prod).order_by('-code').select_related('language') - app_analysis = App_Analysis.objects.filter(product=prod).order_by('name') + langSummary = Languages.objects.filter(product=prod).aggregate(Sum("files"), Sum("code"), Count("files")) + languages = Languages.objects.filter(product=prod).order_by("-code").select_related("language") + app_analysis = App_Analysis.objects.filter(product=prod).order_by("name") benchmarks = Benchmark_Product_Summary.objects.filter(product=prod, publish=True, - benchmark_type__enabled=True).order_by('benchmark_type__name') + benchmark_type__enabled=True).order_by("benchmark_type__name") sla = SLA_Configuration.objects.filter(id=prod.sla_configuration_id).first() benchAndPercent = [] for i in range(len(benchmarks)): @@ -234,25 +234,25 @@ def view_product(request, pid): fail_percent = round(100 - success_percent - waiting_percent, 2) print(fail_percent) benchAndPercent.append({ - 'id': benchmarks[i].benchmark_type.id, - 'name': benchmarks[i].benchmark_type, - 'level': desired_level, - 'success': {'count': total_pass, 'percent': success_percent}, - 'waiting': {'count': total_wait, 'percent': waiting_percent}, - 'fail': {'count': total_fail, 'percent': fail_percent}, - 'pass': total_pass + total_fail, - 'total': total, + "id": benchmarks[i].benchmark_type.id, + "name": benchmarks[i].benchmark_type, + "level": desired_level, + "success": {"count": total_pass, "percent": success_percent}, + "waiting": {"count": total_wait, "percent": waiting_percent}, + "fail": {"count": total_fail, "percent": fail_percent}, + "pass": total_pass + total_fail, + "total": total, }) system_settings = System_Settings.objects.get() - product_metadata = dict(prod.product_meta.order_by('name').values_list('name', 'value')) + product_metadata = dict(prod.product_meta.order_by("name").values_list("name", "value")) open_findings = Finding.objects.filter(test__engagement__product=prod, false_p=False, active=True, duplicate=False, - out_of_scope=False).order_by('numerical_severity').values( - 'severity').annotate(count=Count('severity')) + out_of_scope=False).order_by("numerical_severity").values( + "severity").annotate(count=Count("severity")) critical = 0 high = 0 @@ -275,91 +275,91 @@ def view_product(request, pid): total = critical + high + medium + low + info product_tab = Product_Tab(prod, title=_("Product"), tab="overview") - return render(request, 'dojo/view_product_details.html', { - 'prod': prod, - 'product_tab': product_tab, - 'product_metadata': product_metadata, - 'critical': 
critical, - 'high': high, - 'medium': medium, - 'low': low, - 'info': info, - 'total': total, - 'user': request.user, - 'languages': languages, - 'langSummary': langSummary, - 'app_analysis': app_analysis, - 'system_settings': system_settings, - 'benchmarks_percents': benchAndPercent, - 'benchmarks': benchmarks, - 'product_members': product_members, - 'product_type_members': product_type_members, - 'product_groups': product_groups, - 'product_type_groups': product_type_groups, - 'personal_notifications_form': personal_notifications_form, - 'enabled_notifications': get_enabled_notifications_list(), - 'sla': sla}) - - -@user_is_authorized(Product, Permissions.Component_View, 'pid') + return render(request, "dojo/view_product_details.html", { + "prod": prod, + "product_tab": product_tab, + "product_metadata": product_metadata, + "critical": critical, + "high": high, + "medium": medium, + "low": low, + "info": info, + "total": total, + "user": request.user, + "languages": languages, + "langSummary": langSummary, + "app_analysis": app_analysis, + "system_settings": system_settings, + "benchmarks_percents": benchAndPercent, + "benchmarks": benchmarks, + "product_members": product_members, + "product_type_members": product_type_members, + "product_groups": product_groups, + "product_type_groups": product_type_groups, + "personal_notifications_form": personal_notifications_form, + "enabled_notifications": get_enabled_notifications_list(), + "sla": sla}) + + +@user_is_authorized(Product, Permissions.Component_View, "pid") def view_product_components(request, pid): prod = get_object_or_404(Product, id=pid) product_tab = Product_Tab(prod, title=_("Product"), tab="components") - separator = ', ' + separator = ", " # Get components ordered by component_name and concat component versions to the same row - if connection.vendor == 'postgresql': + if connection.vendor == "postgresql": component_query = Finding.objects.filter(test__engagement__product__id=pid).values("component_name").order_by( - 'component_name').annotate( - component_version=StringAgg('component_version', delimiter=separator, distinct=True, default=Value(''))) + "component_name").annotate( + component_version=StringAgg("component_version", delimiter=separator, distinct=True, default=Value(""))) else: component_query = Finding.objects.filter(test__engagement__product__id=pid).values("component_name") component_query = component_query.annotate( - component_version=Sql_GroupConcat('component_version', separator=separator, distinct=True)) + component_version=Sql_GroupConcat("component_version", separator=separator, distinct=True)) # Append finding counts - component_query = component_query.annotate(total=Count('id')).order_by('component_name', 'component_version') - component_query = component_query.annotate(active=Count('id', filter=Q(active=True))) - component_query = component_query.annotate(duplicate=(Count('id', filter=Q(duplicate=True)))) + component_query = component_query.annotate(total=Count("id")).order_by("component_name", "component_version") + component_query = component_query.annotate(active=Count("id", filter=Q(active=True))) + component_query = component_query.annotate(duplicate=(Count("id", filter=Q(duplicate=True)))) # Default sort by total descending - component_query = component_query.order_by('-total') + component_query = component_query.order_by("-total") comp_filter = ProductComponentFilter(request.GET, queryset=component_query) result = get_page_items(request, comp_filter.qs, 25) # Filter out None values for 
auto-complete - component_words = component_query.exclude(component_name__isnull=True).values_list('component_name', flat=True) - - return render(request, 'dojo/product_components.html', { - 'prod': prod, - 'filter': comp_filter, - 'product_tab': product_tab, - 'result': result, - 'component_words': sorted(set(component_words)), + component_words = component_query.exclude(component_name__isnull=True).values_list("component_name", flat=True) + + return render(request, "dojo/product_components.html", { + "prod": prod, + "filter": comp_filter, + "product_tab": product_tab, + "result": result, + "component_words": sorted(set(component_words)), }) def identify_view(request): get_data = request.GET - view = get_data.get('type', None) + view = get_data.get("type", None) if view: # value of view is reflected in the template, make sure it's valid # although any XSS should be catch by django autoescape, we see people sometimes using '|safe'... - if view in ['Endpoint', 'Finding']: + if view in ["Endpoint", "Finding"]: return view msg = 'invalid view, view must be "Endpoint" or "Finding"' raise ValueError(msg) else: - if get_data.get('finding__severity', None): - return 'Endpoint' - elif get_data.get('false_positive', None): - return 'Endpoint' - referer = request.META.get('HTTP_REFERER', None) + if get_data.get("finding__severity", None): + return "Endpoint" + elif get_data.get("false_positive", None): + return "Endpoint" + referer = request.META.get("HTTP_REFERER", None) if referer: - if referer.find('type=Endpoint') > -1: - return 'Endpoint' - return 'Finding' + if referer.find("type=Endpoint") > -1: + return "Endpoint" + return "Finding" def finding_querys(request, prod): @@ -373,21 +373,21 @@ # 'test', # 'test__test_type', # 'risk_acceptance_set', - 'reporter') + "reporter") filter_string_matching = get_system_setting("filter_string_matching", False) finding_filter_class = MetricsFindingFilterWithoutObjectLookups if filter_string_matching else MetricsFindingFilter findings = finding_filter_class(request.GET, queryset=findings_query, pid=prod) findings_qs = queryset_check(findings) - filters['form'] = findings.form + filters["form"] = findings.form try: # logger.debug(findings_qs.query) - start_date = findings_qs.earliest('date').date + start_date = findings_qs.earliest("date").date start_date = datetime( start_date.year, start_date.month, start_date.day, tzinfo=timezone.get_current_timezone()) - end_date = findings_qs.latest('date').date + end_date = findings_qs.latest("date").date end_date = datetime( end_date.year, end_date.month, end_date.day, @@ -398,35 +398,35 @@ end_date = timezone.now() week = end_date - timedelta(days=7) # seven days and newer are considered "new" - filters['accepted'] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['new_verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['open'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['inactive'] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['closed'] =
findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['false_positive'] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['out_of_scope'] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['all'] = findings_qs.order_by("date") - filters['open_vulns'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter( + filters["accepted"] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["verified"] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["new_verified"] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["open"] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["inactive"] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["closed"] = findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["false_positive"] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["out_of_scope"] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters["all"] = findings_qs.order_by("date") + filters["open_vulns"] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter( cwe__isnull=False, - ).order_by('cwe').values( - 'cwe', + ).order_by("cwe").values( + "cwe", ).annotate( - count=Count('cwe'), + count=Count("cwe"), ) - filters['all_vulns'] = findings_qs.filter( + filters["all_vulns"] = findings_qs.filter( duplicate=False, cwe__isnull=False, - ).order_by('cwe').values( - 'cwe', + ).order_by("cwe").values( + "cwe", ).annotate( - count=Count('cwe'), + count=Count("cwe"), ) - filters['start_date'] = start_date - filters['end_date'] = end_date - filters['week'] = week + filters["start_date"] = start_date + filters["end_date"] = end_date + filters["week"] = week return filters @@ -435,31 +435,31 @@ def endpoint_querys(request, prod): filters = {} endpoints_query = Endpoint_Status.objects.filter(finding__test__engagement__product=prod, finding__severity__in=( - 'Critical', 'High', 'Medium', 'Low', 'Info')).prefetch_related( - 'finding__test__engagement', - 'finding__test__engagement__risk_acceptance', - 'finding__risk_acceptance_set', - 'finding__reporter').annotate(severity=F('finding__severity')) + "Critical", "High", "Medium", "Low", "Info")).prefetch_related( + "finding__test__engagement", + "finding__test__engagement__risk_acceptance", + "finding__risk_acceptance_set", + "finding__reporter").annotate(severity=F("finding__severity")) filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = MetricsEndpointFilterWithoutObjectLookups if filter_string_matching else MetricsEndpointFilter endpoints = filter_class(request.GET, queryset=endpoints_query) endpoints_qs = queryset_check(endpoints) - filters['form'] = endpoints.form + filters["form"] = endpoints.form if not endpoints_qs and not endpoints_query: 
endpoints = endpoints_query endpoints_qs = queryset_check(endpoints) messages.add_message(request, messages.ERROR, - _('All objects have been filtered away. Displaying all objects'), - extra_tags='alert-danger') + _("All objects have been filtered away. Displaying all objects"), + extra_tags="alert-danger") try: - start_date = endpoints_qs.earliest('date').date + start_date = endpoints_qs.earliest("date").date start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=timezone.get_current_timezone()) - end_date = endpoints_qs.latest('date').date + end_date = endpoints_qs.latest("date").date end_date = datetime(end_date.year, end_date.month, end_date.day, tzinfo=timezone.get_current_timezone()) @@ -468,59 +468,59 @@ end_date = timezone.now() week = end_date - timedelta(days=7) # seven days and newer are considered "new" - filters['accepted'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["accepted"] = endpoints_qs.filter(date__range=[start_date, end_date], risk_accepted=True).order_by("date") - filters['verified'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["verified"] = endpoints_qs.filter(date__range=[start_date, end_date], false_positive=False, mitigated=True, out_of_scope=False).order_by("date") - filters['new_verified'] = endpoints_qs.filter(date__range=[week, end_date], + filters["new_verified"] = endpoints_qs.filter(date__range=[week, end_date], false_positive=False, mitigated=True, out_of_scope=False).order_by("date") - filters['open'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["open"] = endpoints_qs.filter(date__range=[start_date, end_date], mitigated=False, finding__active=True) - filters['inactive'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["inactive"] = endpoints_qs.filter(date__range=[start_date, end_date], mitigated=True) - filters['closed'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["closed"] = endpoints_qs.filter(date__range=[start_date, end_date], mitigated=True) - filters['false_positive'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["false_positive"] = endpoints_qs.filter(date__range=[start_date, end_date], false_positive=True) - filters['out_of_scope'] = endpoints_qs.filter(date__range=[start_date, end_date], + filters["out_of_scope"] = endpoints_qs.filter(date__range=[start_date, end_date], out_of_scope=True) - filters['all'] = endpoints_qs - filters['open_vulns'] = endpoints_qs.filter( + filters["all"] = endpoints_qs + filters["open_vulns"] = endpoints_qs.filter( false_positive=False, out_of_scope=False, mitigated=True, finding__cwe__isnull=False, - ).order_by('finding__cwe').values( - 'finding__cwe', + ).order_by("finding__cwe").values( + "finding__cwe", ).annotate( - count=Count('finding__cwe'), + count=Count("finding__cwe"), ).annotate( - cwe=F('finding__cwe'), + cwe=F("finding__cwe"), ) - filters['all_vulns'] = endpoints_qs.filter( + filters["all_vulns"] = endpoints_qs.filter( finding__cwe__isnull=False, - ).order_by('finding__cwe').values( - 'finding__cwe', + ).order_by("finding__cwe").values( + "finding__cwe", ).annotate( - count=Count('finding__cwe'), + count=Count("finding__cwe"), ).annotate( - cwe=F('finding__cwe'), + cwe=F("finding__cwe"), ) - filters['start_date'] = start_date - filters['end_date'] = end_date - filters['week'] = week + filters["start_date"] = start_date + filters["end_date"] = end_date + filters["week"] = week return filters
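The dojo/product/queries.py hunks above all share one structure: build a correlated subquery with OuterRef() for each way a user can be authorized (direct product membership, product-type membership, and the group variants of both), annotate the queryset with those subqueries via Exists(), and filter on the disjunction of the resulting booleans. Alongside the quote normalization, several of those queries (Product_Member, Product_Group, App_Analysis, DojoMeta, Languages, Engagement_Presets, Product_API_Scan_Configuration) also gain an explicit .order_by("id"), which makes paginated results deterministic across requests. A minimal sketch of the pattern, reduced to the direct-membership check only; the real functions add three more Exists annotations and OR them all together:

# Illustrative sketch, not a verbatim excerpt: Product, Product_Member and
# get_roles_for_permission are the models/helpers imported at the top of
# dojo/product/queries.py, as used in the hunks above.
from django.db.models import Exists, OuterRef, Q

def authorized_products_sketch(user, permission):
    # Superusers skip the per-object checks entirely.
    if user.is_superuser:
        return Product.objects.all().order_by("name")
    roles = get_roles_for_permission(permission)
    # Correlated subquery: does a membership row tie this user and an
    # allowed role to the product of the outer row?
    membership = Product_Member.objects.filter(
        product=OuterRef("pk"), user=user, role__in=roles)
    products = Product.objects.annotate(member=Exists(membership))
    # The real get_authorized_products() ORs in prod_type__member,
    # prod_type__authorized_group and authorized_group annotations here.
    return products.filter(Q(member=True)).order_by("name")

Using Exists() rather than joins keeps the row count intact (no DISTINCT needed) and lets the database short-circuit the membership lookup per product.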
-@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def view_product_metrics(request, pid): prod = get_object_or_404(Product, id=pid) engs = Engagement.objects.filter(product=prod, active=True) @@ -530,25 +530,25 @@ def view_product_metrics(request, pid): filter_class = EngagementFilterWithoutObjectLookups if filter_string_matching else EngagementFilter result = filter_class( request.GET, - queryset=Engagement.objects.filter(product=prod, active=False).order_by('-target_end')) + queryset=Engagement.objects.filter(product=prod, active=False).order_by("-target_end")) inactive_engs_page = get_page_items(request, result.qs, 10) filters = {} - if view == 'Finding': + if view == "Finding": filters = finding_querys(request, prod) - elif view == 'Endpoint': + elif view == "Endpoint": filters = endpoint_querys(request, prod) - start_date = timezone.make_aware(datetime.combine(filters['start_date'], datetime.min.time())) - end_date = filters['end_date'] + start_date = timezone.make_aware(datetime.combine(filters["start_date"], datetime.min.time())) + end_date = filters["end_date"] r = relativedelta(end_date, start_date) weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) if weeks_between <= 0: weeks_between += 2 - punchcard, ticks = get_punchcard_data(filters.get('open', None), start_date, weeks_between, view) + punchcard, ticks = get_punchcard_data(filters.get("open", None), start_date, weeks_between, view) add_breadcrumb(parent=prod, top_level=False, request=request) @@ -565,55 +565,55 @@ def view_product_metrics(request, pid): accepted_objs_by_severity = get_zero_severity_level() # Optimization: Make all queries lists, and only pull values of fields for metrics based calculations - open_vulnerabilities = list(filters['open_vulns'].values('cwe', 'count')) - all_vulnerabilities = list(filters['all_vulns'].values('cwe', 'count')) + open_vulnerabilities = list(filters["open_vulns"].values("cwe", "count")) + all_vulnerabilities = list(filters["all_vulns"].values("cwe", "count")) - verified_objs_by_severity = list(filters.get('verified').values('severity')) - inactive_objs_by_severity = list(filters.get('inactive').values('severity')) - false_positive_objs_by_severity = list(filters.get('false_positive').values('severity')) - out_of_scope_objs_by_severity = list(filters.get('out_of_scope').values('severity')) - new_objs_by_severity = list(filters.get('new_verified').values('severity')) - all_objs_by_severity = list(filters.get('all').values('severity')) + verified_objs_by_severity = list(filters.get("verified").values("severity")) + inactive_objs_by_severity = list(filters.get("inactive").values("severity")) + false_positive_objs_by_severity = list(filters.get("false_positive").values("severity")) + out_of_scope_objs_by_severity = list(filters.get("out_of_scope").values("severity")) + new_objs_by_severity = list(filters.get("new_verified").values("severity")) + all_objs_by_severity = list(filters.get("all").values("severity")) - all_findings = list(filters.get("all", []).values('id', 'date', 'severity')) - open_findings = list(filters.get("open", []).values('id', 'date', 'mitigated', 'severity')) - closed_findings = list(filters.get("closed", []).values('id', 'date', 'severity')) - accepted_findings = list(filters.get("accepted", []).values('id', 'date', 'severity')) + all_findings = list(filters.get("all", []).values("id", "date", "severity")) + open_findings = list(filters.get("open", []).values("id", 
"date", "mitigated", "severity")) + closed_findings = list(filters.get("closed", []).values("id", "date", "severity")) + accepted_findings = list(filters.get("accepted", []).values("id", "date", "severity")) """ Optimization: Create dictionaries in the structure of { finding_id: True } for index based search Previously the for-loop below used "if finding in open_findings" -- an average O(n^2) time complexity This allows for "if open_findings.get(finding_id, None)" -- an average O(n) time complexity """ - open_findings_dict = {f.get('id'): True for f in open_findings} - closed_findings_dict = {f.get('id'): True for f in closed_findings} - accepted_findings_dict = {f.get('id'): True for f in accepted_findings} + open_findings_dict = {f.get("id"): True for f in open_findings} + closed_findings_dict = {f.get("id"): True for f in closed_findings} + accepted_findings_dict = {f.get("id"): True for f in accepted_findings} for finding in all_findings: - iso_cal = finding.get('date').isocalendar() + iso_cal = finding.get("date").isocalendar() date = iso_to_gregorian(iso_cal[0], iso_cal[1], 1) html_date = date.strftime("%m/%d
%Y
") unix_timestamp = (tcalendar.timegm(date.timetuple()) * 1000) # Open findings - if open_findings_dict.get(finding.get('id', None), None): + if open_findings_dict.get(finding.get("id", None), None): if unix_timestamp not in critical_weekly: - critical_weekly[unix_timestamp] = {'count': 0, 'week': html_date} + critical_weekly[unix_timestamp] = {"count": 0, "week": html_date} if unix_timestamp not in high_weekly: - high_weekly[unix_timestamp] = {'count': 0, 'week': html_date} + high_weekly[unix_timestamp] = {"count": 0, "week": html_date} if unix_timestamp not in medium_weekly: - medium_weekly[unix_timestamp] = {'count': 0, 'week': html_date} + medium_weekly[unix_timestamp] = {"count": 0, "week": html_date} if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['open'] += 1 + open_close_weekly[unix_timestamp]["open"] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 1, 'accepted': 0} - open_close_weekly[unix_timestamp]['week'] = html_date + open_close_weekly[unix_timestamp] = {"closed": 0, "open": 1, "accepted": 0} + open_close_weekly[unix_timestamp]["week"] = html_date - if view == 'Finding': - severity = finding.get('severity') - elif view == 'Endpoint': - severity = finding.get('severity') + if view == "Finding": + severity = finding.get("severity") + elif view == "Endpoint": + severity = finding.get("severity") finding_age = calculate_finding_age(finding) if open_objs_by_age.get(finding_age, None): @@ -629,51 +629,51 @@ def view_product_metrics(request, pid): else: severity_weekly[unix_timestamp] = get_zero_severity_level() severity_weekly[unix_timestamp][severity] = 1 - severity_weekly[unix_timestamp]['week'] = html_date + severity_weekly[unix_timestamp]["week"] = html_date - if severity == 'Critical': + if severity == "Critical": if unix_timestamp in critical_weekly: - critical_weekly[unix_timestamp]['count'] += 1 + critical_weekly[unix_timestamp]["count"] += 1 else: - critical_weekly[unix_timestamp] = {'count': 1, 'week': html_date} - elif severity == 'High': + critical_weekly[unix_timestamp] = {"count": 1, "week": html_date} + elif severity == "High": if unix_timestamp in high_weekly: - high_weekly[unix_timestamp]['count'] += 1 + high_weekly[unix_timestamp]["count"] += 1 else: - high_weekly[unix_timestamp] = {'count': 1, 'week': html_date} - elif severity == 'Medium': + high_weekly[unix_timestamp] = {"count": 1, "week": html_date} + elif severity == "Medium": if unix_timestamp in medium_weekly: - medium_weekly[unix_timestamp]['count'] += 1 + medium_weekly[unix_timestamp]["count"] += 1 else: - medium_weekly[unix_timestamp] = {'count': 1, 'week': html_date} + medium_weekly[unix_timestamp] = {"count": 1, "week": html_date} # Optimization: count severity level on server side - if open_objs_by_severity.get(finding.get('severity')) is not None: - open_objs_by_severity[finding.get('severity')] += 1 + if open_objs_by_severity.get(finding.get("severity")) is not None: + open_objs_by_severity[finding.get("severity")] += 1 # Close findings - elif closed_findings_dict.get(finding.get('id', None), None): + elif closed_findings_dict.get(finding.get("id", None), None): if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['closed'] += 1 + open_close_weekly[unix_timestamp]["closed"] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 1, 'open': 0, 'accepted': 0} - open_close_weekly[unix_timestamp]['week'] = html_date + open_close_weekly[unix_timestamp] = {"closed": 1, "open": 0, "accepted": 0} + 
open_close_weekly[unix_timestamp]["week"] = html_date # Optimization: count severity level on server side - if closed_objs_by_severity.get(finding.get('severity')) is not None: - closed_objs_by_severity[finding.get('severity')] += 1 + if closed_objs_by_severity.get(finding.get("severity")) is not None: + closed_objs_by_severity[finding.get("severity")] += 1 # Risk Accepted findings - if accepted_findings_dict.get(finding.get('id', None), None): + if accepted_findings_dict.get(finding.get("id", None), None): if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['accepted'] += 1 + open_close_weekly[unix_timestamp]["accepted"] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 0, 'accepted': 1} - open_close_weekly[unix_timestamp]['week'] = html_date + open_close_weekly[unix_timestamp] = {"closed": 0, "open": 0, "accepted": 1} + open_close_weekly[unix_timestamp]["week"] = html_date # Optimization: count severity level on server side - if accepted_objs_by_severity.get(finding.get('severity')) is not None: - accepted_objs_by_severity[finding.get('severity')] += 1 + if accepted_objs_by_severity.get(finding.get("severity")) is not None: + accepted_objs_by_severity[finding.get("severity")] += 1 - tests = Test.objects.filter(engagement__product=prod).prefetch_related('finding_set', 'test_type') - tests = tests.annotate(verified_finding_count=Count('finding__id', filter=Q(finding__verified=True))) + tests = Test.objects.filter(engagement__product=prod).prefetch_related("finding_set", "test_type") + tests = tests.annotate(verified_finding_count=Count("finding__id", filter=Q(finding__verified=True))) test_data = {} for t in tests: @@ -683,71 +683,71 @@ def view_product_metrics(request, pid): test_data[t.test_type.name] = t.verified_finding_count # Optimization: Format Open/Total CWE vulnerabilities graph data here, instead of template - open_vulnerabilities = [['CWE-' + str(f.get('cwe')), f.get('count')] for f in open_vulnerabilities] - all_vulnerabilities = [['CWE-' + str(f.get('cwe')), f.get('count')] for f in all_vulnerabilities] + open_vulnerabilities = [["CWE-" + str(f.get("cwe")), f.get("count")] for f in open_vulnerabilities] + all_vulnerabilities = [["CWE-" + str(f.get("cwe")), f.get("count")] for f in all_vulnerabilities] product_tab = Product_Tab(prod, title=_("Product"), tab="metrics") - return render(request, 'dojo/product_metrics.html', { - 'prod': prod, - 'product_tab': product_tab, - 'engs': engs, - 'inactive_engs': inactive_engs_page, - 'view': view, - 'verified_objs': len(verified_objs_by_severity), - 'verified_objs_by_severity': sum_by_severity_level(verified_objs_by_severity), - 'open_objs': len(open_findings), - 'open_objs_by_severity': open_objs_by_severity, - 'open_objs_by_age': open_objs_by_age, - 'inactive_objs': len(inactive_objs_by_severity), - 'inactive_objs_by_severity': sum_by_severity_level(inactive_objs_by_severity), - 'closed_objs': len(closed_findings), - 'closed_objs_by_severity': closed_objs_by_severity, - 'false_positive_objs': len(false_positive_objs_by_severity), - 'false_positive_objs_by_severity': sum_by_severity_level(false_positive_objs_by_severity), - 'out_of_scope_objs': len(out_of_scope_objs_by_severity), - 'out_of_scope_objs_by_severity': sum_by_severity_level(out_of_scope_objs_by_severity), - 'accepted_objs': len(accepted_findings), - 'accepted_objs_by_severity': accepted_objs_by_severity, - 'new_objs': len(new_objs_by_severity), - 'new_objs_by_severity': sum_by_severity_level(new_objs_by_severity), - 'all_objs': 
len(all_objs_by_severity), - 'all_objs_by_severity': sum_by_severity_level(all_objs_by_severity), - 'form': filters.get('form', None), - 'reset_link': reverse('view_product_metrics', args=(prod.id,)) + '?type=' + view, - 'open_vulnerabilities_count': len(open_vulnerabilities), - 'open_vulnerabilities': open_vulnerabilities, - 'all_vulnerabilities_count': len(all_vulnerabilities), - 'all_vulnerabilities': all_vulnerabilities, - 'start_date': start_date, - 'punchcard': punchcard, - 'ticks': ticks, - 'open_close_weekly': open_close_weekly, - 'severity_weekly': severity_weekly, - 'critical_weekly': critical_weekly, - 'high_weekly': high_weekly, - 'medium_weekly': medium_weekly, - 'test_data': test_data, - 'user': request.user}) - - -@user_is_authorized(Product, Permissions.Product_View, 'pid') + return render(request, "dojo/product_metrics.html", { + "prod": prod, + "product_tab": product_tab, + "engs": engs, + "inactive_engs": inactive_engs_page, + "view": view, + "verified_objs": len(verified_objs_by_severity), + "verified_objs_by_severity": sum_by_severity_level(verified_objs_by_severity), + "open_objs": len(open_findings), + "open_objs_by_severity": open_objs_by_severity, + "open_objs_by_age": open_objs_by_age, + "inactive_objs": len(inactive_objs_by_severity), + "inactive_objs_by_severity": sum_by_severity_level(inactive_objs_by_severity), + "closed_objs": len(closed_findings), + "closed_objs_by_severity": closed_objs_by_severity, + "false_positive_objs": len(false_positive_objs_by_severity), + "false_positive_objs_by_severity": sum_by_severity_level(false_positive_objs_by_severity), + "out_of_scope_objs": len(out_of_scope_objs_by_severity), + "out_of_scope_objs_by_severity": sum_by_severity_level(out_of_scope_objs_by_severity), + "accepted_objs": len(accepted_findings), + "accepted_objs_by_severity": accepted_objs_by_severity, + "new_objs": len(new_objs_by_severity), + "new_objs_by_severity": sum_by_severity_level(new_objs_by_severity), + "all_objs": len(all_objs_by_severity), + "all_objs_by_severity": sum_by_severity_level(all_objs_by_severity), + "form": filters.get("form", None), + "reset_link": reverse("view_product_metrics", args=(prod.id,)) + "?type=" + view, + "open_vulnerabilities_count": len(open_vulnerabilities), + "open_vulnerabilities": open_vulnerabilities, + "all_vulnerabilities_count": len(all_vulnerabilities), + "all_vulnerabilities": all_vulnerabilities, + "start_date": start_date, + "punchcard": punchcard, + "ticks": ticks, + "open_close_weekly": open_close_weekly, + "severity_weekly": severity_weekly, + "critical_weekly": critical_weekly, + "high_weekly": high_weekly, + "medium_weekly": medium_weekly, + "test_data": test_data, + "user": request.user}) + + +@user_is_authorized(Product, Permissions.Product_View, "pid") def async_burndown_metrics(request, pid): prod = get_object_or_404(Product, id=pid) open_findings_burndown = get_open_findings_burndown(prod) return JsonResponse({ - 'critical': open_findings_burndown.get('Critical', []), - 'high': open_findings_burndown.get('High', []), - 'medium': open_findings_burndown.get('Medium', []), - 'low': open_findings_burndown.get('Low', []), - 'info': open_findings_burndown.get('Info', []), - 'max': open_findings_burndown.get('y_max', 0), - 'min': open_findings_burndown.get('y_min', 0), + "critical": open_findings_burndown.get("Critical", []), + "high": open_findings_burndown.get("High", []), + "medium": open_findings_burndown.get("Medium", []), + "low": open_findings_burndown.get("Low", []), + "info": 
open_findings_burndown.get("Info", []), + "max": open_findings_burndown.get("y_max", 0), + "min": open_findings_burndown.get("y_min", 0), }) -@user_is_authorized(Product, Permissions.Engagement_View, 'pid') +@user_is_authorized(Product, Permissions.Engagement_View, "pid") def view_engagements(request, pid): prod = get_object_or_404(Product, id=pid) default_page_num = 10 @@ -755,8 +755,8 @@ def view_engagements(request, pid): filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = ProductEngagementFilterWithoutObjectLookups if filter_string_matching else ProductEngagementFilter # In Progress Engagements - engs = Engagement.objects.filter(product=prod, active=True, status="In Progress").order_by('-updated') - active_engs_filter = filter_class(request.GET, queryset=engs, prefix='active') + engs = Engagement.objects.filter(product=prod, active=True, status="In Progress").order_by("-updated") + active_engs_filter = filter_class(request.GET, queryset=engs, prefix="active") result_active_engs = get_page_items(request, active_engs_filter.qs, default_page_num, prefix="engs") # prefetch only after creating the filters to avoid https://code.djangoproject.com/ticket/23771 # and https://code.djangoproject.com/ticket/25375 @@ -765,16 +765,16 @@ def view_engagements(request, pid): recent_test_day_count, ) # Engagements that are queued because they haven't started or paused - engs = Engagement.objects.filter(~Q(status="In Progress"), product=prod, active=True).order_by('-updated') - queued_engs_filter = filter_class(request.GET, queryset=engs, prefix='queued') + engs = Engagement.objects.filter(~Q(status="In Progress"), product=prod, active=True).order_by("-updated") + queued_engs_filter = filter_class(request.GET, queryset=engs, prefix="queued") result_queued_engs = get_page_items(request, queued_engs_filter.qs, default_page_num, prefix="queued_engs") result_queued_engs.object_list = prefetch_for_view_engagements( result_queued_engs.object_list, recent_test_day_count, ) # Cancelled or Completed Engagements - engs = Engagement.objects.filter(product=prod, active=False).order_by('-target_end') - inactive_engs_filter = filter_class(request.GET, queryset=engs, prefix='closed') + engs = Engagement.objects.filter(product=prod, active=False).order_by("-target_end") + inactive_engs_filter = filter_class(request.GET, queryset=engs, prefix="closed") result_inactive_engs = get_page_items(request, inactive_engs_filter.qs, default_page_num, prefix="inactive_engs") result_inactive_engs.object_list = prefetch_for_view_engagements( result_inactive_engs.object_list, @@ -782,50 +782,50 @@ def view_engagements(request, pid): ) product_tab = Product_Tab(prod, title=_("All Engagements"), tab="engagements") - return render(request, 'dojo/view_engagements.html', { - 'prod': prod, - 'product_tab': product_tab, - 'engs': result_active_engs, - 'engs_count': result_active_engs.paginator.count, - 'engs_filter': active_engs_filter, - 'queued_engs': result_queued_engs, - 'queued_engs_count': result_queued_engs.paginator.count, - 'queued_engs_filter': queued_engs_filter, - 'inactive_engs': result_inactive_engs, - 'inactive_engs_count': result_inactive_engs.paginator.count, - 'inactive_engs_filter': inactive_engs_filter, - 'recent_test_day_count': recent_test_day_count, - 'user': request.user}) + return render(request, "dojo/view_engagements.html", { + "prod": prod, + "product_tab": product_tab, + "engs": result_active_engs, + "engs_count": result_active_engs.paginator.count, + "engs_filter": 
active_engs_filter, + "queued_engs": result_queued_engs, + "queued_engs_count": result_queued_engs.paginator.count, + "queued_engs_filter": queued_engs_filter, + "inactive_engs": result_inactive_engs, + "inactive_engs_count": result_inactive_engs.paginator.count, + "inactive_engs_filter": inactive_engs_filter, + "recent_test_day_count": recent_test_day_count, + "user": request.user}) def prefetch_for_view_engagements(engagements, recent_test_day_count): engagements = engagements.select_related( - 'lead', + "lead", ).prefetch_related( - Prefetch('test_set', queryset=Test.objects.filter( + Prefetch("test_set", queryset=Test.objects.filter( id__in=Subquery( Test.objects.filter( - engagement_id=OuterRef('engagement_id'), + engagement_id=OuterRef("engagement_id"), updated__gte=timezone.now() - timedelta(days=recent_test_day_count), - ).values_list('id', flat=True), + ).values_list("id", flat=True), )), ), - 'test_set__test_type', + "test_set__test_type", ).annotate( - count_tests=Count('test', distinct=True), - count_findings_all=Count('test__finding__id'), - count_findings_open=Count('test__finding__id', filter=Q(test__finding__active=True)), - count_findings_open_verified=Count('test__finding__id', + count_tests=Count("test", distinct=True), + count_findings_all=Count("test__finding__id"), + count_findings_open=Count("test__finding__id", filter=Q(test__finding__active=True)), + count_findings_open_verified=Count("test__finding__id", filter=Q(test__finding__active=True) & Q(test__finding__verified=True)), - count_findings_close=Count('test__finding__id', filter=Q(test__finding__is_mitigated=True)), - count_findings_duplicate=Count('test__finding__id', filter=Q(test__finding__duplicate=True)), - count_findings_accepted=Count('test__finding__id', filter=Q(test__finding__risk_accepted=True)), + count_findings_close=Count("test__finding__id", filter=Q(test__finding__is_mitigated=True)), + count_findings_duplicate=Count("test__finding__id", filter=Q(test__finding__duplicate=True)), + count_findings_accepted=Count("test__finding__id", filter=Q(test__finding__risk_accepted=True)), ) if System_Settings.objects.get().enable_jira: engagements = engagements.prefetch_related( - 'jira_project__jira_instance', - 'product__jira_project_set__jira_instance', + "jira_project__jira_instance", + "product__jira_project_set__jira_instance", ) return engagements @@ -846,14 +846,14 @@ def new_product(request, ptid=None): initial = None if ptid is not None: prod_type = get_object_or_404(Product_Type, pk=ptid) - initial = {'prod_type': prod_type} + initial = {"prod_type": prod_type} form = ProductForm(initial=initial) - if request.method == 'POST': + if request.method == "POST": form = ProductForm(request.POST, instance=Product()) - if get_system_setting('enable_github'): + if get_system_setting("enable_github"): gform = GITHUB_Product_Form(request.POST, instance=GITHUB_PKey()) else: gform = None @@ -865,12 +865,12 @@ def new_product(request, ptid=None): product = form.save() messages.add_message(request, messages.SUCCESS, - _('Product added successfully.'), - extra_tags='alert-success') + _("Product added successfully."), + extra_tags="alert-success") success, jira_project_form = jira_helper.process_jira_project_form(request, product=product) error = not success - if get_system_setting('enable_github'): + if get_system_setting("enable_github"): if gform.is_valid(): github_pkey = gform.save(commit=False) if github_pkey.git_conf is not None and github_pkey.git_project: @@ -878,10 +878,10 @@ def new_product(request, 
ptid=None): github_pkey.save() messages.add_message(request, messages.SUCCESS, - _('GitHub information added successfully.'), - extra_tags='alert-success') + _("GitHub information added successfully."), + extra_tags="alert-success") # Create appropriate labels in the repo - logger.info('Create label in repo: ' + github_pkey.git_project) + logger.info("Create label in repo: " + github_pkey.git_project) description = _("This label is automatically applied to all issues created by DefectDojo") try: @@ -900,30 +900,30 @@ def new_product(request, ptid=None): repo.create_label(name="security / critical", color="FE2200", description=description) except: - logger.info('Labels cannot be created - they may already exists') + logger.info("Labels cannot be created - they may already exists") if not error: - return HttpResponseRedirect(reverse('view_product', args=(product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(product.id,))) else: # engagement was saved, but JIRA errors, so goto edit_product - return HttpResponseRedirect(reverse('edit_product', args=(product.id,))) + return HttpResponseRedirect(reverse("edit_product", args=(product.id,))) else: - if get_system_setting('enable_jira'): + if get_system_setting("enable_jira"): jira_project_form = JIRAProjectForm() - if get_system_setting('enable_github'): + if get_system_setting("enable_github"): gform = GITHUB_Product_Form() else: gform = None add_breadcrumb(title=_("New Product"), top_level=False, request=request) - return render(request, 'dojo/new_product.html', - {'form': form, - 'jform': jira_project_form, - 'gform': gform}) + return render(request, "dojo/new_product.html", + {"form": form, + "jform": jira_project_form, + "gform": gform}) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def edit_product(request, pid): product = Product.objects.get(pk=pid) system_settings = System_Settings.objects.get() @@ -940,32 +940,32 @@ def edit_product(request, pid): except: github_inst = None - if request.method == 'POST': + if request.method == "POST": form = ProductForm(request.POST, instance=product) jira_project = jira_helper.get_jira_project(product) if form.is_valid(): initial_sla_config = Product.objects.get(pk=form.instance.id).sla_configuration form.save() - msg = 'Product updated successfully.' + msg = "Product updated successfully." # check if the SLA config was changed, append additional context to message if initial_sla_config != form.instance.sla_configuration: - msg += ' All SLA expiration dates for findings within this product will be recalculated asynchronously for the newly assigned SLA configuration.' + msg += " All SLA expiration dates for findings within this product will be recalculated asynchronously for the newly assigned SLA configuration." 
messages.add_message(request, messages.SUCCESS, _(msg), - extra_tags='alert-success') + extra_tags="alert-success") success, jform = jira_helper.process_jira_project_form(request, instance=jira_project, product=product) error = not success - if get_system_setting('enable_github') and github_inst: + if get_system_setting("enable_github") and github_inst: gform = GITHUB_Product_Form(request.POST, instance=github_inst) # need to handle delete try: gform.save() except: pass - elif get_system_setting('enable_github'): + elif get_system_setting("enable_github"): gform = GITHUB_Product_Form(request.POST) if gform.is_valid(): new_conf = gform.save(commit=False) @@ -973,11 +973,11 @@ def edit_product(request, pid): new_conf.save() messages.add_message(request, messages.SUCCESS, - _('GITHUB information updated successfully.'), - extra_tags='alert-success') + _("GITHUB information updated successfully."), + extra_tags="alert-success") if not error: - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + return HttpResponseRedirect(reverse("view_product", args=(pid,))) else: form = ProductForm(instance=product) @@ -997,46 +997,46 @@ def edit_product(request, pid): product_tab = Product_Tab(product, title=_("Edit Product"), tab="settings") return render(request, - 'dojo/edit_product.html', - {'form': form, - 'product_tab': product_tab, - 'jform': jform, - 'gform': gform, - 'product': product, + "dojo/edit_product.html", + {"form": form, + "product_tab": product_tab, + "jform": jform, + "gform": gform, + "product": product, }) -@user_is_authorized(Product, Permissions.Product_Delete, 'pid') +@user_is_authorized(Product, Permissions.Product_Delete, "pid") def delete_product(request, pid): product = get_object_or_404(Product, pk=pid) form = DeleteProductForm(instance=product) - if request.method == 'POST': - logger.debug('delete_product: POST') - if 'id' in request.POST and str(product.id) == request.POST['id']: + if request.method == "POST": + logger.debug("delete_product: POST") + if "id" in request.POST and str(product.id) == request.POST["id"]: form = DeleteProductForm(request.POST, instance=product) if form.is_valid(): if get_setting("ASYNC_OBJECT_DELETE"): async_del = async_delete() async_del.delete(product) - message = _('Product and relationships will be removed in the background.') + message = _("Product and relationships will be removed in the background.") else: - message = _('Product and relationships removed.') + message = _("Product and relationships removed.") product.delete() messages.add_message(request, messages.SUCCESS, message, - extra_tags='alert-success') - logger.debug('delete_product: POST RETURN') - return HttpResponseRedirect(reverse('product')) + extra_tags="alert-success") + logger.debug("delete_product: POST RETURN") + return HttpResponseRedirect(reverse("product")) else: - logger.debug('delete_product: POST INVALID FORM') + logger.debug("delete_product: POST INVALID FORM") logger.error(form.errors) - logger.debug('delete_product: GET') + logger.debug("delete_product: GET") - rels = ['Previewing the relationships has been disabled.', ''] - display_preview = get_setting('DELETE_PREVIEW') + rels = ["Previewing the relationships has been disabled.", ""] + display_preview = get_setting("DELETE_PREVIEW") if display_preview: collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([product]) @@ -1044,25 +1044,25 @@ def delete_product(request, pid): product_tab = Product_Tab(product, title=_("Product"), tab="settings") - logger.debug('delete_product: GET 
RENDER') + logger.debug("delete_product: GET RENDER") - return render(request, 'dojo/delete_product.html', { - 'product': product, - 'form': form, - 'product_tab': product_tab, - 'rels': rels}) + return render(request, "dojo/delete_product.html", { + "product": product, + "form": form, + "product_tab": product_tab, + "rels": rels}) -@user_is_authorized(Product, Permissions.Engagement_Add, 'pid') +@user_is_authorized(Product, Permissions.Engagement_Add, "pid") def new_eng_for_app(request, pid, cicd=False): jira_project_form = None jira_epic_form = None product = Product.objects.get(id=pid) - if request.method == 'POST': + if request.method == "POST": form = EngForm(request.POST, cicd=cicd, product=product, user=request.user) - logger.debug('new_eng_for_app') + logger.debug("new_eng_for_app") if form.is_valid(): # first create the new engagement @@ -1071,77 +1071,77 @@ def new_eng_for_app(request, pid, cicd=False): engagement.api_test = False engagement.pen_test = False engagement.check_list = False - engagement.product = form.cleaned_data.get('product') + engagement.product = form.cleaned_data.get("product") if engagement.threat_model: - engagement.progress = 'threat_model' + engagement.progress = "threat_model" else: - engagement.progress = 'other' + engagement.progress = "other" if cicd: - engagement.engagement_type = 'CI/CD' + engagement.engagement_type = "CI/CD" engagement.status = "In Progress" engagement.active = True engagement.save() form.save_m2m() - logger.debug('new_eng_for_app: process jira coming') + logger.debug("new_eng_for_app: process jira coming") # new engagement, so do not provide jira_project success, jira_project_form = jira_helper.process_jira_project_form(request, instance=None, engagement=engagement) error = not success - logger.debug('new_eng_for_app: process jira epic coming') + logger.debug("new_eng_for_app: process jira epic coming") success, jira_epic_form = jira_helper.process_jira_epic_form(request, engagement=engagement) error = error or not success messages.add_message(request, messages.SUCCESS, - _('Engagement added successfully.'), - extra_tags='alert-success') + _("Engagement added successfully."), + extra_tags="alert-success") if not error: if "_Add Tests" in request.POST: - return HttpResponseRedirect(reverse('add_tests', args=(engagement.id,))) + return HttpResponseRedirect(reverse("add_tests", args=(engagement.id,))) elif "_Import Scan Results" in request.POST: - return HttpResponseRedirect(reverse('import_scan_results', args=(engagement.id,))) + return HttpResponseRedirect(reverse("import_scan_results", args=(engagement.id,))) else: - return HttpResponseRedirect(reverse('view_engagement', args=(engagement.id,))) + return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,))) else: # engagement was saved, but JIRA errors, so goto edit_engagement - logger.debug('new_eng_for_app: jira errors') - return HttpResponseRedirect(reverse('edit_engagement', args=(engagement.id,))) + logger.debug("new_eng_for_app: jira errors") + return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id,))) else: logger.debug(form.errors) else: - form = EngForm(initial={'lead': request.user, 'target_start': timezone.now().date(), - 'target_end': timezone.now().date() + timedelta(days=7), 'product': product}, cicd=cicd, + form = EngForm(initial={"lead": request.user, "target_start": timezone.now().date(), + "target_end": timezone.now().date() + timedelta(days=7), "product": product}, cicd=cicd, product=product, user=request.user) - if 
get_system_setting('enable_jira'): - logger.debug('showing jira-project-form') - jira_project_form = JIRAProjectForm(target='engagement', product=product) - logger.debug('showing jira-epic-form') + if get_system_setting("enable_jira"): + logger.debug("showing jira-project-form") + jira_project_form = JIRAProjectForm(target="engagement", product=product) + logger.debug("showing jira-epic-form") jira_epic_form = JIRAEngagementForm() if cicd: - title = _('New CI/CD Engagement') + title = _("New CI/CD Engagement") else: - title = _('New Interactive Engagement') + title = _("New Interactive Engagement") product_tab = Product_Tab(product, title=title, tab="engagements") - return render(request, 'dojo/new_eng.html', { - 'form': form, - 'title': title, - 'product_tab': product_tab, - 'jira_epic_form': jira_epic_form, - 'jira_project_form': jira_project_form}) + return render(request, "dojo/new_eng.html", { + "form": form, + "title": title, + "product_tab": product_tab, + "jira_epic_form": jira_epic_form, + "jira_project_form": jira_project_form}) -@user_is_authorized(Product, Permissions.Technology_Add, 'pid') +@user_is_authorized(Product, Permissions.Technology_Add, "pid") def new_tech_for_prod(request, pid): - if request.method == 'POST': + if request.method == "POST": form = AppAnalysisForm(request.POST) if form.is_valid(): tech = form.save(commit=False) @@ -1149,121 +1149,121 @@ def new_tech_for_prod(request, pid): tech.save() messages.add_message(request, messages.SUCCESS, - _('Technology added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + _("Technology added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(pid,))) - form = AppAnalysisForm(initial={'user': request.user}) + form = AppAnalysisForm(initial={"user": request.user}) product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("Add Technology"), tab="settings") - return render(request, 'dojo/new_tech.html', - {'form': form, - 'product_tab': product_tab, - 'pid': pid}) + return render(request, "dojo/new_tech.html", + {"form": form, + "product_tab": product_tab, + "pid": pid}) -@user_is_authorized(App_Analysis, Permissions.Technology_Edit, 'tid') +@user_is_authorized(App_Analysis, Permissions.Technology_Edit, "tid") def edit_technology(request, tid): technology = get_object_or_404(App_Analysis, id=tid) form = AppAnalysisForm(instance=technology) - if request.method == 'POST': + if request.method == "POST": form = AppAnalysisForm(request.POST, instance=technology) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - _('Technology changed successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(technology.product.id,))) + _("Technology changed successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(technology.product.id,))) product_tab = Product_Tab(technology.product, title=_("Edit Technology"), tab="settings") - return render(request, 'dojo/edit_technology.html', - {'form': form, - 'product_tab': product_tab, - 'technology': technology}) + return render(request, "dojo/edit_technology.html", + {"form": form, + "product_tab": product_tab, + "technology": technology}) -@user_is_authorized(App_Analysis, Permissions.Technology_Delete, 'tid') +@user_is_authorized(App_Analysis, Permissions.Technology_Delete, "tid") def delete_technology(request, tid): technology = 
get_object_or_404(App_Analysis, id=tid) form = DeleteAppAnalysisForm(instance=technology) - if request.method == 'POST': + if request.method == "POST": form = Delete_Product_MemberForm(request.POST, instance=technology) technology = form.instance technology.delete() messages.add_message(request, messages.SUCCESS, - _('Technology deleted successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(technology.product.id,))) + _("Technology deleted successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(technology.product.id,))) product_tab = Product_Tab(technology.product, title=_("Delete Technology"), tab="settings") - return render(request, 'dojo/delete_technology.html', { - 'technology': technology, - 'form': form, - 'product_tab': product_tab, + return render(request, "dojo/delete_technology.html", { + "technology": technology, + "form": form, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Engagement_Add, 'pid') +@user_is_authorized(Product, Permissions.Engagement_Add, "pid") def new_eng_for_app_cicd(request, pid): # we have to use pid=pid here as new_eng_for_app expects kwargs, because that is how django calls the function based on urls.py named groups return new_eng_for_app(request, pid=pid, cicd=True) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def add_meta_data(request, pid): prod = Product.objects.get(id=pid) - if request.method == 'POST': + if request.method == "POST": form = DojoMetaDataForm(request.POST, instance=DojoMeta(product=prod)) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - _('Metadata added successfully.'), - extra_tags='alert-success') - if 'add_another' in request.POST: - return HttpResponseRedirect(reverse('add_meta_data', args=(pid,))) + _("Metadata added successfully."), + extra_tags="alert-success") + if "add_another" in request.POST: + return HttpResponseRedirect(reverse("add_meta_data", args=(pid,))) else: - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + return HttpResponseRedirect(reverse("view_product", args=(pid,))) else: form = DojoMetaDataForm() product_tab = Product_Tab(prod, title=_("Add Metadata"), tab="settings") - return render(request, 'dojo/add_product_meta_data.html', - {'form': form, - 'product_tab': product_tab, - 'product': prod, + return render(request, "dojo/add_product_meta_data.html", + {"form": form, + "product_tab": product_tab, + "product": prod, }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def edit_meta_data(request, pid): prod = Product.objects.get(id=pid) - if request.method == 'POST': + if request.method == "POST": for key, value in request.POST.items(): - if key.startswith('cfv_'): - cfv_id = int(key.split('_')[1]) + if key.startswith("cfv_"): + cfv_id = int(key.split("_")[1]) cfv = get_object_or_404(DojoMeta, id=cfv_id) value = value.strip() if value: cfv.value = value cfv.save() - if key.startswith('delete_'): - cfv_id = int(key.split('_')[2]) + if key.startswith("delete_"): + cfv_id = int(key.split("_")[2]) cfv = get_object_or_404(DojoMeta, id=cfv_id) cfv.delete() messages.add_message(request, messages.SUCCESS, - _('Metadata edited successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + _("Metadata edited successfully."), + 
extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(pid,))) product_tab = Product_Tab(prod, title=_("Edit Metadata"), tab="settings") - return render(request, 'dojo/edit_product_meta_data.html', - {'product': prod, - 'product_tab': product_tab, + return render(request, "dojo/edit_product_meta_data.html", + {"product": prod, + "product_tab": product_tab, }) @@ -1321,7 +1321,7 @@ def get_finding_form(self, request: HttpRequest, product: Product): args = [request.POST] if request.method == "POST" else [] # Set the initial form args kwargs = { - "initial": {'date': timezone.now().date()}, + "initial": {"date": timezone.now().date()}, "req_resp": None, "product": product, } @@ -1365,29 +1365,29 @@ def get_github_form(self, request: HttpRequest, test: Test): return None def validate_status_change(self, request: HttpRequest, context: dict): - if ((context["form"]['active'].value() is False - or context["form"]['false_p'].value()) - and context["form"]['duplicate'].value() is False): + if ((context["form"]["active"].value() is False + or context["form"]["false_p"].value()) + and context["form"]["duplicate"].value() is False): closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count() if closing_disabled != 0: error_inactive = ValidationError( - _('Can not set a finding as inactive without adding all mandatory notes'), - code='inactive_without_mandatory_notes', + _("Can not set a finding as inactive without adding all mandatory notes"), + code="inactive_without_mandatory_notes", ) error_false_p = ValidationError( - _('Can not set a finding as false positive without adding all mandatory notes'), - code='false_p_without_mandatory_notes', + _("Can not set a finding as false positive without adding all mandatory notes"), + code="false_p_without_mandatory_notes", ) - if context["form"]['active'].value() is False: - context["form"].add_error('active', error_inactive) - if context["form"]['false_p'].value(): - context["form"].add_error('false_p', error_false_p) + if context["form"]["active"].value() is False: + context["form"].add_error("active", error_inactive) + if context["form"]["false_p"].value(): + context["form"].add_error("false_p", error_false_p) messages.add_message( request, messages.ERROR, - _('Can not set a finding as inactive or false positive without adding all mandatory notes'), - extra_tags='alert-danger') + _("Can not set a finding as inactive or false positive without adding all mandatory notes"), + extra_tags="alert-danger") return request @@ -1398,7 +1398,7 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict): finding.test = test finding.reporter = request.user finding.numerical_severity = Finding.get_numerical_severity(finding.severity) - finding.tags = context["form"].cleaned_data['tags'] + finding.tags = context["form"].cleaned_data["tags"] finding.save() # Save and add new endpoints finding_helper.add_endpoints(finding, context["form"]) @@ -1419,11 +1419,11 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic if context["jform"] and context["jform"].is_valid(): # Push to Jira? 
- logger.debug('jira form valid') - push_to_jira = jira_helper.is_push_all_issues(finding) or context["jform"].cleaned_data.get('push_to_jira') + logger.debug("jira form valid") + push_to_jira = jira_helper.is_push_all_issues(finding) or context["jform"].cleaned_data.get("push_to_jira") jira_message = None # if the jira issue key was changed, update database - new_jira_issue_key = context["jform"].cleaned_data.get('jira_issue') + new_jira_issue_key = context["jform"].cleaned_data.get("jira_issue") if finding.has_jira_issue: # everything in DD around JIRA integration is based on the internal id of the issue in JIRA # instead of on the public jira issue key. @@ -1431,19 +1431,19 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic # we can assume the issue exist, which is already checked in the validation of the jform if not new_jira_issue_key: jira_helper.finding_unlink_jira(request, finding) - jira_message = 'Link to JIRA issue removed successfully.' + jira_message = "Link to JIRA issue removed successfully." elif new_jira_issue_key != finding.jira_issue.jira_key: jira_helper.finding_unlink_jira(request, finding) jira_helper.finding_link_jira(request, finding, new_jira_issue_key) - jira_message = 'Changed JIRA link successfully.' + jira_message = "Changed JIRA link successfully." else: - logger.debug('finding has no jira issue yet') + logger.debug("finding has no jira issue yet") if new_jira_issue_key: logger.debug( - 'finding has no jira issue yet, but jira issue specified in request. trying to link.') + "finding has no jira issue yet, but jira issue specified in request. trying to link.") jira_helper.finding_link_jira(request, finding, new_jira_issue_key) - jira_message = 'Linked a JIRA issue successfully.' + jira_message = "Linked a JIRA issue successfully." 
# Determine if a message should be added if jira_message: messages.add_message( @@ -1461,7 +1461,7 @@ def process_github_form(self, request: HttpRequest, finding: Finding, context: d return request, True if context["gform"].is_valid(): - add_external_issue(finding, 'github') + add_external_issue(finding, "github") return request, True else: @@ -1502,8 +1502,8 @@ def process_forms(self, request: HttpRequest, test: Test, context: dict): messages.add_message( request, messages.SUCCESS, - _('Finding added successfully.'), - extra_tags='alert-success') + _("Finding added successfully."), + extra_tags="alert-success") return finding, request, all_forms_valid @@ -1535,59 +1535,59 @@ def post(self, request: HttpRequest, product_id: int): _, request, success = self.process_forms(request, test, context) # Handle the case of a successful form if success: - if '_Finished' in request.POST: - return HttpResponseRedirect(reverse('view_test', args=(test.id,))) + if "_Finished" in request.POST: + return HttpResponseRedirect(reverse("view_test", args=(test.id,))) else: - return HttpResponseRedirect(reverse('add_findings', args=(test.id,))) + return HttpResponseRedirect(reverse("add_findings", args=(test.id,))) else: context["form_error"] = True # Render the form return render(request, self.get_template(), context) -@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def engagement_presets(request, pid): prod = get_object_or_404(Product, id=pid) presets = Engagement_Presets.objects.filter(product=prod).all() product_tab = Product_Tab(prod, title=_("Engagement Presets"), tab="settings") - return render(request, 'dojo/view_presets.html', - {'product_tab': product_tab, - 'presets': presets, - 'prod': prod}) + return render(request, "dojo/view_presets.html", + {"product_tab": product_tab, + "presets": presets, + "prod": prod}) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def edit_engagement_presets(request, pid, eid): prod = get_object_or_404(Product, id=pid) preset = get_object_or_404(Engagement_Presets, id=eid) product_tab = Product_Tab(prod, title=_("Edit Engagement Preset"), tab="settings") - if request.method == 'POST': + if request.method == "POST": tform = EngagementPresetsForm(request.POST, instance=preset) if tform.is_valid(): tform.save() messages.add_message( request, messages.SUCCESS, - _('Engagement Preset Successfully Updated.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('engagement_presets', args=(pid,))) + _("Engagement Preset Successfully Updated."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("engagement_presets", args=(pid,))) else: tform = EngagementPresetsForm(instance=preset) - return render(request, 'dojo/edit_presets.html', - {'product_tab': product_tab, - 'tform': tform, - 'prod': prod}) + return render(request, "dojo/edit_presets.html", + {"product_tab": product_tab, + "tform": tform, + "prod": prod}) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def add_engagement_presets(request, pid): prod = get_object_or_404(Product, id=pid) - if request.method == 'POST': + if request.method == "POST": tform = EngagementPresetsForm(request.POST) if tform.is_valid(): form_copy = tform.save(commit=False) @@ -1597,56 +1597,56 @@ def add_engagement_presets(request, pid): messages.add_message( request, messages.SUCCESS, - 
_('Engagement Preset Successfully Created.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('engagement_presets', args=(pid,))) + _("Engagement Preset Successfully Created."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("engagement_presets", args=(pid,))) else: tform = EngagementPresetsForm() product_tab = Product_Tab(prod, title=_("New Engagement Preset"), tab="settings") - return render(request, 'dojo/new_params.html', {'tform': tform, 'pid': pid, 'product_tab': product_tab}) + return render(request, "dojo/new_params.html", {"tform": tform, "pid": pid, "product_tab": product_tab}) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def delete_engagement_presets(request, pid, eid): prod = get_object_or_404(Product, id=pid) preset = get_object_or_404(Engagement_Presets, id=eid) form = DeleteEngagementPresetsForm(instance=preset) - if request.method == 'POST': - if 'id' in request.POST: + if request.method == "POST": + if "id" in request.POST: form = DeleteEngagementPresetsForm(request.POST, instance=preset) if form.is_valid(): preset.delete() messages.add_message(request, messages.SUCCESS, - _('Engagement presets and engagement relationships removed.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('engagement_presets', args=(pid,))) + _("Engagement presets and engagement relationships removed."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("engagement_presets", args=(pid,))) collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([preset]) rels = collector.nested() product_tab = Product_Tab(prod, title=_("Delete Engagement Preset"), tab="settings") - return render(request, 'dojo/delete_presets.html', - {'product': product, - 'form': form, - 'product_tab': product_tab, - 'rels': rels, + return render(request, "dojo/delete_presets.html", + {"product": product, + "form": form, + "product_tab": product_tab, + "rels": rels, }) -@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def edit_notifications(request, pid): prod = get_object_or_404(Product, id=pid) - if request.method == 'POST': + if request.method == "POST": product_notifications = Notifications.objects.filter(user=request.user).filter(product=prod).first() if not product_notifications: product_notifications = Notifications(user=request.user, product=prod) - logger.debug('no existing product notifications found') + logger.debug("no existing product notifications found") else: - logger.debug('existing product notifications found') + logger.debug("existing product notifications found") form = ProductNotificationsForm(request.POST, instance=product_notifications) # print(vars(form)) @@ -1655,167 +1655,167 @@ def edit_notifications(request, pid): form.save() messages.add_message(request, messages.SUCCESS, - _('Notification settings updated.'), - extra_tags='alert-success') + _("Notification settings updated."), + extra_tags="alert-success") - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + return HttpResponseRedirect(reverse("view_product", args=(pid,))) -@user_is_authorized(Product, Permissions.Product_Manage_Members, 'pid') +@user_is_authorized(Product, Permissions.Product_Manage_Members, "pid") def add_product_member(request, pid): product = get_object_or_404(Product, pk=pid) - memberform = Add_Product_MemberForm(initial={'product': product.id}) - if request.method == 
'POST': - memberform = Add_Product_MemberForm(request.POST, initial={'product': product.id}) + memberform = Add_Product_MemberForm(initial={"product": product.id}) + if request.method == "POST": + memberform = Add_Product_MemberForm(request.POST, initial={"product": product.id}) if memberform.is_valid(): - if memberform.cleaned_data['role'].is_owner and not user_has_permission(request.user, product, + if memberform.cleaned_data["role"].is_owner and not user_has_permission(request.user, product, Permissions.Product_Member_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to add users as owners.'), - extra_tags='alert-warning') + _("You are not permitted to add users as owners."), + extra_tags="alert-warning") else: - if 'users' in memberform.cleaned_data and len(memberform.cleaned_data['users']) > 0: - for user in memberform.cleaned_data['users']: + if "users" in memberform.cleaned_data and len(memberform.cleaned_data["users"]) > 0: + for user in memberform.cleaned_data["users"]: existing_members = Product_Member.objects.filter(product=product, user=user) if existing_members.count() == 0: product_member = Product_Member() product_member.product = product product_member.user = user - product_member.role = memberform.cleaned_data['role'] + product_member.role = memberform.cleaned_data["role"] product_member.save() messages.add_message(request, messages.SUCCESS, - _('Product members added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + _("Product members added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(pid,))) product_tab = Product_Tab(product, title=_("Add Product Member"), tab="settings") - return render(request, 'dojo/new_product_member.html', { - 'product': product, - 'form': memberform, - 'product_tab': product_tab, + return render(request, "dojo/new_product_member.html", { + "product": product, + "form": memberform, + "product_tab": product_tab, }) -@user_is_authorized(Product_Member, Permissions.Product_Manage_Members, 'memberid') +@user_is_authorized(Product_Member, Permissions.Product_Manage_Members, "memberid") def edit_product_member(request, memberid): member = get_object_or_404(Product_Member, pk=memberid) memberform = Edit_Product_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Edit_Product_MemberForm(request.POST, instance=member) if memberform.is_valid(): if member.role.is_owner and not user_has_permission(request.user, member.product, Permissions.Product_Member_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to make users to owners.'), - extra_tags='alert-warning') + _("You are not permitted to make users to owners."), + extra_tags="alert-warning") else: memberform.save() messages.add_message(request, messages.SUCCESS, - _('Product member updated successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id,))) + _("Product member updated successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id,))) else: - return HttpResponseRedirect(reverse('view_product', args=(member.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) product_tab = Product_Tab(member.product, 
title=_("Edit Product Member"), tab="settings") - return render(request, 'dojo/edit_product_member.html', { - 'memberid': memberid, - 'form': memberform, - 'product_tab': product_tab, + return render(request, "dojo/edit_product_member.html", { + "memberid": memberid, + "form": memberform, + "product_tab": product_tab, }) -@user_is_authorized(Product_Member, Permissions.Product_Member_Delete, 'memberid') +@user_is_authorized(Product_Member, Permissions.Product_Member_Delete, "memberid") def delete_product_member(request, memberid): member = get_object_or_404(Product_Member, pk=memberid) memberform = Delete_Product_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Delete_Product_MemberForm(request.POST, instance=member) member = memberform.instance user = member.user member.delete() messages.add_message(request, messages.SUCCESS, - _('Product member deleted successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id,))) + _("Product member deleted successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id,))) else: if user == request.user: - return HttpResponseRedirect(reverse('product')) + return HttpResponseRedirect(reverse("product")) else: - return HttpResponseRedirect(reverse('view_product', args=(member.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(member.product.id,))) product_tab = Product_Tab(member.product, title=_("Delete Product Member"), tab="settings") - return render(request, 'dojo/delete_product_member.html', { - 'memberid': memberid, - 'form': memberform, - 'product_tab': product_tab, + return render(request, "dojo/delete_product_member.html", { + "memberid": memberid, + "form": memberform, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Product_API_Scan_Configuration_Add, 'pid') +@user_is_authorized(Product, Permissions.Product_API_Scan_Configuration_Add, "pid") def add_api_scan_configuration(request, pid): product = get_object_or_404(Product, id=pid) - if request.method == 'POST': + if request.method == "POST": form = Product_API_Scan_ConfigurationForm(request.POST) if form.is_valid(): product_api_scan_configuration = form.save(commit=False) product_api_scan_configuration.product = product try: api = create_API(product_api_scan_configuration.tool_configuration) - if api and hasattr(api, 'test_product_connection'): + if api and hasattr(api, "test_product_connection"): result = api.test_product_connection(product_api_scan_configuration) messages.add_message(request, messages.SUCCESS, - _('API connection successful with message: %(result)s.') % {'result': result}, - extra_tags='alert-success') + _("API connection successful with message: %(result)s.") % {"result": result}, + extra_tags="alert-success") product_api_scan_configuration.save() messages.add_message(request, messages.SUCCESS, - _('API Scan Configuration added successfully.'), - extra_tags='alert-success') - if 'add_another' in request.POST: - return HttpResponseRedirect(reverse('add_api_scan_configuration', args=(pid,))) + _("API Scan Configuration added successfully."), + extra_tags="alert-success") + if "add_another" in request.POST: + return HttpResponseRedirect(reverse("add_api_scan_configuration", args=(pid,))) else: - return HttpResponseRedirect(reverse('view_api_scan_configurations', args=(pid,))) + 
return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) except Exception as e: logger.exception(e) messages.add_message(request, messages.ERROR, str(e), - extra_tags='alert-danger') + extra_tags="alert-danger") else: form = Product_API_Scan_ConfigurationForm() product_tab = Product_Tab(product, title=_("Add API Scan Configuration"), tab="settings") return render(request, - 'dojo/add_product_api_scan_configuration.html', - {'form': form, - 'product_tab': product_tab, - 'product': product, - 'api_scan_configuration_hints': get_api_scan_configuration_hints(), + "dojo/add_product_api_scan_configuration.html", + {"form": form, + "product_tab": product_tab, + "product": product, + "api_scan_configuration_hints": get_api_scan_configuration_hints(), }) -@user_is_authorized(Product, Permissions.Product_View, 'pid') +@user_is_authorized(Product, Permissions.Product_View, "pid") def view_api_scan_configurations(request, pid): product_api_scan_configurations = Product_API_Scan_Configuration.objects.filter(product=pid) product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("API Scan Configurations"), tab="settings") return render(request, - 'dojo/view_product_api_scan_configurations.html', + "dojo/view_product_api_scan_configurations.html", { - 'product_api_scan_configurations': product_api_scan_configurations, - 'product_tab': product_tab, - 'pid': pid, + "product_api_scan_configurations": product_api_scan_configurations, + "product_tab": product_tab, + "pid": pid, }) -@user_is_authorized(Product_API_Scan_Configuration, Permissions.Product_API_Scan_Configuration_Edit, 'pascid') +@user_is_authorized(Product_API_Scan_Configuration, Permissions.Product_API_Scan_Configuration_Edit, "pascid") def edit_api_scan_configuration(request, pid, pascid): product_api_scan_configuration = get_object_or_404(Product_API_Scan_Configuration, id=pascid) @@ -1823,45 +1823,45 @@ def edit_api_scan_configuration(request, pid, pascid): pid): # user is trying to edit Tool Configuration from another product (trying to by-pass auth) raise Http404 - if request.method == 'POST': + if request.method == "POST": form = Product_API_Scan_ConfigurationForm(request.POST, instance=product_api_scan_configuration) if form.is_valid(): try: form_copy = form.save(commit=False) api = create_API(form_copy.tool_configuration) - if api and hasattr(api, 'test_product_connection'): + if api and hasattr(api, "test_product_connection"): result = api.test_product_connection(form_copy) messages.add_message(request, messages.SUCCESS, - _('API connection successful with message: %(result)s.') % {'result': result}, - extra_tags='alert-success') + _("API connection successful with message: %(result)s.") % {"result": result}, + extra_tags="alert-success") form.save() messages.add_message(request, messages.SUCCESS, - _('API Scan Configuration successfully updated.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_api_scan_configurations', args=(pid,))) + _("API Scan Configuration successfully updated."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) except Exception as e: logger.info(e) messages.add_message(request, messages.ERROR, str(e), - extra_tags='alert-danger') + extra_tags="alert-danger") else: form = Product_API_Scan_ConfigurationForm(instance=product_api_scan_configuration) product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("Edit API Scan Configuration"), tab="settings") return render(request, - 
'dojo/edit_product_api_scan_configuration.html', + "dojo/edit_product_api_scan_configuration.html", { - 'form': form, - 'product_tab': product_tab, - 'api_scan_configuration_hints': get_api_scan_configuration_hints(), + "form": form, + "product_tab": product_tab, + "api_scan_configuration_hints": get_api_scan_configuration_hints(), }) -@user_is_authorized(Product_API_Scan_Configuration, Permissions.Product_API_Scan_Configuration_Delete, 'pascid') +@user_is_authorized(Product_API_Scan_Configuration, Permissions.Product_API_Scan_Configuration_Delete, "pascid") def delete_api_scan_configuration(request, pid, pascid): product_api_scan_configuration = get_object_or_404(Product_API_Scan_Configuration, id=pascid) @@ -1869,120 +1869,120 @@ def delete_api_scan_configuration(request, pid, pascid): pid): # user is trying to delete Tool Configuration from another product (trying to by-pass auth) raise Http404 - if request.method == 'POST': + if request.method == "POST": form = Product_API_Scan_ConfigurationForm(request.POST) product_api_scan_configuration.delete() messages.add_message(request, messages.SUCCESS, - _('API Scan Configuration deleted.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_api_scan_configurations', args=(pid,))) + _("API Scan Configuration deleted."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,))) else: form = DeleteProduct_API_Scan_ConfigurationForm(instance=product_api_scan_configuration) product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("Delete Tool Configuration"), tab="settings") return render(request, - 'dojo/delete_product_api_scan_configuration.html', + "dojo/delete_product_api_scan_configuration.html", { - 'form': form, - 'product_tab': product_tab, + "form": form, + "product_tab": product_tab, }) -@user_is_authorized(Product_Group, Permissions.Product_Group_Edit, 'groupid') +@user_is_authorized(Product_Group, Permissions.Product_Group_Edit, "groupid") def edit_product_group(request, groupid): logger.exception(groupid) group = get_object_or_404(Product_Group, pk=groupid) groupform = Edit_Product_Group_Form(instance=group) - if request.method == 'POST': + if request.method == "POST": groupform = Edit_Product_Group_Form(request.POST, instance=group) if groupform.is_valid(): if group.role.is_owner and not user_has_permission(request.user, group.product, Permissions.Product_Group_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to make groups owners.'), - extra_tags='alert-warning') + _("You are not permitted to make groups owners."), + extra_tags="alert-warning") else: groupform.save() messages.add_message(request, messages.SUCCESS, - _('Product group updated successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View Group'): - return HttpResponseRedirect(reverse('view_group', args=(group.group.id,))) + _("Product group updated successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View Group"): + return HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) else: - return HttpResponseRedirect(reverse('view_product', args=(group.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) product_tab = Product_Tab(group.product, title=_("Edit Product Group"), tab="settings") - return render(request, 'dojo/edit_product_group.html', { - 'groupid': groupid, - 'form': groupform, - 'product_tab': product_tab, + return render(request, 
"dojo/edit_product_group.html", { + "groupid": groupid, + "form": groupform, + "product_tab": product_tab, }) -@user_is_authorized(Product_Group, Permissions.Product_Group_Delete, 'groupid') +@user_is_authorized(Product_Group, Permissions.Product_Group_Delete, "groupid") def delete_product_group(request, groupid): group = get_object_or_404(Product_Group, pk=groupid) groupform = Delete_Product_GroupForm(instance=group) - if request.method == 'POST': + if request.method == "POST": groupform = Delete_Product_GroupForm(request.POST, instance=group) group = groupform.instance group.delete() messages.add_message(request, messages.SUCCESS, - _('Product group deleted successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View Group'): - return HttpResponseRedirect(reverse('view_group', args=(group.group.id,))) + _("Product group deleted successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View Group"): + return HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) else: # TODO: If user was in the group that was deleted and no longer has access, redirect back to product listing # page - return HttpResponseRedirect(reverse('view_product', args=(group.product.id,))) + return HttpResponseRedirect(reverse("view_product", args=(group.product.id,))) product_tab = Product_Tab(group.product, title=_("Delete Product Group"), tab="settings") - return render(request, 'dojo/delete_product_group.html', { - 'groupid': groupid, - 'form': groupform, - 'product_tab': product_tab, + return render(request, "dojo/delete_product_group.html", { + "groupid": groupid, + "form": groupform, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Product_Group_Add, 'pid') +@user_is_authorized(Product, Permissions.Product_Group_Add, "pid") def add_product_group(request, pid): product = get_object_or_404(Product, pk=pid) - group_form = Add_Product_GroupForm(initial={'product': product.id}) + group_form = Add_Product_GroupForm(initial={"product": product.id}) - if request.method == 'POST': - group_form = Add_Product_GroupForm(request.POST, initial={'product': product.id}) + if request.method == "POST": + group_form = Add_Product_GroupForm(request.POST, initial={"product": product.id}) if group_form.is_valid(): - if group_form.cleaned_data['role'].is_owner and not user_has_permission(request.user, product, + if group_form.cleaned_data["role"].is_owner and not user_has_permission(request.user, product, Permissions.Product_Group_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to add groups as owners.'), - extra_tags='alert-warning') + _("You are not permitted to add groups as owners."), + extra_tags="alert-warning") else: - if 'groups' in group_form.cleaned_data and len(group_form.cleaned_data['groups']) > 0: - for group in group_form.cleaned_data['groups']: + if "groups" in group_form.cleaned_data and len(group_form.cleaned_data["groups"]) > 0: + for group in group_form.cleaned_data["groups"]: groups = Product_Group.objects.filter(product=product, group=group) if groups.count() == 0: product_group = Product_Group() product_group.product = product product_group.group = group - product_group.role = group_form.cleaned_data['role'] + product_group.role = group_form.cleaned_data["role"] product_group.save() messages.add_message(request, messages.SUCCESS, - _('Product groups added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product', args=(pid,))) + _("Product groups added 
successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product", args=(pid,))) product_tab = Product_Tab(product, title=_("Edit Product Group"), tab="settings") - return render(request, 'dojo/new_product_group.html', { - 'product': product, - 'form': group_form, - 'product_tab': product_tab, + return render(request, "dojo/new_product_group.html", { + "product": product, + "form": group_form, + "product_tab": product_tab, }) diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py index 4b658798e2..737584a5b0 100644 --- a/dojo/product_type/queries.py +++ b/dojo/product_type/queries.py @@ -19,22 +19,22 @@ def get_authorized_product_types(permission): return Product_Type.objects.none() if user.is_superuser: - return Product_Type.objects.all().order_by('name') + return Product_Type.objects.all().order_by("name") if user_has_global_permission(user, permission): - return Product_Type.objects.all().order_by('name') + return Product_Type.objects.all().order_by("name") roles = get_roles_for_permission(permission) - authorized_roles = Product_Type_Member.objects.filter(product_type=OuterRef('pk'), + authorized_roles = Product_Type_Member.objects.filter(product_type=OuterRef("pk"), user=user, role__in=roles) authorized_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('pk'), + product_type=OuterRef("pk"), group__users=user, role__in=roles) product_types = Product_Type.objects.annotate( member=Exists(authorized_roles), - authorized_group=Exists(authorized_groups)).order_by('name') + authorized_group=Exists(authorized_groups)).order_by("name") product_types = product_types.filter(Q(member=True) | Q(authorized_group=True)) return product_types @@ -44,7 +44,7 @@ def get_authorized_members_for_product_type(product_type, permission): user = get_current_user() if user.is_superuser or user_has_permission(user, product_type, permission): - return Product_Type_Member.objects.filter(product_type=product_type).order_by('user__first_name', 'user__last_name').select_related('role', 'product_type', 'user') + return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user") else: return None @@ -54,7 +54,7 @@ def get_authorized_groups_for_product_type(product_type, permission): if user.is_superuser or user_has_permission(user, product_type, permission): authorized_groups = get_authorized_groups(Permissions.Group_View) - return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by('group__name').select_related('role', 'group') + return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group") else: return None @@ -66,13 +66,13 @@ def get_authorized_product_type_members(permission): return Product_Type_Member.objects.none() if user.is_superuser: - return Product_Type_Member.objects.all().select_related('role') + return Product_Type_Member.objects.all().order_by("id").select_related("role") if user_has_global_permission(user, permission): - return Product_Type_Member.objects.all().select_related('role') + return Product_Type_Member.objects.all().order_by("id").select_related("role") product_types = get_authorized_product_types(permission) - return Product_Type_Member.objects.filter(product_type__in=product_types).select_related('role') + return 
diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py
index 4b658798e2..737584a5b0 100644
--- a/dojo/product_type/queries.py
+++ b/dojo/product_type/queries.py
@@ -19,22 +19,22 @@ def get_authorized_product_types(permission):
         return Product_Type.objects.none()

     if user.is_superuser:
-        return Product_Type.objects.all().order_by('name')
+        return Product_Type.objects.all().order_by("name")

     if user_has_global_permission(user, permission):
-        return Product_Type.objects.all().order_by('name')
+        return Product_Type.objects.all().order_by("name")

     roles = get_roles_for_permission(permission)
-    authorized_roles = Product_Type_Member.objects.filter(product_type=OuterRef('pk'),
+    authorized_roles = Product_Type_Member.objects.filter(product_type=OuterRef("pk"),
                                                           user=user,
                                                           role__in=roles)
     authorized_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('pk'),
+        product_type=OuterRef("pk"),
         group__users=user,
         role__in=roles)
     product_types = Product_Type.objects.annotate(
         member=Exists(authorized_roles),
-        authorized_group=Exists(authorized_groups)).order_by('name')
+        authorized_group=Exists(authorized_groups)).order_by("name")
     product_types = product_types.filter(Q(member=True) | Q(authorized_group=True))

     return product_types
@@ -44,7 +44,7 @@ def get_authorized_members_for_product_type(product_type, permission):
     user = get_current_user()

     if user.is_superuser or user_has_permission(user, product_type, permission):
-        return Product_Type_Member.objects.filter(product_type=product_type).order_by('user__first_name', 'user__last_name').select_related('role', 'product_type', 'user')
+        return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user")
     else:
         return None
@@ -54,7 +54,7 @@ def get_authorized_groups_for_product_type(product_type, permission):

     if user.is_superuser or user_has_permission(user, product_type, permission):
         authorized_groups = get_authorized_groups(Permissions.Group_View)
-        return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by('group__name').select_related('role', 'group')
+        return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group")
     else:
         return None
@@ -66,13 +66,13 @@ def get_authorized_product_type_members(permission):
         return Product_Type_Member.objects.none()

     if user.is_superuser:
-        return Product_Type_Member.objects.all().select_related('role')
+        return Product_Type_Member.objects.all().order_by("id").select_related("role")

     if user_has_global_permission(user, permission):
-        return Product_Type_Member.objects.all().select_related('role')
+        return Product_Type_Member.objects.all().order_by("id").select_related("role")

     product_types = get_authorized_product_types(permission)
-    return Product_Type_Member.objects.filter(product_type__in=product_types).select_related('role')
+    return Product_Type_Member.objects.filter(product_type__in=product_types).order_by("id").select_related("role")


 def get_authorized_product_type_members_for_user(user, permission):
@@ -82,13 +82,13 @@ def get_authorized_product_type_members_for_user(user, permission):
         return Product_Type_Member.objects.none()

     if request_user.is_superuser:
-        return Product_Type_Member.objects.filter(user=user).select_related('role', 'product_type')
+        return Product_Type_Member.objects.filter(user=user).select_related("role", "product_type")

-    if hasattr(request_user, 'global_role') and request_user.global_role.role is not None and role_has_permission(request_user.global_role.role.id, permission):
-        return Product_Type_Member.objects.filter(user=user).select_related('role', 'product_type')
+    if hasattr(request_user, "global_role") and request_user.global_role.role is not None and role_has_permission(request_user.global_role.role.id, permission):
+        return Product_Type_Member.objects.filter(user=user).select_related("role", "product_type")

     product_types = get_authorized_product_types(permission)
-    return Product_Type_Member.objects.filter(user=user, product_type__in=product_types).select_related('role', 'product_type')
+    return Product_Type_Member.objects.filter(user=user, product_type__in=product_types).select_related("role", "product_type")


 def get_authorized_product_type_groups(permission):
@@ -98,7 +98,7 @@ def get_authorized_product_type_groups(permission):
         return Product_Type_Group.objects.none()

     if user.is_superuser:
-        return Product_Type_Group.objects.all().select_related('role')
+        return Product_Type_Group.objects.all().order_by("id").select_related("role")

     product_types = get_authorized_product_types(permission)
-    return Product_Type_Group.objects.filter(product_type__in=product_types).select_related('role')
+    return Product_Type_Group.objects.filter(product_type__in=product_types).order_by("id").select_related("role")
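Two things happen in these hunks beyond the quote swap. First, `Exists(OuterRef("pk"))` annotates each product type with whether the current user, or one of their groups, holds a qualifying role, all in a single query rather than one query per row. Second, the new `.order_by("id")` calls give the member and group querysets a deterministic order; without an explicit ordering, Django's paginator emits `UnorderedObjectListWarning` and page contents can shift between requests. A condensed sketch of the membership filter, assuming DefectDojo's models:

    from django.db.models import Exists, OuterRef

    from dojo.models import Product_Type, Product_Type_Member


    def visible_product_types(user, roles):
        # Correlated subquery: one membership probe per candidate product type.
        memberships = Product_Type_Member.objects.filter(
            product_type=OuterRef("pk"), user=user, role__in=roles)
        return (Product_Type.objects
                .annotate(member=Exists(memberships))
                .filter(member=True)
                .order_by("name"))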
will default to "other" but this event name needs to stay because of unit testing + title=_("Deletion of %(name)s") % {"name": instance.name}, description=description, no_users=True, - url=reverse('product_type'), + url=reverse("product_type"), icon="exclamation-triangle") diff --git a/dojo/product_type/urls.py b/dojo/product_type/urls.py index 98c6b1cf81..dd64a5e4c0 100644 --- a/dojo/product_type/urls.py +++ b/dojo/product_type/urls.py @@ -5,28 +5,28 @@ urlpatterns = [ # product type - re_path(r'^product/type$', views.product_type, name='product_type'), - re_path(r'^product/type/(?P\d+)$', - views.view_product_type, name='view_product_type'), - re_path(r'^product/type/(?P\d+)/edit$', - views.edit_product_type, name='edit_product_type'), - re_path(r'^product/type/(?P\d+)/delete$', - views.delete_product_type, name='delete_product_type'), - re_path(r'^product/type/add$', views.add_product_type, - name='add_product_type'), - re_path(r'^product/type/(?P\d+)/add_product', + re_path(r"^product/type$", views.product_type, name="product_type"), + re_path(r"^product/type/(?P\d+)$", + views.view_product_type, name="view_product_type"), + re_path(r"^product/type/(?P\d+)/edit$", + views.edit_product_type, name="edit_product_type"), + re_path(r"^product/type/(?P\d+)/delete$", + views.delete_product_type, name="delete_product_type"), + re_path(r"^product/type/add$", views.add_product_type, + name="add_product_type"), + re_path(r"^product/type/(?P\d+)/add_product", product_views.new_product, - name='add_product_to_product_type'), - re_path(r'^product/type/(?P\d+)/add_member$', views.add_product_type_member, - name='add_product_type_member'), - re_path(r'^product/type/member/(?P\d+)/edit$', views.edit_product_type_member, - name='edit_product_type_member'), - re_path(r'^product/type/member/(?P\d+)/delete$', views.delete_product_type_member, - name='delete_product_type_member'), - re_path(r'^product/type/(?P\d+)/add_group$', views.add_product_type_group, - name='add_product_type_group'), - re_path(r'^product/type/group/(?P\d+)/edit$', views.edit_product_type_group, - name='edit_product_type_group'), - re_path(r'^product/type/group/(?P\d+)/delete$', views.delete_product_type_group, - name='delete_product_type_group'), + name="add_product_to_product_type"), + re_path(r"^product/type/(?P\d+)/add_member$", views.add_product_type_member, + name="add_product_type_member"), + re_path(r"^product/type/member/(?P\d+)/edit$", views.edit_product_type_member, + name="edit_product_type_member"), + re_path(r"^product/type/member/(?P\d+)/delete$", views.delete_product_type_member, + name="delete_product_type_member"), + re_path(r"^product/type/(?P\d+)/add_group$", views.add_product_type_group, + name="add_product_type_group"), + re_path(r"^product/type/group/(?P\d+)/edit$", views.edit_product_type_group, + name="edit_product_type_group"), + re_path(r"^product/type/group/(?P\d+)/delete$", views.delete_product_type_group, + name="delete_product_type_group"), ] diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py index 08c91823c0..302aa6dbbf 100644 --- a/dojo/product_type/views.py +++ b/dojo/product_type/views.py @@ -51,7 +51,7 @@ def product_type(request): prod_types = get_authorized_product_types(Permissions.Product_Type_View) - name_words = prod_types.values_list('name', flat=True) + name_words = prod_types.values_list("name", flat=True) ptl = ProductTypeFilter(request.GET, queryset=prod_types) pts = get_page_items(request, ptl.qs, 25) @@ -61,11 +61,11 @@ def product_type(request): page_name = _("Product 
Type List") add_breadcrumb(title=page_name, top_level=True, request=request) - return render(request, 'dojo/product_type.html', { - 'name': page_name, - 'pts': pts, - 'ptl': ptl, - 'name_words': name_words}) + return render(request, "dojo/product_type.html", { + "name": page_name, + "pts": pts, + "ptl": ptl, + "name_words": name_words}) def prefetch_for_product_type(prod_types): @@ -76,12 +76,12 @@ def prefetch_for_product_type(prod_types): active_verified_findings_query = Q(prod_type__engagement__test__finding__active=True, prod_type__engagement__test__finding__verified=True) prefetch_prod_types = prefetch_prod_types.annotate( - active_findings_count=Count('prod_type__engagement__test__finding__id', filter=active_findings_query)) + active_findings_count=Count("prod_type__engagement__test__finding__id", filter=active_findings_query)) prefetch_prod_types = prefetch_prod_types.annotate( - active_verified_findings_count=Count('prod_type__engagement__test__finding__id', filter=active_verified_findings_query)) - prefetch_prod_types = prefetch_prod_types.annotate(prod_count=Count('prod_type', distinct=True)) + active_verified_findings_count=Count("prod_type__engagement__test__finding__id", filter=active_verified_findings_query)) + prefetch_prod_types = prefetch_prod_types.annotate(prod_count=Count("prod_type", distinct=True)) else: - logger.debug('unable to prefetch because query was already executed') + logger.debug("unable to prefetch because query was already executed") return prefetch_prod_types @@ -90,7 +90,7 @@ def prefetch_for_product_type(prod_types): def add_product_type(request): page_name = _("Add Product Type") form = Product_TypeForm() - if request.method == 'POST': + if request.method == "POST": form = Product_TypeForm(request.POST) if form.is_valid(): product_type = form.save() @@ -101,18 +101,18 @@ def add_product_type(request): member.save() messages.add_message(request, messages.SUCCESS, - _('Product type added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('product_type')) + _("Product type added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("product_type")) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/new_product_type.html', { - 'name': page_name, - 'form': form, + return render(request, "dojo/new_product_type.html", { + "name": page_name, + "form": form, }) -@user_is_authorized(Product_Type, Permissions.Product_Type_View, 'ptid') +@user_is_authorized(Product_Type, Permissions.Product_Type_View, "ptid") def view_product_type(request, ptid): page_name = _("View Product Type") pt = get_object_or_404(Product_Type, pk=ptid) @@ -121,158 +121,158 @@ def view_product_type(request, ptid): products = get_authorized_products(Permissions.Product_View).filter(prod_type=pt) products = get_page_items(request, products, 25) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/view_product_type.html', { - 'name': page_name, - 'pt': pt, - 'products': products, - 'groups': groups, - 'members': members}) + return render(request, "dojo/view_product_type.html", { + "name": page_name, + "pt": pt, + "products": products, + "groups": groups, + "members": members}) -@user_is_authorized(Product_Type, Permissions.Product_Type_Delete, 'ptid') +@user_is_authorized(Product_Type, Permissions.Product_Type_Delete, "ptid") def delete_product_type(request, ptid): product_type = get_object_or_404(Product_Type, pk=ptid) form = 
Delete_Product_TypeForm(instance=product_type) - if request.method == 'POST': - if 'id' in request.POST and str(product_type.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(product_type.id) == request.POST["id"]: form = Delete_Product_TypeForm(request.POST, instance=product_type) if form.is_valid(): if get_setting("ASYNC_OBJECT_DELETE"): async_del = async_delete() async_del.delete(product_type) - message = 'Product Type and relationships will be removed in the background.' + message = "Product Type and relationships will be removed in the background." else: - message = 'Product Type and relationships removed.' + message = "Product Type and relationships removed." product_type.delete() messages.add_message(request, messages.SUCCESS, message, - extra_tags='alert-success') - return HttpResponseRedirect(reverse('product_type')) + extra_tags="alert-success") + return HttpResponseRedirect(reverse("product_type")) - rels = [_('Previewing the relationships has been disabled.'), ''] - display_preview = get_setting('DELETE_PREVIEW') + rels = [_("Previewing the relationships has been disabled."), ""] + display_preview = get_setting("DELETE_PREVIEW") if display_preview: collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([product_type]) rels = collector.nested() add_breadcrumb(title=_("Delete Product Type"), top_level=False, request=request) - return render(request, 'dojo/delete_product_type.html', - {'product_type': product_type, - 'form': form, - 'rels': rels, + return render(request, "dojo/delete_product_type.html", + {"product_type": product_type, + "form": form, + "rels": rels, }) -@user_is_authorized(Product_Type, Permissions.Product_Type_Edit, 'ptid') +@user_is_authorized(Product_Type, Permissions.Product_Type_Edit, "ptid") def edit_product_type(request, ptid): page_name = "Edit Product Type" pt = get_object_or_404(Product_Type, pk=ptid) members = get_authorized_members_for_product_type(pt, Permissions.Product_Type_Manage_Members) pt_form = Product_TypeForm(instance=pt) - if request.method == "POST" and request.POST.get('edit_product_type'): + if request.method == "POST" and request.POST.get("edit_product_type"): pt_form = Product_TypeForm(request.POST, instance=pt) if pt_form.is_valid(): pt = pt_form.save() messages.add_message( request, messages.SUCCESS, - _('Product type updated successfully.'), + _("Product type updated successfully."), extra_tags="alert-success", ) return HttpResponseRedirect(reverse("product_type")) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/edit_product_type.html', { - 'name': page_name, - 'pt_form': pt_form, - 'pt': pt, - 'members': members}) + return render(request, "dojo/edit_product_type.html", { + "name": page_name, + "pt_form": pt_form, + "pt": pt, + "members": members}) -@user_is_authorized(Product_Type, Permissions.Product_Type_Manage_Members, 'ptid') +@user_is_authorized(Product_Type, Permissions.Product_Type_Manage_Members, "ptid") def add_product_type_member(request, ptid): pt = get_object_or_404(Product_Type, pk=ptid) - memberform = Add_Product_Type_MemberForm(initial={'product_type': pt.id}) - if request.method == 'POST': - memberform = Add_Product_Type_MemberForm(request.POST, initial={'product_type': pt.id}) + memberform = Add_Product_Type_MemberForm(initial={"product_type": pt.id}) + if request.method == "POST": + memberform = Add_Product_Type_MemberForm(request.POST, initial={"product_type": pt.id}) if memberform.is_valid(): - if 
memberform.cleaned_data['role'].is_owner and not user_has_permission(request.user, pt, Permissions.Product_Type_Member_Add_Owner): + if memberform.cleaned_data["role"].is_owner and not user_has_permission(request.user, pt, Permissions.Product_Type_Member_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to add users as owners.'), - extra_tags='alert-warning') + _("You are not permitted to add users as owners."), + extra_tags="alert-warning") else: - if 'users' in memberform.cleaned_data and len(memberform.cleaned_data['users']) > 0: - for user in memberform.cleaned_data['users']: + if "users" in memberform.cleaned_data and len(memberform.cleaned_data["users"]) > 0: + for user in memberform.cleaned_data["users"]: members = Product_Type_Member.objects.filter(product_type=pt, user=user) if members.count() == 0: product_type_member = Product_Type_Member() product_type_member.product_type = pt product_type_member.user = user - product_type_member.role = memberform.cleaned_data['role'] + product_type_member.role = memberform.cleaned_data["role"] product_type_member.save() messages.add_message(request, messages.SUCCESS, - _('Product type members added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product_type', args=(ptid, ))) + _("Product type members added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product_type", args=(ptid, ))) add_breadcrumb(title=_("Add Product Type Member"), top_level=False, request=request) - return render(request, 'dojo/new_product_type_member.html', { - 'pt': pt, - 'form': memberform, + return render(request, "dojo/new_product_type_member.html", { + "pt": pt, + "form": memberform, }) -@user_is_authorized(Product_Type_Member, Permissions.Product_Type_Manage_Members, 'memberid') +@user_is_authorized(Product_Type_Member, Permissions.Product_Type_Manage_Members, "memberid") def edit_product_type_member(request, memberid): page_name = _("Edit Product Type Member") member = get_object_or_404(Product_Type_Member, pk=memberid) memberform = Edit_Product_Type_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Edit_Product_Type_MemberForm(request.POST, instance=member) if memberform.is_valid(): if not member.role.is_owner: owners = Product_Type_Member.objects.filter(product_type=member.product_type, role__is_owner=True).exclude(id=member.id).count() if owners < 1: messages.add_message(request, messages.SUCCESS, - _('There must be at least one owner for Product Type %(product_type_name)s.') % {'product_type_name': member.product_type.name}, - extra_tags='alert-warning') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + _("There must be at least one owner for Product Type %(product_type_name)s.") % {"product_type_name": member.product_type.name}, + extra_tags="alert-warning") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: - return HttpResponseRedirect(reverse('view_product_type', args=(member.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) if member.role.is_owner and not user_has_permission(request.user, member.product_type, Permissions.Product_Type_Member_Add_Owner): messages.add_message(request, messages.WARNING, - 'You are not permitted to make users to owners.', - 
extra_tags='alert-warning') + "You are not permitted to make users to owners.", + extra_tags="alert-warning") else: memberform.save() messages.add_message(request, messages.SUCCESS, - _('Product type member updated successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + _("Product type member updated successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: - return HttpResponseRedirect(reverse('view_product_type', args=(member.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/edit_product_type_member.html', { - 'name': page_name, - 'memberid': memberid, - 'form': memberform, + return render(request, "dojo/edit_product_type_member.html", { + "name": page_name, + "memberid": memberid, + "form": memberform, }) -@user_is_authorized(Product_Type_Member, Permissions.Product_Type_Member_Delete, 'memberid') +@user_is_authorized(Product_Type_Member, Permissions.Product_Type_Member_Delete, "memberid") def delete_product_type_member(request, memberid): page_name = "Delete Product Type Member" member = get_object_or_404(Product_Type_Member, pk=memberid) memberform = Delete_Product_Type_MemberForm(instance=member) - if request.method == 'POST': + if request.method == "POST": memberform = Delete_Product_Type_MemberForm(request.POST, instance=member) member = memberform.instance if member.role.is_owner: @@ -280,126 +280,126 @@ def delete_product_type_member(request, memberid): if owners <= 1: messages.add_message(request, messages.SUCCESS, - _('There must be at least one owner.'), - extra_tags='alert-warning') - return HttpResponseRedirect(reverse('view_product_type', args=(member.product_type.id, ))) + _("There must be at least one owner."), + extra_tags="alert-warning") + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) user = member.user member.delete() messages.add_message(request, messages.SUCCESS, - _('Product type member deleted successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View User'): - return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) + _("Product type member deleted successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View User"): + return HttpResponseRedirect(reverse("view_user", args=(member.user.id, ))) else: if user == request.user: - return HttpResponseRedirect(reverse('product_type')) + return HttpResponseRedirect(reverse("product_type")) else: - return HttpResponseRedirect(reverse('view_product_type', args=(member.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, ))) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/delete_product_type_member.html', { - 'name': page_name, - 'memberid': memberid, - 'form': memberform, + return render(request, "dojo/delete_product_type_member.html", { + "name": page_name, + "memberid": memberid, + "form": memberform, }) -@user_is_authorized(Product_Type, Permissions.Product_Type_Group_Add, 'ptid') +@user_is_authorized(Product_Type, Permissions.Product_Type_Group_Add, "ptid") def add_product_type_group(request, ptid): page_name = "Add Product Type 
Group" pt = get_object_or_404(Product_Type, pk=ptid) - group_form = Add_Product_Type_GroupForm(initial={'product_type': pt.id}) + group_form = Add_Product_Type_GroupForm(initial={"product_type": pt.id}) - if request.method == 'POST': - group_form = Add_Product_Type_GroupForm(request.POST, initial={'product_type': pt.id}) + if request.method == "POST": + group_form = Add_Product_Type_GroupForm(request.POST, initial={"product_type": pt.id}) if group_form.is_valid(): - if group_form.cleaned_data['role'].is_owner and not user_has_permission(request.user, pt, Permissions.Product_Type_Group_Add_Owner): + if group_form.cleaned_data["role"].is_owner and not user_has_permission(request.user, pt, Permissions.Product_Type_Group_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to add groups as owners.'), - extra_tags='alert-warning') + _("You are not permitted to add groups as owners."), + extra_tags="alert-warning") else: - if 'groups' in group_form.cleaned_data and len(group_form.cleaned_data['groups']) > 0: - for group in group_form.cleaned_data['groups']: + if "groups" in group_form.cleaned_data and len(group_form.cleaned_data["groups"]) > 0: + for group in group_form.cleaned_data["groups"]: groups = Product_Type_Group.objects.filter(product_type=pt, group=group) if groups.count() == 0: product_type_group = Product_Type_Group() product_type_group.product_type = pt product_type_group.group = group - product_type_group.role = group_form.cleaned_data['role'] + product_type_group.role = group_form.cleaned_data["role"] product_type_group.save() messages.add_message(request, messages.SUCCESS, - _('Product type groups added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_product_type', args=(ptid,))) + _("Product type groups added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_product_type", args=(ptid,))) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/new_product_type_group.html', { - 'name': page_name, - 'pt': pt, - 'form': group_form, + return render(request, "dojo/new_product_type_group.html", { + "name": page_name, + "pt": pt, + "form": group_form, }) -@user_is_authorized(Product_Type_Group, Permissions.Product_Type_Group_Edit, 'groupid') +@user_is_authorized(Product_Type_Group, Permissions.Product_Type_Group_Edit, "groupid") def edit_product_type_group(request, groupid): page_name = "Edit Product Type Group" group = get_object_or_404(Product_Type_Group, pk=groupid) groupform = Edit_Product_Type_Group_Form(instance=group) - if request.method == 'POST': + if request.method == "POST": groupform = Edit_Product_Type_Group_Form(request.POST, instance=group) if groupform.is_valid(): if group.role.is_owner and not user_has_permission(request.user, group.product_type, Permissions.Product_Type_Group_Add_Owner): messages.add_message(request, messages.WARNING, - _('You are not permitted to make groups owners.'), - extra_tags='alert-warning') + _("You are not permitted to make groups owners."), + extra_tags="alert-warning") else: groupform.save() messages.add_message(request, messages.SUCCESS, - _('Product type group updated successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View Group'): - return HttpResponseRedirect(reverse('view_group', args=(group.group.id,))) + _("Product type group updated successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View Group"): + return 
HttpResponseRedirect(reverse("view_group", args=(group.group.id,))) else: - return HttpResponseRedirect(reverse('view_product_type', args=(group.product_type.id,))) + return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id,))) add_breadcrumb(title=page_name, top_level=False, request=request) - return render(request, 'dojo/edit_product_type_group.html', { - 'name': page_name, - 'groupid': groupid, - 'form': groupform, + return render(request, "dojo/edit_product_type_group.html", { + "name": page_name, + "groupid": groupid, + "form": groupform, }) -@user_is_authorized(Product_Type_Group, Permissions.Product_Type_Group_Delete, 'groupid') +@user_is_authorized(Product_Type_Group, Permissions.Product_Type_Group_Delete, "groupid") def delete_product_type_group(request, groupid): page_name = "Delete Product Type Group" group = get_object_or_404(Product_Type_Group, pk=groupid) groupform = Delete_Product_Type_GroupForm(instance=group) - if request.method == 'POST': + if request.method == "POST": groupform = Delete_Product_Type_GroupForm(request.POST, instance=group) group = groupform.instance group.delete() messages.add_message(request, messages.SUCCESS, - _('Product type group deleted successfully.'), - extra_tags='alert-success') - if is_title_in_breadcrumbs('View Group'): - return HttpResponseRedirect(reverse('view_group', args=(group.group.id, ))) + _("Product type group deleted successfully."), + extra_tags="alert-success") + if is_title_in_breadcrumbs("View Group"): + return HttpResponseRedirect(reverse("view_group", args=(group.group.id, ))) else: # TODO: If user was in the group that was deleted and no longer has access, redirect them to the product # types page - return HttpResponseRedirect(reverse('view_product_type', args=(group.product_type.id, ))) + return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id, ))) add_breadcrumb(page_name, top_level=False, request=request) - return render(request, 'dojo/delete_product_type_group.html', { - 'name': page_name, - 'groupid': groupid, - 'form': groupform, + return render(request, "dojo/delete_product_type_group.html", { + "name": page_name, + "groupid": groupid, + "form": groupform, }) diff --git a/dojo/regulations/urls.py b/dojo/regulations/urls.py index e977103192..324669f675 100644 --- a/dojo/regulations/urls.py +++ b/dojo/regulations/urls.py @@ -3,7 +3,7 @@ from . 
diff --git a/dojo/regulations/urls.py b/dojo/regulations/urls.py
index e977103192..324669f675 100644
--- a/dojo/regulations/urls.py
+++ b/dojo/regulations/urls.py
@@ -3,7 +3,7 @@
 from . import views

 urlpatterns = [
-    re_path(r'^regulations/add', views.new_regulation, name='new_regulation'),
-    re_path(r'^regulations/(?P<ttid>\d+)/edit$', views.edit_regulations,
-            name='edit_regulations'),
-    re_path(r'^regulations$', views.regulations, name='regulations')]
+    re_path(r"^regulations/add", views.new_regulation, name="new_regulation"),
+    re_path(r"^regulations/(?P<ttid>\d+)/edit$", views.edit_regulations,
+            name="edit_regulations"),
+    re_path(r"^regulations$", views.regulations, name="regulations")]
diff --git a/dojo/regulations/views.py b/dojo/regulations/views.py
index 16fb582e0d..f4d5004d07 100644
--- a/dojo/regulations/views.py
+++ b/dojo/regulations/views.py
@@ -16,60 +16,60 @@
 logger = logging.getLogger(__name__)


-@user_is_configuration_authorized('dojo.add_regulation')
+@user_is_configuration_authorized("dojo.add_regulation")
 def new_regulation(request):
-    if request.method == 'POST':
+    if request.method == "POST":
         tform = RegulationForm(request.POST, instance=Regulation())
         if tform.is_valid():
             tform.save()
             messages.add_message(request,
                                  messages.SUCCESS,
-                                 'Regulation Successfully Created.',
-                                 extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('regulations'))
+                                 "Regulation Successfully Created.",
+                                 extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("regulations"))
     else:
         tform = RegulationForm()
     add_breadcrumb(title="New regulation", top_level=False, request=request)
-    return render(request, 'dojo/new_regulation.html',
-                  {'form': tform})
+    return render(request, "dojo/new_regulation.html",
+                  {"form": tform})


-@user_is_configuration_authorized('dojo.change_regulation')
+@user_is_configuration_authorized("dojo.change_regulation")
 def edit_regulations(request, ttid):
     regulation = Regulation.objects.get(pk=ttid)
-    if request.method == 'POST' and request.POST.get('delete'):
-        user_has_configuration_permission_or_403(request.user, 'dojo.delete_regulation')
+    if request.method == "POST" and request.POST.get("delete"):
+        user_has_configuration_permission_or_403(request.user, "dojo.delete_regulation")
         Regulation.objects.filter(pk=ttid).delete()
         messages.add_message(request,
                              messages.SUCCESS,
-                             'Regulation Deleted.',
-                             extra_tags='alert-success')
-        return HttpResponseRedirect(reverse('regulations'))
-    elif request.method == 'POST':
+                             "Regulation Deleted.",
+                             extra_tags="alert-success")
+        return HttpResponseRedirect(reverse("regulations"))
+    elif request.method == "POST":
         tform = RegulationForm(request.POST, instance=regulation)
         if tform.is_valid():
             tform.save()
             messages.add_message(request,
                                  messages.SUCCESS,
-                                 'Regulation Successfully Updated.',
-                                 extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('regulations'))
+                                 "Regulation Successfully Updated.",
+                                 extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("regulations"))
     else:
         tform = RegulationForm(instance=regulation)
     add_breadcrumb(title="Edit Regulation", top_level=False, request=request)
     return render(request,
-                  'dojo/edit_regulation.html',
+                  "dojo/edit_regulation.html",
                   {
-                      'tform': tform,
+                      "tform": tform,
                   })


 @login_required
 def regulations(request):
-    confs = Regulation.objects.all().order_by('name')
+    confs = Regulation.objects.all().order_by("name")
     add_breadcrumb(title="Regulations", top_level=not len(request.GET), request=request)
     return render(request,
-                  'dojo/regulations.html',
-                  {'confs': confs,
+                  "dojo/regulations.html",
+                  {"confs": confs,
                    })
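Unlike the object-level checks used for products and product types, the regulations views rely on Django's model-level permission strings ("dojo.add_regulation", "dojo.change_regulation", "dojo.delete_regulation"). A guard in that spirit can be as small as the sketch below (illustrative; not the actual dojo implementation of `user_is_configuration_authorized`):

    from functools import wraps

    from django.core.exceptions import PermissionDenied


    def configuration_permission_required(permission):
        def decorator(view):
            @wraps(view)
            def wrapper(request, *args, **kwargs):
                # has_perm() expects the "app_label.codename" form used above.
                if not request.user.has_perm(permission):
                    raise PermissionDenied
                return view(request, *args, **kwargs)
            return wrapper
        return decorator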
diff --git a/dojo/remote_user.py b/dojo/remote_user.py
index ed48ce3190..44355d9f45 100644
--- a/dojo/remote_user.py
+++ b/dojo/remote_user.py
@@ -16,15 +16,15 @@ class RemoteUserAuthentication(OriginalRemoteUserAuthentication):
     def authenticate(self, request):
         # process only if request is comming from the trusted proxy node
-        if IPAddress(request.META['REMOTE_ADDR']) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY:
+        if IPAddress(request.META["REMOTE_ADDR"]) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY:
             self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER
             if self.header in request.META:
                 return super().authenticate(request)
             else:
                 return None
         else:
-            logger.debug('Requested came from untrusted proxy %s; This is list of trusted proxies: %s',
-                         IPAddress(request.META['REMOTE_ADDR']),
+            logger.debug("Request came from untrusted proxy %s; this is the list of trusted proxies: %s",
+                         IPAddress(request.META["REMOTE_ADDR"]),
                          settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
             return None
@@ -35,15 +35,15 @@ def process_request(self, request):
             return

         # process only if request is comming from the trusted proxy node
-        if IPAddress(request.META['REMOTE_ADDR']) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY:
+        if IPAddress(request.META["REMOTE_ADDR"]) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY:
             self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER
             if self.header in request.META:
                 return super().process_request(request)
             else:
                 return
         else:
-            logger.debug('Requested came from untrusted proxy %s; This is list of trusted proxies: %s',
-                         IPAddress(request.META['REMOTE_ADDR']),
+            logger.debug("Request came from untrusted proxy %s; this is the list of trusted proxies: %s",
+                         IPAddress(request.META["REMOTE_ADDR"]),
                          settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
             return
@@ -80,12 +80,12 @@ def configure_user(self, request, user, created=True):

         if settings.AUTH_REMOTEUSER_GROUPS_HEADER and \
            settings.AUTH_REMOTEUSER_GROUPS_HEADER in request.META:
-            assign_user_to_groups(user, request.META[settings.AUTH_REMOTEUSER_GROUPS_HEADER].split(','), Dojo_Group.REMOTE)
+            assign_user_to_groups(user, request.META[settings.AUTH_REMOTEUSER_GROUPS_HEADER].split(","), Dojo_Group.REMOTE)

         if settings.AUTH_REMOTEUSER_GROUPS_CLEANUP and \
            settings.AUTH_REMOTEUSER_GROUPS_HEADER and \
            settings.AUTH_REMOTEUSER_GROUPS_HEADER in request.META:
-            cleanup_old_groups_for_user(user, request.META[settings.AUTH_REMOTEUSER_GROUPS_HEADER].split(','))
+            cleanup_old_groups_for_user(user, request.META[settings.AUTH_REMOTEUSER_GROUPS_HEADER].split(","))

         if changed:
             user.save()
@@ -94,8 +94,8 @@


 class RemoteUserScheme(OpenApiAuthenticationExtension):
-    target_class = 'dojo.remote_user.RemoteUserAuthentication'
-    name = 'remoteUserAuth'
+    target_class = "dojo.remote_user.RemoteUserAuthentication"
+    name = "remoteUserAuth"
     match_subclasses = True
     priority = 1
@@ -104,12 +104,12 @@ def get_security_definition(self, auto_schema):
             return {}

         header_name = settings.AUTH_REMOTEUSER_USERNAME_HEADER
-        if header_name.startswith('HTTP_'):
+        if header_name.startswith("HTTP_"):
             header_name = header_name[5:]
-        header_name = header_name.replace('_', '-').capitalize()
+        header_name = header_name.replace("_", "-").capitalize()

         return {
-            'type': 'apiKey',
-            'in': 'header',
-            'name': header_name,
+            "type": "apiKey",
+            "in": "header",
+            "name": header_name,
         }
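The header-based authentication above is only safe because of the trusted-proxy gate: `REMOTE_ADDR` is checked against `AUTH_REMOTEUSER_TRUSTED_PROXY` before any username header is honoured, since otherwise any client could forge the header and impersonate a user. A sketch of that gate, assuming the netaddr package that the `IPAddress` usage suggests:

    from netaddr import IPAddress, IPSet

    TRUSTED_PROXIES = IPSet(["10.0.0.0/8"])  # example value; configured in settings


    def from_trusted_proxy(remote_addr: str) -> bool:
        # Only requests that arrived via the proxy that sets the username
        # header may be authenticated from that header.
        return IPAddress(remote_addr) in TRUSTED_PROXIES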
diff --git a/dojo/reports/urls.py b/dojo/reports/urls.py
index 1d02fbbf55..e6f8cd166c 100644
--- a/dojo/reports/urls.py
+++ b/dojo/reports/urls.py
@@ -4,36 +4,36 @@
 urlpatterns = [
     # reports
-    re_path(r'^product/type/(?P<ptid>\d+)/report$',
-            views.product_type_report, name='product_type_report'),
-    re_path(r'^product/(?P<pid>\d+)/report$',
-            views.product_report, name='product_report'),
-    re_path(r'^product/(?P<pid>\d+)/endpoint/report$',
-            views.product_endpoint_report, name='product_endpoint_report'),
-    re_path(r'^engagement/(?P<eid>\d+)/report$', views.engagement_report,
-            name='engagement_report'),
-    re_path(r'^test/(?P<tid>\d+)/report$', views.test_report,
-            name='test_report'),
-    re_path(r'^endpoint/(?P<eid>\d+)/report$', views.endpoint_report,
-            name='endpoint_report'),
-    re_path(r'^endpoint/host/(?P<eid>\d+)/report$', views.endpoint_host_report,
-            name='endpoint_host_report'),
-    re_path(r'^product/report$',
-            views.product_findings_report, name='product_findings_report'),
-    re_path(r'^reports/cover$',
-            views.report_cover_page, name='report_cover_page'),
-    re_path(r'^reports/builder$',
-            views.ReportBuilder.as_view(), name='report_builder'),
-    re_path(r'^reports/findings$',
-            views.report_findings, name='report_findings'),
-    re_path(r'^reports/endpoints$',
-            views.report_endpoints, name='report_endpoints'),
-    re_path(r'^reports/custom$',
-            views.CustomReport.as_view(), name='custom_report'),
-    re_path(r'^reports/quick$',
-            views.QuickReportView.as_view(), name='quick_report'),
-    re_path(r'^reports/csv_export$',
-            views.CSVExportView.as_view(), name='csv_export'),
-    re_path(r'^reports/excel_export$',
-            views.ExcelExportView.as_view(), name='excel_export'),
+    re_path(r"^product/type/(?P<ptid>\d+)/report$",
+            views.product_type_report, name="product_type_report"),
+    re_path(r"^product/(?P<pid>\d+)/report$",
+            views.product_report, name="product_report"),
+    re_path(r"^product/(?P<pid>\d+)/endpoint/report$",
+            views.product_endpoint_report, name="product_endpoint_report"),
+    re_path(r"^engagement/(?P<eid>\d+)/report$", views.engagement_report,
+            name="engagement_report"),
+    re_path(r"^test/(?P<tid>\d+)/report$", views.test_report,
+            name="test_report"),
+    re_path(r"^endpoint/(?P<eid>\d+)/report$", views.endpoint_report,
+            name="endpoint_report"),
+    re_path(r"^endpoint/host/(?P<eid>\d+)/report$", views.endpoint_host_report,
+            name="endpoint_host_report"),
+    re_path(r"^product/report$",
+            views.product_findings_report, name="product_findings_report"),
+    re_path(r"^reports/cover$",
+            views.report_cover_page, name="report_cover_page"),
+    re_path(r"^reports/builder$",
+            views.ReportBuilder.as_view(), name="report_builder"),
+    re_path(r"^reports/findings$",
+            views.report_findings, name="report_findings"),
+    re_path(r"^reports/endpoints$",
+            views.report_endpoints, name="report_endpoints"),
+    re_path(r"^reports/custom$",
+            views.CustomReport.as_view(), name="custom_report"),
+    re_path(r"^reports/quick$",
+            views.QuickReportView.as_view(), name="quick_report"),
+    re_path(r"^reports/csv_export$",
+            views.CSVExportView.as_view(), name="csv_export"),
+    re_path(r"^reports/excel_export$",
+            views.ExcelExportView.as_view(), name="excel_export"),
 ]
diff --git a/dojo/reports/views.py b/dojo/reports/views.py
index f67b2f40c5..4a134b0e58 100644
--- a/dojo/reports/views.py
+++ b/dojo/reports/views.py
@@ -56,20 +56,20 @@


 def down(request):
-    return render(request, 'disabled.html')
+    return render(request, "disabled.html")


 def report_url_resolver(request):
     try:
-        url_resolver = request.META['HTTP_X_FORWARDED_PROTO'] + "://" + request.META['HTTP_X_FORWARDED_FOR']
+        url_resolver = request.META["HTTP_X_FORWARDED_PROTO"] + "://" + request.META["HTTP_X_FORWARDED_FOR"]
     except:
-        hostname = request.META['HTTP_HOST']
+        hostname = request.META["HTTP_HOST"]
         port_index = hostname.find(":")
         if port_index != -1:
             url_resolver = request.scheme + "://" + hostname[:port_index]
         else:
             url_resolver = request.scheme + "://" + hostname
-    return url_resolver + ":" + request.META['SERVER_PORT']
+    return url_resolver + ":" + request.META["SERVER_PORT"]


 class ReportBuilder(View):
@@ -107,7 +107,7 @@ def get_in_use_widgets(self, request):
         return [ReportOptions(request=request)]

     def get_template(self):
-        return 'dojo/report_builder.html'
+        return "dojo/report_builder.html"

     def get_context(self, request: HttpRequest) -> dict:
         return {
@@ -132,19 +132,19 @@ def _set_state(self, request: HttpRequest):
         self.widgets = list(self.selected_widgets.values())

     def get_selected_widgets(self, request):
-        selected_widgets = report_widget_factory(json_data=request.POST['json'], request=request, host=self.host,
+        selected_widgets = report_widget_factory(json_data=request.POST["json"], request=request, host=self.host,
                                                  user=self.request.user, finding_notes=False, finding_images=False)

-        if options := selected_widgets.get('report-options', None):
+        if options := selected_widgets.get("report-options", None):
             self.report_format = options.report_type
-            self.finding_notes = (options.include_finding_notes == '1')
-            self.finding_images = (options.include_finding_images == '1')
+            self.finding_notes = (options.include_finding_notes == "1")
+            self.finding_images = (options.include_finding_images == "1")
         else:
-            self.report_format = 'AsciiDoc'
+            self.report_format = "AsciiDoc"
             self.finding_notes = True
             self.finding_images = True

-        return report_widget_factory(json_data=request.POST['json'], request=request, host=self.host,
+        return report_widget_factory(json_data=request.POST["json"], request=request, host=self.host,
                                      user=request.user, finding_notes=self.finding_notes,
                                      finding_images=self.finding_images)
@@ -152,10 +152,10 @@ def get_form(self, request):
         return CustomReportJsonForm(request.POST)

     def get_template(self):
-        if self.report_format == 'AsciiDoc':
-            return 'dojo/custom_asciidoc_report.html'
-        elif self.report_format == 'HTML':
-            return 'dojo/custom_html_report.html'
+        if self.report_format == "AsciiDoc":
+            return "dojo/custom_asciidoc_report.html"
+        elif self.report_format == "HTML":
+            return "dojo/custom_html_report.html"
         else:
             raise PermissionDenied
@@ -174,13 +174,13 @@ def report_findings(request):
     filter_class = ReportFindingFilterWithoutObjectLookups if filter_string_matching else ReportFindingFilter
     findings = filter_class(request.GET, queryset=findings)

-    title_words = get_words_for_field(Finding, 'title')
-    component_words = get_words_for_field(Finding, 'component_name')
+    title_words = get_words_for_field(Finding, "title")
+    component_words = get_words_for_field(Finding, "component_name")

-    paged_findings = get_page_items(request, findings.qs.distinct().order_by('numerical_severity'), 25)
+    paged_findings = get_page_items(request, findings.qs.distinct().order_by("numerical_severity"), 25)

     return render(request,
-                  'dojo/report_findings.html',
+                  "dojo/report_findings.html",
                   {"findings": paged_findings,
                    "filtered": findings,
                    "title_words": title_words,
@@ -202,7 +202,7 @@ def report_endpoints(request):
     paged_endpoints = get_page_items(request, endpoints.qs, 25)

     return render(request,
-                  'dojo/report_endpoints.html',
+                  "dojo/report_endpoints.html",
                   {"endpoints": paged_endpoints,
                    "filtered": endpoints,
                    "title": "endpoint-list",
@@ -210,24 +210,24 @@


 def report_cover_page(request):
-    report_title = request.GET.get('title', 'Report')
-    report_subtitle = request.GET.get('subtitle', '')
-    report_info = request.GET.get('info', '')
+    report_title = request.GET.get("title", "Report")
+    report_subtitle = request.GET.get("subtitle", "")
+    report_info = request.GET.get("info", "")

     return render(request,
-                  'dojo/report_cover_page.html',
-                  {'report_title': report_title,
-                   'report_subtitle': report_subtitle,
-                   'report_info': report_info})
+                  "dojo/report_cover_page.html",
+                  {"report_title": report_title,
+                   "report_subtitle": report_subtitle,
+                   "report_info": report_info})


-@user_is_authorized(Product_Type, Permissions.Product_Type_View, 'ptid')
+@user_is_authorized(Product_Type, Permissions.Product_Type_View, "ptid")
 def product_type_report(request, ptid):
     product_type = get_object_or_404(Product_Type, id=ptid)
     return generate_report(request, product_type)


-@user_is_authorized(Product, Permissions.Product_View, 'pid')
+@user_is_authorized(Product, Permissions.Product_View, "pid")
 def product_report(request, pid):
     product = get_object_or_404(Product, id=pid)
     return generate_report(request, product)
@@ -238,61 +238,61 @@ def product_findings_report(request):
     return generate_report(request, findings)


-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
 def engagement_report(request, eid):
     engagement = get_object_or_404(Engagement, id=eid)
     return generate_report(request, engagement)


-@user_is_authorized(Test, Permissions.Test_View, 'tid')
+@user_is_authorized(Test, Permissions.Test_View, "tid")
 def test_report(request, tid):
     test = get_object_or_404(Test, id=tid)
     return generate_report(request, test)


-@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid")
 def endpoint_report(request, eid):
     endpoint = get_object_or_404(Endpoint, id=eid)
     return generate_report(request, endpoint, False)


-@user_is_authorized(Endpoint, Permissions.Endpoint_View, 'eid')
+@user_is_authorized(Endpoint, Permissions.Endpoint_View, "eid")
 def endpoint_host_report(request, eid):
     endpoint = get_object_or_404(Endpoint, id=eid)
     return generate_report(request, endpoint, True)


-@user_is_authorized(Product, Permissions.Product_View, 'pid')
+@user_is_authorized(Product, Permissions.Product_View, "pid")
 def product_endpoint_report(request, pid):
-    product = get_object_or_404(Product.objects.all().prefetch_related('engagement_set__test_set__test_type', 'engagement_set__test_set__environment'), id=pid)
+    product = get_object_or_404(Product.objects.all().prefetch_related("engagement_set__test_set__test_type", "engagement_set__test_set__environment"), id=pid)
     endpoint_ids = Endpoint.objects.filter(product=product,
                                            finding__active=True,
                                            finding__verified=True,
                                            finding__false_p=False,
                                            finding__duplicate=False,
                                            finding__out_of_scope=False,
-                                           ).values_list('id', flat=True)
+                                           ).values_list("id", flat=True)

     endpoints = prefetch_related_endpoints_for_report(Endpoint.objects.filter(id__in=endpoint_ids))
     endpoints = EndpointReportFilter(request.GET, queryset=endpoints)

     paged_endpoints = get_page_items(request, endpoints.qs, 25)
-    report_format = request.GET.get('report_type', 'AsciiDoc')
-    include_finding_notes = int(request.GET.get('include_finding_notes', 0))
-    include_finding_images = int(request.GET.get('include_finding_images', 0))
-    include_executive_summary = int(request.GET.get('include_executive_summary', 0))
-    include_table_of_contents = int(request.GET.get('include_table_of_contents', 0))
-    include_disclaimer = int(request.GET.get('include_disclaimer', 0))
-    disclaimer = get_system_setting('disclaimer')
+    report_format = request.GET.get("report_type", "AsciiDoc")
+    include_finding_notes = int(request.GET.get("include_finding_notes", 0))
+    include_finding_images = int(request.GET.get("include_finding_images", 0))
+    include_executive_summary = int(request.GET.get("include_executive_summary", 0))
+    include_table_of_contents = int(request.GET.get("include_table_of_contents", 0))
+    include_disclaimer = int(request.GET.get("include_disclaimer", 0))
+    disclaimer = get_system_setting("disclaimer")
     if include_disclaimer and len(disclaimer) == 0:
-        disclaimer = 'Please configure in System Settings.'
+        disclaimer = "Please configure in System Settings."
     generate = "_generate" in request.GET
     add_breadcrumb(parent=product, title="Vulnerable Product Endpoints Report", top_level=False, request=request)
     report_form = ReportOptionsForm()

     template = "dojo/product_endpoint_pdf_report.html"

     try:
-        start_date = Finding.objects.filter(endpoints__in=endpoints.qs).order_by('date')[:1][0].date
+        start_date = Finding.objects.filter(endpoints__in=endpoints.qs).order_by("date")[:1][0].date
     except:
         start_date = timezone.now()
@@ -326,58 +326,58 @@ def product_endpoint_report(request, pid):
                                             mitigated__isnull=False)

     if generate:
         report_form = ReportOptionsForm(request.GET)
-        if report_format == 'AsciiDoc':
+        if report_format == "AsciiDoc":
             return render(request,
-                          'dojo/asciidoc_report.html',
-                          {'product_type': None,
-                           'product': product,
-                           'accepted_findings': accepted_findings,
-                           'open_findings': open_findings,
-                           'closed_findings': closed_findings,
-                           'verified_findings': verified_findings,
-                           'engagement': None,
-                           'test': None,
-                           'endpoints': endpoints.qs,
-                           'endpoint': None,
-                           'findings': None,
-                           'include_finding_notes': include_finding_notes,
-                           'include_finding_images': include_finding_images,
-                           'include_executive_summary': include_executive_summary,
-                           'include_table_of_contents': include_table_of_contents,
-                           'include_disclaimer': include_disclaimer,
-                           'disclaimer': disclaimer,
-                           'user': request.user,
-                           'title': 'Generate Report',
+                          "dojo/asciidoc_report.html",
+                          {"product_type": None,
+                           "product": product,
+                           "accepted_findings": accepted_findings,
+                           "open_findings": open_findings,
+                           "closed_findings": closed_findings,
+                           "verified_findings": verified_findings,
+                           "engagement": None,
+                           "test": None,
+                           "endpoints": endpoints.qs,
+                           "endpoint": None,
+                           "findings": None,
+                           "include_finding_notes": include_finding_notes,
+                           "include_finding_images": include_finding_images,
+                           "include_executive_summary": include_executive_summary,
+                           "include_table_of_contents": include_table_of_contents,
+                           "include_disclaimer": include_disclaimer,
+                           "disclaimer": disclaimer,
+                           "user": request.user,
+                           "title": "Generate Report",
                            })
-        elif report_format == 'HTML':
+        elif report_format == "HTML":
             return render(request,
                           template,
-                          {'product_type': None,
-                           'product': product,
-                           'engagement': None,
-                           'test': None,
-                           'endpoint': None,
-                           'endpoints': endpoints.qs,
-                           'findings': None,
-                           'include_finding_notes': include_finding_notes,
-                           'include_finding_images': include_finding_images,
-                           'include_executive_summary': include_executive_summary,
-                           'include_table_of_contents': include_table_of_contents,
-                           'include_disclaimer': include_disclaimer,
-                           'disclaimer': disclaimer,
-                           'user': request.user,
-                           'title': 'Generate Report',
+                          {"product_type": None,
+                           "product": product,
+                           "engagement": None,
+                           "test": None,
+                           "endpoint": None,
+                           "endpoints": endpoints.qs,
+                           "findings": None,
+                           "include_finding_notes": include_finding_notes,
+                           "include_finding_images": include_finding_images,
+                           "include_executive_summary": include_executive_summary,
+                           "include_table_of_contents": include_table_of_contents,
+                           "include_disclaimer": include_disclaimer,
+                           "disclaimer": disclaimer,
+                           "user": request.user,
+                           "title": "Generate Report",
                            })
         else:
             raise Http404
     product_tab = Product_Tab(product, "Product Endpoint Report", tab="endpoints")
     return render(request,
-                  'dojo/request_endpoint_report.html',
+                  "dojo/request_endpoint_report.html",
                   {"endpoints": paged_endpoints,
                    "filtered": endpoints,
                    "product_tab": product_tab,
-                   'report_form': report_form,
+                   "report_form": report_form,
                    "name": "Vulnerable Product Endpoints",
                    })
@@ -407,21 +407,21 @@ def generate_report(request, obj, host_view=False):
         pass
     else:
         if obj is None:
-            msg = 'No object is given to generate report for'
+            msg = "No object is given to generate report for"
             raise Exception(msg)
         else:
-            msg = f'Report cannot be generated for object of type {type(obj).__name__}'
+            msg = f"Report cannot be generated for object of type {type(obj).__name__}"
             raise Exception(msg)

-    report_format = request.GET.get('report_type', 'AsciiDoc')
-    include_finding_notes = int(request.GET.get('include_finding_notes', 0))
-    include_finding_images = int(request.GET.get('include_finding_images', 0))
-    include_executive_summary = int(request.GET.get('include_executive_summary', 0))
-    include_table_of_contents = int(request.GET.get('include_table_of_contents', 0))
-    include_disclaimer = int(request.GET.get('include_disclaimer', 0))
-    disclaimer = get_system_setting('disclaimer')
+    report_format = request.GET.get("report_type", "AsciiDoc")
+    include_finding_notes = int(request.GET.get("include_finding_notes", 0))
+    include_finding_images = int(request.GET.get("include_finding_images", 0))
+    include_executive_summary = int(request.GET.get("include_executive_summary", 0))
+    include_table_of_contents = int(request.GET.get("include_table_of_contents", 0))
+    include_disclaimer = int(request.GET.get("include_disclaimer", 0))
+    disclaimer = get_system_setting("disclaimer")
     if include_disclaimer and len(disclaimer) == 0:
-        disclaimer = 'Please configure in System Settings.'
+        disclaimer = "Please configure in System Settings."
generate = "_generate" in request.GET report_name = str(obj) filter_string_matching = get_system_setting("filter_string_matching", False) @@ -452,30 +452,30 @@ def generate_report(request, obj, host_view=False): # include current month months_between += 1 - endpoint_monthly_counts = get_period_counts_legacy(findings.qs.order_by('numerical_severity'), findings.qs.order_by('numerical_severity'), None, + endpoint_monthly_counts = get_period_counts_legacy(findings.qs.order_by("numerical_severity"), findings.qs.order_by("numerical_severity"), None, months_between, start_date, - relative_delta='months') - - context = {'product_type': product_type, - 'products': products, - 'engagements': engagements, - 'tests': tests, - 'report_name': report_name, - 'endpoint_opened_per_month': endpoint_monthly_counts[ - 'opened_per_period'] if endpoint_monthly_counts is not None else [], - 'endpoint_active_findings': findings.qs.distinct().order_by('numerical_severity'), - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'host': report_url_resolver(request), - 'user_id': request.user.id} + relative_delta="months") + + context = {"product_type": product_type, + "products": products, + "engagements": engagements, + "tests": tests, + "report_name": report_name, + "endpoint_opened_per_month": endpoint_monthly_counts[ + "opened_per_period"] if endpoint_monthly_counts is not None else [], + "endpoint_active_findings": findings.qs.distinct().order_by("numerical_severity"), + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "host": report_url_resolver(request), + "user_id": request.user.id} elif type(obj).__name__ == "Product": product = obj @@ -488,53 +488,53 @@ def generate_report(request, obj, host_view=False): engagements = Engagement.objects.filter(test__finding__id__in=ids).distinct() tests = Test.objects.filter(finding__id__in=ids).distinct() endpoints = Endpoint.objects.filter(product=product).distinct() - context = {'product': product, - 'engagements': engagements, - 'tests': tests, - 'report_name': report_name, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'endpoints': endpoints, - 'host': report_url_resolver(request), - 'user_id': request.user.id} + context = {"product": product, + "engagements": engagements, + "tests": tests, + "report_name": report_name, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": 
include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "endpoints": endpoints, + "host": report_url_resolver(request), + "user_id": request.user.id} elif type(obj).__name__ == "Engagement": - logger.debug('generating report for Engagement') + logger.debug("generating report for Engagement") engagement = obj findings = report_finding_filter_class(request.GET, engagement=engagement, queryset=prefetch_related_findings_for_report(Finding.objects.filter(test__engagement=engagement))) report_name = "Engagement Report: " + str(engagement) - template = 'dojo/engagement_pdf_report.html' + template = "dojo/engagement_pdf_report.html" report_title = "Engagement Report" ids = set(finding.id for finding in findings.qs) # noqa: C401 tests = Test.objects.filter(finding__id__in=ids).distinct() endpoints = Endpoint.objects.filter(product=engagement.product).distinct() - context = {'engagement': engagement, - 'tests': tests, - 'report_name': report_name, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'host': report_url_resolver(request), - 'user_id': request.user.id, - 'endpoints': endpoints} + context = {"engagement": engagement, + "tests": tests, + "report_name": report_name, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "host": report_url_resolver(request), + "user_id": request.user.id, + "endpoints": endpoints} elif type(obj).__name__ == "Test": test = obj @@ -544,20 +544,20 @@ def generate_report(request, obj, host_view=False): report_name = "Test Report: " + str(test) report_title = "Test Report" - context = {'test': test, - 'report_name': report_name, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'host': report_url_resolver(request), - 'user_id': request.user.id} + context = {"test": test, + "report_name": report_name, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, 
+ "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "host": report_url_resolver(request), + "user_id": request.user.id} elif type(obj).__name__ == "Endpoint": endpoint = obj @@ -570,44 +570,44 @@ def generate_report(request, obj, host_view=False): report_name = "Endpoint Report: " + str(endpoint) endpoints = Endpoint.objects.filter(pk=endpoint.id).distinct() report_title = "Endpoint Report" - template = 'dojo/endpoint_pdf_report.html' + template = "dojo/endpoint_pdf_report.html" findings = report_finding_filter_class(request.GET, queryset=prefetch_related_findings_for_report(Finding.objects.filter(endpoints__in=endpoints))) - context = {'endpoint': endpoint, - 'endpoints': endpoints, - 'report_name': report_name, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': get_system_setting('team_name'), - 'title': report_title, - 'host': report_url_resolver(request), - 'user_id': request.user.id} + context = {"endpoint": endpoint, + "endpoints": endpoints, + "report_name": report_name, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": get_system_setting("team_name"), + "title": report_title, + "host": report_url_resolver(request), + "user_id": request.user.id} elif type(obj).__name__ in ["QuerySet", "CastTaggedQuerySet", "TagulousCastTaggedQuerySet"]: findings = report_finding_filter_class(request.GET, queryset=prefetch_related_findings_for_report(obj).distinct()) - report_name = 'Finding' - template = 'dojo/finding_pdf_report.html' + report_name = "Finding" + template = "dojo/finding_pdf_report.html" report_title = "Finding Report" - context = {'findings': findings.qs.distinct().order_by('numerical_severity'), - 'report_name': report_name, - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'host': report_url_resolver(request), - 'user_id': request.user.id} + context = {"findings": findings.qs.distinct().order_by("numerical_severity"), + "report_name": report_name, + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "host": report_url_resolver(request), + "user_id": request.user.id} else: raise Http404 @@ -615,58 +615,58 @@ def generate_report(request, obj, host_view=False): if generate: report_form = ReportOptionsForm(request.GET) - if report_format == 'AsciiDoc': + if report_format 
== "AsciiDoc": return render(request, - 'dojo/asciidoc_report.html', - {'product_type': product_type, - 'product': product, - 'engagement': engagement, - 'test': test, - 'endpoint': endpoint, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'user_id': request.user.id, - 'host': report_url_resolver(request), - 'host_view': host_view, - 'context': context, + "dojo/asciidoc_report.html", + {"product_type": product_type, + "product": product, + "engagement": engagement, + "test": test, + "endpoint": endpoint, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "user_id": request.user.id, + "host": report_url_resolver(request), + "host_view": host_view, + "context": context, }) - elif report_format == 'HTML': + elif report_format == "HTML": return render(request, template, - {'product_type': product_type, - 'product': product, - 'engagement': engagement, - 'report_name': report_name, - 'test': test, - 'endpoint': endpoint, - 'endpoints': endpoints, - 'findings': findings.qs.distinct().order_by('numerical_severity'), - 'include_finding_notes': include_finding_notes, - 'include_finding_images': include_finding_images, - 'include_executive_summary': include_executive_summary, - 'include_table_of_contents': include_table_of_contents, - 'include_disclaimer': include_disclaimer, - 'disclaimer': disclaimer, - 'user': user, - 'team_name': settings.TEAM_NAME, - 'title': report_title, - 'user_id': request.user.id, - 'host': "", - 'host_view': host_view, - 'context': context, + {"product_type": product_type, + "product": product, + "engagement": engagement, + "report_name": report_name, + "test": test, + "endpoint": endpoint, + "endpoints": endpoints, + "findings": findings.qs.distinct().order_by("numerical_severity"), + "include_finding_notes": include_finding_notes, + "include_finding_images": include_finding_images, + "include_executive_summary": include_executive_summary, + "include_table_of_contents": include_table_of_contents, + "include_disclaimer": include_disclaimer, + "disclaimer": disclaimer, + "user": user, + "team_name": settings.TEAM_NAME, + "title": report_title, + "user_id": request.user.id, + "host": "", + "host_view": host_view, + "context": context, }) else: raise Http404 - paged_findings = get_page_items(request, findings.qs.distinct().order_by('numerical_severity'), 25) + paged_findings = get_page_items(request, findings.qs.distinct().order_by("numerical_severity"), 25) product_tab = None if engagement: @@ -683,41 +683,41 @@ def generate_report(request, obj, host_view=False): else: product_tab = Product_Tab(endpoint.product, title="Endpoint Report", tab="endpoints") - return render(request, 'dojo/request_report.html', - {'product_type': product_type, - 'product': product, - 'product_tab': product_tab, - 'engagement': 
engagement, - 'test': test, - 'endpoint': endpoint, - 'findings': findings, - 'paged_findings': paged_findings, - 'report_form': report_form, - 'host_view': host_view, - 'context': context, + return render(request, "dojo/request_report.html", + {"product_type": product_type, + "product": product, + "product_tab": product_tab, + "engagement": engagement, + "test": test, + "endpoint": endpoint, + "findings": findings, + "paged_findings": paged_findings, + "report_form": report_form, + "host_view": host_view, + "context": context, }) def prefetch_related_findings_for_report(findings): - return findings.prefetch_related('test', - 'test__engagement__product', - 'test__engagement__product__prod_type', - 'risk_acceptance_set', - 'risk_acceptance_set__accepted_findings', - 'burprawrequestresponse_set', - 'endpoints', - 'tags', - 'notes', - 'files', - 'reporter', - 'mitigated_by', + return findings.prefetch_related("test", + "test__engagement__product", + "test__engagement__product__prod_type", + "risk_acceptance_set", + "risk_acceptance_set__accepted_findings", + "burprawrequestresponse_set", + "endpoints", + "tags", + "notes", + "files", + "reporter", + "mitigated_by", ) def prefetch_related_endpoints_for_report(endpoints): return endpoints.prefetch_related( - 'product', - 'tags', + "product", + "tags", ) @@ -730,23 +730,23 @@ def get_list_index(list, index): def get_findings(request): - url = request.META.get('QUERY_STRING') + url = request.META.get("QUERY_STRING") if not url: - msg = 'Please use the report button when viewing findings' + msg = "Please use the report button when viewing findings" raise Http404(msg) else: - if url.startswith('url='): + if url.startswith("url="): url = url[4:] - views = ['all', 'open', 'inactive', 'verified', - 'closed', 'accepted', 'out_of_scope', - 'false_positive', 'inactive'] + views = ["all", "open", "inactive", "verified", + "closed", "accepted", "out_of_scope", + "false_positive", "inactive"] # request.path = url obj_name = obj_id = view = query = None - path_items = list(filter(None, re.split(r'/|\?', url))) + path_items = list(filter(None, re.split(r"/|\?", url))) try: - finding_index = path_items.index('finding') + finding_index = path_items.index("finding") except ValueError: finding_index = -1 # There is a engagement or product here @@ -775,32 +775,32 @@ def get_findings(request): filter_name = None if view: - if view == 'open': - filter_name = 'Open' - elif view == 'inactive': - filter_name = 'Inactive' - elif view == 'verified': - filter_name = 'Verified' - elif view == 'closed': - filter_name = 'Closed' - elif view == 'accepted': - filter_name = 'Accepted' - elif view == 'out_of_scope': - filter_name = 'Out of Scope' - elif view == 'false_positive': - filter_name = 'False Positive' + if view == "open": + filter_name = "Open" + elif view == "inactive": + filter_name = "Inactive" + elif view == "verified": + filter_name = "Verified" + elif view == "closed": + filter_name = "Closed" + elif view == "accepted": + filter_name = "Accepted" + elif view == "out_of_scope": + filter_name = "Out of Scope" + elif view == "false_positive": + filter_name = "False Positive" obj = pid = eid = tid = None if obj_id: - if 'product' in obj_name: + if "product" in obj_name: pid = obj_id obj = get_object_or_404(Product, id=pid) user_has_permission_or_403(request.user, obj, Permissions.Product_View) - elif 'engagement' in obj_name: + elif "engagement" in obj_name: eid = obj_id obj = get_object_or_404(Engagement, id=eid) user_has_permission_or_403(request.user, obj, 
Permissions.Engagement_View) - elif 'test' in obj_name: + elif "test" in obj_name: tid = obj_id obj = get_object_or_404(Test, id=tid) user_has_permission_or_403(request.user, obj, Permissions.Test_View) @@ -821,7 +821,7 @@ def add_findings_data(self): return self.findings def get_template(self): - return 'dojo/finding_pdf_report.html' + return "dojo/finding_pdf_report.html" def get(self, request): findings, obj = get_findings(request) @@ -841,29 +841,29 @@ def generate_quick_report(self, request, findings, obj=None): test = obj return render(request, self.get_template(), { - 'report_name': 'Finding Report', - 'product': product, - 'engagement': engagement, - 'test': test, - 'findings': findings, - 'user': request.user, - 'team_name': settings.TEAM_NAME, - 'title': 'Finding Report', - 'user_id': request.user.id, + "report_name": "Finding Report", + "product": product, + "engagement": engagement, + "test": test, + "findings": findings, + "user": request.user, + "team_name": settings.TEAM_NAME, + "title": "Finding Report", + "user_id": request.user.id, }) def get_excludes(): - return ['SEVERITIES', 'age', 'github_issue', 'jira_issue', 'objects', 'risk_acceptance', - 'test__engagement__product__authorized_group', 'test__engagement__product__member', - 'test__engagement__product__prod_type__authorized_group', 'test__engagement__product__prod_type__member', - 'unsaved_endpoints', 'unsaved_vulnerability_ids', 'unsaved_files', 'unsaved_request', 'unsaved_response', - 'unsaved_tags', 'vulnerability_ids', 'cve'] + return ["SEVERITIES", "age", "github_issue", "jira_issue", "objects", "risk_acceptance", + "test__engagement__product__authorized_group", "test__engagement__product__member", + "test__engagement__product__prod_type__authorized_group", "test__engagement__product__prod_type__member", + "unsaved_endpoints", "unsaved_vulnerability_ids", "unsaved_files", "unsaved_request", "unsaved_response", + "unsaved_tags", "vulnerability_ids", "cve"] def get_foreign_keys(): - return ['defect_review_requested_by', 'duplicate_finding', 'finding_group', 'last_reviewed_by', - 'mitigated_by', 'reporter', 'review_requested_by', 'sonarqube_issue', 'test'] + return ["defect_review_requested_by", "duplicate_finding", "finding_group", "last_reviewed_by", + "mitigated_by", "reporter", "review_requested_by", "sonarqube_issue", "test"] def get_attributes(): @@ -884,8 +884,8 @@ def get(self, request): findings, _obj = get_findings(request) self.findings = findings findings = self.add_findings_data() - response = HttpResponse(content_type='text/csv') - response['Content-Disposition'] = 'attachment; filename=findings.csv' + response = HttpResponse(content_type="text/csv") + response["Content-Disposition"] = "attachment; filename=findings.csv" writer = csv.writer(response) allowed_attributes = get_attributes() excludes_list = get_excludes() @@ -899,23 +899,23 @@ def get(self, request): self.fields = fields for key in dir(finding): try: - if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith('_'): + if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith("_"): if callable(getattr(finding, key)) and key not in allowed_attributes: continue fields.append(key) except Exception as exc: - logger.error('Error in attribute: ' + str(exc)) + logger.error("Error in attribute: " + str(exc)) fields.append(key) continue - fields.append('test') - fields.append('found_by') - fields.append('engagement_id') - 
fields.append('engagement') - fields.append('product_id') - fields.append('product') - fields.append('endpoints') - fields.append('vulnerability_ids') - fields.append('tags') + fields.append("test") + fields.append("found_by") + fields.append("engagement_id") + fields.append("engagement") + fields.append("product_id") + fields.append("product") + fields.append("endpoints") + fields.append("vulnerability_ids") + fields.append("tags") self.fields = fields self.add_extra_headers() @@ -926,7 +926,7 @@ def get(self, request): fields = [] for key in dir(finding): try: - if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith('_'): + if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith("_"): if not callable(getattr(finding, key)): value = finding.__dict__.get(key) if (key in allowed_foreign_keys or key in allowed_attributes) and getattr(finding, key): @@ -937,10 +937,10 @@ def get(self, request): else: value = str(getattr(finding, key)) if value and isinstance(value, str): - value = value.replace('\n', ' NEWLINE ').replace('\r', '') + value = value.replace("\n", " NEWLINE ").replace("\r", "") fields.append(value) except Exception as exc: - logger.error('Error in attribute: ' + str(exc)) + logger.error("Error in attribute: " + str(exc)) fields.append("Value not supported") continue fields.append(finding.test.title) @@ -950,40 +950,40 @@ def get(self, request): fields.append(finding.test.engagement.product.id) fields.append(finding.test.engagement.product.name) - endpoint_value = '' + endpoint_value = "" num_endpoints = 0 for endpoint in finding.endpoints.all(): num_endpoints += 1 - endpoint_value += f'{str(endpoint)}; ' - if endpoint_value.endswith('; '): + endpoint_value += f"{str(endpoint)}; " + if endpoint_value.endswith("; "): endpoint_value = endpoint_value[:-2] if len(endpoint_value) > EXCEL_CHAR_LIMIT: - endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + '...' + endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + "..." fields.append(endpoint_value) - vulnerability_ids_value = '' + vulnerability_ids_value = "" num_vulnerability_ids = 0 for vulnerability_id in finding.vulnerability_ids: num_vulnerability_ids += 1 if num_vulnerability_ids > 5: - vulnerability_ids_value += '...' + vulnerability_ids_value += "..." break - vulnerability_ids_value += f'{str(vulnerability_id)}; ' + vulnerability_ids_value += f"{str(vulnerability_id)}; " if finding.cve and vulnerability_ids_value.find(finding.cve) < 0: vulnerability_ids_value += finding.cve - if vulnerability_ids_value.endswith('; '): + if vulnerability_ids_value.endswith("; "): vulnerability_ids_value = vulnerability_ids_value[:-2] fields.append(vulnerability_ids_value) # Tags - tags_value = '' + tags_value = "" num_tags = 0 for tag in finding.tags.all(): num_tags += 1 if num_tags > 5: - tags_value += '...' + tags_value += "..." 
break - tags_value += f'{str(tag)}; ' - if tags_value.endswith('; '): + tags_value += f"{str(tag)}; " + if tags_value.endswith("; "): tags_value = tags_value[:-2] fields.append(tags_value) @@ -1014,7 +1014,7 @@ def get(self, request): workbook = Workbook() workbook.iso_dates = True worksheet = workbook.active - worksheet.title = 'Findings' + worksheet.title = "Findings" self.worksheet = worksheet font_bold = Font(bold=True) self.font_bold = font_bold @@ -1028,38 +1028,38 @@ def get(self, request): col_num = 1 for key in dir(finding): try: - if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith('_'): + if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith("_"): if callable(getattr(finding, key)) and key not in allowed_attributes: continue cell = worksheet.cell(row=row_num, column=col_num, value=key) cell.font = font_bold col_num += 1 except Exception as exc: - logger.error('Error in attribute: ' + str(exc)) + logger.error("Error in attribute: " + str(exc)) cell = worksheet.cell(row=row_num, column=col_num, value=key) continue - cell = worksheet.cell(row=row_num, column=col_num, value='found_by') + cell = worksheet.cell(row=row_num, column=col_num, value="found_by") cell.font = font_bold col_num += 1 - worksheet.cell(row=row_num, column=col_num, value='engagement_id') + worksheet.cell(row=row_num, column=col_num, value="engagement_id") cell = cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='engagement') + cell = worksheet.cell(row=row_num, column=col_num, value="engagement") cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='product_id') + cell = worksheet.cell(row=row_num, column=col_num, value="product_id") cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='product') + cell = worksheet.cell(row=row_num, column=col_num, value="product") cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='endpoints') + cell = worksheet.cell(row=row_num, column=col_num, value="endpoints") cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='vulnerability_ids') + cell = worksheet.cell(row=row_num, column=col_num, value="vulnerability_ids") cell.font = font_bold col_num += 1 - cell = worksheet.cell(row=row_num, column=col_num, value='tags') + cell = worksheet.cell(row=row_num, column=col_num, value="tags") cell.font = font_bold col_num += 1 self.row_num = row_num @@ -1071,7 +1071,7 @@ def get(self, request): col_num = 1 for key in dir(finding): try: - if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith('_'): + if key not in excludes_list and (not callable(getattr(finding, key)) or key in allowed_attributes) and not key.startswith("_"): if not callable(getattr(finding, key)): value = finding.__dict__.get(key) if (key in allowed_foreign_keys or key in allowed_attributes) and getattr(finding, key): @@ -1086,7 +1086,7 @@ def get(self, request): worksheet.cell(row=row_num, column=col_num, value=value) col_num += 1 except Exception as exc: - logger.error('Error in attribute: ' + str(exc)) + logger.error("Error in attribute: " + str(exc)) worksheet.cell(row=row_num, column=col_num, value="Value not supported") continue worksheet.cell(row=row_num, column=col_num, value=finding.test.test_type.name) @@ 
-1100,37 +1100,37 @@ def get(self, request): worksheet.cell(row=row_num, column=col_num, value=finding.test.engagement.product.name) col_num += 1 - endpoint_value = '' + endpoint_value = "" num_endpoints = 0 for endpoint in finding.endpoints.all(): num_endpoints += 1 - endpoint_value += f'{str(endpoint)}; \n' - if endpoint_value.endswith('; \n'): + endpoint_value += f"{str(endpoint)}; \n" + if endpoint_value.endswith("; \n"): endpoint_value = endpoint_value[:-3] if len(endpoint_value) > EXCEL_CHAR_LIMIT: - endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + '...' + endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + "..." worksheet.cell(row=row_num, column=col_num, value=endpoint_value) col_num += 1 - vulnerability_ids_value = '' + vulnerability_ids_value = "" num_vulnerability_ids = 0 for vulnerability_id in finding.vulnerability_ids: num_vulnerability_ids += 1 if num_vulnerability_ids > 5: - vulnerability_ids_value += '...' + vulnerability_ids_value += "..." break - vulnerability_ids_value += f'{str(vulnerability_id)}; \n' + vulnerability_ids_value += f"{str(vulnerability_id)}; \n" if finding.cve and vulnerability_ids_value.find(finding.cve) < 0: vulnerability_ids_value += finding.cve - if vulnerability_ids_value.endswith('; \n'): + if vulnerability_ids_value.endswith("; \n"): vulnerability_ids_value = vulnerability_ids_value[:-3] worksheet.cell(row=row_num, column=col_num, value=vulnerability_ids_value) col_num += 1 # tags - tags_value = '' + tags_value = "" for tag in finding.tags.all(): - tags_value += f'{str(tag)}; \n' - if tags_value.endswith('; \n'): + tags_value += f"{str(tag)}; \n" + if tags_value.endswith("; \n"): tags_value = tags_value[:-3] worksheet.cell(row=row_num, column=col_num, value=tags_value) col_num += 1 @@ -1147,7 +1147,7 @@ def get(self, request): response = HttpResponse( content=stream, - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ) - response['Content-Disposition'] = 'attachment; filename=findings.xlsx' + response["Content-Disposition"] = "attachment; filename=findings.xlsx" return response diff --git a/dojo/reports/widgets.py b/dojo/reports/widgets.py index b6593a6a0d..58a05e4510 100644 --- a/dojo/reports/widgets.py +++ b/dojo/reports/widgets.py @@ -31,7 +31,7 @@ class CustomReportJsonForm(forms.Form): json = forms.CharField() def clean_json(self): - jdata = self.cleaned_data['json'] + jdata = self.cleaned_data["json"] try: json.loads(jdata) except: @@ -60,14 +60,14 @@ class Meta: class Div(form_widget): def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box - default_attrs = {'style': 'width:100%;min-height:400px'} + default_attrs = {"style": "width:100%;min-height:400px"} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) def render(self, name, value, attrs=None, renderer=None): if value is None: - value = '' + value = "" final_attrs = self.build_attrs(attrs) return format_html( '
' @@ -107,7 +107,7 @@ def render(self, name, value, attrs=None, renderer=None): class WYSIWYGContentForm(forms.Form): heading = forms.CharField(max_length=200, required=False, initial="WYSIWYG Content") - content = forms.CharField(required=False, widget=Div(attrs={'class': 'editor'})) + content = forms.CharField(required=False, widget=Div(attrs={"class": "editor"})) hidden_content = forms.CharField(widget=forms.HiddenInput(), required=True) class Meta: @@ -117,7 +117,7 @@ class Meta: # base Widget class others will inherit from class Widget: def __init__(self, *args, **kwargs): - self.title = 'Base Widget' + self.title = "Base Widget" self.form = None self.multiple = "false" @@ -137,7 +137,7 @@ def get_option_form(self): class PageBreak(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.title = 'Page Break' + self.title = "Page Break" self.form = None self.multiple = "true" @@ -145,7 +145,7 @@ def get_html(self): return mark_safe('
<div style="page-break-after: always"/>')

     def get_asciidoc(self):
-        return mark_safe('
<<<
') + return mark_safe("
<<<
") def get_option_form(self): return mark_safe( @@ -158,15 +158,15 @@ def get_option_form(self): class ReportOptions(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.title = 'Report Options' + self.title = "Report Options" self.form = CustomReportOptionsForm() self.extra_help = "Choose additional report options. These will apply to the overall report." def get_asciidoc(self): - return mark_safe('') + return mark_safe("") def get_html(self): - return mark_safe('') + return mark_safe("") def get_option_form(self): html = render_to_string("dojo/report_widget.html", {"form": self.form, @@ -179,7 +179,7 @@ def get_option_form(self): class CoverPage(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.title = 'Cover Page' + self.title = "Cover Page" self.form = CoverPageForm() self.help_text = "The cover page includes a page break after its content." @@ -197,14 +197,14 @@ def get_option_form(self): html = render_to_string("dojo/report_widget.html", {"form": self.form, "multiple": self.multiple, "title": self.title, - 'extra_help': self.help_text}) + "extra_help": self.help_text}) return mark_safe(html) class TableOfContents(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.title = 'Table Of Contents' + self.title = "Table Of Contents" self.form = TableOfContentsForm() self.help_text = "The table of contents includes a page break after its content." @@ -220,16 +220,16 @@ def get_option_form(self): html = render_to_string("dojo/report_widget.html", {"form": self.form, "multiple": self.multiple, "title": self.title, - 'extra_help': self.help_text}) + "extra_help": self.help_text}) return mark_safe(html) class WYSIWYGContent(Widget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.title = 'WYSIWYG Content' + self.title = "WYSIWYG Content" self.form = WYSIWYGContentForm() - self.multiple = 'true' + self.multiple = "true" def get_html(self): html = render_to_string("dojo/custom_html_report_wysiwyg_content.html", {"title": self.title, @@ -250,42 +250,42 @@ def get_option_form(self): class FindingList(Widget): def __init__(self, *args, **kwargs): - if 'request' in kwargs: - self.request = kwargs.get('request') - if 'user_id' in kwargs: - self.user_id = kwargs.get('user_id') + if "request" in kwargs: + self.request = kwargs.get("request") + if "user_id" in kwargs: + self.user_id = kwargs.get("user_id") - if 'host' in kwargs: - self.host = kwargs.get('host') + if "host" in kwargs: + self.host = kwargs.get("host") - if 'findings' in kwargs: - self.findings = kwargs.get('findings') + if "findings" in kwargs: + self.findings = kwargs.get("findings") else: msg = "Need to instantiate with finding queryset." 
raise Exception(msg) - if 'finding_notes' in kwargs: - self.finding_notes = kwargs.get('finding_notes') + if "finding_notes" in kwargs: + self.finding_notes = kwargs.get("finding_notes") else: self.finding_notes = False - if 'finding_images' in kwargs: - self.finding_images = kwargs.get('finding_images') + if "finding_images" in kwargs: + self.finding_images = kwargs.get("finding_images") else: self.finding_images = False super().__init__(*args, **kwargs) - self.title = 'Finding List' - if hasattr(self.findings, 'form'): + self.title = "Finding List" + if hasattr(self.findings, "form"): self.form = self.findings.form else: self.form = None - self.multiple = 'true' + self.multiple = "true" self.extra_help = "You can use this form to filter findings and select only the ones to be included in the " \ "report." - self.title_words = get_words_for_field(Finding, 'title') - self.component_words = get_words_for_field(Finding, 'component_name') + self.title_words = get_words_for_field(Finding, "title") + self.component_words = get_words_for_field(Finding, "component_name") if self.request is not None: self.paged_findings = get_page_items(self.request, self.findings.qs, 25) @@ -312,7 +312,7 @@ def get_html(self): return mark_safe(html) def get_option_form(self): - html = render_to_string('dojo/report_findings.html', + html = render_to_string("dojo/report_findings.html", {"findings": self.paged_findings, "filtered": self.findings, "title_words": self.title_words, @@ -326,40 +326,40 @@ def get_option_form(self): class EndpointList(Widget): def __init__(self, *args, **kwargs): - if 'request' in kwargs: - self.request = kwargs.get('request') - if 'user_id' in kwargs: - self.user_id = kwargs.get('user_id') + if "request" in kwargs: + self.request = kwargs.get("request") + if "user_id" in kwargs: + self.user_id = kwargs.get("user_id") - if 'host' in kwargs: - self.host = kwargs.get('host') + if "host" in kwargs: + self.host = kwargs.get("host") - if 'endpoints' in kwargs: - self.endpoints = kwargs.get('endpoints') + if "endpoints" in kwargs: + self.endpoints = kwargs.get("endpoints") else: msg = "Need to instantiate with endpoint queryset." raise Exception(msg) - if 'finding_notes' in kwargs: - self.finding_notes = kwargs.get('finding_notes') + if "finding_notes" in kwargs: + self.finding_notes = kwargs.get("finding_notes") else: self.finding_notes = False - if 'finding_images' in kwargs: - self.finding_images = kwargs.get('finding_images') + if "finding_images" in kwargs: + self.finding_images = kwargs.get("finding_images") else: self.finding_images = False super().__init__(*args, **kwargs) - self.title = 'Endpoint List' + self.title = "Endpoint List" self.form = self.endpoints.form - self.multiple = 'false' + self.multiple = "false" if self.request is not None: self.paged_endpoints = get_page_items(self.request, self.endpoints.qs, 25) else: self.paged_endpoints = self.endpoints - self.multiple = 'true' + self.multiple = "true" self.extra_help = "You can use this form to filter endpoints and select only the ones to be included in the " \ "report." 
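For orientation between these widgets.py hunks: the report_widget_factory changes just below operate on the report builder's JSON payload, a list of single-key objects where the key names the widget type and the value is a jQuery serializeArray()-style list of {"name": ..., "value": ...} pairs. The sketch below shows that shape with illustrative payload values (hypothetical headings and options, not taken from this diff) and mirrors only the keying scheme; the real factory instantiates the Widget subclasses above and wraps the pairs in a QueryDict to drive the filters.

    import json
    from collections import OrderedDict

    # Hypothetical report-builder payload, in the shape json.loads(json_data)
    # yields inside report_widget_factory: one single-key object per widget,
    # each holding serializeArray()-style name/value pairs.
    json_data = json.dumps([
        {"cover-page": [{"name": "heading", "value": "Quarterly Report"},
                        {"name": "sub_heading", "value": "Internal"},
                        {"name": "meta_info", "value": "Q3"}]},
        {"report-options": [{"name": "include_finding_notes", "value": "1"},
                            {"name": "include_finding_images", "value": "0"},
                            {"name": "report_type", "value": "HTML"},
                            {"name": "report_name", "value": "My Report"}]},
        {"page-break": []},
        {"page-break": []},
    ])

    # Keying scheme only: repeatable widgets (page-break, finding-list,
    # endpoint-list) get a "-<idx>" suffix so duplicates survive; singleton
    # widgets (report-options, cover-page, table-of-contents) keep their name.
    repeatable = {"page-break", "finding-list", "endpoint-list"}
    selected_widgets = OrderedDict()
    for idx, widget in enumerate(json.loads(json_data)):
        widget_type = list(widget.keys())[0]
        key = f"{widget_type}-{idx}" if widget_type in repeatable else widget_type
        selected_widgets[key] = widget[widget_type]

    print(list(selected_widgets))
    # -> ['cover-page', 'report-options', 'page-break-2', 'page-break-3']
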
@@ -383,7 +383,7 @@ def get_asciidoc(self): return mark_safe(asciidoc) def get_option_form(self): - html = render_to_string('dojo/report_endpoints.html', + html = render_to_string("dojo/report_endpoints.html", {"endpoints": self.paged_endpoints, "filtered": self.endpoints, "request": self.request, @@ -398,9 +398,9 @@ def report_widget_factory(json_data=None, request=None, user=None, finding_notes selected_widgets = OrderedDict() widgets = json.loads(json_data) for idx, widget in enumerate(widgets): - if list(widget.keys())[0] == 'page-break': - selected_widgets[list(widget.keys())[0] + '-' + str(idx)] = PageBreak() - if list(widget.keys())[0] == 'endpoint-list': + if list(widget.keys())[0] == "page-break": + selected_widgets[list(widget.keys())[0] + "-" + str(idx)] = PageBreak() + if list(widget.keys())[0] == "endpoint-list": endpoints = Endpoint.objects.filter(finding__active=True, finding__verified=True, finding__false_p=False, @@ -409,10 +409,10 @@ def report_widget_factory(json_data=None, request=None, user=None, finding_notes ).distinct() d = QueryDict(mutable=True) for item in widget.get(list(widget.keys())[0]): - if item['name'] in d: - d.appendlist(item['name'], item['value']) + if item["name"] in d: + d.appendlist(item["name"], item["value"]) else: - d[item['name']] = item['value'] + d[item["name"]] = item["value"] endpoints = Endpoint.objects.filter(id__in=endpoints) filter_string_matching = get_system_setting("filter_string_matching", False) @@ -422,60 +422,60 @@ def report_widget_factory(json_data=None, request=None, user=None, finding_notes endpoints = EndpointList(request=request, endpoints=endpoints, finding_notes=finding_notes, finding_images=finding_images, host=host, user_id=user_id) - selected_widgets[list(widget.keys())[0] + '-' + str(idx)] = endpoints + selected_widgets[list(widget.keys())[0] + "-" + str(idx)] = endpoints - if list(widget.keys())[0] == 'finding-list': + if list(widget.keys())[0] == "finding-list": findings = Finding.objects.all() d = QueryDict(mutable=True) for item in widget.get(list(widget.keys())[0]): - if item['name'] in d: - d.appendlist(item['name'], item['value']) + if item["name"] in d: + d.appendlist(item["name"], item["value"]) else: - d[item['name']] = item['value'] + d[item["name"]] = item["value"] filter_string_matching = get_system_setting("filter_string_matching", False) filter_class = ReportFindingFilterWithoutObjectLookups if filter_string_matching else ReportFindingFilter findings = filter_class(d, queryset=findings) user_id = user.id if user is not None else None - selected_widgets[list(widget.keys())[0] + '-' + str(idx)] = FindingList(request=request, findings=findings, + selected_widgets[list(widget.keys())[0] + "-" + str(idx)] = FindingList(request=request, findings=findings, finding_notes=finding_notes, finding_images=finding_images, host=host, user_id=user_id) - if list(widget.keys())[0] == 'wysiwyg-content': + if list(widget.keys())[0] == "wysiwyg-content": wysiwyg_content = WYSIWYGContent(request=request) wysiwyg_content.title = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'heading'), None)['value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "heading"), None)["value"] wysiwyg_content.content = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'hidden_content'), None)['value'] - selected_widgets[list(widget.keys())[0] + '-' + str(idx)] = wysiwyg_content - if list(widget.keys())[0] == 'report-options': + next((item for item in 
widget.get(list(widget.keys())[0]) if item["name"] == "hidden_content"), None)["value"] + selected_widgets[list(widget.keys())[0] + "-" + str(idx)] = wysiwyg_content + if list(widget.keys())[0] == "report-options": options = ReportOptions(request=request) options.include_finding_notes = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'include_finding_notes'), None)[ - 'value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "include_finding_notes"), None)[ + "value"] options.include_finding_images = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'include_finding_images'), None)[ - 'value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "include_finding_images"), None)[ + "value"] options.report_type = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'report_type'), None)['value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "report_type"), None)["value"] options.report_name = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'report_name'), None)['value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "report_name"), None)["value"] selected_widgets[list(widget.keys())[0]] = options - if list(widget.keys())[0] == 'table-of-contents': + if list(widget.keys())[0] == "table-of-contents": toc = TableOfContents(request=request) - toc.title = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'heading'), None)[ - 'value'] - toc.depth = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'depth'), None)['value'] + toc.title = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "heading"), None)[ + "value"] + toc.depth = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "depth"), None)["value"] toc.depth = int(toc.depth) + 1 selected_widgets[list(widget.keys())[0]] = toc - if list(widget.keys())[0] == 'cover-page': + if list(widget.keys())[0] == "cover-page": cover_page = CoverPage(request=request) - cover_page.title = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'heading'), None)[ - 'value'] + cover_page.title = next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "heading"), None)[ + "value"] cover_page.sub_heading = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'sub_heading'), None)['value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "sub_heading"), None)["value"] cover_page.meta_info = \ - next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == 'meta_info'), None)['value'] + next((item for item in widget.get(list(widget.keys())[0]) if item["name"] == "meta_info"), None)["value"] selected_widgets[list(widget.keys())[0]] = cover_page return selected_widgets diff --git a/dojo/risk_acceptance/api.py b/dojo/risk_acceptance/api.py index bb78356fdf..eec197bbd9 100644 --- a/dojo/risk_acceptance/api.py +++ b/dojo/risk_acceptance/api.py @@ -14,16 +14,16 @@ from dojo.engagement.queries import get_authorized_engagements from dojo.models import Risk_Acceptance, User, Vulnerability_Id -AcceptedRisk = NamedTuple('AcceptedRisk', (('vulnerability_id', str), ('justification', str), ('accepted_by', str))) +AcceptedRisk = NamedTuple("AcceptedRisk", (("vulnerability_id", str), ("justification", str), 
("accepted_by", str))) class AcceptedRiskSerializer(serializers.Serializer): vulnerability_id = serializers.CharField( max_length=50, - label='Vulnerability Id', - help_text='An id of a vulnerability in a security advisory associated with this finding. Can be a Common Vulnerabilities and Exposure (CVE) or from other sources.') - justification = serializers.CharField(help_text='Justification for accepting findings with this vulnerability id') - accepted_by = serializers.CharField(max_length=200, help_text='Name or email of person who accepts the risk') + label="Vulnerability Id", + help_text="An id of a vulnerability in a security advisory associated with this finding. Can be a Common Vulnerabilities and Exposure (CVE) or from other sources.") + justification = serializers.CharField(help_text="Justification for accepting findings with this vulnerability id") + accepted_by = serializers.CharField(max_length=200, help_text="Name or email of person who accepts the risk") def create(self, validated_data): return AcceptedRisk(**validated_data) @@ -40,7 +40,7 @@ def risk_application_model_class(self): request=AcceptedRiskSerializer(many=True), responses={status.HTTP_201_CREATED: RiskAcceptanceSerializer(many=True)}, ) - @action(methods=['post'], detail=True, permission_classes=[IsAdminUser], serializer_class=AcceptedRiskSerializer, + @action(methods=["post"], detail=True, permission_classes=[IsAdminUser], serializer_class=AcceptedRiskSerializer, filter_backends=[], pagination_class=None) def accept_risks(self, request, pk=None): model = self.get_object() @@ -63,7 +63,7 @@ class AcceptedFindingsMixin(ABC): request=AcceptedRiskSerializer(many=True), responses={status.HTTP_201_CREATED: RiskAcceptanceSerializer(many=True)}, ) - @action(methods=['post'], detail=False, permission_classes=[IsAdminUser], serializer_class=AcceptedRiskSerializer) + @action(methods=["post"], detail=False, permission_classes=[IsAdminUser], serializer_class=AcceptedRiskSerializer) def accept_risks(self, request): serializer = AcceptedRiskSerializer(data=request.data, many=True) if serializer.is_valid(): @@ -86,12 +86,12 @@ def _accept_risks(accepted_risks: List[AcceptedRisk], base_findings: QuerySet, o for risk in accepted_risks: vulnerability_ids = Vulnerability_Id.objects \ .filter(vulnerability_id=risk.vulnerability_id) \ - .values('finding') + .values("finding") findings = base_findings.filter(id__in=vulnerability_ids) if findings.exists(): # TODO we could use risk.vulnerability_id to name the risk_acceptance, but would need to check for existing risk_acceptances in that case # so for now we add some timestamp based suffix - name = risk.vulnerability_id + ' via api at ' + timezone.now().strftime('%b %d, %Y, %H:%M:%S') + name = risk.vulnerability_id + " via api at " + timezone.now().strftime("%b %d, %Y, %H:%M:%S") acceptance = Risk_Acceptance.objects.create(owner=owner, name=name[:100], decision=Risk_Acceptance.TREATMENT_ACCEPT, decision_details=risk.justification, diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index 098bf52aaf..559aeea215 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -16,13 +16,13 @@ def expire_now(risk_acceptance): - logger.info('Expiring risk acceptance %i:%s with %i findings', risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) + logger.info("Expiring risk acceptance %i:%s with %i findings", risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) reactivated_findings = [] if 
risk_acceptance.reactivate_expired: for finding in risk_acceptance.accepted_findings.all(): if not finding.active: - logger.debug('%i:%s: unaccepting a.k.a reactivating finding.', finding.id, finding) + logger.debug("%i:%s: unaccepting a.k.a reactivating finding.", finding.id, finding) finding.active = True finding.risk_accepted = False # Update any endpoint statuses on each of the findings @@ -35,7 +35,7 @@ def expire_now(risk_acceptance): reactivated_findings.append(finding) # findings remain in this risk acceptance for reporting / metrics purposes else: - logger.debug('%i:%s already active, no changes made.', finding.id, finding) + logger.debug("%i:%s already active, no changes made.", finding.id, finding) # best effort JIRA integration, no status changes post_jira_comments(risk_acceptance, risk_acceptance.accepted_findings.all(), expiration_message_creator) @@ -45,26 +45,26 @@ def expire_now(risk_acceptance): risk_acceptance.save() accepted_findings = risk_acceptance.accepted_findings.all() - title = 'Risk acceptance with ' + str(len(accepted_findings)) + " accepted findings has expired for " + \ - str(risk_acceptance.engagement.product) + ': ' + str(risk_acceptance.engagement.name) + title = "Risk acceptance with " + str(len(accepted_findings)) + " accepted findings has expired for " + \ + str(risk_acceptance.engagement.product) + ": " + str(risk_acceptance.engagement.name) - create_notification(event='risk_acceptance_expiration', title=title, risk_acceptance=risk_acceptance, accepted_findings=accepted_findings, + create_notification(event="risk_acceptance_expiration", title=title, risk_acceptance=risk_acceptance, accepted_findings=accepted_findings, reactivated_findings=reactivated_findings, engagement=risk_acceptance.engagement, product=risk_acceptance.engagement.product, - url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))) + url=reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))) def reinstate(risk_acceptance, old_expiration_date): if risk_acceptance.expiration_date_handled: - logger.info('Reinstating risk acceptance %i:%s with %i findings', risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) + logger.info("Reinstating risk acceptance %i:%s with %i findings", risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) - expiration_delta_days = get_system_setting('risk_acceptance_form_default_days', 90) + expiration_delta_days = get_system_setting("risk_acceptance_form_default_days", 90) risk_acceptance.expiration_date = timezone.now() + relativedelta(days=expiration_delta_days) reinstated_findings = [] for finding in risk_acceptance.accepted_findings.all(): if finding.active: - logger.debug('%i:%s: accepting a.k.a. deactivating finding', finding.id, finding) + logger.debug("%i:%s: accepting a.k.a. 
deactivating finding", finding.id, finding) finding.active = False finding.risk_accepted = True # Update any endpoint statuses on each of the findings @@ -72,7 +72,7 @@ def reinstate(risk_acceptance, old_expiration_date): finding.save(dedupe_option=False) reinstated_findings.append(finding) else: - logger.debug('%i:%s: already inactive, not making any changes', finding.id, finding) + logger.debug("%i:%s: already inactive, not making any changes", finding.id, finding) # best effort JIRA integration, no status changes post_jira_comments(risk_acceptance, risk_acceptance.accepted_findings.all(), reinstation_message_creator) @@ -106,7 +106,7 @@ def delete(eng, risk_acceptance): def remove_finding_from_risk_acceptance(risk_acceptance, finding): - logger.debug('removing finding %i from risk acceptance %i', finding.id, risk_acceptance.id) + logger.debug("removing finding %i from risk acceptance %i", finding.id, risk_acceptance.id) risk_acceptance.accepted_findings.remove(finding) finding.active = True finding.risk_accepted = False @@ -149,7 +149,7 @@ def expiration_handler(*args, **kwargs): risk_acceptances = get_expired_risk_acceptances_to_handle() - logger.info('expiring %i risk acceptances that are past expiration date', len(risk_acceptances)) + logger.info("expiring %i risk acceptances that are past expiration date", len(risk_acceptances)) for risk_acceptance in risk_acceptances: expire_now(risk_acceptance) # notification created by expire_now code @@ -158,18 +158,18 @@ def expiration_handler(*args, **kwargs): if heads_up_days > 0: risk_acceptances = get_almost_expired_risk_acceptances_to_handle(heads_up_days) - logger.info('notifying for %i risk acceptances that are expiring within %i days', len(risk_acceptances), heads_up_days) + logger.info("notifying for %i risk acceptances that are expiring within %i days", len(risk_acceptances), heads_up_days) for risk_acceptance in risk_acceptances: - logger.debug('notifying for risk acceptance %i:%s with %i findings', risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) + logger.debug("notifying for risk acceptance %i:%s with %i findings", risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) - notification_title = 'Risk acceptance with ' + str(len(risk_acceptance.accepted_findings.all())) + " accepted findings will expire on " + \ + notification_title = "Risk acceptance with " + str(len(risk_acceptance.accepted_findings.all())) + " accepted findings will expire on " + \ timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y") + " for " + \ - str(risk_acceptance.engagement.product) + ': ' + str(risk_acceptance.engagement.name) + str(risk_acceptance.engagement.product) + ": " + str(risk_acceptance.engagement.name) - create_notification(event='risk_acceptance_expiration', title=notification_title, risk_acceptance=risk_acceptance, + create_notification(event="risk_acceptance_expiration", title=notification_title, risk_acceptance=risk_acceptance, accepted_findings=risk_acceptance.accepted_findings.all(), engagement=risk_acceptance.engagement, product=risk_acceptance.engagement.product, - url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))) + url=reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))) post_jira_comments(risk_acceptance, expiration_warning_message_creator, heads_up_days) @@ -178,42 +178,42 @@ def expiration_handler(*args, **kwargs): def expiration_message_creator(risk_acceptance, 
heads_up_days=0): - return 'Risk acceptance [(%s)|%s] with %i findings has expired' % \ + return "Risk acceptance [(%s)|%s] with %i findings has expired" % \ (escape_for_jira(risk_acceptance.name), - get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))), + get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))), len(risk_acceptance.accepted_findings.all())) def expiration_warning_message_creator(risk_acceptance, heads_up_days=0): - return 'Risk acceptance [(%s)|%s] with %i findings will expire in %i days' % \ + return "Risk acceptance [(%s)|%s] with %i findings will expire in %i days" % \ (escape_for_jira(risk_acceptance.name), - get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))), + get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))), len(risk_acceptance.accepted_findings.all()), heads_up_days) def reinstation_message_creator(risk_acceptance, heads_up_days=0): - return 'Risk acceptance [(%s)|%s] with %i findings has been reinstated (expires on %s)' % \ + return "Risk acceptance [(%s)|%s] with %i findings has been reinstated (expires on %s)" % \ (escape_for_jira(risk_acceptance.name), - get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))), + get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))), len(risk_acceptance.accepted_findings.all()), timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y")) def accepted_message_creator(risk_acceptance, heads_up_days=0): if risk_acceptance: - return 'Finding has been added to risk acceptance [(%s)|%s] with %i findings (expires on %s)' % \ + return "Finding has been added to risk acceptance [(%s)|%s] with %i findings (expires on %s)" % \ (escape_for_jira(risk_acceptance.name), - get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id))), + get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))), len(risk_acceptance.accepted_findings.all()), timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y")) else: - return 'Finding has been risk accepted' + return "Finding has been risk accepted" def unaccepted_message_creator(risk_acceptance, heads_up_days=0): if risk_acceptance: - return 'finding was unaccepted/deleted from risk acceptance [({})|{}]'.format(escape_for_jira(risk_acceptance.name), - get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))) + return "finding was unaccepted/deleted from risk acceptance [({})|{}]".format(escape_for_jira(risk_acceptance.name), + get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id)))) else: - return 'Finding is no longer risk accepted' + return "Finding is no longer risk accepted" def post_jira_comment(finding, message_factory, heads_up_days=0): @@ -263,10 +263,10 @@ def get_almost_expired_risk_acceptances_to_handle(heads_up_days): def prefetch_for_expiration(risk_acceptances): - return risk_acceptances.prefetch_related('accepted_findings', 'accepted_findings__jira_issue', - 'engagement_set', - 'engagement__jira_project', - 'engagement__jira_project__jira_instance', + return risk_acceptances.prefetch_related("accepted_findings", "accepted_findings__jira_issue", + "engagement_set", + 
"engagement__jira_project", + "engagement__jira_project__jira_instance", ) @@ -274,7 +274,7 @@ def simple_risk_accept(finding, perform_save=True): if not finding.test.engagement.product.enable_simple_risk_acceptance: raise PermissionDenied - logger.debug('accepting finding %i:%s', finding.id, finding) + logger.debug("accepting finding %i:%s", finding.id, finding) finding.risk_accepted = True # risk accepted, so finding no longer considered active finding.active = False @@ -288,9 +288,9 @@ def simple_risk_accept(finding, perform_save=True): def risk_unaccept(finding, perform_save=True): - logger.debug('unaccepting finding %i:%s if it is currently risk accepted', finding.id, finding) + logger.debug("unaccepting finding %i:%s if it is currently risk accepted", finding.id, finding) if finding.risk_accepted: - logger.debug('unaccepting finding %i:%s', finding.id, finding) + logger.debug("unaccepting finding %i:%s", finding.id, finding) # removing from ManyToMany will not fail for non-existing entries remove_from_any_risk_acceptance(finding) if not finding.mitigated and not finding.false_p and not finding.out_of_scope: @@ -299,7 +299,7 @@ def risk_unaccept(finding, perform_save=True): # Update any endpoint statuses on each of the findings update_endpoint_statuses(finding, False) if perform_save: - logger.debug('saving unaccepted finding %i:%s', finding.id, finding) + logger.debug("saving unaccepted finding %i:%s", finding.id, finding) finding.save(dedupe_option=False) # post_jira_comment might reload from database so see unaccepted finding. but the comment diff --git a/dojo/risk_acceptance/queries.py b/dojo/risk_acceptance/queries.py index 2d45fb6445..9cbf89fb5c 100644 --- a/dojo/risk_acceptance/queries.py +++ b/dojo/risk_acceptance/queries.py @@ -12,33 +12,33 @@ def get_authorized_risk_acceptances(permission): return Risk_Acceptance.objects.none() if user.is_superuser: - return Risk_Acceptance.objects.all() + return Risk_Acceptance.objects.all().order_by("id") if user_has_global_permission(user, permission): - return Risk_Acceptance.objects.all() + return Risk_Acceptance.objects.all().order_by("id") roles = get_roles_for_permission(permission) authorized_product_type_roles = Product_Type_Member.objects.filter( - product_type=OuterRef('engagement__product__prod_type_id'), + product_type=OuterRef("engagement__product__prod_type_id"), user=user, role__in=roles) authorized_product_roles = Product_Member.objects.filter( - product=OuterRef('engagement__product_id'), + product=OuterRef("engagement__product_id"), user=user, role__in=roles) authorized_product_type_groups = Product_Type_Group.objects.filter( - product_type=OuterRef('engagement__product__prod_type_id'), + product_type=OuterRef("engagement__product__prod_type_id"), group__users=user, role__in=roles) authorized_product_groups = Product_Group.objects.filter( - product=OuterRef('engagement__product_id'), + product=OuterRef("engagement__product_id"), group__users=user, role__in=roles) risk_acceptances = Risk_Acceptance.objects.annotate( product__prod_type__member=Exists(authorized_product_type_roles), product__member=Exists(authorized_product_roles), product__prod_type__authorized_group=Exists(authorized_product_type_groups), - product__authorized_group=Exists(authorized_product_groups)) + product__authorized_group=Exists(authorized_product_groups)).order_by("id") risk_acceptances = risk_acceptances.filter( Q(product__prod_type__member=True) | Q(product__member=True) | Q(product__prod_type__authorized_group=True) | 
Q(product__authorized_group=True)) diff --git a/dojo/search/urls.py b/dojo/search/urls.py index 9f88f29c7b..879ccba4c6 100644 --- a/dojo/search/urls.py +++ b/dojo/search/urls.py @@ -4,6 +4,6 @@ urlpatterns = [ # search - re_path(r'^simple_search$', views.simple_search, - name='simple_search'), + re_path(r"^simple_search$", views.simple_search, + name="simple_search"), ] diff --git a/dojo/search/views.py b/dojo/search/views.py index 6ae591063c..6c6bc3217d 100644 --- a/dojo/search/views.py +++ b/dojo/search/views.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # explicitly use our own regex pattern here as django-watson is sensitive so we want to control it here independently of models.py etc. -vulnerability_id_pattern = re.compile(r'(^[A-Z]+-[A-Z\d-]+)$') +vulnerability_id_pattern = re.compile(r"(^[A-Z]+-[A-Z\d-]+)$") max_results = settings.SEARCH_MAX_RESULTS @@ -46,7 +46,7 @@ def simple_search(request): languages = None app_analysis = None vulnerability_ids = None - clean_query = '' + clean_query = "" cookie = False form = SimpleSearchForm() @@ -56,12 +56,12 @@ def simple_search(request): component_words = None # if request.method == 'GET' and "query" in request.GET: - if request.method == 'GET': + if request.method == "GET": form = SimpleSearchForm(request.GET) if form.is_valid(): cookie = True - clean_query = form.cleaned_data['query'] or '' + clean_query = form.cleaned_data["query"] or "" original_clean_query = clean_query operators, keywords = parse_search_query(clean_query) @@ -108,31 +108,31 @@ def simple_search(request): title_words = None component_words = None - keywords_query = ' '.join(keywords) + keywords_query = " ".join(keywords) if search_finding_id: - logger.debug('searching finding id') + logger.debug("searching finding id") findings = authorized_findings - findings = findings.filter(id=operators['id'][0]) + findings = findings.filter(id=operators["id"][0]) elif search_findings: - logger.debug('searching findings') + logger.debug("searching findings") filter_string_matching = get_system_setting("filter_string_matching", False) finding_filter_class = FindingFilterWithoutObjectLookups if filter_string_matching else FindingFilter - findings_filter = finding_filter_class(request.GET, queryset=findings, user=request.user, pid=None, prefix='finding') + findings_filter = finding_filter_class(request.GET, queryset=findings, user=request.user, pid=None, prefix="finding") # setting initial values for filters is not supported and discouraged: https://django-filter.readthedocs.io/en/stable/guide/tips.html#using-initial-values-as-defaults # we could try to modify request.GET before generating the filter, but for now we'll leave it as is - title_words = get_words_for_field(Finding, 'title') - component_words = get_words_for_field(Finding, 'component_name') + title_words = get_words_for_field(Finding, "title") + component_words = get_words_for_field(Finding, "component_name") findings = findings_filter.qs findings = apply_tag_filters(findings, operators) findings = apply_endpoint_filter(findings, operators) - findings = perform_keyword_search_for_operator(findings, operators, 'finding', keywords_query) + findings = perform_keyword_search_for_operator(findings, operators, "finding", keywords_query) else: findings = None @@ -141,44 +141,44 @@ def simple_search(request): # prefetch after watson to avoid invalid query errors due to watson not understanding prefetching if findings is not None: # check for None to avoid query execution - logger.debug('prefetching findings') +
logger.debug("prefetching findings") findings = get_page_items(request, findings, 25) findings.object_list = prefetch_for_findings(findings.object_list) # some over the top tag displaying happening... - findings.object_list = findings.object_list.prefetch_related('test__engagement__product__tags') + findings.object_list = findings.object_list.prefetch_related("test__engagement__product__tags") - tag = operators['tag'] if 'tag' in operators else keywords - tags = operators['tags'] if 'tags' in operators else keywords - not_tag = operators['not-tag'] if 'not-tag' in operators else keywords - not_tags = operators['not-tags'] if 'not-tags' in operators else keywords + tag = operators["tag"] if "tag" in operators else keywords + tags = operators["tags"] if "tags" in operators else keywords + not_tag = operators["not-tag"] if "not-tag" in operators else keywords + not_tags = operators["not-tags"] if "not-tags" in operators else keywords if search_tags and tag or tags or not_tag or not_tags: - logger.debug('searching tags') + logger.debug("searching tags") Q1, Q2, Q3, Q4 = Q(), Q(), Q(), Q() if tag: - tag = ','.join(tag) # contains needs a single value + tag = ",".join(tag) # contains needs a single value Q1 = Q(tags__name__contains=tag) if tags: Q2 = Q(tags__name__in=tags) if not_tag: - not_tag = ','.join(not_tag) # contains needs a single value + not_tag = ",".join(not_tag) # contains needs a single value Q3 = Q(tags__name__contains=not_tag) if not_tags: Q4 = Q(tags__name__in=not_tags) - tagged_findings = authorized_findings.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related('tags') + tagged_findings = authorized_findings.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related("tags") tagged_finding_templates = authorized_finding_templates.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results] - tagged_tests = authorized_tests.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related('tags') - tagged_engagements = authorized_engagements.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related('tags') - tagged_products = authorized_products.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related('tags') - tagged_endpoints = authorized_endpoints.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related('tags') + tagged_tests = authorized_tests.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related("tags") + tagged_engagements = authorized_engagements.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related("tags") + tagged_products = authorized_products.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related("tags") + tagged_endpoints = authorized_endpoints.filter(Q1 | Q2).exclude(Q3 | Q4).distinct()[:max_results].prefetch_related("tags") else: tagged_findings = None tagged_finding_templates = None @@ -190,7 +190,7 @@ def simple_search(request): tagged_results = tagged_findings or tagged_finding_templates or tagged_tests or tagged_engagements or tagged_products or tagged_endpoints if search_finding_templates: - logger.debug('searching finding templates') + logger.debug("searching finding templates") finding_templates = authorized_finding_templates finding_templates = apply_tag_filters(finding_templates, operators) @@ -204,7 +204,7 @@ def simple_search(request): finding_templates = None if search_tests: - logger.debug('searching tests') + logger.debug("searching tests") tests = authorized_tests tests = apply_tag_filters(tests, 
operators) @@ -213,13 +213,13 @@ def simple_search(request): watson_results = watson.filter(tests, keywords_query) tests = tests.filter(id__in=[watson.id for watson in watson_results]) - tests = tests.prefetch_related('engagement', 'engagement__product', 'test_type', 'tags', 'engagement__tags', 'engagement__product__tags') + tests = tests.prefetch_related("engagement", "engagement__product", "test_type", "tags", "engagement__tags", "engagement__product__tags") tests = tests[:max_results] else: tests = None if search_engagements: - logger.debug('searching engagements') + logger.debug("searching engagements") engagements = authorized_engagements engagements = apply_tag_filters(engagements, operators) @@ -228,13 +228,13 @@ def simple_search(request): watson_results = watson.filter(engagements, keywords_query) engagements = engagements.filter(id__in=[watson.id for watson in watson_results]) - engagements = engagements.prefetch_related('product', 'product__tags', 'tags') + engagements = engagements.prefetch_related("product", "product__tags", "tags") engagements = engagements[:max_results] else: engagements = None if search_products: - logger.debug('searching products') + logger.debug("searching products") products = authorized_products products = apply_tag_filters(products, operators) @@ -243,13 +243,13 @@ def simple_search(request): watson_results = watson.filter(products, keywords_query) products = products.filter(id__in=[watson.id for watson in watson_results]) - products = products.prefetch_related('tags') + products = products.prefetch_related("tags") products = products[:max_results] else: products = None if search_endpoints: - logger.debug('searching endpoint') + logger.debug("searching endpoint") endpoints = authorized_endpoints endpoints = apply_tag_filters(endpoints, operators) @@ -261,16 +261,16 @@ def simple_search(request): endpoints = None if search_languages: - logger.debug('searching languages') + logger.debug("searching languages") languages = Languages.objects.filter(language__language__icontains=keywords_query) - languages = languages.prefetch_related('product', 'product__tags') + languages = languages.prefetch_related("product", "product__tags") languages = languages[:max_results] else: languages = None if search_technologies: - logger.debug('searching technologies') + logger.debug("searching technologies") app_analysis = authorized_app_analysis app_analysis = app_analysis.filter(name__icontains=keywords_query) @@ -279,26 +279,26 @@ def simple_search(request): app_analysis = None if search_vulnerability_ids: - logger.debug('searching vulnerability_ids') + logger.debug("searching vulnerability_ids") vulnerability_ids = authorized_vulnerability_ids vulnerability_ids = apply_vulnerability_id_filter(vulnerability_ids, operators) if keywords_query: watson_results = watson.filter(vulnerability_ids, keywords_query) vulnerability_ids = vulnerability_ids.filter(id__in=[watson.id for watson in watson_results]) - vulnerability_ids = vulnerability_ids.prefetch_related('finding__test__engagement__product', 'finding__test__engagement__product__tags') + vulnerability_ids = vulnerability_ids.prefetch_related("finding__test__engagement__product", "finding__test__engagement__product__tags") vulnerability_ids = vulnerability_ids[:max_results] else: vulnerability_ids = None if keywords_query: - logger.debug('searching generic') - logger.debug('going generic with: %s', keywords_query) + logger.debug("searching generic") + logger.debug("going generic with: %s", keywords_query) generic = 
watson.search(keywords_query, models=( authorized_findings, authorized_tests, authorized_engagements, authorized_products, authorized_endpoints, authorized_finding_templates, authorized_vulnerability_ids, authorized_app_analysis)) \ - .prefetch_related('object')[:max_results] + .prefetch_related("object")[:max_results] else: generic = None @@ -311,7 +311,7 @@ def simple_search(request): # generic = watson.search("'CVE-2020-6754'")[:10].prefetch_related('object') # generic = watson.search(" 'ISEC-433'")[:10].prefetch_related('object') - logger.debug('all searched') + logger.debug("all searched") else: logger.debug(form.errors) @@ -319,49 +319,49 @@ def simple_search(request): add_breadcrumb(title=_("Simple Search"), top_level=True, request=request) - activetab = 'findings' if findings \ - else 'products' if products \ - else 'engagements' if engagements else \ - 'tests' if tests else \ - 'endpoint' if endpoints else \ - 'tagged' if tagged_results else \ - 'vulnerability_ids' if vulnerability_ids else \ - 'generic' - - response = render(request, 'dojo/simple_search.html', { - 'clean_query': original_clean_query, - 'languages': languages, - 'app_analysis': app_analysis, - 'tests': tests, - 'findings': findings, - 'finding_templates': finding_templates, - 'filtered': findings_filter, - 'title_words': title_words, - 'component_words': component_words, - 'products': products, - 'tagged_tests': tagged_tests, - 'tagged_findings': tagged_findings, - 'tagged_finding_templates': tagged_finding_templates, - 'tagged_products': tagged_products, - 'tagged_endpoints': tagged_endpoints, - 'tagged_engagements': tagged_engagements, - 'engagements': engagements, - 'endpoints': endpoints, - 'vulnerability_ids': vulnerability_ids, - 'name': _('Simple Search'), - 'metric': False, - 'user': request.user, - 'form': form, - 'activetab': activetab, - 'show_product_column': True, - 'generic': generic}) + activetab = "findings" if findings \ + else "products" if products \ + else "engagements" if engagements else \ + "tests" if tests else \ + "endpoint" if endpoints else \ + "tagged" if tagged_results else \ + "vulnerability_ids" if vulnerability_ids else \ + "generic" + + response = render(request, "dojo/simple_search.html", { + "clean_query": original_clean_query, + "languages": languages, + "app_analysis": app_analysis, + "tests": tests, + "findings": findings, + "finding_templates": finding_templates, + "filtered": findings_filter, + "title_words": title_words, + "component_words": component_words, + "products": products, + "tagged_tests": tagged_tests, + "tagged_findings": tagged_findings, + "tagged_finding_templates": tagged_finding_templates, + "tagged_products": tagged_products, + "tagged_endpoints": tagged_endpoints, + "tagged_engagements": tagged_engagements, + "engagements": engagements, + "endpoints": endpoints, + "vulnerability_ids": vulnerability_ids, + "name": _("Simple Search"), + "metric": False, + "user": request.user, + "form": form, + "activetab": activetab, + "show_product_column": True, + "generic": generic}) if cookie: response.set_cookie("highlight", value=keywords_query, max_age=None, expires=None, - path='/', secure=True, httponly=False) + path="/", secure=True, httponly=False) else: - response.delete_cookie("highlight", path='/') + response.delete_cookie("highlight", path="/") return response """ @@ -411,8 +411,8 @@ def parse_search_query(clean_query): query_parts = shlex.split(clean_query) for query_part in query_parts: - if ':' in query_part: - query_part_split = query_part.split(':', 1) + 
if ":" in query_part: + query_part_split = query_part.split(":", 1) operator = query_part_split[0] parameter = query_part_split[1].strip() @@ -424,9 +424,9 @@ def parse_search_query(clean_query): else: keywords.append(vulnerability_id_fix(query_part)) - logger.debug(f'query: {clean_query}') - logger.debug(f'operators: {operators}') - logger.debug(f'keywords: {keywords}') + logger.debug(f"query: {clean_query}") + logger.debug(f"operators: {operators}") + logger.debug(f"keywords: {keywords}") return operators, keywords @@ -441,91 +441,91 @@ def vulnerability_id_fix(keyword): # - https://github.com/DefectDojo/django-DefectDojo/issues/2081 vulnerability_ids = [] - keyword_parts = keyword.split(',') + keyword_parts = keyword.split(",") for keyword_part in keyword_parts: if bool(vulnerability_id_pattern.match(keyword_part)): - vulnerability_ids.append('\'' + keyword_part + '\'') + vulnerability_ids.append("'" + keyword_part + "'") if vulnerability_ids: - return ' '.join(vulnerability_ids) + return " ".join(vulnerability_ids) else: return keyword def apply_tag_filters(qs, operators, skip_relations=False): - tag_filters = {'tag': ''} + tag_filters = {"tag": ""} if qs.model == Finding: tag_filters = { - 'tag': '', - 'test-tag': 'test__', - 'engagement-tag': 'test__engagement__', - 'product-tag': 'test__engagement__product__', + "tag": "", + "test-tag": "test__", + "engagement-tag": "test__engagement__", + "product-tag": "test__engagement__product__", } if qs.model == Test: tag_filters = { - 'tag': '', - 'test-tag': '', - 'engagement-tag': 'engagement__', - 'product-tag': 'engagement__product__', + "tag": "", + "test-tag": "", + "engagement-tag": "engagement__", + "product-tag": "engagement__product__", } if qs.model == Engagement: tag_filters = { - 'tag': '', - 'test-tag': 'test__', - 'engagement-tag': '', - 'product-tag': 'product__', + "tag": "", + "test-tag": "test__", + "engagement-tag": "", + "product-tag": "product__", } if qs.model == Product: tag_filters = { - 'tag': '', - 'test-tag': 'engagement__test__', - 'engagement-tag': 'engagement__', - 'product-tag': '', + "tag": "", + "test-tag": "engagement__test__", + "engagement-tag": "engagement__", + "product-tag": "", } for tag_filter in tag_filters: if tag_filter in operators: value = operators[tag_filter] - value = ','.join(value) # contains needs a single value - qs = qs.filter(**{f'{tag_filters[tag_filter]}tags__name__contains': value}) + value = ",".join(value) # contains needs a single value + qs = qs.filter(**{f"{tag_filters[tag_filter]}tags__name__contains": value}) for tag_filter in tag_filters: - if tag_filter + 's' in operators: - value = operators[tag_filter + 's'] - qs = qs.filter(**{f'{tag_filters[tag_filter]}tags__name__in': value}) + if tag_filter + "s" in operators: + value = operators[tag_filter + "s"] + qs = qs.filter(**{f"{tag_filters[tag_filter]}tags__name__in": value}) # negative search based on not- prefix (not-tags, not-test-tags, not-engagement-tags, not-product-tags, etc) for tag_filter in tag_filters: - tag_filter = 'not-' + tag_filter + tag_filter = "not-" + tag_filter if tag_filter in operators: value = operators[tag_filter] - value = ','.join(value) # contains needs a single value - qs = qs.exclude(**{'{}tags__name__contains'.format(tag_filters[tag_filter.replace('not-', '')]): value}) + value = ",".join(value) # contains needs a single value + qs = qs.exclude(**{"{}tags__name__contains".format(tag_filters[tag_filter.replace("not-", "")]): value}) for tag_filter in tag_filters: - tag_filter = 'not-' + tag_filter 
- if tag_filter + 's' in operators: - value = operators[tag_filter + 's'] - qs = qs.exclude(**{'{}tags__name__in'.format(tag_filters[tag_filter.replace('not-', '')]): value}) + tag_filter = "not-" + tag_filter + if tag_filter + "s" in operators: + value = operators[tag_filter + "s"] + qs = qs.exclude(**{"{}tags__name__in".format(tag_filters[tag_filter.replace("not-", "")]): value}) return qs def apply_endpoint_filter(qs, operators): - if 'endpoint' in operators: - qs = qs.filter(endpoints__host__contains=','.join(operators['endpoint'])) + if "endpoint" in operators: + qs = qs.filter(endpoints__host__contains=",".join(operators["endpoint"])) return qs def apply_vulnerability_id_filter(qs, operators): - if 'vulnerability_id' in operators: - value = operators['vulnerability_id'] + if "vulnerability_id" in operators: + value = operators["vulnerability_id"] # possible value: # ['CVE-2020-6754'] @@ -534,8 +534,8 @@ # ['CVE-2020-6754,CVE-2018-7489', 'CVE-2020-1234'] # so flatten like mad: - vulnerability_ids = list(itertools.chain.from_iterable([vulnerability_id.split(',') for vulnerability_id in value])) - logger.debug('vulnerability_id filter: %s', vulnerability_ids) + vulnerability_ids = list(itertools.chain.from_iterable([vulnerability_id.split(",") for vulnerability_id in value])) + logger.debug("vulnerability_id filter: %s", vulnerability_ids) qs = qs.filter(Q(vulnerability_id__in=vulnerability_ids)) return qs @@ -543,17 +543,17 @@ def perform_keyword_search_for_operator(qs, operators, operator, keywords_query): watson_results = None - operator_query = '' - keywords_query = '' if not keywords_query else keywords_query + operator_query = "" + keywords_query = "" if not keywords_query else keywords_query if operator in operators: - operator_query = ' '.join(operators[operator]) + operator_query = " ".join(operators[operator]) keywords_query = operator_query + keywords_query keywords_query = keywords_query.strip() if keywords_query: - logger.debug('going watson with: %s', keywords_query) + logger.debug("going watson with: %s", keywords_query) # watson is too slow to get all results or even to count them # counting also results in invalid queries with group by errors watson_results = watson.filter(qs, keywords_query)[:max_results] diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum index 7de0e88d53..edb32f1df2 100644 --- a/dojo/settings/.settings.dist.py.sha256sum +++ b/dojo/settings/.settings.dist.py.sha256sum @@ -1 +1 @@ -d087fa0474058683faf7ddf1b98ca6b22f4fe18d6d900122e4c641b6d8ce5772 +c2958435897ef7f3d4bda5cb7386c84f317f02f8cc16077d544b9ec50cd6e9ba diff --git a/dojo/settings/attribute-maps/django_saml_uri.py b/dojo/settings/attribute-maps/django_saml_uri.py index 83fd538420..3d6dc33a2d 100644 --- a/dojo/settings/attribute-maps/django_saml_uri.py +++ b/dojo/settings/attribute-maps/django_saml_uri.py @@ -1,19 +1,19 @@ -X500ATTR_OID = 'urn:oid:2.5.4.' -PKCS_9 = 'urn:oid:1.2.840.113549.1.9.1.' -UCL_DIR_PILOT = 'urn:oid:0.9.2342.19200300.100.1.' +X500ATTR_OID = "urn:oid:2.5.4." +PKCS_9 = "urn:oid:1.2.840.113549.1.9.1." +UCL_DIR_PILOT = "urn:oid:0.9.2342.19200300.100.1."
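Since the attribute-map files touched here are pure data, a brief illustrative aside may help reviewers scanning these hunks (this sketch is not part of the patch; the file's MAP literal continues directly below): the "fro" direction resolves incoming OID-named SAML attributes to Django user fields, and "to" is the hand-maintained reverse mapping.

    # Illustrative only -- mirrors django_saml_uri.py, trimmed to two prefixes.
    X500ATTR_OID = "urn:oid:2.5.4."
    PKCS_9 = "urn:oid:1.2.840.113549.1.9.1."
    fro = {X500ATTR_OID + "3": "first_name", X500ATTR_OID + "4": "last_name", PKCS_9 + "1": "email"}
    # An IdP asserting urn:oid:2.5.4.3 (cn) therefore populates User.first_name:
    assert fro["urn:oid:2.5.4.3"] == "first_name"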
MAP = { - 'identifier': 'urn:oasis:names:tc:SAML:2.0:attrname-format:uri', - 'fro': { - X500ATTR_OID + '3': 'first_name', # cn - X500ATTR_OID + '4': 'last_name', # sn - PKCS_9 + '1': 'email', - UCL_DIR_PILOT + '1': 'uid', + "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri", + "fro": { + X500ATTR_OID + "3": "first_name", # cn + X500ATTR_OID + "4": "last_name", # sn + PKCS_9 + "1": "email", + UCL_DIR_PILOT + "1": "uid", }, - 'to': { - 'first_name': X500ATTR_OID + '3', - 'last_name': X500ATTR_OID + '4', - 'email': PKCS_9 + '1', - 'uid': UCL_DIR_PILOT + '1', + "to": { + "first_name": X500ATTR_OID + "3", + "last_name": X500ATTR_OID + "4", + "email": PKCS_9 + "1", + "uid": UCL_DIR_PILOT + "1", }, } diff --git a/dojo/settings/attribute-maps/saml_uri.py b/dojo/settings/attribute-maps/saml_uri.py index c2e7694f89..d2e948a915 100644 --- a/dojo/settings/attribute-maps/saml_uri.py +++ b/dojo/settings/attribute-maps/saml_uri.py @@ -1,10 +1,10 @@ -__author__ = 'rolandh' +__author__ = "rolandh" EDUPERSON_OID = "urn:oid:1.3.6.1.4.1.5923.1.1.1." X500ATTR_OID = "urn:oid:2.5.4." NOREDUPERSON_OID = "urn:oid:1.3.6.1.4.1.2428.90.1." NETSCAPE_LDAP = "urn:oid:2.16.840.1.113730.3.1." -UCL_DIR_PILOT = 'urn:oid:0.9.2342.19200300.100.1.' +UCL_DIR_PILOT = "urn:oid:0.9.2342.19200300.100.1." PKCS_9 = "urn:oid:1.2.840.113549.1.9.1." UMICH = "urn:oid:1.3.6.1.4.1.250.1.57." SCHAC = "urn:oid:1.3.6.1.4.1.25178.1.2." @@ -12,232 +12,232 @@ MAP = { "identifier": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri", "fro": { - EDUPERSON_OID + '2': 'eduPersonNickname', - EDUPERSON_OID + '9': 'eduPersonScopedAffiliation', - EDUPERSON_OID + '11': 'eduPersonAssurance', - EDUPERSON_OID + '10': 'eduPersonTargetedID', - EDUPERSON_OID + '4': 'eduPersonOrgUnitDN', - NOREDUPERSON_OID + '6': 'norEduOrgAcronym', - NOREDUPERSON_OID + '7': 'norEduOrgUniqueIdentifier', - NOREDUPERSON_OID + '4': 'norEduPersonLIN', - EDUPERSON_OID + '1': 'eduPersonAffiliation', - NOREDUPERSON_OID + '2': 'norEduOrgUnitUniqueNumber', - NETSCAPE_LDAP + '40': 'userSMIMECertificate', - NOREDUPERSON_OID + '1': 'norEduOrgUniqueNumber', - NETSCAPE_LDAP + '241': 'displayName', - UCL_DIR_PILOT + '37': 'associatedDomain', - EDUPERSON_OID + '6': 'eduPersonPrincipalName', - NOREDUPERSON_OID + '8': 'norEduOrgUnitUniqueIdentifier', - NOREDUPERSON_OID + '9': 'federationFeideSchemaVersion', - X500ATTR_OID + '53': 'deltaRevocationList', - X500ATTR_OID + '52': 'supportedAlgorithms', - X500ATTR_OID + '51': 'houseIdentifier', - X500ATTR_OID + '50': 'uniqueMember', - X500ATTR_OID + '19': 'physicalDeliveryOfficeName', - X500ATTR_OID + '18': 'postOfficeBox', - X500ATTR_OID + '17': 'postalCode', - X500ATTR_OID + '16': 'postalAddress', - X500ATTR_OID + '15': 'businessCategory', - X500ATTR_OID + '14': 'searchGuide', - EDUPERSON_OID + '5': 'eduPersonPrimaryAffiliation', - X500ATTR_OID + '12': 'title', - X500ATTR_OID + '11': 'ou', - X500ATTR_OID + '10': 'o', - X500ATTR_OID + '37': 'cACertificate', - X500ATTR_OID + '36': 'userCertificate', - X500ATTR_OID + '31': 'member', - X500ATTR_OID + '30': 'supportedApplicationContext', - X500ATTR_OID + '33': 'roleOccupant', - X500ATTR_OID + '32': 'owner', - NETSCAPE_LDAP + '1': 'carLicense', - PKCS_9 + '1': 'email', - NETSCAPE_LDAP + '3': 'employeeNumber', - NETSCAPE_LDAP + '2': 'departmentNumber', - X500ATTR_OID + '39': 'certificateRevocationList', - X500ATTR_OID + '38': 'authorityRevocationList', - NETSCAPE_LDAP + '216': 'userPKCS12', - EDUPERSON_OID + '8': 'eduPersonPrimaryOrgUnitDN', - X500ATTR_OID + '9': 'street', - X500ATTR_OID + '8': 
'st', - NETSCAPE_LDAP + '39': 'preferredLanguage', - EDUPERSON_OID + '7': 'eduPersonEntitlement', - X500ATTR_OID + '2': 'knowledgeInformation', - X500ATTR_OID + '7': 'l', - X500ATTR_OID + '6': 'c', - X500ATTR_OID + '5': 'serialNumber', - X500ATTR_OID + '4': 'sn', - X500ATTR_OID + '3': 'cn', - UCL_DIR_PILOT + '60': 'jpegPhoto', - X500ATTR_OID + '65': 'pseudonym', - NOREDUPERSON_OID + '5': 'norEduPersonNIN', - UCL_DIR_PILOT + '3': 'mail', - UCL_DIR_PILOT + '25': 'dc', - X500ATTR_OID + '40': 'crossCertificatePair', - X500ATTR_OID + '42': 'givenName', - X500ATTR_OID + '43': 'initials', - X500ATTR_OID + '44': 'generationQualifier', - X500ATTR_OID + '45': 'x500UniqueIdentifier', - X500ATTR_OID + '46': 'dnQualifier', - X500ATTR_OID + '47': 'enhancedSearchGuide', - X500ATTR_OID + '48': 'protocolInformation', - X500ATTR_OID + '54': 'dmdName', - NETSCAPE_LDAP + '4': 'employeeType', - X500ATTR_OID + '22': 'teletexTerminalIdentifier', - X500ATTR_OID + '23': 'facsimileTelephoneNumber', - X500ATTR_OID + '20': 'telephoneNumber', - X500ATTR_OID + '21': 'telexNumber', - X500ATTR_OID + '26': 'registeredAddress', - X500ATTR_OID + '27': 'destinationIndicator', - X500ATTR_OID + '24': 'x121Address', - X500ATTR_OID + '25': 'internationaliSDNNumber', - X500ATTR_OID + '28': 'preferredDeliveryMethod', - X500ATTR_OID + '29': 'presentationAddress', - EDUPERSON_OID + '3': 'eduPersonOrgDN', - NOREDUPERSON_OID + '3': 'norEduPersonBirthDate', - UMICH + '57': 'labeledURI', - UCL_DIR_PILOT + '1': 'uid', - SCHAC + '1': 'schacMotherTongue', - SCHAC + '2': 'schacGender', - SCHAC + '3': 'schacDateOfBirth', - SCHAC + '4': 'schacPlaceOfBirth', - SCHAC + '5': 'schacCountryOfCitizenship', - SCHAC + '6': 'schacSn1', - SCHAC + '7': 'schacSn2', - SCHAC + '8': 'schacPersonalTitle', - SCHAC + '9': 'schacHomeOrganization', - SCHAC + '10': 'schacHomeOrganizationType', - SCHAC + '11': 'schacCountryOfResidence', - SCHAC + '12': 'schacUserPresenceID', - SCHAC + '13': 'schacPersonalPosition', - SCHAC + '14': 'schacPersonalUniqueCode', - SCHAC + '15': 'schacPersonalUniqueID', - SCHAC + '17': 'schacExpiryDate', - SCHAC + '18': 'schacUserPrivateAttribute', - SCHAC + '19': 'schacUserStatus', - SCHAC + '20': 'schacProjectMembership', - SCHAC + '21': 'schacProjectSpecificRole', + EDUPERSON_OID + "2": "eduPersonNickname", + EDUPERSON_OID + "9": "eduPersonScopedAffiliation", + EDUPERSON_OID + "11": "eduPersonAssurance", + EDUPERSON_OID + "10": "eduPersonTargetedID", + EDUPERSON_OID + "4": "eduPersonOrgUnitDN", + NOREDUPERSON_OID + "6": "norEduOrgAcronym", + NOREDUPERSON_OID + "7": "norEduOrgUniqueIdentifier", + NOREDUPERSON_OID + "4": "norEduPersonLIN", + EDUPERSON_OID + "1": "eduPersonAffiliation", + NOREDUPERSON_OID + "2": "norEduOrgUnitUniqueNumber", + NETSCAPE_LDAP + "40": "userSMIMECertificate", + NOREDUPERSON_OID + "1": "norEduOrgUniqueNumber", + NETSCAPE_LDAP + "241": "displayName", + UCL_DIR_PILOT + "37": "associatedDomain", + EDUPERSON_OID + "6": "eduPersonPrincipalName", + NOREDUPERSON_OID + "8": "norEduOrgUnitUniqueIdentifier", + NOREDUPERSON_OID + "9": "federationFeideSchemaVersion", + X500ATTR_OID + "53": "deltaRevocationList", + X500ATTR_OID + "52": "supportedAlgorithms", + X500ATTR_OID + "51": "houseIdentifier", + X500ATTR_OID + "50": "uniqueMember", + X500ATTR_OID + "19": "physicalDeliveryOfficeName", + X500ATTR_OID + "18": "postOfficeBox", + X500ATTR_OID + "17": "postalCode", + X500ATTR_OID + "16": "postalAddress", + X500ATTR_OID + "15": "businessCategory", + X500ATTR_OID + "14": "searchGuide", + EDUPERSON_OID + "5": 
"eduPersonPrimaryAffiliation", + X500ATTR_OID + "12": "title", + X500ATTR_OID + "11": "ou", + X500ATTR_OID + "10": "o", + X500ATTR_OID + "37": "cACertificate", + X500ATTR_OID + "36": "userCertificate", + X500ATTR_OID + "31": "member", + X500ATTR_OID + "30": "supportedApplicationContext", + X500ATTR_OID + "33": "roleOccupant", + X500ATTR_OID + "32": "owner", + NETSCAPE_LDAP + "1": "carLicense", + PKCS_9 + "1": "email", + NETSCAPE_LDAP + "3": "employeeNumber", + NETSCAPE_LDAP + "2": "departmentNumber", + X500ATTR_OID + "39": "certificateRevocationList", + X500ATTR_OID + "38": "authorityRevocationList", + NETSCAPE_LDAP + "216": "userPKCS12", + EDUPERSON_OID + "8": "eduPersonPrimaryOrgUnitDN", + X500ATTR_OID + "9": "street", + X500ATTR_OID + "8": "st", + NETSCAPE_LDAP + "39": "preferredLanguage", + EDUPERSON_OID + "7": "eduPersonEntitlement", + X500ATTR_OID + "2": "knowledgeInformation", + X500ATTR_OID + "7": "l", + X500ATTR_OID + "6": "c", + X500ATTR_OID + "5": "serialNumber", + X500ATTR_OID + "4": "sn", + X500ATTR_OID + "3": "cn", + UCL_DIR_PILOT + "60": "jpegPhoto", + X500ATTR_OID + "65": "pseudonym", + NOREDUPERSON_OID + "5": "norEduPersonNIN", + UCL_DIR_PILOT + "3": "mail", + UCL_DIR_PILOT + "25": "dc", + X500ATTR_OID + "40": "crossCertificatePair", + X500ATTR_OID + "42": "givenName", + X500ATTR_OID + "43": "initials", + X500ATTR_OID + "44": "generationQualifier", + X500ATTR_OID + "45": "x500UniqueIdentifier", + X500ATTR_OID + "46": "dnQualifier", + X500ATTR_OID + "47": "enhancedSearchGuide", + X500ATTR_OID + "48": "protocolInformation", + X500ATTR_OID + "54": "dmdName", + NETSCAPE_LDAP + "4": "employeeType", + X500ATTR_OID + "22": "teletexTerminalIdentifier", + X500ATTR_OID + "23": "facsimileTelephoneNumber", + X500ATTR_OID + "20": "telephoneNumber", + X500ATTR_OID + "21": "telexNumber", + X500ATTR_OID + "26": "registeredAddress", + X500ATTR_OID + "27": "destinationIndicator", + X500ATTR_OID + "24": "x121Address", + X500ATTR_OID + "25": "internationaliSDNNumber", + X500ATTR_OID + "28": "preferredDeliveryMethod", + X500ATTR_OID + "29": "presentationAddress", + EDUPERSON_OID + "3": "eduPersonOrgDN", + NOREDUPERSON_OID + "3": "norEduPersonBirthDate", + UMICH + "57": "labeledURI", + UCL_DIR_PILOT + "1": "uid", + SCHAC + "1": "schacMotherTongue", + SCHAC + "2": "schacGender", + SCHAC + "3": "schacDateOfBirth", + SCHAC + "4": "schacPlaceOfBirth", + SCHAC + "5": "schacCountryOfCitizenship", + SCHAC + "6": "schacSn1", + SCHAC + "7": "schacSn2", + SCHAC + "8": "schacPersonalTitle", + SCHAC + "9": "schacHomeOrganization", + SCHAC + "10": "schacHomeOrganizationType", + SCHAC + "11": "schacCountryOfResidence", + SCHAC + "12": "schacUserPresenceID", + SCHAC + "13": "schacPersonalPosition", + SCHAC + "14": "schacPersonalUniqueCode", + SCHAC + "15": "schacPersonalUniqueID", + SCHAC + "17": "schacExpiryDate", + SCHAC + "18": "schacUserPrivateAttribute", + SCHAC + "19": "schacUserStatus", + SCHAC + "20": "schacProjectMembership", + SCHAC + "21": "schacProjectSpecificRole", }, "to": { - 'roleOccupant': X500ATTR_OID + '33', - 'gn': X500ATTR_OID + '42', - 'norEduPersonNIN': NOREDUPERSON_OID + '5', - 'title': X500ATTR_OID + '12', - 'facsimileTelephoneNumber': X500ATTR_OID + '23', - 'mail': UCL_DIR_PILOT + '3', - 'postOfficeBox': X500ATTR_OID + '18', - 'fax': X500ATTR_OID + '23', - 'telephoneNumber': X500ATTR_OID + '20', - 'norEduPersonBirthDate': NOREDUPERSON_OID + '3', - 'rfc822Mailbox': UCL_DIR_PILOT + '3', - 'dc': UCL_DIR_PILOT + '25', - 'countryName': X500ATTR_OID + '6', - 'emailAddress': PKCS_9 + '1', - 
'employeeNumber': NETSCAPE_LDAP + '3', - 'organizationName': X500ATTR_OID + '10', - 'eduPersonAssurance': EDUPERSON_OID + '11', - 'norEduOrgAcronym': NOREDUPERSON_OID + '6', - 'registeredAddress': X500ATTR_OID + '26', - 'physicalDeliveryOfficeName': X500ATTR_OID + '19', - 'associatedDomain': UCL_DIR_PILOT + '37', - 'l': X500ATTR_OID + '7', - 'stateOrProvinceName': X500ATTR_OID + '8', - 'federationFeideSchemaVersion': NOREDUPERSON_OID + '9', - 'pkcs9email': PKCS_9 + '1', - 'givenName': X500ATTR_OID + '42', - 'givenname': X500ATTR_OID + '42', - 'x500UniqueIdentifier': X500ATTR_OID + '45', - 'eduPersonNickname': EDUPERSON_OID + '2', - 'houseIdentifier': X500ATTR_OID + '51', - 'street': X500ATTR_OID + '9', - 'supportedAlgorithms': X500ATTR_OID + '52', - 'preferredLanguage': NETSCAPE_LDAP + '39', - 'postalAddress': X500ATTR_OID + '16', - 'email': PKCS_9 + '1', - 'norEduOrgUnitUniqueIdentifier': NOREDUPERSON_OID + '8', - 'eduPersonPrimaryOrgUnitDN': EDUPERSON_OID + '8', - 'c': X500ATTR_OID + '6', - 'teletexTerminalIdentifier': X500ATTR_OID + '22', - 'o': X500ATTR_OID + '10', - 'cACertificate': X500ATTR_OID + '37', - 'telexNumber': X500ATTR_OID + '21', - 'ou': X500ATTR_OID + '11', - 'initials': X500ATTR_OID + '43', - 'eduPersonOrgUnitDN': EDUPERSON_OID + '4', - 'deltaRevocationList': X500ATTR_OID + '53', - 'norEduPersonLIN': NOREDUPERSON_OID + '4', - 'supportedApplicationContext': X500ATTR_OID + '30', - 'eduPersonEntitlement': EDUPERSON_OID + '7', - 'generationQualifier': X500ATTR_OID + '44', - 'eduPersonAffiliation': EDUPERSON_OID + '1', - 'edupersonaffiliation': EDUPERSON_OID + '1', - 'eduPersonPrincipalName': EDUPERSON_OID + '6', - 'edupersonprincipalname': EDUPERSON_OID + '6', - 'localityName': X500ATTR_OID + '7', - 'owner': X500ATTR_OID + '32', - 'norEduOrgUnitUniqueNumber': NOREDUPERSON_OID + '2', - 'searchGuide': X500ATTR_OID + '14', - 'certificateRevocationList': X500ATTR_OID + '39', - 'organizationalUnitName': X500ATTR_OID + '11', - 'userCertificate': X500ATTR_OID + '36', - 'preferredDeliveryMethod': X500ATTR_OID + '28', - 'internationaliSDNNumber': X500ATTR_OID + '25', - 'uniqueMember': X500ATTR_OID + '50', - 'departmentNumber': NETSCAPE_LDAP + '2', - 'enhancedSearchGuide': X500ATTR_OID + '47', - 'userPKCS12': NETSCAPE_LDAP + '216', - 'eduPersonTargetedID': EDUPERSON_OID + '10', - 'norEduOrgUniqueNumber': NOREDUPERSON_OID + '1', - 'x121Address': X500ATTR_OID + '24', - 'destinationIndicator': X500ATTR_OID + '27', - 'eduPersonPrimaryAffiliation': EDUPERSON_OID + '5', - 'surname': X500ATTR_OID + '4', - 'jpegPhoto': UCL_DIR_PILOT + '60', - 'eduPersonScopedAffiliation': EDUPERSON_OID + '9', - 'edupersonscopedaffiliation': EDUPERSON_OID + '9', - 'protocolInformation': X500ATTR_OID + '48', - 'knowledgeInformation': X500ATTR_OID + '2', - 'employeeType': NETSCAPE_LDAP + '4', - 'userSMIMECertificate': NETSCAPE_LDAP + '40', - 'member': X500ATTR_OID + '31', - 'streetAddress': X500ATTR_OID + '9', - 'dmdName': X500ATTR_OID + '54', - 'postalCode': X500ATTR_OID + '17', - 'pseudonym': X500ATTR_OID + '65', - 'dnQualifier': X500ATTR_OID + '46', - 'crossCertificatePair': X500ATTR_OID + '40', - 'eduPersonOrgDN': EDUPERSON_OID + '3', - 'authorityRevocationList': X500ATTR_OID + '38', - 'displayName': NETSCAPE_LDAP + '241', - 'businessCategory': X500ATTR_OID + '15', - 'serialNumber': X500ATTR_OID + '5', - 'norEduOrgUniqueIdentifier': NOREDUPERSON_OID + '7', - 'st': X500ATTR_OID + '8', - 'carLicense': NETSCAPE_LDAP + '1', - 'presentationAddress': X500ATTR_OID + '29', - 'sn': X500ATTR_OID + '4', - 'cn': 
X500ATTR_OID + '3', - 'domainComponent': UCL_DIR_PILOT + '25', - 'labeledURI': UMICH + '57', - 'uid': UCL_DIR_PILOT + '1', - 'schacMotherTongue': SCHAC + '1', - 'schacGender': SCHAC + '2', - 'schacDateOfBirth': SCHAC + '3', - 'schacPlaceOfBirth': SCHAC + '4', - 'schacCountryOfCitizenship': SCHAC + '5', - 'schacSn1': SCHAC + '6', - 'schacSn2': SCHAC + '7', - 'schacPersonalTitle': SCHAC + '8', - 'schacHomeOrganization': SCHAC + '9', - 'schacHomeOrganizationType': SCHAC + '10', - 'schacCountryOfResidence': SCHAC + '11', - 'schacUserPresenceID': SCHAC + '12', - 'schacPersonalPosition': SCHAC + '13', - 'schacPersonalUniqueCode': SCHAC + '14', - 'schacPersonalUniqueID': SCHAC + '15', - 'schacExpiryDate': SCHAC + '17', - 'schacUserPrivateAttribute': SCHAC + '18', - 'schacUserStatus': SCHAC + '19', - 'schacProjectMembership': SCHAC + '20', - 'schacProjectSpecificRole': SCHAC + '21', + "roleOccupant": X500ATTR_OID + "33", + "gn": X500ATTR_OID + "42", + "norEduPersonNIN": NOREDUPERSON_OID + "5", + "title": X500ATTR_OID + "12", + "facsimileTelephoneNumber": X500ATTR_OID + "23", + "mail": UCL_DIR_PILOT + "3", + "postOfficeBox": X500ATTR_OID + "18", + "fax": X500ATTR_OID + "23", + "telephoneNumber": X500ATTR_OID + "20", + "norEduPersonBirthDate": NOREDUPERSON_OID + "3", + "rfc822Mailbox": UCL_DIR_PILOT + "3", + "dc": UCL_DIR_PILOT + "25", + "countryName": X500ATTR_OID + "6", + "emailAddress": PKCS_9 + "1", + "employeeNumber": NETSCAPE_LDAP + "3", + "organizationName": X500ATTR_OID + "10", + "eduPersonAssurance": EDUPERSON_OID + "11", + "norEduOrgAcronym": NOREDUPERSON_OID + "6", + "registeredAddress": X500ATTR_OID + "26", + "physicalDeliveryOfficeName": X500ATTR_OID + "19", + "associatedDomain": UCL_DIR_PILOT + "37", + "l": X500ATTR_OID + "7", + "stateOrProvinceName": X500ATTR_OID + "8", + "federationFeideSchemaVersion": NOREDUPERSON_OID + "9", + "pkcs9email": PKCS_9 + "1", + "givenName": X500ATTR_OID + "42", + "givenname": X500ATTR_OID + "42", + "x500UniqueIdentifier": X500ATTR_OID + "45", + "eduPersonNickname": EDUPERSON_OID + "2", + "houseIdentifier": X500ATTR_OID + "51", + "street": X500ATTR_OID + "9", + "supportedAlgorithms": X500ATTR_OID + "52", + "preferredLanguage": NETSCAPE_LDAP + "39", + "postalAddress": X500ATTR_OID + "16", + "email": PKCS_9 + "1", + "norEduOrgUnitUniqueIdentifier": NOREDUPERSON_OID + "8", + "eduPersonPrimaryOrgUnitDN": EDUPERSON_OID + "8", + "c": X500ATTR_OID + "6", + "teletexTerminalIdentifier": X500ATTR_OID + "22", + "o": X500ATTR_OID + "10", + "cACertificate": X500ATTR_OID + "37", + "telexNumber": X500ATTR_OID + "21", + "ou": X500ATTR_OID + "11", + "initials": X500ATTR_OID + "43", + "eduPersonOrgUnitDN": EDUPERSON_OID + "4", + "deltaRevocationList": X500ATTR_OID + "53", + "norEduPersonLIN": NOREDUPERSON_OID + "4", + "supportedApplicationContext": X500ATTR_OID + "30", + "eduPersonEntitlement": EDUPERSON_OID + "7", + "generationQualifier": X500ATTR_OID + "44", + "eduPersonAffiliation": EDUPERSON_OID + "1", + "edupersonaffiliation": EDUPERSON_OID + "1", + "eduPersonPrincipalName": EDUPERSON_OID + "6", + "edupersonprincipalname": EDUPERSON_OID + "6", + "localityName": X500ATTR_OID + "7", + "owner": X500ATTR_OID + "32", + "norEduOrgUnitUniqueNumber": NOREDUPERSON_OID + "2", + "searchGuide": X500ATTR_OID + "14", + "certificateRevocationList": X500ATTR_OID + "39", + "organizationalUnitName": X500ATTR_OID + "11", + "userCertificate": X500ATTR_OID + "36", + "preferredDeliveryMethod": X500ATTR_OID + "28", + "internationaliSDNNumber": X500ATTR_OID + "25", + "uniqueMember": 
X500ATTR_OID + "50", + "departmentNumber": NETSCAPE_LDAP + "2", + "enhancedSearchGuide": X500ATTR_OID + "47", + "userPKCS12": NETSCAPE_LDAP + "216", + "eduPersonTargetedID": EDUPERSON_OID + "10", + "norEduOrgUniqueNumber": NOREDUPERSON_OID + "1", + "x121Address": X500ATTR_OID + "24", + "destinationIndicator": X500ATTR_OID + "27", + "eduPersonPrimaryAffiliation": EDUPERSON_OID + "5", + "surname": X500ATTR_OID + "4", + "jpegPhoto": UCL_DIR_PILOT + "60", + "eduPersonScopedAffiliation": EDUPERSON_OID + "9", + "edupersonscopedaffiliation": EDUPERSON_OID + "9", + "protocolInformation": X500ATTR_OID + "48", + "knowledgeInformation": X500ATTR_OID + "2", + "employeeType": NETSCAPE_LDAP + "4", + "userSMIMECertificate": NETSCAPE_LDAP + "40", + "member": X500ATTR_OID + "31", + "streetAddress": X500ATTR_OID + "9", + "dmdName": X500ATTR_OID + "54", + "postalCode": X500ATTR_OID + "17", + "pseudonym": X500ATTR_OID + "65", + "dnQualifier": X500ATTR_OID + "46", + "crossCertificatePair": X500ATTR_OID + "40", + "eduPersonOrgDN": EDUPERSON_OID + "3", + "authorityRevocationList": X500ATTR_OID + "38", + "displayName": NETSCAPE_LDAP + "241", + "businessCategory": X500ATTR_OID + "15", + "serialNumber": X500ATTR_OID + "5", + "norEduOrgUniqueIdentifier": NOREDUPERSON_OID + "7", + "st": X500ATTR_OID + "8", + "carLicense": NETSCAPE_LDAP + "1", + "presentationAddress": X500ATTR_OID + "29", + "sn": X500ATTR_OID + "4", + "cn": X500ATTR_OID + "3", + "domainComponent": UCL_DIR_PILOT + "25", + "labeledURI": UMICH + "57", + "uid": UCL_DIR_PILOT + "1", + "schacMotherTongue": SCHAC + "1", + "schacGender": SCHAC + "2", + "schacDateOfBirth": SCHAC + "3", + "schacPlaceOfBirth": SCHAC + "4", + "schacCountryOfCitizenship": SCHAC + "5", + "schacSn1": SCHAC + "6", + "schacSn2": SCHAC + "7", + "schacPersonalTitle": SCHAC + "8", + "schacHomeOrganization": SCHAC + "9", + "schacHomeOrganizationType": SCHAC + "10", + "schacCountryOfResidence": SCHAC + "11", + "schacUserPresenceID": SCHAC + "12", + "schacPersonalPosition": SCHAC + "13", + "schacPersonalUniqueCode": SCHAC + "14", + "schacPersonalUniqueID": SCHAC + "15", + "schacExpiryDate": SCHAC + "17", + "schacUserPrivateAttribute": SCHAC + "18", + "schacUserStatus": SCHAC + "19", + "schacProjectMembership": SCHAC + "20", + "schacProjectSpecificRole": SCHAC + "21", }, } diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index d511c1af55..cb01f8b51b 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -31,18 +31,18 @@ # reference: https://pypi.org/project/django-environ/ env = environ.FileAwareEnv( # Set casting and default values - DD_SITE_URL=(str, 'http://localhost:8080'), + DD_SITE_URL=(str, "http://localhost:8080"), DD_DEBUG=(bool, False), DD_TEMPLATE_DEBUG=(bool, False), - DD_LOG_LEVEL=(str, ''), + DD_LOG_LEVEL=(str, ""), DD_DJANGO_METRICS_ENABLED=(bool, False), - DD_LOGIN_REDIRECT_URL=(str, '/'), - DD_LOGIN_URL=(str, '/login'), + DD_LOGIN_REDIRECT_URL=(str, "/"), + DD_LOGIN_URL=(str, "/login"), DD_DJANGO_ADMIN_ENABLED=(bool, True), DD_SESSION_COOKIE_HTTPONLY=(bool, True), DD_CSRF_COOKIE_HTTPONLY=(bool, True), DD_SECURE_SSL_REDIRECT=(bool, False), - DD_SECURE_CROSS_ORIGIN_OPENER_POLICY=(str, 'same-origin'), + DD_SECURE_CROSS_ORIGIN_OPENER_POLICY=(str, "same-origin"), DD_SECURE_HSTS_INCLUDE_SUBDOMAINS=(bool, False), DD_SECURE_HSTS_SECONDS=(int, 31536000), # One year expiration DD_SESSION_COOKIE_SECURE=(bool, False), @@ -51,56 +51,56 @@ DD_CSRF_COOKIE_SECURE=(bool, False), DD_CSRF_TRUSTED_ORIGINS=(list, []), 
DD_SECURE_CONTENT_TYPE_NOSNIFF=(bool, True), - DD_CSRF_COOKIE_SAMESITE=(str, 'Lax'), - DD_SESSION_COOKIE_SAMESITE=(str, 'Lax'), + DD_CSRF_COOKIE_SAMESITE=(str, "Lax"), + DD_SESSION_COOKIE_SAMESITE=(str, "Lax"), DD_APPEND_SLASH=(bool, True), - DD_TIME_ZONE=(str, 'UTC'), - DD_LANG=(str, 'en-us'), - DD_TEAM_NAME=(str, 'Security Team'), - DD_ADMINS=(str, 'DefectDojo:dojo@localhost,Admin:admin@localhost'), + DD_TIME_ZONE=(str, "UTC"), + DD_LANG=(str, "en-us"), + DD_TEAM_NAME=(str, "Security Team"), + DD_ADMINS=(str, "DefectDojo:dojo@localhost,Admin:admin@localhost"), DD_WHITENOISE=(bool, False), DD_TRACK_MIGRATIONS=(bool, True), DD_SECURE_PROXY_SSL_HEADER=(bool, False), - DD_TEST_RUNNER=(str, 'django.test.runner.DiscoverRunner'), - DD_URL_PREFIX=(str, ''), - DD_ROOT=(str, root('dojo')), - DD_LANGUAGE_CODE=(str, 'en-us'), + DD_TEST_RUNNER=(str, "django.test.runner.DiscoverRunner"), + DD_URL_PREFIX=(str, ""), + DD_ROOT=(str, root("dojo")), + DD_LANGUAGE_CODE=(str, "en-us"), DD_SITE_ID=(int, 1), DD_USE_I18N=(bool, True), DD_USE_TZ=(bool, True), - DD_MEDIA_URL=(str, '/media/'), - DD_MEDIA_ROOT=(str, root('media')), - DD_STATIC_URL=(str, '/static/'), - DD_STATIC_ROOT=(str, root('static')), - DD_CELERY_BROKER_URL=(str, ''), - DD_CELERY_BROKER_SCHEME=(str, 'sqla+sqlite'), - DD_CELERY_BROKER_USER=(str, ''), - DD_CELERY_BROKER_PASSWORD=(str, ''), - DD_CELERY_BROKER_HOST=(str, ''), + DD_MEDIA_URL=(str, "/media/"), + DD_MEDIA_ROOT=(str, root("media")), + DD_STATIC_URL=(str, "/static/"), + DD_STATIC_ROOT=(str, root("static")), + DD_CELERY_BROKER_URL=(str, ""), + DD_CELERY_BROKER_SCHEME=(str, "sqla+sqlite"), + DD_CELERY_BROKER_USER=(str, ""), + DD_CELERY_BROKER_PASSWORD=(str, ""), + DD_CELERY_BROKER_HOST=(str, ""), DD_CELERY_BROKER_PORT=(int, -1), - DD_CELERY_BROKER_PATH=(str, '/dojo.celerydb.sqlite'), - DD_CELERY_BROKER_PARAMS=(str, ''), - DD_CELERY_BROKER_TRANSPORT_OPTIONS=(str, ''), + DD_CELERY_BROKER_PATH=(str, "/dojo.celerydb.sqlite"), + DD_CELERY_BROKER_PARAMS=(str, ""), + DD_CELERY_BROKER_TRANSPORT_OPTIONS=(str, ""), DD_CELERY_TASK_IGNORE_RESULT=(bool, True), - DD_CELERY_RESULT_BACKEND=(str, 'django-db'), + DD_CELERY_RESULT_BACKEND=(str, "django-db"), DD_CELERY_RESULT_EXPIRES=(int, 86400), - DD_CELERY_BEAT_SCHEDULE_FILENAME=(str, root('dojo.celery.beat.db')), - DD_CELERY_TASK_SERIALIZER=(str, 'pickle'), + DD_CELERY_BEAT_SCHEDULE_FILENAME=(str, root("dojo.celery.beat.db")), + DD_CELERY_TASK_SERIALIZER=(str, "pickle"), DD_CELERY_PASS_MODEL_BY_ID=(str, True), - DD_FOOTER_VERSION=(str, ''), + DD_FOOTER_VERSION=(str, ""), # models should be passed to celery by ID, default is False (for now) DD_FORCE_LOWERCASE_TAGS=(bool, True), DD_MAX_TAG_LENGTH=(int, 25), - DD_DATABASE_ENGINE=(str, 'django.db.backends.mysql'), - DD_DATABASE_HOST=(str, 'mysql'), - DD_DATABASE_NAME=(str, 'defectdojo'), + DD_DATABASE_ENGINE=(str, "django.db.backends.mysql"), + DD_DATABASE_HOST=(str, "mysql"), + DD_DATABASE_NAME=(str, "defectdojo"), # default django database name for testing is test_ - DD_TEST_DATABASE_NAME=(str, 'test_defectdojo'), - DD_DATABASE_PASSWORD=(str, 'defectdojo'), + DD_TEST_DATABASE_NAME=(str, "test_defectdojo"), + DD_DATABASE_PASSWORD=(str, "defectdojo"), DD_DATABASE_PORT=(int, 3306), - DD_DATABASE_USER=(str, 'defectdojo'), - DD_SECRET_KEY=(str, ''), - DD_CREDENTIAL_AES_256_KEY=(str, '.'), + DD_DATABASE_USER=(str, "defectdojo"), + DD_SECRET_KEY=(str, ""), + DD_CREDENTIAL_AES_256_KEY=(str, "."), DD_DATA_UPLOAD_MAX_MEMORY_SIZE=(int, 8388608), # Max post size set to 8mb DD_FORGOT_PASSWORD=(bool, True), # do we show 
link "I forgot my password" on login screen DD_PASSWORD_RESET_TIMEOUT=(int, 259200), # 3 days, in seconds (the deafult) @@ -110,91 +110,91 @@ DD_SOCIAL_LOGIN_AUTO_REDIRECT=(bool, False), # auto-redirect if there is only one social login method DD_SOCIAL_AUTH_TRAILING_SLASH=(bool, True), DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_AUTH0_KEY=(str, ''), - DD_SOCIAL_AUTH_AUTH0_SECRET=(str, ''), - DD_SOCIAL_AUTH_AUTH0_DOMAIN=(str, ''), - DD_SOCIAL_AUTH_AUTH0_SCOPE=(list, ['openid', 'profile', 'email']), + DD_SOCIAL_AUTH_AUTH0_KEY=(str, ""), + DD_SOCIAL_AUTH_AUTH0_SECRET=(str, ""), + DD_SOCIAL_AUTH_AUTH0_DOMAIN=(str, ""), + DD_SOCIAL_AUTH_AUTH0_SCOPE=(list, ["openid", "profile", "email"]), DD_SOCIAL_AUTH_GOOGLE_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_GOOGLE_OAUTH2_KEY=(str, ''), - DD_SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET=(str, ''), - DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS=(list, ['']), - DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS=(list, ['']), + DD_SOCIAL_AUTH_GOOGLE_OAUTH2_KEY=(str, ""), + DD_SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET=(str, ""), + DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS=(list, [""]), + DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS=(list, [""]), DD_SOCIAL_AUTH_OKTA_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_OKTA_OAUTH2_KEY=(str, ''), - DD_SOCIAL_AUTH_OKTA_OAUTH2_SECRET=(str, ''), - DD_SOCIAL_AUTH_OKTA_OAUTH2_API_URL=(str, 'https://{your-org-url}/oauth2'), + DD_SOCIAL_AUTH_OKTA_OAUTH2_KEY=(str, ""), + DD_SOCIAL_AUTH_OKTA_OAUTH2_SECRET=(str, ""), + DD_SOCIAL_AUTH_OKTA_OAUTH2_API_URL=(str, "https://{your-org-url}/oauth2"), DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY=(str, ''), - DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET=(str, ''), - DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID=(str, ''), - DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE=(str, 'https://graph.microsoft.com/'), + DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY=(str, ""), + DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET=(str, ""), + DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID=(str, ""), + DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE=(str, "https://graph.microsoft.com/"), DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GET_GROUPS=(bool, False), - DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GROUPS_FILTER=(str, ''), + DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GROUPS_FILTER=(str, ""), DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS=(bool, True), DD_SOCIAL_AUTH_GITLAB_OAUTH2_ENABLED=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_AUTO_IMPORT=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_TAGS=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_URL=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_MIN_ACCESS_LEVEL=(int, 20), - DD_SOCIAL_AUTH_GITLAB_KEY=(str, ''), - DD_SOCIAL_AUTH_GITLAB_SECRET=(str, ''), - DD_SOCIAL_AUTH_GITLAB_API_URL=(str, 'https://gitlab.com'), - DD_SOCIAL_AUTH_GITLAB_SCOPE=(list, ['read_user', 'openid']), + DD_SOCIAL_AUTH_GITLAB_KEY=(str, ""), + DD_SOCIAL_AUTH_GITLAB_SECRET=(str, ""), + DD_SOCIAL_AUTH_GITLAB_API_URL=(str, "https://gitlab.com"), + DD_SOCIAL_AUTH_GITLAB_SCOPE=(list, ["read_user", "openid"]), DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_KEYCLOAK_KEY=(str, ''), - DD_SOCIAL_AUTH_KEYCLOAK_SECRET=(str, ''), - DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY=(str, ''), - DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL=(str, ''), - DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL=(str, ''), - DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT=(str, 'Login with Keycloak'), + DD_SOCIAL_AUTH_KEYCLOAK_KEY=(str, ""), + 
DD_SOCIAL_AUTH_KEYCLOAK_SECRET=(str, ""), + DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY=(str, ""), + DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL=(str, ""), + DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL=(str, ""), + DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT=(str, "Login with Keycloak"), DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_OAUTH2_ENABLED=(bool, False), - DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_URL=(str, ''), - DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL=(str, ''), - DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY=(str, ''), - DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET=(str, ''), + DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_URL=(str, ""), + DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL=(str, ""), + DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY=(str, ""), + DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET=(str, ""), DD_SAML2_ENABLED=(bool, False), # Allows overriding the default SAML authentication backend. Check https://djangosaml2.readthedocs.io/contents/setup.html#custom-user-attributes-processing - DD_SAML2_AUTHENTICATION_BACKENDS=(str, 'djangosaml2.backends.Saml2Backend'), + DD_SAML2_AUTHENTICATION_BACKENDS=(str, "djangosaml2.backends.Saml2Backend"), # Force Authentication to make SSO possible with SAML2 DD_SAML2_FORCE_AUTH=(bool, True), - DD_SAML2_LOGIN_BUTTON_TEXT=(str, 'Login with SAML'), + DD_SAML2_LOGIN_BUTTON_TEXT=(str, "Login with SAML"), # Optional: display the idp SAML Logout URL in DefectDojo - DD_SAML2_LOGOUT_URL=(str, ''), + DD_SAML2_LOGOUT_URL=(str, ""), # Metadata is required for SAML, choose either remote url or local file path - DD_SAML2_METADATA_AUTO_CONF_URL=(str, ''), - DD_SAML2_METADATA_LOCAL_FILE_PATH=(str, ''), # ex. '/public/share/idp_metadata.xml' + DD_SAML2_METADATA_AUTO_CONF_URL=(str, ""), + DD_SAML2_METADATA_LOCAL_FILE_PATH=(str, ""), # ex. '/public/share/idp_metadata.xml' # Optional, default is SITE_URL + /saml2/metadata/ - DD_SAML2_ENTITY_ID=(str, ''), + DD_SAML2_ENTITY_ID=(str, ""), # Allow creating users that are not already in the Django database DD_SAML2_CREATE_USER=(bool, False), DD_SAML2_ATTRIBUTES_MAP=(dict, { # Change Email/UserName/FirstName/LastName to corresponding SAML2 userprofile attributes. # format: SAML attrib:django_user_model - 'Email': 'email', - 'UserName': 'username', - 'Firstname': 'first_name', - 'Lastname': 'last_name', + "Email": "email", + "UserName": "username", + "Firstname": "first_name", + "Lastname": "last_name", }), DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE=(bool, False), # Authentication via an HTTP proxy which puts the username into the HTTP header REMOTE_USER DD_AUTH_REMOTEUSER_ENABLED=(bool, False), # Names of headers which will be used for processing user data. # WARNING: Possible spoofing of headers. Read Warning in https://docs.djangoproject.com/en/3.2/howto/auth-remote-user/#configuration - DD_AUTH_REMOTEUSER_USERNAME_HEADER=(str, 'REMOTE_USER'), - DD_AUTH_REMOTEUSER_EMAIL_HEADER=(str, ''), - DD_AUTH_REMOTEUSER_FIRSTNAME_HEADER=(str, ''), - DD_AUTH_REMOTEUSER_LASTNAME_HEADER=(str, ''), - DD_AUTH_REMOTEUSER_GROUPS_HEADER=(str, ''), + DD_AUTH_REMOTEUSER_USERNAME_HEADER=(str, "REMOTE_USER"), + DD_AUTH_REMOTEUSER_EMAIL_HEADER=(str, ""), + DD_AUTH_REMOTEUSER_FIRSTNAME_HEADER=(str, ""), + DD_AUTH_REMOTEUSER_LASTNAME_HEADER=(str, ""), + DD_AUTH_REMOTEUSER_GROUPS_HEADER=(str, ""), DD_AUTH_REMOTEUSER_GROUPS_CLEANUP=(bool, True), # Comma-separated list of IP ranges with trusted proxies - DD_AUTH_REMOTEUSER_TRUSTED_PROXY=(list, ['127.0.0.1/32']), + DD_AUTH_REMOTEUSER_TRUSTED_PROXY=(list, ["127.0.0.1/32"]), # REMOTE_USER will be processed only on login page.
Check https://docs.djangoproject.com/en/3.2/howto/auth-remote-user/#using-remote-user-on-login-pages-only DD_AUTH_REMOTEUSER_LOGIN_ONLY=(bool, False), # `RemoteUser` is usually used behind an AuthN proxy and users should not know about this mechanism from Swagger because it is not usable by users. # It should be hidden by default. DD_AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER=(bool, False), # if an organization hosts its own documentation on how to use DefectDojo - DD_DOCUMENTATION_URL=(str, 'https://documentation.defectdojo.com'), + DD_DOCUMENTATION_URL=(str, "https://documentation.defectdojo.com"), # merging findings doesn't always work well with dedupe and reimport etc. # disable it if you see any issues (and report them on github) DD_DISABLE_FINDING_MERGE=(bool, False), @@ -218,9 +218,9 @@ DD_MAX_AUTOCOMPLETE_WORDS=(int, 20000), DD_JIRA_SSL_VERIFY=(bool, True), # You can set extra Jira issue types via a simple env var that supports a csv format, like "Work Item,Vulnerability" - DD_JIRA_EXTRA_ISSUE_TYPES=(str, ''), + DD_JIRA_EXTRA_ISSUE_TYPES=(str, ""), # if you want to keep logging to the console but in json format, change this here to 'json_console' - DD_LOGGING_HANDLER=(str, 'console'), + DD_LOGGING_HANDLER=(str, "console"), # If true, drf-spectacular will load CSS & JS from default CDN, otherwise from static resources DD_DEFAULT_SWAGGER_UI=(bool, False), DD_ALERT_REFRESH=(bool, True), @@ -232,7 +232,7 @@ # regular expression to exclude one or more parsers # could be useful to limit the parsers allowed # AWS Scout2 Scan Parser is deprecated (see https://github.com/DefectDojo/django-DefectDojo/pull/5268) - DD_PARSER_EXCLUDE=(str, ''), + DD_PARSER_EXCLUDE=(str, ""), # when enabled in system settings, every minute a job runs to delete excess duplicates # we limit the amount of duplicates that can be deleted in a single run of that job # to prevent overlapping runs of that job from occurring @@ -246,8 +246,8 @@ # Allow grouping of findings in the same test, for example to group findings per dependency # DD_FEATURE_FINDING_GROUPS feature is moved to system_settings, will be removed from settings file DD_FEATURE_FINDING_GROUPS=(bool, True), - DD_JIRA_TEMPLATE_ROOT=(str, 'dojo/templates/issue-trackers'), - DD_TEMPLATE_DIR_PREFIX=(str, 'dojo/templates/'), + DD_JIRA_TEMPLATE_ROOT=(str, "dojo/templates/issue-trackers"), + DD_TEMPLATE_DIR_PREFIX=(str, "dojo/templates/"), # Initial behaviour in Defect Dojo was to delete all duplicates when an original was deleted # New behaviour is to leave the duplicates in place, but set the oldest of duplicates as new original # Set to True to revert to the old behaviour where all duplicates are deleted @@ -255,7 +255,7 @@ # Enable Rate Limiting for the login page DD_RATE_LIMITER_ENABLED=(bool, False), # Examples include 5/m 100/h and more https://django-ratelimit.readthedocs.io/en/stable/rates.html#simple-rates - DD_RATE_LIMITER_RATE=(str, '5/m'), + DD_RATE_LIMITER_RATE=(str, "5/m"), # Block the requests after rate limit is exceeded DD_RATE_LIMITER_BLOCK=(bool, False), # Forces the user to change password on next login.
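All of the DD_* tuples above follow django-environ's scheme convention: each key maps to a (cast, default) pair, and the default is used whenever the variable is absent from the process environment. A minimal, self-contained sketch of that pattern, assuming django-environ is installed; the MYAPP_* names below are hypothetical and not DefectDojo settings:

import os

import environ

# Scheme: NAME=(cast, default), mirroring the tuples in the settings file above.
env = environ.Env(
    MYAPP_DEBUG=(bool, False),                 # "true"/"on"/"1" etc. cast to True
    MYAPP_RATE_LIMITER_RATE=(str, "5/m"),      # plain string with a default
    MYAPP_ADMINS=(list, ["admin@localhost"]),  # comma-separated var -> list
)

os.environ["MYAPP_DEBUG"] = "true"             # simulate a deployment override
assert env("MYAPP_DEBUG") is True              # override read and cast to bool
assert env("MYAPP_RATE_LIMITER_RATE") == "5/m"  # unset, so the default is used

This is also why the quote-style migration in this diff is behavior-neutral: only the delimiters of the string literals change, never the values handed to environ.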
@@ -275,8 +275,8 @@ # for very large objects DD_DELETE_PREVIEW=(bool, True), # List of acceptable file types that can be uploaded to a given object via arbitrary file upload - DD_FILE_UPLOAD_TYPES=(list, ['.txt', '.pdf', '.json', '.xml', '.csv', '.yml', '.png', '.jpeg', - '.sarif', '.xslx', '.doc', '.html', '.js', '.nessus', '.zip']), + DD_FILE_UPLOAD_TYPES=(list, [".txt", ".pdf", ".json", ".xml", ".csv", ".yml", ".png", ".jpeg", + ".sarif", ".xslx", ".doc", ".html", ".js", ".nessus", ".zip"]), # Max file size for scan added via API in MB DD_SCAN_FILE_MAX_SIZE=(int, 100), # When disabled, existing user tokens will not be removed but it will not be @@ -285,9 +285,9 @@ # You can set extra Jira headers by supplying a dictionary in header: value format (pass as env var like "header_name=value,another_header=another_value") DD_ADDITIONAL_HEADERS=(dict, {}), # Set fields used by the hashcode generator for deduplication, via an env variable that contains a JSON string - DD_HASHCODE_FIELDS_PER_SCANNER=(str, ''), + DD_HASHCODE_FIELDS_PER_SCANNER=(str, ""), # Set deduplication algorithms per parser, via an env variable that contains a JSON string - DD_DEDUPLICATION_ALGORITHM_PER_PARSER=(str, ''), + DD_DEDUPLICATION_ALGORITHM_PER_PARSER=(str, ""), # Dictates whether cloud banner is created or not DD_CREATE_CLOUD_BANNER=(bool, True), # With this setting turned on, Dojo maintains an audit log of changes made to entities (Findings, Tests, Engagements, Products, ...) @@ -307,121 +307,121 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, params): result_list = [] result_list.append(scheme) - result_list.append(':') + result_list.append(":") if double_slashes: - result_list.append('//') + result_list.append("//") result_list.append(user) if len(password) > 0: - result_list.append(':') + result_list.append(":") result_list.append(password) if len(user) > 0 or len(password) > 0: - result_list.append('@') + result_list.append("@") result_list.append(host) if port >= 0: - result_list.append(':') + result_list.append(":") result_list.append(str(port)) - if len(path) > 0 and path[0] != '/': - result_list.append('/') + if len(path) > 0 and path[0] != "/": + result_list.append("/") result_list.append(path) - if len(params) > 0 and params[0] != '?': - result_list.append('?') + if len(params) > 0 and params[0] != "?": + result_list.append("?") result_list.append(params) - return ''.join(result_list) + return "".join(result_list) # Read .env file as default or from the command line, DD_ENV_PATH -if os.path.isfile(root('dojo/settings/.env.prod')) or 'DD_ENV_PATH' in os.environ: - env.read_env(root('dojo/settings/' + env.str('DD_ENV_PATH', '.env.prod'))) +if os.path.isfile(root("dojo/settings/.env.prod")) or "DD_ENV_PATH" in os.environ: + env.read_env(root("dojo/settings/" + env.str("DD_ENV_PATH", ".env.prod"))) # ------------------------------------------------------------------------------ # GENERAL # ------------------------------------------------------------------------------ # False if not in os.environ -DEBUG = env('DD_DEBUG') -TEMPLATE_DEBUG = env('DD_TEMPLATE_DEBUG') +DEBUG = env("DD_DEBUG") +TEMPLATE_DEBUG = env("DD_TEMPLATE_DEBUG") # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/2.0/ref/settings/#allowed-hosts -SITE_URL = env('DD_SITE_URL') -ALLOWED_HOSTS = tuple(env.list('DD_ALLOWED_HOSTS', default=['localhost', '127.0.0.1'])) +SITE_URL = env("DD_SITE_URL") +ALLOWED_HOSTS = tuple(env.list("DD_ALLOWED_HOSTS",
default=["localhost", "127.0.0.1"])) # Raises django's ImproperlyConfigured exception if SECRET_KEY not in os.environ -SECRET_KEY = env('DD_SECRET_KEY') +SECRET_KEY = env("DD_SECRET_KEY") # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. -TIME_ZONE = env('DD_TIME_ZONE') +TIME_ZONE = env("DD_TIME_ZONE") # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html -LANGUAGE_CODE = env('DD_LANGUAGE_CODE') +LANGUAGE_CODE = env("DD_LANGUAGE_CODE") -SITE_ID = env('DD_SITE_ID') +SITE_ID = env("DD_SITE_ID") # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. -USE_I18N = env('DD_USE_I18N') +USE_I18N = env("DD_USE_I18N") # If you set this to False, Django will not use timezone-aware datetimes. -USE_TZ = env('DD_USE_TZ') +USE_TZ = env("DD_USE_TZ") -TEST_RUNNER = env('DD_TEST_RUNNER') +TEST_RUNNER = env("DD_TEST_RUNNER") -ALERT_REFRESH = env('DD_ALERT_REFRESH') +ALERT_REFRESH = env("DD_ALERT_REFRESH") DISABLE_ALERT_COUNTER = env("DD_DISABLE_ALERT_COUNTER") MAX_ALERTS_PER_USER = env("DD_MAX_ALERTS_PER_USER") -TAG_PREFETCHING = env('DD_TAG_PREFETCHING') +TAG_PREFETCHING = env("DD_TAG_PREFETCHING") # ------------------------------------------------------------------------------ # DATABASE # ------------------------------------------------------------------------------ # Parse database connection url strings like psql://user:pass@127.0.0.1:8458/db -if os.getenv('DD_DATABASE_URL') is not None: +if os.getenv("DD_DATABASE_URL") is not None: DATABASES = { - 'default': env.db('DD_DATABASE_URL'), + "default": env.db("DD_DATABASE_URL"), } else: DATABASES = { - 'default': { - 'ENGINE': env('DD_DATABASE_ENGINE'), - 'NAME': env('DD_DATABASE_NAME'), - 'TEST': { - 'NAME': env('DD_TEST_DATABASE_NAME'), + "default": { + "ENGINE": env("DD_DATABASE_ENGINE"), + "NAME": env("DD_DATABASE_NAME"), + "TEST": { + "NAME": env("DD_TEST_DATABASE_NAME"), }, - 'USER': env('DD_DATABASE_USER'), - 'PASSWORD': env('DD_DATABASE_PASSWORD'), - 'HOST': env('DD_DATABASE_HOST'), - 'PORT': env('DD_DATABASE_PORT'), + "USER": env("DD_DATABASE_USER"), + "PASSWORD": env("DD_DATABASE_PASSWORD"), + "HOST": env("DD_DATABASE_HOST"), + "PORT": env("DD_DATABASE_PORT"), }, } # Track migrations through source control rather than making migrations locally -if env('DD_TRACK_MIGRATIONS'): - MIGRATION_MODULES = {'dojo': 'dojo.db_migrations'} +if env("DD_TRACK_MIGRATIONS"): + MIGRATION_MODULES = {"dojo": "dojo.db_migrations"} # Default for automatically created id fields, # see https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys -DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' +DEFAULT_AUTO_FIELD = "django.db.models.AutoField" # ------------------------------------------------------------------------------ # MEDIA # ------------------------------------------------------------------------------ -DOJO_ROOT = env('DD_ROOT') +DOJO_ROOT = env("DD_ROOT") # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" -MEDIA_ROOT = env('DD_MEDIA_ROOT') +MEDIA_ROOT = env("DD_MEDIA_ROOT") # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. 
# Examples: "http://example.com/media/", "http://media.example.com/" -MEDIA_URL = env('DD_MEDIA_URL') +MEDIA_URL = env("DD_MEDIA_URL") # ------------------------------------------------------------------------------ # STATIC @@ -431,32 +431,32 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" -STATIC_ROOT = env('DD_STATIC_ROOT') +STATIC_ROOT = env("DD_STATIC_ROOT") # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" -STATIC_URL = env('DD_STATIC_URL') +STATIC_URL = env("DD_STATIC_URL") # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. - os.path.join(os.path.dirname(DOJO_ROOT), 'components', 'node_modules'), + os.path.join(os.path.dirname(DOJO_ROOT), "components", "node_modules"), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( - 'django.contrib.staticfiles.finders.FileSystemFinder', - 'django.contrib.staticfiles.finders.AppDirectoriesFinder', + "django.contrib.staticfiles.finders.FileSystemFinder", + "django.contrib.staticfiles.finders.AppDirectoriesFinder", ) FILE_UPLOAD_HANDLERS = ( "django.core.files.uploadhandler.TemporaryFileUploadHandler", ) -DATA_UPLOAD_MAX_MEMORY_SIZE = env('DD_DATA_UPLOAD_MAX_MEMORY_SIZE') +DATA_UPLOAD_MAX_MEMORY_SIZE = env("DD_DATA_UPLOAD_MAX_MEMORY_SIZE") # ------------------------------------------------------------------------------ # URLS @@ -467,33 +467,33 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # 'axes.backends.AxesModelBackend', # ] -ROOT_URLCONF = 'dojo.urls' +ROOT_URLCONF = "dojo.urls" # Python dotted path to the WSGI application used by Django's runserver. 
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application -WSGI_APPLICATION = 'dojo.wsgi.application' +WSGI_APPLICATION = "dojo.wsgi.application" -URL_PREFIX = env('DD_URL_PREFIX') +URL_PREFIX = env("DD_URL_PREFIX") # ------------------------------------------------------------------------------ # AUTHENTICATION # ------------------------------------------------------------------------------ -LOGIN_REDIRECT_URL = env('DD_LOGIN_REDIRECT_URL') -LOGIN_URL = env('DD_LOGIN_URL') +LOGIN_REDIRECT_URL = env("DD_LOGIN_REDIRECT_URL") +LOGIN_URL = env("DD_LOGIN_URL") # These are the individual modules supported by social-auth AUTHENTICATION_BACKENDS = ( - 'social_core.backends.auth0.Auth0OAuth2', - 'social_core.backends.google.GoogleOAuth2', - 'dojo.okta.OktaOAuth2', - 'social_core.backends.azuread_tenant.AzureADTenantOAuth2', - 'social_core.backends.gitlab.GitLabOAuth2', - 'social_core.backends.keycloak.KeycloakOAuth2', - 'social_core.backends.github_enterprise.GithubEnterpriseOAuth2', - 'dojo.remote_user.RemoteUserBackend', - 'django.contrib.auth.backends.RemoteUserBackend', - 'django.contrib.auth.backends.ModelBackend', + "social_core.backends.auth0.Auth0OAuth2", + "social_core.backends.google.GoogleOAuth2", + "social_core.backends.okta.OktaOAuth2", + "social_core.backends.azuread_tenant.AzureADTenantOAuth2", + "social_core.backends.gitlab.GitLabOAuth2", + "social_core.backends.keycloak.KeycloakOAuth2", + "social_core.backends.github_enterprise.GithubEnterpriseOAuth2", + "dojo.remote_user.RemoteUserBackend", + "django.contrib.auth.backends.RemoteUserBackend", + "django.contrib.auth.backends.ModelBackend", ) # Make Argon2 the default password hasher by listing it first @@ -501,166 +501,166 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # PASSWORD_HASHERS list here as a variable which we could modify, # so we have to list all the hashers present in Django :-( PASSWORD_HASHERS = [ - 'django.contrib.auth.hashers.Argon2PasswordHasher', - 'django.contrib.auth.hashers.PBKDF2PasswordHasher', - 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', - 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', - 'django.contrib.auth.hashers.BCryptPasswordHasher', - 'django.contrib.auth.hashers.MD5PasswordHasher', + "django.contrib.auth.hashers.Argon2PasswordHasher", + "django.contrib.auth.hashers.PBKDF2PasswordHasher", + "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", + "django.contrib.auth.hashers.BCryptSHA256PasswordHasher", + "django.contrib.auth.hashers.BCryptPasswordHasher", + "django.contrib.auth.hashers.MD5PasswordHasher", ] SOCIAL_AUTH_PIPELINE = ( - 'social_core.pipeline.social_auth.social_details', - 'dojo.pipeline.social_uid', - 'social_core.pipeline.social_auth.auth_allowed', - 'social_core.pipeline.social_auth.social_user', - 'social_core.pipeline.user.get_username', - 'social_core.pipeline.social_auth.associate_by_email', - 'dojo.pipeline.create_user', - 'dojo.pipeline.modify_permissions', - 'social_core.pipeline.social_auth.associate_user', - 'social_core.pipeline.social_auth.load_extra_data', - 'social_core.pipeline.user.user_details', - 'dojo.pipeline.update_azure_groups', - 'dojo.pipeline.update_product_access', + "social_core.pipeline.social_auth.social_details", + "dojo.pipeline.social_uid", + "social_core.pipeline.social_auth.auth_allowed", + "social_core.pipeline.social_auth.social_user", + "social_core.pipeline.user.get_username", + "social_core.pipeline.social_auth.associate_by_email", + "dojo.pipeline.create_user", +
"dojo.pipeline.modify_permissions", + "social_core.pipeline.social_auth.associate_user", + "social_core.pipeline.social_auth.load_extra_data", + "social_core.pipeline.user.user_details", + "dojo.pipeline.update_azure_groups", + "dojo.pipeline.update_product_access", ) CLASSIC_AUTH_ENABLED = True -FORGOT_PASSWORD = env('DD_FORGOT_PASSWORD') -FORGOT_USERNAME = env('DD_FORGOT_USERNAME') -PASSWORD_RESET_TIMEOUT = env('DD_PASSWORD_RESET_TIMEOUT') +FORGOT_PASSWORD = env("DD_FORGOT_PASSWORD") +FORGOT_USERNAME = env("DD_FORGOT_USERNAME") +PASSWORD_RESET_TIMEOUT = env("DD_PASSWORD_RESET_TIMEOUT") # Showing login form (form is not needed for external auth: OKTA, Google Auth, etc.) -SHOW_LOGIN_FORM = env('DD_SOCIAL_AUTH_SHOW_LOGIN_FORM') -SOCIAL_LOGIN_AUTO_REDIRECT = env('DD_SOCIAL_LOGIN_AUTO_REDIRECT') -SOCIAL_AUTH_CREATE_USER = env('DD_SOCIAL_AUTH_CREATE_USER') +SHOW_LOGIN_FORM = env("DD_SOCIAL_AUTH_SHOW_LOGIN_FORM") +SOCIAL_LOGIN_AUTO_REDIRECT = env("DD_SOCIAL_LOGIN_AUTO_REDIRECT") +SOCIAL_AUTH_CREATE_USER = env("DD_SOCIAL_AUTH_CREATE_USER") -SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy' -SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage' -SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'last_name', 'email'] +SOCIAL_AUTH_STRATEGY = "social_django.strategy.DjangoStrategy" +SOCIAL_AUTH_STORAGE = "social_django.models.DjangoStorage" +SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ["username", "first_name", "last_name", "email"] SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True -GOOGLE_OAUTH_ENABLED = env('DD_SOCIAL_AUTH_GOOGLE_OAUTH2_ENABLED') -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = env('DD_SOCIAL_AUTH_GOOGLE_OAUTH2_KEY') -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = env('DD_SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET') -SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = env('DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS') -SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS = env('DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS') -SOCIAL_AUTH_LOGIN_ERROR_URL = '/login' -SOCIAL_AUTH_BACKEND_ERROR_URL = '/login' - -OKTA_OAUTH_ENABLED = env('DD_SOCIAL_AUTH_OKTA_OAUTH2_ENABLED') -SOCIAL_AUTH_OKTA_OAUTH2_KEY = env('DD_SOCIAL_AUTH_OKTA_OAUTH2_KEY') -SOCIAL_AUTH_OKTA_OAUTH2_SECRET = env('DD_SOCIAL_AUTH_OKTA_OAUTH2_SECRET') -SOCIAL_AUTH_OKTA_OAUTH2_API_URL = env('DD_SOCIAL_AUTH_OKTA_OAUTH2_API_URL') - -AZUREAD_TENANT_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_ENABLED') -SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY') -SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET') -SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID') -SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE') -AZUREAD_TENANT_OAUTH2_GET_GROUPS = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GET_GROUPS') -AZUREAD_TENANT_OAUTH2_GROUPS_FILTER = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GROUPS_FILTER') -AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS = env('DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS') - -GITLAB_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_GITLAB_OAUTH2_ENABLED') -GITLAB_PROJECT_AUTO_IMPORT = env('DD_SOCIAL_AUTH_GITLAB_PROJECT_AUTO_IMPORT') -GITLAB_PROJECT_IMPORT_TAGS = env('DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_TAGS') -GITLAB_PROJECT_IMPORT_URL = env('DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_URL') -GITLAB_PROJECT_MIN_ACCESS_LEVEL = env('DD_SOCIAL_AUTH_GITLAB_PROJECT_MIN_ACCESS_LEVEL') -SOCIAL_AUTH_GITLAB_KEY = env('DD_SOCIAL_AUTH_GITLAB_KEY') -SOCIAL_AUTH_GITLAB_SECRET 
= env('DD_SOCIAL_AUTH_GITLAB_SECRET') -SOCIAL_AUTH_GITLAB_API_URL = env('DD_SOCIAL_AUTH_GITLAB_API_URL') -SOCIAL_AUTH_GITLAB_SCOPE = env('DD_SOCIAL_AUTH_GITLAB_SCOPE') +GOOGLE_OAUTH_ENABLED = env("DD_SOCIAL_AUTH_GOOGLE_OAUTH2_ENABLED") +SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = env("DD_SOCIAL_AUTH_GOOGLE_OAUTH2_KEY") +SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = env("DD_SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET") +SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = env("DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS") +SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS = env("DD_SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_EMAILS") +SOCIAL_AUTH_LOGIN_ERROR_URL = "/login" +SOCIAL_AUTH_BACKEND_ERROR_URL = "/login" + +OKTA_OAUTH_ENABLED = env("DD_SOCIAL_AUTH_OKTA_OAUTH2_ENABLED") +SOCIAL_AUTH_OKTA_OAUTH2_KEY = env("DD_SOCIAL_AUTH_OKTA_OAUTH2_KEY") +SOCIAL_AUTH_OKTA_OAUTH2_SECRET = env("DD_SOCIAL_AUTH_OKTA_OAUTH2_SECRET") +SOCIAL_AUTH_OKTA_OAUTH2_API_URL = env("DD_SOCIAL_AUTH_OKTA_OAUTH2_API_URL") + +AZUREAD_TENANT_OAUTH2_ENABLED = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_ENABLED") +SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY") +SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET") +SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID") +SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE") +AZUREAD_TENANT_OAUTH2_GET_GROUPS = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GET_GROUPS") +AZUREAD_TENANT_OAUTH2_GROUPS_FILTER = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GROUPS_FILTER") +AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS = env("DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS") + +GITLAB_OAUTH2_ENABLED = env("DD_SOCIAL_AUTH_GITLAB_OAUTH2_ENABLED") +GITLAB_PROJECT_AUTO_IMPORT = env("DD_SOCIAL_AUTH_GITLAB_PROJECT_AUTO_IMPORT") +GITLAB_PROJECT_IMPORT_TAGS = env("DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_TAGS") +GITLAB_PROJECT_IMPORT_URL = env("DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_URL") +GITLAB_PROJECT_MIN_ACCESS_LEVEL = env("DD_SOCIAL_AUTH_GITLAB_PROJECT_MIN_ACCESS_LEVEL") +SOCIAL_AUTH_GITLAB_KEY = env("DD_SOCIAL_AUTH_GITLAB_KEY") +SOCIAL_AUTH_GITLAB_SECRET = env("DD_SOCIAL_AUTH_GITLAB_SECRET") +SOCIAL_AUTH_GITLAB_API_URL = env("DD_SOCIAL_AUTH_GITLAB_API_URL") +SOCIAL_AUTH_GITLAB_SCOPE = env("DD_SOCIAL_AUTH_GITLAB_SCOPE") # Add required scope if auto import is enabled if GITLAB_PROJECT_AUTO_IMPORT: - SOCIAL_AUTH_GITLAB_SCOPE += ['read_repository'] - -AUTH0_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED') -SOCIAL_AUTH_AUTH0_KEY = env('DD_SOCIAL_AUTH_AUTH0_KEY') -SOCIAL_AUTH_AUTH0_SECRET = env('DD_SOCIAL_AUTH_AUTH0_SECRET') -SOCIAL_AUTH_AUTH0_DOMAIN = env('DD_SOCIAL_AUTH_AUTH0_DOMAIN') -SOCIAL_AUTH_AUTH0_SCOPE = env('DD_SOCIAL_AUTH_AUTH0_SCOPE') -SOCIAL_AUTH_TRAILING_SLASH = env('DD_SOCIAL_AUTH_TRAILING_SLASH') - -KEYCLOAK_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED') -SOCIAL_AUTH_KEYCLOAK_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_KEY') -SOCIAL_AUTH_KEYCLOAK_SECRET = env('DD_SOCIAL_AUTH_KEYCLOAK_SECRET') -SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY') -SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL') -SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL') -SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT = env('DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT') - -GITHUB_ENTERPRISE_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_OAUTH2_ENABLED') 
-SOCIAL_AUTH_GITHUB_ENTERPRISE_URL = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_URL') -SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL') -SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY') -SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET') - -DOCUMENTATION_URL = env('DD_DOCUMENTATION_URL') + SOCIAL_AUTH_GITLAB_SCOPE += ["read_repository"] + +AUTH0_OAUTH2_ENABLED = env("DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED") +SOCIAL_AUTH_AUTH0_KEY = env("DD_SOCIAL_AUTH_AUTH0_KEY") +SOCIAL_AUTH_AUTH0_SECRET = env("DD_SOCIAL_AUTH_AUTH0_SECRET") +SOCIAL_AUTH_AUTH0_DOMAIN = env("DD_SOCIAL_AUTH_AUTH0_DOMAIN") +SOCIAL_AUTH_AUTH0_SCOPE = env("DD_SOCIAL_AUTH_AUTH0_SCOPE") +SOCIAL_AUTH_TRAILING_SLASH = env("DD_SOCIAL_AUTH_TRAILING_SLASH") + +KEYCLOAK_OAUTH2_ENABLED = env("DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED") +SOCIAL_AUTH_KEYCLOAK_KEY = env("DD_SOCIAL_AUTH_KEYCLOAK_KEY") +SOCIAL_AUTH_KEYCLOAK_SECRET = env("DD_SOCIAL_AUTH_KEYCLOAK_SECRET") +SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = env("DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY") +SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = env("DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL") +SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = env("DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL") +SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT = env("DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT") + +GITHUB_ENTERPRISE_OAUTH2_ENABLED = env("DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_OAUTH2_ENABLED") +SOCIAL_AUTH_GITHUB_ENTERPRISE_URL = env("DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_URL") +SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL = env("DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL") +SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY = env("DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_KEY") +SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET = env("DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_SECRET") + +DOCUMENTATION_URL = env("DD_DOCUMENTATION_URL") # Setting SLA_NOTIFY_ACTIVE and SLA_NOTIFY_ACTIVE_VERIFIED to False will disable the feature # If you import thousands of Active findings through your pipeline every day, # and make the choice of enabling SLA notifications for non-verified findings, # be mindful of performance. # 'SLA_NOTIFY_ACTIVE', 'SLA_NOTIFY_ACTIVE_VERIFIED_ONLY' and 'SLA_NOTIFY_WITH_JIRA_ONLY' have been moved to system settings and will be removed here -SLA_NOTIFY_ACTIVE = env('DD_SLA_NOTIFY_ACTIVE') # this will include 'verified' findings as well as non-verified. -SLA_NOTIFY_ACTIVE_VERIFIED_ONLY = env('DD_SLA_NOTIFY_ACTIVE_VERIFIED_ONLY') -SLA_NOTIFY_WITH_JIRA_ONLY = env('DD_SLA_NOTIFY_WITH_JIRA_ONLY') # Based on the 2 above, but only with a JIRA link -SLA_NOTIFY_PRE_BREACH = env('DD_SLA_NOTIFY_PRE_BREACH') # in days, notify between dayofbreach minus this number until dayofbreach -SLA_NOTIFY_POST_BREACH = env('DD_SLA_NOTIFY_POST_BREACH') # in days, skip notifications for findings that go past dayofbreach plus this number -SLA_BUSINESS_DAYS = env('DD_SLA_BUSINESS_DAYS') # Use business days to calculate SLAs and age of a finding instead of calendar days +SLA_NOTIFY_ACTIVE = env("DD_SLA_NOTIFY_ACTIVE") # this will include 'verified' findings as well as non-verified.
+SLA_NOTIFY_ACTIVE_VERIFIED_ONLY = env("DD_SLA_NOTIFY_ACTIVE_VERIFIED_ONLY") +SLA_NOTIFY_WITH_JIRA_ONLY = env("DD_SLA_NOTIFY_WITH_JIRA_ONLY") # Based on the 2 above, but only with a JIRA link +SLA_NOTIFY_PRE_BREACH = env("DD_SLA_NOTIFY_PRE_BREACH") # in days, notify between dayofbreach minus this number until dayofbreach +SLA_NOTIFY_POST_BREACH = env("DD_SLA_NOTIFY_POST_BREACH") # in days, skip notifications for findings that go past dayofbreach plus this number +SLA_BUSINESS_DAYS = env("DD_SLA_BUSINESS_DAYS") # Use business days to calculate SLAs and age of a finding instead of calendar days -SEARCH_MAX_RESULTS = env('DD_SEARCH_MAX_RESULTS') -SIMILAR_FINDINGS_MAX_RESULTS = env('DD_SIMILAR_FINDINGS_MAX_RESULTS') -MAX_REQRESP_FROM_API = env('DD_MAX_REQRESP_FROM_API') -MAX_AUTOCOMPLETE_WORDS = env('DD_MAX_AUTOCOMPLETE_WORDS') +SEARCH_MAX_RESULTS = env("DD_SEARCH_MAX_RESULTS") +SIMILAR_FINDINGS_MAX_RESULTS = env("DD_SIMILAR_FINDINGS_MAX_RESULTS") +MAX_REQRESP_FROM_API = env("DD_MAX_REQRESP_FROM_API") +MAX_AUTOCOMPLETE_WORDS = env("DD_MAX_AUTOCOMPLETE_WORDS") LOGIN_EXEMPT_URLS = ( - rf'^{URL_PREFIX}static/', - rf'^{URL_PREFIX}webhook/([\w-]+)$', - rf'^{URL_PREFIX}webhook/', - rf'^{URL_PREFIX}jira/webhook/([\w-]+)$', - rf'^{URL_PREFIX}jira/webhook/', - rf'^{URL_PREFIX}reports/cover$', - rf'^{URL_PREFIX}finding/image/(?P<token>[^/]+)$', - rf'^{URL_PREFIX}api/v2/', - r'complete/', - r'empty_questionnaire/([\d]+)/answer', - rf'^{URL_PREFIX}password_reset/', - rf'^{URL_PREFIX}forgot_username', - rf'^{URL_PREFIX}reset/', + rf"^{URL_PREFIX}static/", + rf"^{URL_PREFIX}webhook/([\w-]+)$", + rf"^{URL_PREFIX}webhook/", + rf"^{URL_PREFIX}jira/webhook/([\w-]+)$", + rf"^{URL_PREFIX}jira/webhook/", + rf"^{URL_PREFIX}reports/cover$", + rf"^{URL_PREFIX}finding/image/(?P<token>[^/]+)$", + rf"^{URL_PREFIX}api/v2/", + r"complete/", + r"empty_questionnaire/([\d]+)/answer", + rf"^{URL_PREFIX}password_reset/", + rf"^{URL_PREFIX}forgot_username", + rf"^{URL_PREFIX}reset/", ) AUTH_PASSWORD_VALIDATORS = [ { - 'NAME': 'dojo.user.validators.DojoCommonPasswordValidator', + "NAME": "dojo.user.validators.DojoCommonPasswordValidator", }, { - 'NAME': 'dojo.user.validators.MinLengthValidator', + "NAME": "dojo.user.validators.MinLengthValidator", }, { - 'NAME': 'dojo.user.validators.MaxLengthValidator', + "NAME": "dojo.user.validators.MaxLengthValidator", }, { - 'NAME': 'dojo.user.validators.NumberValidator', + "NAME": "dojo.user.validators.NumberValidator", }, { - 'NAME': 'dojo.user.validators.UppercaseValidator', + "NAME": "dojo.user.validators.UppercaseValidator", }, { - 'NAME': 'dojo.user.validators.LowercaseValidator', + "NAME": "dojo.user.validators.LowercaseValidator", }, { - 'NAME': 'dojo.user.validators.SymbolValidator', + "NAME": "dojo.user.validators.SymbolValidator", }, ] # https://django-ratelimit.readthedocs.io/en/stable/index.html -RATE_LIMITER_ENABLED = env('DD_RATE_LIMITER_ENABLED') -RATE_LIMITER_RATE = env('DD_RATE_LIMITER_RATE') # Examples include 5/m 100/h and more https://django-ratelimit.readthedocs.io/en/stable/rates.html#simple-rates -RATE_LIMITER_BLOCK = env('DD_RATE_LIMITER_BLOCK') # Block the requests after rate limit is exceeded -RATE_LIMITER_ACCOUNT_LOCKOUT = env('DD_RATE_LIMITER_ACCOUNT_LOCKOUT') # Forces the user to change password on next login.
+RATE_LIMITER_ENABLED = env("DD_RATE_LIMITER_ENABLED") +RATE_LIMITER_RATE = env("DD_RATE_LIMITER_RATE") # Examples include 5/m 100/h and more https://django-ratelimit.readthedocs.io/en/stable/rates.html#simple-rates +RATE_LIMITER_BLOCK = env("DD_RATE_LIMITER_BLOCK") # Block the requests after rate limit is exceeded +RATE_LIMITER_ACCOUNT_LOCKOUT = env("DD_RATE_LIMITER_ACCOUNT_LOCKOUT") # Forces the user to change password on next login. # ------------------------------------------------------------------------------ # SECURITY DIRECTIVES # ------------------------------------------------------------------------------ @@ -668,118 +668,118 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # If True, the SecurityMiddleware redirects all non-HTTPS requests to HTTPS # (except for those URLs matching a regular expression listed in SECURE_REDIRECT_EXEMPT). -SECURE_SSL_REDIRECT = env('DD_SECURE_SSL_REDIRECT') +SECURE_SSL_REDIRECT = env("DD_SECURE_SSL_REDIRECT") # If True, the SecurityMiddleware sets the X-Content-Type-Options: nosniff; -SECURE_CONTENT_TYPE_NOSNIFF = env('DD_SECURE_CONTENT_TYPE_NOSNIFF') +SECURE_CONTENT_TYPE_NOSNIFF = env("DD_SECURE_CONTENT_TYPE_NOSNIFF") # Whether to use HTTPOnly flag on the session cookie. # If this is set to True, client-side JavaScript will not be able to access the session cookie. -SESSION_COOKIE_HTTPONLY = env('DD_SESSION_COOKIE_HTTPONLY') +SESSION_COOKIE_HTTPONLY = env("DD_SESSION_COOKIE_HTTPONLY") # Whether to use HttpOnly flag on the CSRF cookie. If this is set to True, # client-side JavaScript will not be able to access the CSRF cookie. -CSRF_COOKIE_HTTPONLY = env('DD_CSRF_COOKIE_HTTPONLY') +CSRF_COOKIE_HTTPONLY = env("DD_CSRF_COOKIE_HTTPONLY") # Whether to use a secure cookie for the session cookie. If this is set to True, # the cookie will be marked as secure, which means browsers may ensure that the # cookie is only sent with an HTTPS connection. -SESSION_COOKIE_SECURE = env('DD_SESSION_COOKIE_SECURE') -SESSION_COOKIE_SAMESITE = env('DD_SESSION_COOKIE_SAMESITE') +SESSION_COOKIE_SECURE = env("DD_SESSION_COOKIE_SECURE") +SESSION_COOKIE_SAMESITE = env("DD_SESSION_COOKIE_SAMESITE") # Override default Django behavior for incorrect URLs -APPEND_SLASH = env('DD_APPEND_SLASH') +APPEND_SLASH = env("DD_APPEND_SLASH") # Whether to use a secure cookie for the CSRF cookie. -CSRF_COOKIE_SECURE = env('DD_CSRF_COOKIE_SECURE') -CSRF_COOKIE_SAMESITE = env('DD_CSRF_COOKIE_SAMESITE') +CSRF_COOKIE_SECURE = env("DD_CSRF_COOKIE_SECURE") +CSRF_COOKIE_SAMESITE = env("DD_CSRF_COOKIE_SAMESITE") # A list of trusted origins for unsafe requests (e.g. POST). # Use a comma-separated list of domains; they will be split into a list automatically # Only specify this setting if the contents are not an empty list (the default) -if env('DD_CSRF_TRUSTED_ORIGINS') != ['[]']: - CSRF_TRUSTED_ORIGINS = env('DD_CSRF_TRUSTED_ORIGINS') +if env("DD_CSRF_TRUSTED_ORIGINS") != ["[]"]: + CSRF_TRUSTED_ORIGINS = env("DD_CSRF_TRUSTED_ORIGINS") # Unless set to None, the SecurityMiddleware sets the Cross-Origin Opener Policy # header on all responses that do not already have it to the value provided.
-SECURE_CROSS_ORIGIN_OPENER_POLICY = env('DD_SECURE_CROSS_ORIGIN_OPENER_POLICY') if env('DD_SECURE_CROSS_ORIGIN_OPENER_POLICY') != 'None' else None +SECURE_CROSS_ORIGIN_OPENER_POLICY = env("DD_SECURE_CROSS_ORIGIN_OPENER_POLICY") if env("DD_SECURE_CROSS_ORIGIN_OPENER_POLICY") != "None" else None -if env('DD_SECURE_PROXY_SSL_HEADER'): - SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') +if env("DD_SECURE_PROXY_SSL_HEADER"): + SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") -if env('DD_SECURE_HSTS_INCLUDE_SUBDOMAINS'): - SECURE_HSTS_SECONDS = env('DD_SECURE_HSTS_SECONDS') - SECURE_HSTS_INCLUDE_SUBDOMAINS = env('DD_SECURE_HSTS_INCLUDE_SUBDOMAINS') +if env("DD_SECURE_HSTS_INCLUDE_SUBDOMAINS"): + SECURE_HSTS_SECONDS = env("DD_SECURE_HSTS_SECONDS") + SECURE_HSTS_INCLUDE_SUBDOMAINS = env("DD_SECURE_HSTS_INCLUDE_SUBDOMAINS") -SESSION_EXPIRE_AT_BROWSER_CLOSE = env('DD_SESSION_EXPIRE_AT_BROWSER_CLOSE') -SESSION_COOKIE_AGE = env('DD_SESSION_COOKIE_AGE') +SESSION_EXPIRE_AT_BROWSER_CLOSE = env("DD_SESSION_EXPIRE_AT_BROWSER_CLOSE") +SESSION_COOKIE_AGE = env("DD_SESSION_COOKIE_AGE") # ------------------------------------------------------------------------------ # DEFECTDOJO SPECIFIC # ------------------------------------------------------------------------------ # Credential Key -CREDENTIAL_AES_256_KEY = env('DD_CREDENTIAL_AES_256_KEY') -DB_KEY = env('DD_CREDENTIAL_AES_256_KEY') +CREDENTIAL_AES_256_KEY = env("DD_CREDENTIAL_AES_256_KEY") +DB_KEY = env("DD_CREDENTIAL_AES_256_KEY") # Used in a few places to prefix page headings and in email salutations -TEAM_NAME = env('DD_TEAM_NAME') +TEAM_NAME = env("DD_TEAM_NAME") # Used to configure a custom version in the footer of the base.html template. -FOOTER_VERSION = env('DD_FOOTER_VERSION') +FOOTER_VERSION = env("DD_FOOTER_VERSION") # Django-tagging settings -FORCE_LOWERCASE_TAGS = env('DD_FORCE_LOWERCASE_TAGS') -MAX_TAG_LENGTH = env('DD_MAX_TAG_LENGTH') +FORCE_LOWERCASE_TAGS = env("DD_FORCE_LOWERCASE_TAGS") +MAX_TAG_LENGTH = env("DD_MAX_TAG_LENGTH") # ------------------------------------------------------------------------------ # ADMIN # ------------------------------------------------------------------------------ -ADMINS = getaddresses([env('DD_ADMINS')]) +ADMINS = getaddresses([env("DD_ADMINS")]) # https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS # Django admin enabled -DJANGO_ADMIN_ENABLED = env('DD_DJANGO_ADMIN_ENABLED') +DJANGO_ADMIN_ENABLED = env("DD_DJANGO_ADMIN_ENABLED") # ------------------------------------------------------------------------------ # API V2 # ------------------------------------------------------------------------------ -API_TOKENS_ENABLED = env('DD_API_TOKENS_ENABLED') +API_TOKENS_ENABLED = env("DD_API_TOKENS_ENABLED") REST_FRAMEWORK = { - 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema', - 'DEFAULT_AUTHENTICATION_CLASSES': ( - 'rest_framework.authentication.SessionAuthentication', - 'rest_framework.authentication.BasicAuthentication', + "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema", + "DEFAULT_AUTHENTICATION_CLASSES": ( + "rest_framework.authentication.SessionAuthentication", + "rest_framework.authentication.BasicAuthentication", ), - 'DEFAULT_PERMISSION_CLASSES': ( - 'rest_framework.permissions.DjangoModelPermissions', + "DEFAULT_PERMISSION_CLASSES": ( + "rest_framework.permissions.DjangoModelPermissions", ), - 'DEFAULT_RENDERER_CLASSES': ( - 'rest_framework.renderers.JSONRenderer', + "DEFAULT_RENDERER_CLASSES": ( + 
"rest_framework.renderers.JSONRenderer", ), - 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', - 'PAGE_SIZE': 25, - 'EXCEPTION_HANDLER': 'dojo.api_v2.exception_handler.custom_exception_handler', + "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination", + "PAGE_SIZE": 25, + "EXCEPTION_HANDLER": "dojo.api_v2.exception_handler.custom_exception_handler", } if API_TOKENS_ENABLED: - REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += ('rest_framework.authentication.TokenAuthentication',) + REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] += ("rest_framework.authentication.TokenAuthentication",) SPECTACULAR_SETTINGS = { - 'TITLE': 'Defect Dojo API v2', - 'DESCRIPTION': 'Defect Dojo - Open Source vulnerability Management made easy. Prefetch related parameters/responses not yet in the schema.', - 'VERSION': __version__, - 'SCHEMA_PATH_PREFIX': "/api/v2", + "TITLE": "Defect Dojo API v2", + "DESCRIPTION": "Defect Dojo - Open Source vulnerability Management made easy. Prefetch related parameters/responses not yet in the schema.", + "VERSION": __version__, + "SCHEMA_PATH_PREFIX": "/api/v2", # OTHER SETTINGS # the following set to False could help some client generators # 'ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE': False, - 'PREPROCESSING_HOOKS': ['dojo.urls.drf_spectacular_preprocessing_filter_spec'], - 'POSTPROCESSING_HOOKS': ['dojo.api_v2.prefetch.schema.prefetch_postprocessing_hook'], + "PREPROCESSING_HOOKS": ["dojo.urls.drf_spectacular_preprocessing_filter_spec"], + "POSTPROCESSING_HOOKS": ["dojo.api_v2.prefetch.schema.prefetch_postprocessing_hook"], # show file selection dialogue, see https://github.com/tfranzel/drf-spectacular/issues/455 "COMPONENT_SPLIT_REQUEST": True, "SWAGGER_UI_SETTINGS": { @@ -787,9 +787,9 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param }, } -if not env('DD_DEFAULT_SWAGGER_UI'): - SPECTACULAR_SETTINGS['SWAGGER_UI_DIST'] = 'SIDECAR' - SPECTACULAR_SETTINGS['SWAGGER_UI_FAVICON_HREF'] = 'SIDECAR' +if not env("DD_DEFAULT_SWAGGER_UI"): + SPECTACULAR_SETTINGS["SWAGGER_UI_DIST"] = "SIDECAR" + SPECTACULAR_SETTINGS["SWAGGER_UI_FAVICON_HREF"] = "SIDECAR" # ------------------------------------------------------------------------------ # TEMPLATES @@ -797,21 +797,21 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param TEMPLATES = [ { - 'BACKEND': 'django.template.backends.django.DjangoTemplates', - 'APP_DIRS': True, - 'OPTIONS': { - 'debug': env('DD_DEBUG'), - 'context_processors': [ - 'django.template.context_processors.debug', - 'django.template.context_processors.request', - 'django.contrib.auth.context_processors.auth', - 'django.contrib.messages.context_processors.messages', - 'social_django.context_processors.backends', - 'social_django.context_processors.login_redirect', - 'dojo.context_processors.globalize_vars', - 'dojo.context_processors.bind_system_settings', - 'dojo.context_processors.bind_alert_count', - 'dojo.context_processors.bind_announcement', + "BACKEND": "django.template.backends.django.DjangoTemplates", + "APP_DIRS": True, + "OPTIONS": { + "debug": env("DD_DEBUG"), + "context_processors": [ + "django.template.context_processors.debug", + "django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + "social_django.context_processors.backends", + "social_django.context_processors.login_redirect", + "dojo.context_processors.globalize_vars", + 
"dojo.context_processors.bind_system_settings", + "dojo.context_processors.bind_alert_count", + "dojo.context_processors.bind_announcement", ], }, }, @@ -822,69 +822,69 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # ------------------------------------------------------------------------------ INSTALLED_APPS = ( - 'django.contrib.auth', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.sites', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'polymorphic', # provides admin templates - 'django.contrib.admin', - 'django.contrib.humanize', - 'auditlog', - 'dojo', - 'watson', - 'tagging', # not used, but still needed for migration 0065_django_tagulous.py (v1.10.0) - 'imagekit', - 'multiselectfield', - 'rest_framework', - 'rest_framework.authtoken', - 'dbbackup', - 'django_celery_results', - 'social_django', - 'drf_spectacular', - 'drf_spectacular_sidecar', # required for Django collectstatic discovery - 'tagulous', - 'fontawesomefree', - 'django_filters', + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.sites", + "django.contrib.messages", + "django.contrib.staticfiles", + "polymorphic", # provides admin templates + "django.contrib.admin", + "django.contrib.humanize", + "auditlog", + "dojo", + "watson", + "tagging", # not used, but still needed for migration 0065_django_tagulous.py (v1.10.0) + "imagekit", + "multiselectfield", + "rest_framework", + "rest_framework.authtoken", + "dbbackup", + "django_celery_results", + "social_django", + "drf_spectacular", + "drf_spectacular_sidecar", # required for Django collectstatic discovery + "tagulous", + "fontawesomefree", + "django_filters", ) # ------------------------------------------------------------------------------ # MIDDLEWARE # ------------------------------------------------------------------------------ DJANGO_MIDDLEWARE_CLASSES = [ - 'django.middleware.common.CommonMiddleware', - 'dojo.middleware.APITrailingSlashMiddleware', - 'dojo.middleware.DojoSytemSettingsMiddleware', - 'django.contrib.sessions.middleware.SessionMiddleware', - 'django.middleware.csrf.CsrfViewMiddleware', - 'django.middleware.security.SecurityMiddleware', - 'django.contrib.auth.middleware.AuthenticationMiddleware', - 'django.contrib.messages.middleware.MessageMiddleware', - 'django.middleware.clickjacking.XFrameOptionsMiddleware', - 'dojo.middleware.LoginRequiredMiddleware', - 'dojo.middleware.AdditionalHeaderMiddleware', - 'social_django.middleware.SocialAuthExceptionMiddleware', - 'watson.middleware.SearchContextMiddleware', - 'dojo.middleware.AuditlogMiddleware', - 'crum.CurrentRequestUserMiddleware', - 'dojo.request_cache.middleware.RequestCacheMiddleware', + "django.middleware.common.CommonMiddleware", + "dojo.middleware.APITrailingSlashMiddleware", + "dojo.middleware.DojoSytemSettingsMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "django.middleware.security.SecurityMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "dojo.middleware.LoginRequiredMiddleware", + "dojo.middleware.AdditionalHeaderMiddleware", + "social_django.middleware.SocialAuthExceptionMiddleware", + "watson.middleware.SearchContextMiddleware", + "dojo.middleware.AuditlogMiddleware", + "crum.CurrentRequestUserMiddleware", + 
"dojo.request_cache.middleware.RequestCacheMiddleware", ] MIDDLEWARE = DJANGO_MIDDLEWARE_CLASSES # WhiteNoise allows your web app to serve its own static files, # making it a self-contained unit that can be deployed anywhere without relying on nginx -if env('DD_WHITENOISE'): +if env("DD_WHITENOISE"): WHITE_NOISE = [ # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ - 'whitenoise.middleware.WhiteNoiseMiddleware', + "whitenoise.middleware.WhiteNoiseMiddleware", ] MIDDLEWARE = MIDDLEWARE + WHITE_NOISE EMAIL_CONFIG = env.email_url( - 'DD_EMAIL_URL', default='smtp://user@:password@localhost:25') + "DD_EMAIL_URL", default="smtp://user@:password@localhost:25") vars().update(EMAIL_CONFIG) @@ -905,9 +905,9 @@ def saml2_attrib_map_format(dict): return dout -SAML2_ENABLED = env('DD_SAML2_ENABLED') -SAML2_LOGIN_BUTTON_TEXT = env('DD_SAML2_LOGIN_BUTTON_TEXT') -SAML2_LOGOUT_URL = env('DD_SAML2_LOGOUT_URL') +SAML2_ENABLED = env("DD_SAML2_ENABLED") +SAML2_LOGIN_BUTTON_TEXT = env("DD_SAML2_LOGIN_BUTTON_TEXT") +SAML2_LOGOUT_URL = env("DD_SAML2_LOGOUT_URL") if SAML2_ENABLED: from os import path @@ -915,77 +915,77 @@ def saml2_attrib_map_format(dict): import saml2.saml # SSO_URL = env('DD_SSO_URL') SAML_METADATA = {} - if len(env('DD_SAML2_METADATA_AUTO_CONF_URL')) > 0: - SAML_METADATA['remote'] = [{"url": env('DD_SAML2_METADATA_AUTO_CONF_URL')}] - if len(env('DD_SAML2_METADATA_LOCAL_FILE_PATH')) > 0: - SAML_METADATA['local'] = [env('DD_SAML2_METADATA_LOCAL_FILE_PATH')] - INSTALLED_APPS += ('djangosaml2',) - MIDDLEWARE.append('djangosaml2.middleware.SamlSessionMiddleware') - AUTHENTICATION_BACKENDS += (env('DD_SAML2_AUTHENTICATION_BACKENDS'),) - LOGIN_EXEMPT_URLS += (rf'^{URL_PREFIX}saml2/',) + if len(env("DD_SAML2_METADATA_AUTO_CONF_URL")) > 0: + SAML_METADATA["remote"] = [{"url": env("DD_SAML2_METADATA_AUTO_CONF_URL")}] + if len(env("DD_SAML2_METADATA_LOCAL_FILE_PATH")) > 0: + SAML_METADATA["local"] = [env("DD_SAML2_METADATA_LOCAL_FILE_PATH")] + INSTALLED_APPS += ("djangosaml2",) + MIDDLEWARE.append("djangosaml2.middleware.SamlSessionMiddleware") + AUTHENTICATION_BACKENDS += (env("DD_SAML2_AUTHENTICATION_BACKENDS"),) + LOGIN_EXEMPT_URLS += (rf"^{URL_PREFIX}saml2/",) SAML_LOGOUT_REQUEST_PREFERRED_BINDING = saml2.BINDING_HTTP_POST SAML_IGNORE_LOGOUT_ERRORS = True - SAML_DJANGO_USER_MAIN_ATTRIBUTE = 'username' + SAML_DJANGO_USER_MAIN_ATTRIBUTE = "username" # SAML_DJANGO_USER_MAIN_ATTRIBUTE_LOOKUP = '__iexact' SAML_USE_NAME_ID_AS_USERNAME = True - SAML_CREATE_UNKNOWN_USER = env('DD_SAML2_CREATE_USER') - SAML_ATTRIBUTE_MAPPING = saml2_attrib_map_format(env('DD_SAML2_ATTRIBUTES_MAP')) - SAML_FORCE_AUTH = env('DD_SAML2_FORCE_AUTH') - SAML_ALLOW_UNKNOWN_ATTRIBUTES = env('DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE') + SAML_CREATE_UNKNOWN_USER = env("DD_SAML2_CREATE_USER") + SAML_ATTRIBUTE_MAPPING = saml2_attrib_map_format(env("DD_SAML2_ATTRIBUTES_MAP")) + SAML_FORCE_AUTH = env("DD_SAML2_FORCE_AUTH") + SAML_ALLOW_UNKNOWN_ATTRIBUTES = env("DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE") BASEDIR = path.dirname(path.abspath(__file__)) - if len(env('DD_SAML2_ENTITY_ID')) == 0: - SAML2_ENTITY_ID = f'{SITE_URL}/saml2/metadata/' + if len(env("DD_SAML2_ENTITY_ID")) == 0: + SAML2_ENTITY_ID = f"{SITE_URL}/saml2/metadata/" else: - SAML2_ENTITY_ID = env('DD_SAML2_ENTITY_ID') + SAML2_ENTITY_ID = env("DD_SAML2_ENTITY_ID") SAML_CONFIG = { # full path to the xmlsec1 binary programm - 'xmlsec_binary': '/usr/bin/xmlsec1', + "xmlsec_binary": "/usr/bin/xmlsec1", # your entity id, usually your subdomain plus the url to the 
metadata view - 'entityid': str(SAML2_ENTITY_ID), + "entityid": str(SAML2_ENTITY_ID), # directory with attribute mapping - 'attribute_map_dir': path.join(BASEDIR, 'attribute-maps'), + "attribute_map_dir": path.join(BASEDIR, "attribute-maps"), # do not discard attributes not specified in attribute-maps - 'allow_unknown_attributes': SAML_ALLOW_UNKNOWN_ATTRIBUTES, + "allow_unknown_attributes": SAML_ALLOW_UNKNOWN_ATTRIBUTES, # this block states what services we provide - 'service': { + "service": { # we are just a lonely SP - 'sp': { - 'name': 'Defect_Dojo', - 'name_id_format': saml2.saml.NAMEID_FORMAT_TRANSIENT, - 'want_response_signed': False, - 'want_assertions_signed': True, - 'force_authn': SAML_FORCE_AUTH, - 'allow_unsolicited': True, + "sp": { + "name": "Defect_Dojo", + "name_id_format": saml2.saml.NAMEID_FORMAT_TRANSIENT, + "want_response_signed": False, + "want_assertions_signed": True, + "force_authn": SAML_FORCE_AUTH, + "allow_unsolicited": True, # For Okta add signed logout requests. Enable this: # "logout_requests_signed": True, - 'endpoints': { + "endpoints": { # url and binding to the assertion consumer service view # do not change the binding or service name - 'assertion_consumer_service': [ - (f'{SITE_URL}/saml2/acs/', + "assertion_consumer_service": [ + (f"{SITE_URL}/saml2/acs/", saml2.BINDING_HTTP_POST), ], # url and binding to the single logout service view # do not change the binding or service name - 'single_logout_service': [ + "single_logout_service": [ # Disable next two lines for HTTP_REDIRECT for IDP's that only support HTTP_POST. Ex. Okta: - (f'{SITE_URL}/saml2/ls/', + (f"{SITE_URL}/saml2/ls/", saml2.BINDING_HTTP_REDIRECT), - (f'{SITE_URL}/saml2/ls/post', + (f"{SITE_URL}/saml2/ls/post", saml2.BINDING_HTTP_POST), ], }, # attributes that this project needs to identify a user - 'required_attributes': ['Email', 'UserName'], + "required_attributes": ["Email", "UserName"], # attributes that may be useful to have but not required - 'optional_attributes': ['Firstname', 'Lastname'], + "optional_attributes": ["Firstname", "Lastname"], # in this section the list of IdPs we talk to is defined # This is not mandatory! All the IdPs available in the metadata will be considered. @@ -1009,10 +1009,10 @@ def saml2_attrib_map_format(dict): # where the remote metadata is stored, local, remote or mdq server. # One metadatastore or many ...
- 'metadata': SAML_METADATA, + "metadata": SAML_METADATA, # set to 1 to output debugging information - 'debug': 0, + "debug": 0, # Signing # 'key_file': path.join(BASEDIR, 'private.key'), # private part @@ -1025,120 +1025,120 @@ def saml2_attrib_map_format(dict): # }], # own metadata settings - 'contact_person': [ - {'given_name': 'Lorenzo', - 'sur_name': 'Gil', - 'company': 'Yaco Sistemas', - 'email_address': 'lgs@yaco.es', - 'contact_type': 'technical'}, - {'given_name': 'Angel', - 'sur_name': 'Fernandez', - 'company': 'Yaco Sistemas', - 'email_address': 'angel@yaco.es', - 'contact_type': 'administrative'}, + "contact_person": [ + {"given_name": "Lorenzo", + "sur_name": "Gil", + "company": "Yaco Sistemas", + "email_address": "lgs@yaco.es", + "contact_type": "technical"}, + {"given_name": "Angel", + "sur_name": "Fernandez", + "company": "Yaco Sistemas", + "email_address": "angel@yaco.es", + "contact_type": "administrative"}, ], # you can set multilanguage information here - 'organization': { - 'name': [('Yaco Sistemas', 'es'), ('Yaco Systems', 'en')], - 'display_name': [('Yaco', 'es'), ('Yaco', 'en')], - 'url': [('http://www.yaco.es', 'es'), ('http://www.yaco.com', 'en')], + "organization": { + "name": [("Yaco Sistemas", "es"), ("Yaco Systems", "en")], + "display_name": [("Yaco", "es"), ("Yaco", "en")], + "url": [("http://www.yaco.es", "es"), ("http://www.yaco.com", "en")], }, - 'valid_for': 24, # how long is our metadata valid + "valid_for": 24, # how long is our metadata valid } # ------------------------------------------------------------------------------ # REMOTE_USER # ------------------------------------------------------------------------------ -AUTH_REMOTEUSER_ENABLED = env('DD_AUTH_REMOTEUSER_ENABLED') -AUTH_REMOTEUSER_USERNAME_HEADER = env('DD_AUTH_REMOTEUSER_USERNAME_HEADER') -AUTH_REMOTEUSER_EMAIL_HEADER = env('DD_AUTH_REMOTEUSER_EMAIL_HEADER') -AUTH_REMOTEUSER_FIRSTNAME_HEADER = env('DD_AUTH_REMOTEUSER_FIRSTNAME_HEADER') -AUTH_REMOTEUSER_LASTNAME_HEADER = env('DD_AUTH_REMOTEUSER_LASTNAME_HEADER') -AUTH_REMOTEUSER_GROUPS_HEADER = env('DD_AUTH_REMOTEUSER_GROUPS_HEADER') -AUTH_REMOTEUSER_GROUPS_CLEANUP = env('DD_AUTH_REMOTEUSER_GROUPS_CLEANUP') -AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER = env('DD_AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER') +AUTH_REMOTEUSER_ENABLED = env("DD_AUTH_REMOTEUSER_ENABLED") +AUTH_REMOTEUSER_USERNAME_HEADER = env("DD_AUTH_REMOTEUSER_USERNAME_HEADER") +AUTH_REMOTEUSER_EMAIL_HEADER = env("DD_AUTH_REMOTEUSER_EMAIL_HEADER") +AUTH_REMOTEUSER_FIRSTNAME_HEADER = env("DD_AUTH_REMOTEUSER_FIRSTNAME_HEADER") +AUTH_REMOTEUSER_LASTNAME_HEADER = env("DD_AUTH_REMOTEUSER_LASTNAME_HEADER") +AUTH_REMOTEUSER_GROUPS_HEADER = env("DD_AUTH_REMOTEUSER_GROUPS_HEADER") +AUTH_REMOTEUSER_GROUPS_CLEANUP = env("DD_AUTH_REMOTEUSER_GROUPS_CLEANUP") +AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER = env("DD_AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER") AUTH_REMOTEUSER_TRUSTED_PROXY = IPSet() -for ip_range in env('DD_AUTH_REMOTEUSER_TRUSTED_PROXY'): +for ip_range in env("DD_AUTH_REMOTEUSER_TRUSTED_PROXY"): AUTH_REMOTEUSER_TRUSTED_PROXY.add(IPNetwork(ip_range)) -if env('DD_AUTH_REMOTEUSER_LOGIN_ONLY'): - RemoteUserMiddleware = 'dojo.remote_user.PersistentRemoteUserMiddleware' +if env("DD_AUTH_REMOTEUSER_LOGIN_ONLY"): + RemoteUserMiddleware = "dojo.remote_user.PersistentRemoteUserMiddleware" else: - RemoteUserMiddleware = 'dojo.remote_user.RemoteUserMiddleware' + RemoteUserMiddleware = "dojo.remote_user.RemoteUserMiddleware" # we need to add middleware just behind AuthenticationMiddleware as described in
https://docs.djangoproject.com/en/3.2/howto/auth-remote-user/#configuration for i in range(len(MIDDLEWARE)): - if MIDDLEWARE[i] == 'django.contrib.auth.middleware.AuthenticationMiddleware': + if MIDDLEWARE[i] == "django.contrib.auth.middleware.AuthenticationMiddleware": MIDDLEWARE.insert(i + 1, RemoteUserMiddleware) break if AUTH_REMOTEUSER_ENABLED: - REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = \ - ('dojo.remote_user.RemoteUserAuthentication',) + \ - REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] + REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] = \ + ("dojo.remote_user.RemoteUserAuthentication",) + \ + REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] # ------------------------------------------------------------------------------ # CELERY # ------------------------------------------------------------------------------ # Celery settings -CELERY_BROKER_URL = env('DD_CELERY_BROKER_URL') \ - if len(env('DD_CELERY_BROKER_URL')) > 0 else generate_url( - env('DD_CELERY_BROKER_SCHEME'), +CELERY_BROKER_URL = env("DD_CELERY_BROKER_URL") \ + if len(env("DD_CELERY_BROKER_URL")) > 0 else generate_url( + env("DD_CELERY_BROKER_SCHEME"), True, - env('DD_CELERY_BROKER_USER'), - env('DD_CELERY_BROKER_PASSWORD'), - env('DD_CELERY_BROKER_HOST'), - env('DD_CELERY_BROKER_PORT'), - env('DD_CELERY_BROKER_PATH'), - env('DD_CELERY_BROKER_PARAMS'), + env("DD_CELERY_BROKER_USER"), + env("DD_CELERY_BROKER_PASSWORD"), + env("DD_CELERY_BROKER_HOST"), + env("DD_CELERY_BROKER_PORT"), + env("DD_CELERY_BROKER_PATH"), + env("DD_CELERY_BROKER_PARAMS"), ) -CELERY_TASK_IGNORE_RESULT = env('DD_CELERY_TASK_IGNORE_RESULT') -CELERY_RESULT_BACKEND = env('DD_CELERY_RESULT_BACKEND') +CELERY_TASK_IGNORE_RESULT = env("DD_CELERY_TASK_IGNORE_RESULT") +CELERY_RESULT_BACKEND = env("DD_CELERY_RESULT_BACKEND") CELERY_TIMEZONE = TIME_ZONE -CELERY_RESULT_EXPIRES = env('DD_CELERY_RESULT_EXPIRES') -CELERY_BEAT_SCHEDULE_FILENAME = env('DD_CELERY_BEAT_SCHEDULE_FILENAME') -CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] -CELERY_TASK_SERIALIZER = env('DD_CELERY_TASK_SERIALIZER') -CELERY_PASS_MODEL_BY_ID = env('DD_CELERY_PASS_MODEL_BY_ID') +CELERY_RESULT_EXPIRES = env("DD_CELERY_RESULT_EXPIRES") +CELERY_BEAT_SCHEDULE_FILENAME = env("DD_CELERY_BEAT_SCHEDULE_FILENAME") +CELERY_ACCEPT_CONTENT = ["pickle", "json", "msgpack", "yaml"] +CELERY_TASK_SERIALIZER = env("DD_CELERY_TASK_SERIALIZER") +CELERY_PASS_MODEL_BY_ID = env("DD_CELERY_PASS_MODEL_BY_ID") -if len(env('DD_CELERY_BROKER_TRANSPORT_OPTIONS')) > 0: - CELERY_BROKER_TRANSPORT_OPTIONS = json.loads(env('DD_CELERY_BROKER_TRANSPORT_OPTIONS')) +if len(env("DD_CELERY_BROKER_TRANSPORT_OPTIONS")) > 0: + CELERY_BROKER_TRANSPORT_OPTIONS = json.loads(env("DD_CELERY_BROKER_TRANSPORT_OPTIONS")) -CELERY_IMPORTS = ('dojo.tools.tool_issue_updater', ) +CELERY_IMPORTS = ("dojo.tools.tool_issue_updater", ) # Celery beat scheduled tasks CELERY_BEAT_SCHEDULE = { - 'add-alerts': { - 'task': 'dojo.tasks.add_alerts', - 'schedule': timedelta(hours=1), - 'args': [timedelta(hours=1)], + "add-alerts": { + "task": "dojo.tasks.add_alerts", + "schedule": timedelta(hours=1), + "args": [timedelta(hours=1)], }, - 'cleanup-alerts': { - 'task': 'dojo.tasks.cleanup_alerts', - 'schedule': timedelta(hours=8), + "cleanup-alerts": { + "task": "dojo.tasks.cleanup_alerts", + "schedule": timedelta(hours=8), }, - 'dedupe-delete': { - 'task': 'dojo.tasks.async_dupe_delete', - 'schedule': timedelta(minutes=1), - 'args': [timedelta(minutes=1)], + "dedupe-delete": { + "task": "dojo.tasks.async_dupe_delete", + "schedule": 
timedelta(minutes=1), + "args": [timedelta(minutes=1)], }, - 'flush_auditlog': { - 'task': 'dojo.tasks.flush_auditlog', - 'schedule': timedelta(hours=8), + "flush_auditlog": { + "task": "dojo.tasks.flush_auditlog", + "schedule": timedelta(hours=8), }, - 'update-findings-from-source-issues': { - 'task': 'dojo.tools.tool_issue_updater.update_findings_from_source_issues', - 'schedule': timedelta(hours=3), + "update-findings-from-source-issues": { + "task": "dojo.tools.tool_issue_updater.update_findings_from_source_issues", + "schedule": timedelta(hours=3), }, - 'compute-sla-age-and-notify': { - 'task': 'dojo.tasks.async_sla_compute_and_notify_task', - 'schedule': crontab(hour=7, minute=30), + "compute-sla-age-and-notify": { + "task": "dojo.tasks.async_sla_compute_and_notify_task", + "schedule": crontab(hour=7, minute=30), }, - 'risk_acceptance_expiration_handler': { - 'task': 'dojo.risk_acceptance.helper.expiration_handler', - 'schedule': crontab(minute=0, hour='*/3'), # every 3 hours + "risk_acceptance_expiration_handler": { + "task": "dojo.risk_acceptance.helper.expiration_handler", + "schedule": crontab(minute=0, hour="*/3"), # every 3 hours }, # 'jira_status_reconciliation': { # 'task': 'dojo.tasks.jira_status_reconciliation_task', @@ -1160,16 +1160,16 @@ def saml2_attrib_map_format(dict): # reference: https://github.com/korfuri/django-prometheus/issues/34 PROMETHEUS_EXPORT_MIGRATIONS = False # django metrics for monitoring -if env('DD_DJANGO_METRICS_ENABLED'): - DJANGO_METRICS_ENABLED = env('DD_DJANGO_METRICS_ENABLED') - INSTALLED_APPS = INSTALLED_APPS + ('django_prometheus',) - MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware'] + \ +if env("DD_DJANGO_METRICS_ENABLED"): + DJANGO_METRICS_ENABLED = env("DD_DJANGO_METRICS_ENABLED") + INSTALLED_APPS = INSTALLED_APPS + ("django_prometheus",) + MIDDLEWARE = ["django_prometheus.middleware.PrometheusBeforeMiddleware"] + \ MIDDLEWARE + \ - ['django_prometheus.middleware.PrometheusAfterMiddleware'] - database_engine = DATABASES.get('default').get('ENGINE') - DATABASES['default']['ENGINE'] = database_engine.replace('django.', 'django_prometheus.', 1) + ["django_prometheus.middleware.PrometheusAfterMiddleware"] + database_engine = DATABASES.get("default").get("ENGINE") + DATABASES["default"]["ENGINE"] = database_engine.replace("django.", "django_prometheus.", 1) # CELERY_RESULT_BACKEND.replace('django.core','django_prometheus.', 1) - LOGIN_EXEMPT_URLS += (rf'^{URL_PREFIX}django_metrics/',) + LOGIN_EXEMPT_URLS += (rf"^{URL_PREFIX}django_metrics/",) # ------------------------------------ @@ -1184,98 +1184,98 @@ def saml2_attrib_map_format(dict): HASHCODE_FIELDS_PER_SCANNER = { # In checkmarx, same CWE may appear with different severities: example "sql injection" (high) and "blind sql injection" (low). 
# Including the severity in the hash_code keeps those findings not duplicate - 'Anchore Engine Scan': ['title', 'severity', 'component_name', 'component_version', 'file_path'], - 'AnchoreCTL Vuln Report': ['title', 'severity', 'component_name', 'component_version', 'file_path'], - 'AnchoreCTL Policies Report': ['title', 'severity', 'component_name', 'file_path'], - 'Anchore Enterprise Policy Check': ['title', 'severity', 'component_name', 'file_path'], - 'Anchore Grype': ['title', 'severity', 'component_name', 'component_version'], - 'Aqua Scan': ['severity', 'vulnerability_ids', 'component_name', 'component_version'], - 'Bandit Scan': ['file_path', 'line', 'vuln_id_from_tool'], - 'CargoAudit Scan': ['vulnerability_ids', 'severity', 'component_name', 'component_version', 'vuln_id_from_tool'], - 'Checkmarx Scan': ['cwe', 'severity', 'file_path'], - 'Checkmarx OSA': ['vulnerability_ids', 'component_name'], - 'Cloudsploit Scan': ['title', 'description'], - 'Coverity Scan JSON Report': ['title', 'cwe', 'line', 'file_path', 'description'], - 'SonarQube Scan': ['cwe', 'severity', 'file_path'], - 'SonarQube API Import': ['title', 'file_path', 'line'], - 'Sonatype Application Scan': ['title', 'cwe', 'file_path', 'component_name', 'component_version', 'vulnerability_ids'], - 'Dependency Check Scan': ['title', 'cwe', 'file_path'], - 'Dockle Scan': ['title', 'description', 'vuln_id_from_tool'], - 'Dependency Track Finding Packaging Format (FPF) Export': ['component_name', 'component_version', 'vulnerability_ids'], - 'Mobsfscan Scan': ['title', 'severity', 'cwe'], - 'Tenable Scan': ['title', 'severity', 'vulnerability_ids', 'cwe', 'description'], - 'Nexpose Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'], + "Anchore Engine Scan": ["title", "severity", "component_name", "component_version", "file_path"], + "AnchoreCTL Vuln Report": ["title", "severity", "component_name", "component_version", "file_path"], + "AnchoreCTL Policies Report": ["title", "severity", "component_name", "file_path"], + "Anchore Enterprise Policy Check": ["title", "severity", "component_name", "file_path"], + "Anchore Grype": ["title", "severity", "component_name", "component_version"], + "Aqua Scan": ["severity", "vulnerability_ids", "component_name", "component_version"], + "Bandit Scan": ["file_path", "line", "vuln_id_from_tool"], + "CargoAudit Scan": ["vulnerability_ids", "severity", "component_name", "component_version", "vuln_id_from_tool"], + "Checkmarx Scan": ["cwe", "severity", "file_path"], + "Checkmarx OSA": ["vulnerability_ids", "component_name"], + "Cloudsploit Scan": ["title", "description"], + "Coverity Scan JSON Report": ["title", "cwe", "line", "file_path", "description"], + "SonarQube Scan": ["cwe", "severity", "file_path"], + "SonarQube API Import": ["title", "file_path", "line"], + "Sonatype Application Scan": ["title", "cwe", "file_path", "component_name", "component_version", "vulnerability_ids"], + "Dependency Check Scan": ["title", "cwe", "file_path"], + "Dockle Scan": ["title", "description", "vuln_id_from_tool"], + "Dependency Track Finding Packaging Format (FPF) Export": ["component_name", "component_version", "vulnerability_ids"], + "Mobsfscan Scan": ["title", "severity", "cwe"], + "Tenable Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"], + "Nexpose Scan": ["title", "severity", "vulnerability_ids", "cwe"], # possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity - 'NPM Audit Scan': ['title', 'severity', 
'file_path', 'vulnerability_ids', 'cwe'], - 'NPM Audit v7+ Scan': ['title', 'severity', 'cwe', 'vuln_id_from_tool'], + "NPM Audit Scan": ["title", "severity", "file_path", "vulnerability_ids", "cwe"], + "NPM Audit v7+ Scan": ["title", "severity", "cwe", "vuln_id_from_tool"], # possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity - 'Yarn Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'], + "Yarn Audit Scan": ["title", "severity", "file_path", "vulnerability_ids", "cwe"], # possible improvement: in the scanner put the library name into file_path, then dedup on vulnerability_ids + file_path + severity - 'Mend Scan': ['title', 'severity', 'description'], - 'ZAP Scan': ['title', 'cwe', 'severity'], - 'Qualys Scan': ['title', 'severity', 'endpoints'], + "Mend Scan": ["title", "severity", "description"], + "ZAP Scan": ["title", "cwe", "severity"], + "Qualys Scan": ["title", "severity", "endpoints"], # 'Qualys Webapp Scan': ['title', 'unique_id_from_tool'], - 'PHP Symfony Security Check': ['title', 'vulnerability_ids'], - 'Clair Scan': ['title', 'vulnerability_ids', 'description', 'severity'], + "PHP Symfony Security Check": ["title", "vulnerability_ids"], + "Clair Scan": ["title", "vulnerability_ids", "description", "severity"], # for backwards compatibility because someone decided to rename this scanner: - 'Symfony Security Check': ['title', 'vulnerability_ids'], - 'DSOP Scan': ['vulnerability_ids'], - 'Acunetix Scan': ['title', 'description'], - 'Terrascan Scan': ['vuln_id_from_tool', 'title', 'severity', 'file_path', 'line', 'component_name'], - 'Trivy Operator Scan': ['title', 'severity', 'vulnerability_ids', 'description'], - 'Trivy Scan': ['title', 'severity', 'vulnerability_ids', 'cwe', 'description'], - 'TFSec Scan': ['severity', 'vuln_id_from_tool', 'file_path', 'line'], - 'Snyk Scan': ['vuln_id_from_tool', 'file_path', 'component_name', 'component_version'], - 'GitLab Dependency Scanning Report': ['title', 'vulnerability_ids', 'file_path', 'component_name', 'component_version'], - 'SpotBugs Scan': ['cwe', 'severity', 'file_path', 'line'], - 'JFrog Xray Unified Scan': ['vulnerability_ids', 'file_path', 'component_name', 'component_version'], - 'JFrog Xray On Demand Binary Scan': ["title", "component_name", "component_version"], - 'Scout Suite Scan': ['file_path', 'vuln_id_from_tool'], # for now we use file_path as there is no attribute for "service" - 'Meterian Scan': ['cwe', 'component_name', 'component_version', 'description', 'severity'], - 'Github Vulnerability Scan': ['title', 'severity', 'component_name', 'vulnerability_ids', 'file_path'], - 'Solar Appscreener Scan': ['title', 'file_path', 'line', 'severity'], - 'pip-audit Scan': ['vuln_id_from_tool', 'component_name', 'component_version'], - 'Rubocop Scan': ['vuln_id_from_tool', 'file_path', 'line'], - 'JFrog Xray Scan': ['title', 'description', 'component_name', 'component_version'], - 'CycloneDX Scan': ['vuln_id_from_tool', 'component_name', 'component_version'], - 'SSLyze Scan (JSON)': ['title', 'description'], - 'Harbor Vulnerability Scan': ['title', 'mitigation'], - 'Rusty Hog Scan': ['file_path', 'payload'], - 'StackHawk HawkScan': ['vuln_id_from_tool', 'component_name', 'component_version'], - 'Hydra Scan': ['title', 'description'], - 'DrHeader JSON Importer': ['title', 'description'], - 'Whispers': ['vuln_id_from_tool', 'file_path', 'line'], - 'Blackduck Hub Scan': ['title', 'vulnerability_ids', 'component_name', 'component_version'], - 
'Veracode SourceClear Scan': ['title', 'vulnerability_ids', 'component_name', 'component_version', 'severity'], - 'Vulners Scan': ['vuln_id_from_tool', 'component_name'], - 'Twistlock Image Scan': ['title', 'severity', 'component_name', 'component_version'], - 'NeuVector (REST)': ['title', 'severity', 'component_name', 'component_version'], - 'NeuVector (compliance)': ['title', 'vuln_id_from_tool', 'description'], - 'Wpscan': ['title', 'description', 'severity'], - 'Popeye Scan': ['title', 'description'], - 'Nuclei Scan': ['title', 'cwe', 'severity'], - 'KubeHunter Scan': ['title', 'description'], - 'kube-bench Scan': ['title', 'vuln_id_from_tool', 'description'], - 'Threagile risks report': ['title', 'cwe', "severity"], - 'Trufflehog Scan': ['title', 'description', 'line'], - 'Humble Json Importer': ['title'], - 'MSDefender Parser': ['title', 'description'], - 'HCLAppScan XML': ['title', 'description'], - 'KICS Scan': ['file_path', 'line', 'severity', 'description', 'title'], - 'MobSF Scan': ['title', 'description', 'severity'], - 'OSV Scan': ['title', 'description', 'severity'], - 'Snyk Code Scan': ['vuln_id_from_tool', 'file_path'], - 'Deepfence Threatmapper Report': ['title', 'description', 'severity'], - 'Bearer CLI': ['title', 'severity'], - 'Nancy Scan': ['title', 'vuln_id_from_tool'], - 'Wiz Scan': ['title', 'description', 'severity'], - 'Kiuwan SCA Scan': ['description', 'severity', 'component_name', 'component_version', 'cwe'], - 'Kubescape JSON Importer': ['title', 'component_name'], + "Symfony Security Check": ["title", "vulnerability_ids"], + "DSOP Scan": ["vulnerability_ids"], + "Acunetix Scan": ["title", "description"], + "Terrascan Scan": ["vuln_id_from_tool", "title", "severity", "file_path", "line", "component_name"], + "Trivy Operator Scan": ["title", "severity", "vulnerability_ids", "description"], + "Trivy Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"], + "TFSec Scan": ["severity", "vuln_id_from_tool", "file_path", "line"], + "Snyk Scan": ["vuln_id_from_tool", "file_path", "component_name", "component_version"], + "GitLab Dependency Scanning Report": ["title", "vulnerability_ids", "file_path", "component_name", "component_version"], + "SpotBugs Scan": ["cwe", "severity", "file_path", "line"], + "JFrog Xray Unified Scan": ["vulnerability_ids", "file_path", "component_name", "component_version"], + "JFrog Xray On Demand Binary Scan": ["title", "component_name", "component_version"], + "Scout Suite Scan": ["file_path", "vuln_id_from_tool"], # for now we use file_path as there is no attribute for "service" + "Meterian Scan": ["cwe", "component_name", "component_version", "description", "severity"], + "Github Vulnerability Scan": ["title", "severity", "component_name", "vulnerability_ids", "file_path"], + "Solar Appscreener Scan": ["title", "file_path", "line", "severity"], + "pip-audit Scan": ["vuln_id_from_tool", "component_name", "component_version"], + "Rubocop Scan": ["vuln_id_from_tool", "file_path", "line"], + "JFrog Xray Scan": ["title", "description", "component_name", "component_version"], + "CycloneDX Scan": ["vuln_id_from_tool", "component_name", "component_version"], + "SSLyze Scan (JSON)": ["title", "description"], + "Harbor Vulnerability Scan": ["title", "mitigation"], + "Rusty Hog Scan": ["file_path", "payload"], + "StackHawk HawkScan": ["vuln_id_from_tool", "component_name", "component_version"], + "Hydra Scan": ["title", "description"], + "DrHeader JSON Importer": ["title", "description"], + "Whispers": ["vuln_id_from_tool", 
"file_path", "line"], + "Blackduck Hub Scan": ["title", "vulnerability_ids", "component_name", "component_version"], + "Veracode SourceClear Scan": ["title", "vulnerability_ids", "component_name", "component_version", "severity"], + "Vulners Scan": ["vuln_id_from_tool", "component_name"], + "Twistlock Image Scan": ["title", "severity", "component_name", "component_version"], + "NeuVector (REST)": ["title", "severity", "component_name", "component_version"], + "NeuVector (compliance)": ["title", "vuln_id_from_tool", "description"], + "Wpscan": ["title", "description", "severity"], + "Popeye Scan": ["title", "description"], + "Nuclei Scan": ["title", "cwe", "severity"], + "KubeHunter Scan": ["title", "description"], + "kube-bench Scan": ["title", "vuln_id_from_tool", "description"], + "Threagile risks report": ["title", "cwe", "severity"], + "Trufflehog Scan": ["title", "description", "line"], + "Humble Json Importer": ["title"], + "MSDefender Parser": ["title", "description"], + "HCLAppScan XML": ["title", "description"], + "KICS Scan": ["file_path", "line", "severity", "description", "title"], + "MobSF Scan": ["title", "description", "severity"], + "OSV Scan": ["title", "description", "severity"], + "Snyk Code Scan": ["vuln_id_from_tool", "file_path"], + "Deepfence Threatmapper Report": ["title", "description", "severity"], + "Bearer CLI": ["title", "severity"], + "Nancy Scan": ["title", "vuln_id_from_tool"], + "Wiz Scan": ["title", "description", "severity"], + "Kubescape JSON Importer": ["title", "component_name"], + "Kiuwan SCA Scan": ["description", "severity", "component_name", "component_version", "cwe"], } # Override the hardcoded settings here via the env var -if len(env('DD_HASHCODE_FIELDS_PER_SCANNER')) > 0: - env_hashcode_fields_per_scanner = json.loads(env('DD_HASHCODE_FIELDS_PER_SCANNER')) +if len(env("DD_HASHCODE_FIELDS_PER_SCANNER")) > 0: + env_hashcode_fields_per_scanner = json.loads(env("DD_HASHCODE_FIELDS_PER_SCANNER")) for key, value in env_hashcode_fields_per_scanner.items(): if key in HASHCODE_FIELDS_PER_SCANNER: logger.info(f"Replacing {key} with value {value} (previously set to {HASHCODE_FIELDS_PER_SCANNER[key]}) from env var DD_HASHCODE_FIELDS_PER_SCANNER") @@ -1289,73 +1289,73 @@ def saml2_attrib_map_format(dict): # If False and cwe = 0, then the hash_code computation will fallback to legacy algorithm for the concerned finding # Default is True (if scanner is not configured here but is configured in HASHCODE_FIELDS_PER_SCANNER, it allows null cwe) HASHCODE_ALLOWS_NULL_CWE = { - 'Anchore Engine Scan': True, - 'AnchoreCTL Vuln Report': True, - 'AnchoreCTL Policies Report': True, - 'Anchore Enterprise Policy Check': True, - 'Anchore Grype': True, - 'AWS Prowler Scan': True, - 'AWS Prowler V3': True, - 'Checkmarx Scan': False, - 'Checkmarx OSA': True, - 'Cloudsploit Scan': True, - 'SonarQube Scan': False, - 'Dependency Check Scan': True, - 'Mobsfscan Scan': False, - 'Tenable Scan': True, - 'Nexpose Scan': True, - 'NPM Audit Scan': True, - 'NPM Audit v7+ Scan': True, - 'Yarn Audit Scan': True, - 'Mend Scan': True, - 'ZAP Scan': False, - 'Qualys Scan': True, - 'DSOP Scan': True, - 'Acunetix Scan': True, - 'Trivy Operator Scan': True, - 'Trivy Scan': True, - 'SpotBugs Scan': False, - 'Scout Suite Scan': True, - 'AWS Security Hub Scan': True, - 'Meterian Scan': True, - 'SARIF': True, - 'Hadolint Dockerfile check': True, - 'Semgrep JSON Report': True, - 'Generic Findings Import': True, - 'Edgescan Scan': True, - 'Bugcrowd API Import': True, - 'Veracode SourceClear Scan': 
True,
- 'Vulners Scan': True,
- 'Twistlock Image Scan': True,
- 'Wpscan': True,
- 'Rusty Hog Scan': True,
- 'Codechecker Report native': True,
- 'Wazuh': True,
- 'Nuclei Scan': True,
- 'Threagile risks report': True,
+ "Anchore Engine Scan": True,
+ "AnchoreCTL Vuln Report": True,
+ "AnchoreCTL Policies Report": True,
+ "Anchore Enterprise Policy Check": True,
+ "Anchore Grype": True,
+ "AWS Prowler Scan": True,
+ "AWS Prowler V3": True,
+ "Checkmarx Scan": False,
+ "Checkmarx OSA": True,
+ "Cloudsploit Scan": True,
+ "SonarQube Scan": False,
+ "Dependency Check Scan": True,
+ "Mobsfscan Scan": False,
+ "Tenable Scan": True,
+ "Nexpose Scan": True,
+ "NPM Audit Scan": True,
+ "NPM Audit v7+ Scan": True,
+ "Yarn Audit Scan": True,
+ "Mend Scan": True,
+ "ZAP Scan": False,
+ "Qualys Scan": True,
+ "DSOP Scan": True,
+ "Acunetix Scan": True,
+ "Trivy Operator Scan": True,
+ "Trivy Scan": True,
+ "SpotBugs Scan": False,
+ "Scout Suite Scan": True,
+ "AWS Security Hub Scan": True,
+ "Meterian Scan": True,
+ "SARIF": True,
+ "Hadolint Dockerfile check": True,
+ "Semgrep JSON Report": True,
+ "Generic Findings Import": True,
+ "Edgescan Scan": True,
+ "Bugcrowd API Import": True,
+ "Veracode SourceClear Scan": True,
+ "Vulners Scan": True,
+ "Twistlock Image Scan": True,
+ "Wpscan": True,
+ "Rusty Hog Scan": True,
+ "Codechecker Report native": True,
+ "Wazuh": True,
+ "Nuclei Scan": True,
+ "Threagile risks report": True,
}
# List of fields that are known to be usable in hash_code computation
# 'endpoints' is a pseudo field that uses the endpoints (for dynamic scanners)
# 'unique_id_from_tool' is often not needed here as it can be used directly in the dedupe algorithm, but it's also possible to use it for hashing
-HASHCODE_ALLOWED_FIELDS = ['title', 'cwe', 'vulnerability_ids', 'line', 'file_path', 'payload', 'component_name', 'component_version', 'description', 'endpoints', 'unique_id_from_tool', 'severity', 'vuln_id_from_tool', 'mitigation']
+HASHCODE_ALLOWED_FIELDS = ["title", "cwe", "vulnerability_ids", "line", "file_path", "payload", "component_name", "component_version", "description", "endpoints", "unique_id_from_tool", "severity", "vuln_id_from_tool", "mitigation"]
# Adding fields to the hash_code calculation regardless of the previous settings
-HASH_CODE_FIELDS_ALWAYS = ['service']
+HASH_CODE_FIELDS_ALWAYS = ["service"]
# ------------------------------------
# Deduplication configuration
# ------------------------------------
# List of algorithms
# legacy one with multiple conditions (default mode)
-DEDUPE_ALGO_LEGACY = 'legacy'
+DEDUPE_ALGO_LEGACY = "legacy"
# based on dojo_finding.unique_id_from_tool only (for checkmarx detailed, or sonarQube detailed for example)
-DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL = 'unique_id_from_tool'
+DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL = "unique_id_from_tool"
# based on dojo_finding.hash_code only
-DEDUPE_ALGO_HASH_CODE = 'hash_code'
+DEDUPE_ALGO_HASH_CODE = "hash_code"
# unique_id_from_tool or hash_code
# Makes it possible to deduplicate on a technical id (same parser) and also on some functional fields (cross-parsers deduplication)
-DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE = 'unique_id_from_tool_or_hash_code'
+DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE = "unique_id_from_tool_or_hash_code"
# Allows deduplication with endpoints if endpoints are not included in the hashcode.
# Possible values are: scheme, host, port, path, query, fragment, userinfo, and user. For a detailed description see https://hyperlink.readthedocs.io/en/latest/api.html#attributes.
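The endpoint pair-matching that these comments describe is compact enough to show in code. Below is a minimal sketch, not DefectDojo's actual implementation: endpoints are modeled as plain dicts (hypothetical values), and two endpoints match when every field listed in DEDUPE_ALGO_ENDPOINT_FIELDS is equal; all other fields are ignored.

# Hedged sketch of endpoint-based duplicate matching; endpoints are
# represented as plain dicts here, not DefectDojo's Endpoint objects.
DEDUPE_ALGO_ENDPOINT_FIELDS = ["host", "path"]

def endpoints_match(ep_a: dict, ep_b: dict) -> bool:
    # a pair matches only if every configured field is equal
    return all(ep_a.get(field) == ep_b.get(field) for field in DEDUPE_ALGO_ENDPOINT_FIELDS)

a = {"scheme": "https", "host": "example.com", "path": "login"}
b = {"scheme": "http", "host": "example.com", "path": "login"}
print(endpoints_match(a, b))  # True: scheme is not compared, host and path agree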
@@ -1366,136 +1366,136 @@ def saml2_attrib_map_format(dict): # - Host and path (['host', 'path']) means: A and B stay untouched because the path is different. # # If a finding has more than one endpoint, only one endpoint pair must match to mark the finding as duplicate. -DEDUPE_ALGO_ENDPOINT_FIELDS = ['host', 'path'] +DEDUPE_ALGO_ENDPOINT_FIELDS = ["host", "path"] # Choice of deduplication algorithm per parser # Key = the scan_type from factory.py (= the test_type) # Default is DEDUPE_ALGO_LEGACY DEDUPLICATION_ALGORITHM_PER_PARSER = { - 'Anchore Engine Scan': DEDUPE_ALGO_HASH_CODE, - 'AnchoreCTL Vuln Report': DEDUPE_ALGO_HASH_CODE, - 'AnchoreCTL Policies Report': DEDUPE_ALGO_HASH_CODE, - 'Anchore Enterprise Policy Check': DEDUPE_ALGO_HASH_CODE, - 'Anchore Grype': DEDUPE_ALGO_HASH_CODE, - 'Aqua Scan': DEDUPE_ALGO_HASH_CODE, - 'AuditJS Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'AWS Prowler Scan': DEDUPE_ALGO_HASH_CODE, - 'AWS Prowler V3': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Anchore Engine Scan": DEDUPE_ALGO_HASH_CODE, + "AnchoreCTL Vuln Report": DEDUPE_ALGO_HASH_CODE, + "AnchoreCTL Policies Report": DEDUPE_ALGO_HASH_CODE, + "Anchore Enterprise Policy Check": DEDUPE_ALGO_HASH_CODE, + "Anchore Grype": DEDUPE_ALGO_HASH_CODE, + "Aqua Scan": DEDUPE_ALGO_HASH_CODE, + "AuditJS Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "AWS Prowler Scan": DEDUPE_ALGO_HASH_CODE, + "AWS Prowler V3": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "AWS Security Finding Format (ASFF) Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Burp REST API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Bandit Scan': DEDUPE_ALGO_HASH_CODE, - 'CargoAudit Scan': DEDUPE_ALGO_HASH_CODE, - 'Checkmarx Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Checkmarx Scan': DEDUPE_ALGO_HASH_CODE, - 'Checkmarx One Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Checkmarx OSA': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Codechecker Report native': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Coverity API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Coverity Scan JSON Report': DEDUPE_ALGO_HASH_CODE, - 'Cobalt.io API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Crunch42 Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Dependency Track Finding Packaging Format (FPF) Export': DEDUPE_ALGO_HASH_CODE, - 'Mobsfscan Scan': DEDUPE_ALGO_HASH_CODE, - 'SonarQube Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'SonarQube Scan': DEDUPE_ALGO_HASH_CODE, - 'SonarQube API Import': DEDUPE_ALGO_HASH_CODE, - 'Sonatype Application Scan': DEDUPE_ALGO_HASH_CODE, - 'Dependency Check Scan': DEDUPE_ALGO_HASH_CODE, - 'Dockle Scan': DEDUPE_ALGO_HASH_CODE, - 'Tenable Scan': DEDUPE_ALGO_HASH_CODE, - 'Nexpose Scan': DEDUPE_ALGO_HASH_CODE, - 'NPM Audit Scan': DEDUPE_ALGO_HASH_CODE, - 'NPM Audit v7+ Scan': DEDUPE_ALGO_HASH_CODE, - 'Yarn Audit Scan': DEDUPE_ALGO_HASH_CODE, - 'Mend Scan': DEDUPE_ALGO_HASH_CODE, - 'ZAP Scan': DEDUPE_ALGO_HASH_CODE, - 'Qualys Scan': DEDUPE_ALGO_HASH_CODE, - 'PHP Symfony Security Check': DEDUPE_ALGO_HASH_CODE, - 'Acunetix Scan': DEDUPE_ALGO_HASH_CODE, - 'Clair Scan': DEDUPE_ALGO_HASH_CODE, + "Burp REST API": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Bandit Scan": DEDUPE_ALGO_HASH_CODE, + "CargoAudit Scan": DEDUPE_ALGO_HASH_CODE, + "Checkmarx Scan detailed": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Checkmarx Scan": DEDUPE_ALGO_HASH_CODE, + "Checkmarx One Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Checkmarx OSA": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Codechecker Report native": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Coverity API": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Coverity Scan JSON Report": 
DEDUPE_ALGO_HASH_CODE, + "Cobalt.io API": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Crunch42 Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Dependency Track Finding Packaging Format (FPF) Export": DEDUPE_ALGO_HASH_CODE, + "Mobsfscan Scan": DEDUPE_ALGO_HASH_CODE, + "SonarQube Scan detailed": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "SonarQube Scan": DEDUPE_ALGO_HASH_CODE, + "SonarQube API Import": DEDUPE_ALGO_HASH_CODE, + "Sonatype Application Scan": DEDUPE_ALGO_HASH_CODE, + "Dependency Check Scan": DEDUPE_ALGO_HASH_CODE, + "Dockle Scan": DEDUPE_ALGO_HASH_CODE, + "Tenable Scan": DEDUPE_ALGO_HASH_CODE, + "Nexpose Scan": DEDUPE_ALGO_HASH_CODE, + "NPM Audit Scan": DEDUPE_ALGO_HASH_CODE, + "NPM Audit v7+ Scan": DEDUPE_ALGO_HASH_CODE, + "Yarn Audit Scan": DEDUPE_ALGO_HASH_CODE, + "Mend Scan": DEDUPE_ALGO_HASH_CODE, + "ZAP Scan": DEDUPE_ALGO_HASH_CODE, + "Qualys Scan": DEDUPE_ALGO_HASH_CODE, + "PHP Symfony Security Check": DEDUPE_ALGO_HASH_CODE, + "Acunetix Scan": DEDUPE_ALGO_HASH_CODE, + "Clair Scan": DEDUPE_ALGO_HASH_CODE, # 'Qualys Webapp Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, # Must also uncomment qualys webapp line in hashcode fields per scanner - 'Veracode Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Veracode SourceClear Scan': DEDUPE_ALGO_HASH_CODE, + "Veracode Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Veracode SourceClear Scan": DEDUPE_ALGO_HASH_CODE, # for backwards compatibility because someone decided to rename this scanner: - 'Symfony Security Check': DEDUPE_ALGO_HASH_CODE, - 'DSOP Scan': DEDUPE_ALGO_HASH_CODE, - 'Terrascan Scan': DEDUPE_ALGO_HASH_CODE, - 'Trivy Operator Scan': DEDUPE_ALGO_HASH_CODE, - 'Trivy Scan': DEDUPE_ALGO_HASH_CODE, - 'TFSec Scan': DEDUPE_ALGO_HASH_CODE, - 'HackerOne Cases': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Snyk Scan': DEDUPE_ALGO_HASH_CODE, - 'GitLab Dependency Scanning Report': DEDUPE_ALGO_HASH_CODE, - 'GitLab SAST Report': DEDUPE_ALGO_HASH_CODE, - 'Govulncheck Scanner': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'GitLab Container Scan': DEDUPE_ALGO_HASH_CODE, - 'GitLab Secret Detection Report': DEDUPE_ALGO_HASH_CODE, - 'Checkov Scan': DEDUPE_ALGO_HASH_CODE, - 'SpotBugs Scan': DEDUPE_ALGO_HASH_CODE, - 'JFrog Xray Unified Scan': DEDUPE_ALGO_HASH_CODE, - 'JFrog Xray On Demand Binary Scan': DEDUPE_ALGO_HASH_CODE, - 'Scout Suite Scan': DEDUPE_ALGO_HASH_CODE, - 'AWS Security Hub Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Meterian Scan': DEDUPE_ALGO_HASH_CODE, - 'Github Vulnerability Scan': DEDUPE_ALGO_HASH_CODE, - 'Cloudsploit Scan': DEDUPE_ALGO_HASH_CODE, - 'SARIF': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Azure Security Center Recommendations Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Hadolint Dockerfile check': DEDUPE_ALGO_HASH_CODE, - 'Semgrep JSON Report': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Generic Findings Import': DEDUPE_ALGO_HASH_CODE, - 'Trufflehog Scan': DEDUPE_ALGO_HASH_CODE, - 'Trufflehog3 Scan': DEDUPE_ALGO_HASH_CODE, - 'Detect-secrets Scan': DEDUPE_ALGO_HASH_CODE, - 'Solar Appscreener Scan': DEDUPE_ALGO_HASH_CODE, - 'Gitleaks Scan': DEDUPE_ALGO_HASH_CODE, - 'pip-audit Scan': DEDUPE_ALGO_HASH_CODE, - 'Nancy Scan': DEDUPE_ALGO_HASH_CODE, - 'Edgescan Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Bugcrowd API Import': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Rubocop Scan': DEDUPE_ALGO_HASH_CODE, - 'JFrog Xray Scan': DEDUPE_ALGO_HASH_CODE, - 'CycloneDX Scan': DEDUPE_ALGO_HASH_CODE, - 'SSLyze Scan (JSON)': DEDUPE_ALGO_HASH_CODE, - 'Harbor Vulnerability Scan': DEDUPE_ALGO_HASH_CODE, - 'Rusty Hog Scan': DEDUPE_ALGO_HASH_CODE, - 
'StackHawk HawkScan': DEDUPE_ALGO_HASH_CODE, - 'Hydra Scan': DEDUPE_ALGO_HASH_CODE, - 'DrHeader JSON Importer': DEDUPE_ALGO_HASH_CODE, - 'PWN SAST': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Whispers': DEDUPE_ALGO_HASH_CODE, - 'Blackduck Hub Scan': DEDUPE_ALGO_HASH_CODE, - 'BlackDuck API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Blackduck Binary Analysis': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'docker-bench-security Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Vulners Scan': DEDUPE_ALGO_HASH_CODE, - 'Twistlock Image Scan': DEDUPE_ALGO_HASH_CODE, - 'NeuVector (REST)': DEDUPE_ALGO_HASH_CODE, - 'NeuVector (compliance)': DEDUPE_ALGO_HASH_CODE, - 'Wpscan': DEDUPE_ALGO_HASH_CODE, - 'Popeye Scan': DEDUPE_ALGO_HASH_CODE, - 'Nuclei Scan': DEDUPE_ALGO_HASH_CODE, - 'KubeHunter Scan': DEDUPE_ALGO_HASH_CODE, - 'kube-bench Scan': DEDUPE_ALGO_HASH_CODE, - 'Threagile risks report': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Humble Json Importer': DEDUPE_ALGO_HASH_CODE, - 'Wazuh Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'MSDefender Parser': DEDUPE_ALGO_HASH_CODE, - 'HCLAppScan XML': DEDUPE_ALGO_HASH_CODE, - 'KICS Scan': DEDUPE_ALGO_HASH_CODE, - 'MobSF Scan': DEDUPE_ALGO_HASH_CODE, - 'OSV Scan': DEDUPE_ALGO_HASH_CODE, - 'Nosey Parker Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, - 'Bearer CLI': DEDUPE_ALGO_HASH_CODE, - 'Wiz Scan': DEDUPE_ALGO_HASH_CODE, - 'Kiuwan SCA Scan': DEDUPE_ALGO_HASH_CODE, - 'Deepfence Threatmapper Report': DEDUPE_ALGO_HASH_CODE, - 'Kubescape JSON Importer': DEDUPE_ALGO_HASH_CODE, + "Symfony Security Check": DEDUPE_ALGO_HASH_CODE, + "DSOP Scan": DEDUPE_ALGO_HASH_CODE, + "Terrascan Scan": DEDUPE_ALGO_HASH_CODE, + "Trivy Operator Scan": DEDUPE_ALGO_HASH_CODE, + "Trivy Scan": DEDUPE_ALGO_HASH_CODE, + "TFSec Scan": DEDUPE_ALGO_HASH_CODE, + "HackerOne Cases": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Snyk Scan": DEDUPE_ALGO_HASH_CODE, + "GitLab Dependency Scanning Report": DEDUPE_ALGO_HASH_CODE, + "GitLab SAST Report": DEDUPE_ALGO_HASH_CODE, + "Govulncheck Scanner": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "GitLab Container Scan": DEDUPE_ALGO_HASH_CODE, + "GitLab Secret Detection Report": DEDUPE_ALGO_HASH_CODE, + "Checkov Scan": DEDUPE_ALGO_HASH_CODE, + "SpotBugs Scan": DEDUPE_ALGO_HASH_CODE, + "JFrog Xray Unified Scan": DEDUPE_ALGO_HASH_CODE, + "JFrog Xray On Demand Binary Scan": DEDUPE_ALGO_HASH_CODE, + "Scout Suite Scan": DEDUPE_ALGO_HASH_CODE, + "AWS Security Hub Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Meterian Scan": DEDUPE_ALGO_HASH_CODE, + "Github Vulnerability Scan": DEDUPE_ALGO_HASH_CODE, + "Cloudsploit Scan": DEDUPE_ALGO_HASH_CODE, + "SARIF": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Azure Security Center Recommendations Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Hadolint Dockerfile check": DEDUPE_ALGO_HASH_CODE, + "Semgrep JSON Report": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Generic Findings Import": DEDUPE_ALGO_HASH_CODE, + "Trufflehog Scan": DEDUPE_ALGO_HASH_CODE, + "Trufflehog3 Scan": DEDUPE_ALGO_HASH_CODE, + "Detect-secrets Scan": DEDUPE_ALGO_HASH_CODE, + "Solar Appscreener Scan": DEDUPE_ALGO_HASH_CODE, + "Gitleaks Scan": DEDUPE_ALGO_HASH_CODE, + "pip-audit Scan": DEDUPE_ALGO_HASH_CODE, + "Nancy Scan": DEDUPE_ALGO_HASH_CODE, + "Edgescan Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Bugcrowd API Import": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Rubocop Scan": DEDUPE_ALGO_HASH_CODE, + "JFrog Xray Scan": DEDUPE_ALGO_HASH_CODE, + "CycloneDX Scan": DEDUPE_ALGO_HASH_CODE, + "SSLyze Scan (JSON)": DEDUPE_ALGO_HASH_CODE, + "Harbor Vulnerability Scan": 
DEDUPE_ALGO_HASH_CODE, + "Rusty Hog Scan": DEDUPE_ALGO_HASH_CODE, + "StackHawk HawkScan": DEDUPE_ALGO_HASH_CODE, + "Hydra Scan": DEDUPE_ALGO_HASH_CODE, + "DrHeader JSON Importer": DEDUPE_ALGO_HASH_CODE, + "PWN SAST": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Whispers": DEDUPE_ALGO_HASH_CODE, + "Blackduck Hub Scan": DEDUPE_ALGO_HASH_CODE, + "BlackDuck API": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Blackduck Binary Analysis": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "docker-bench-security Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "Vulners Scan": DEDUPE_ALGO_HASH_CODE, + "Twistlock Image Scan": DEDUPE_ALGO_HASH_CODE, + "NeuVector (REST)": DEDUPE_ALGO_HASH_CODE, + "NeuVector (compliance)": DEDUPE_ALGO_HASH_CODE, + "Wpscan": DEDUPE_ALGO_HASH_CODE, + "Popeye Scan": DEDUPE_ALGO_HASH_CODE, + "Nuclei Scan": DEDUPE_ALGO_HASH_CODE, + "KubeHunter Scan": DEDUPE_ALGO_HASH_CODE, + "kube-bench Scan": DEDUPE_ALGO_HASH_CODE, + "Threagile risks report": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Humble Json Importer": DEDUPE_ALGO_HASH_CODE, + "Wazuh Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, + "MSDefender Parser": DEDUPE_ALGO_HASH_CODE, + "HCLAppScan XML": DEDUPE_ALGO_HASH_CODE, + "KICS Scan": DEDUPE_ALGO_HASH_CODE, + "MobSF Scan": DEDUPE_ALGO_HASH_CODE, + "OSV Scan": DEDUPE_ALGO_HASH_CODE, + "Nosey Parker Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, + "Bearer CLI": DEDUPE_ALGO_HASH_CODE, + "Wiz Scan": DEDUPE_ALGO_HASH_CODE, + "Deepfence Threatmapper Report": DEDUPE_ALGO_HASH_CODE, + "Kubescape JSON Importer": DEDUPE_ALGO_HASH_CODE, + "Kiuwan SCA Scan": DEDUPE_ALGO_HASH_CODE, } # Override the hardcoded settings here via the env var -if len(env('DD_DEDUPLICATION_ALGORITHM_PER_PARSER')) > 0: - env_dedup_algorithm_per_parser = json.loads(env('DD_DEDUPLICATION_ALGORITHM_PER_PARSER')) +if len(env("DD_DEDUPLICATION_ALGORITHM_PER_PARSER")) > 0: + env_dedup_algorithm_per_parser = json.loads(env("DD_DEDUPLICATION_ALGORITHM_PER_PARSER")) for key, value in env_dedup_algorithm_per_parser.items(): if key in DEDUPLICATION_ALGORITHM_PER_PARSER: logger.info(f"Replacing {key} with value {value} (previously set to {DEDUPLICATION_ALGORITHM_PER_PARSER[key]}) from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER") @@ -1504,137 +1504,137 @@ def saml2_attrib_map_format(dict): logger.info(f"Adding {key} with value {value} from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER") DEDUPLICATION_ALGORITHM_PER_PARSER[key] = value -DUPE_DELETE_MAX_PER_RUN = env('DD_DUPE_DELETE_MAX_PER_RUN') +DUPE_DELETE_MAX_PER_RUN = env("DD_DUPE_DELETE_MAX_PER_RUN") -DISABLE_FINDING_MERGE = env('DD_DISABLE_FINDING_MERGE') +DISABLE_FINDING_MERGE = env("DD_DISABLE_FINDING_MERGE") -TRACK_IMPORT_HISTORY = env('DD_TRACK_IMPORT_HISTORY') +TRACK_IMPORT_HISTORY = env("DD_TRACK_IMPORT_HISTORY") # ------------------------------------------------------------------------------ # JIRA # ------------------------------------------------------------------------------ # The 'Bug' issue type is mandatory, as it is used as the default choice. 
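Besides the quoting change, the hunk below carries the DD_JIRA_EXTRA_ISSUE_TYPES parsing, which is easiest to follow in isolation. Here is a runnable sketch with hypothetical values, using os.environ in place of the project's env() helper; note that `tuple += (s, s)` extends the choices tuple with two bare strings rather than appending a single ("name", "name") pair.

import os

JIRA_ISSUE_TYPE_CHOICES_CONFIG = (
    ("Task", "Task"),
    ("Bug", "Bug"),
)

# hypothetical value; the shipped default is an empty string
extra = os.environ.get("DD_JIRA_EXTRA_ISSUE_TYPES", "Improvement,Incident")
if extra != "":
    if extra.count(",") > 0:
        for extra_type in extra.split(","):
            # extends the tuple with two string elements, not a nested pair
            JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type)
    else:
        JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra, extra)

print(JIRA_ISSUE_TYPE_CHOICES_CONFIG)
# (('Task', 'Task'), ('Bug', 'Bug'), 'Improvement', 'Improvement', 'Incident', 'Incident')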
JIRA_ISSUE_TYPE_CHOICES_CONFIG = ( - ('Task', 'Task'), - ('Story', 'Story'), - ('Epic', 'Epic'), - ('Spike', 'Spike'), - ('Bug', 'Bug'), - ('Security', 'Security'), + ("Task", "Task"), + ("Story", "Story"), + ("Epic", "Epic"), + ("Spike", "Spike"), + ("Bug", "Bug"), + ("Security", "Security"), ) -if env('DD_JIRA_EXTRA_ISSUE_TYPES') != '': - if env('DD_JIRA_EXTRA_ISSUE_TYPES').count(',') > 0: - for extra_type in env('DD_JIRA_EXTRA_ISSUE_TYPES').split(','): +if env("DD_JIRA_EXTRA_ISSUE_TYPES") != "": + if env("DD_JIRA_EXTRA_ISSUE_TYPES").count(",") > 0: + for extra_type in env("DD_JIRA_EXTRA_ISSUE_TYPES").split(","): JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type) else: - JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env('DD_JIRA_EXTRA_ISSUE_TYPES'), env('DD_JIRA_EXTRA_ISSUE_TYPES')) + JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env("DD_JIRA_EXTRA_ISSUE_TYPES"), env("DD_JIRA_EXTRA_ISSUE_TYPES")) -JIRA_SSL_VERIFY = env('DD_JIRA_SSL_VERIFY') +JIRA_SSL_VERIFY = env("DD_JIRA_SSL_VERIFY") # ------------------------------------------------------------------------------ # LOGGING # ------------------------------------------------------------------------------ # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. -LOGGING_HANDLER = env('DD_LOGGING_HANDLER') +LOGGING_HANDLER = env("DD_LOGGING_HANDLER") -LOG_LEVEL = env('DD_LOG_LEVEL') +LOG_LEVEL = env("DD_LOG_LEVEL") if not LOG_LEVEL: - LOG_LEVEL = 'DEBUG' if DEBUG else 'INFO' + LOG_LEVEL = "DEBUG" if DEBUG else "INFO" LOGGING = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'verbose': { - 'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s', - 'datefmt': '%d/%b/%Y %H:%M:%S', + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "verbose": { + "format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s", + "datefmt": "%d/%b/%Y %H:%M:%S", }, - 'simple': { - 'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s', + "simple": { + "format": "%(levelname)s %(funcName)s %(lineno)d %(message)s", }, - 'json': { - '()': 'json_log_formatter.JSONFormatter', + "json": { + "()": "json_log_formatter.JSONFormatter", }, }, - 'filters': { - 'require_debug_false': { - '()': 'django.utils.log.RequireDebugFalse', + "filters": { + "require_debug_false": { + "()": "django.utils.log.RequireDebugFalse", }, - 'require_debug_true': { - '()': 'django.utils.log.RequireDebugTrue', + "require_debug_true": { + "()": "django.utils.log.RequireDebugTrue", }, }, - 'handlers': { - 'mail_admins': { - 'level': 'ERROR', - 'filters': ['require_debug_false'], - 'class': 'django.utils.log.AdminEmailHandler', + "handlers": { + "mail_admins": { + "level": "ERROR", + "filters": ["require_debug_false"], + "class": "django.utils.log.AdminEmailHandler", }, - 'console': { - 'class': 'logging.StreamHandler', - 'formatter': 'verbose', + "console": { + "class": "logging.StreamHandler", + "formatter": "verbose", }, - 'json_console': { - 'class': 'logging.StreamHandler', - 'formatter': 'json', + "json_console": { + "class": "logging.StreamHandler", + "formatter": "json", }, }, - 'loggers': { - 'django.request': { - 'handlers': ['mail_admins', 'console'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "loggers": { + "django.request": { + "handlers": ["mail_admins", "console"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'django.security': { - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + 
"django.security": { + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'celery': { - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "celery": { + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, # workaround some celery logging known issue - 'worker_hijack_root_logger': False, + "worker_hijack_root_logger": False, }, - 'dojo': { - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "dojo": { + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'dojo.specific-loggers.deduplication': { - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "dojo.specific-loggers.deduplication": { + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'saml2': { - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "saml2": { + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'MARKDOWN': { + "MARKDOWN": { # The markdown library is too verbose in it's logging, reducing the verbosity in our logs. - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, - 'titlecase': { + "titlecase": { # The titlecase library is too verbose in it's logging, reducing the verbosity in our logs. - 'handlers': [rf'{LOGGING_HANDLER}'], - 'level': str(LOG_LEVEL), - 'propagate': False, + "handlers": [rf"{LOGGING_HANDLER}"], + "level": str(LOG_LEVEL), + "propagate": False, }, }, } # override filter to ensure sensitive variables are also hidden when DEBUG = True -DEFAULT_EXCEPTION_REPORTER_FILTER = 'dojo.settings.exception_filter.CustomExceptionReporterFilter' +DEFAULT_EXCEPTION_REPORTER_FILTER = "dojo.settings.exception_filter.CustomExceptionReporterFilter" # As we require `innodb_large_prefix = ON` for MySQL, we can silence the # warning about large varchar with unique indices. -SILENCED_SYSTEM_CHECKS = ['mysql.E001'] +SILENCED_SYSTEM_CHECKS = ["mysql.E001"] # Issue on benchmark : "The number of GET/POST parameters exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELD S" DATA_UPLOAD_MAX_NUMBER_FIELDS = 10240 @@ -1653,32 +1653,32 @@ def saml2_attrib_map_format(dict): PARSER_EXCLUDE = env("DD_PARSER_EXCLUDE") SERIALIZATION_MODULES = { - 'xml': 'tagulous.serializers.xml_serializer', - 'json': 'tagulous.serializers.json', - 'python': 'tagulous.serializers.python', - 'yaml': 'tagulous.serializers.pyyaml', + "xml": "tagulous.serializers.xml_serializer", + "json": "tagulous.serializers.json", + "python": "tagulous.serializers.python", + "yaml": "tagulous.serializers.pyyaml", } # There seems to be no way just use the default and just leave out jquery, so we have to copy... # ... and keep it up-to-date. TAGULOUS_AUTOCOMPLETE_JS = ( # 'tagulous/lib/jquery.js', - 'tagulous/lib/select2-4/js/select2.full.min.js', - 'tagulous/tagulous.js', - 'tagulous/adaptor/select2-4.js', + "tagulous/lib/select2-4/js/select2.full.min.js", + "tagulous/tagulous.js", + "tagulous/adaptor/select2-4.js", ) # using 'element' for width should take width from css defined in template, but it doesn't. So set to 70% here. 
-TAGULOUS_AUTOCOMPLETE_SETTINGS = {'placeholder': "Enter some tags (comma separated, use enter to select / create a new tag)", 'width': '70%'} +TAGULOUS_AUTOCOMPLETE_SETTINGS = {"placeholder": "Enter some tags (comma separated, use enter to select / create a new tag)", "width": "70%"} -EDITABLE_MITIGATED_DATA = env('DD_EDITABLE_MITIGATED_DATA') +EDITABLE_MITIGATED_DATA = env("DD_EDITABLE_MITIGATED_DATA") # FEATURE_FINDING_GROUPS feature is moved to system_settings, will be removed from settings file -FEATURE_FINDING_GROUPS = env('DD_FEATURE_FINDING_GROUPS') -JIRA_TEMPLATE_ROOT = env('DD_JIRA_TEMPLATE_ROOT') -TEMPLATE_DIR_PREFIX = env('DD_TEMPLATE_DIR_PREFIX') +FEATURE_FINDING_GROUPS = env("DD_FEATURE_FINDING_GROUPS") +JIRA_TEMPLATE_ROOT = env("DD_JIRA_TEMPLATE_ROOT") +TEMPLATE_DIR_PREFIX = env("DD_TEMPLATE_DIR_PREFIX") -DUPLICATE_CLUSTER_CASCADE_DELETE = env('DD_DUPLICATE_CLUSTER_CASCADE_DELETE') +DUPLICATE_CLUSTER_CASCADE_DELETE = env("DD_DUPLICATE_CLUSTER_CASCADE_DELETE") # Deside if SonarQube API parser should download the security hotspots SONARQUBE_API_PARSER_HOTSPOTS = env("DD_SONARQUBE_API_PARSER_HOTSPOTS") @@ -1701,17 +1701,17 @@ def saml2_attrib_map_format(dict): SILENCED_SYSTEM_CHECKS = ["django_jsonfield_backport.W001"] VULNERABILITY_URLS = { - 'CVE': 'https://nvd.nist.gov/vuln/detail/', - 'GHSA': 'https://github.com/advisories/', - 'OSV': 'https://osv.dev/vulnerability/', - 'PYSEC': 'https://osv.dev/vulnerability/', - 'SNYK': 'https://snyk.io/vuln/', - 'RUSTSEC': 'https://rustsec.org/advisories/', - 'VNS': 'https://vulners.com/', - 'RHSA': 'https://access.redhat.com/errata/', - 'RHBA': 'https://access.redhat.com/errata/', - 'RHEA': 'https://access.redhat.com/errata/', - 'FEDORA': 'https://bodhi.fedoraproject.org/updates/', + "CVE": "https://nvd.nist.gov/vuln/detail/", + "GHSA": "https://github.com/advisories/", + "OSV": "https://osv.dev/vulnerability/", + "PYSEC": "https://osv.dev/vulnerability/", + "SNYK": "https://snyk.io/vuln/", + "RUSTSEC": "https://rustsec.org/advisories/", + "VNS": "https://vulners.com/", + "RHSA": "https://access.redhat.com/errata/", + "RHBA": "https://access.redhat.com/errata/", + "RHEA": "https://access.redhat.com/errata/", + "FEDORA": "https://bodhi.fedoraproject.org/updates/", } # List of acceptable file types that can be uploaded to a given object via arbitrary file upload FILE_UPLOAD_TYPES = env("DD_FILE_UPLOAD_TYPES") @@ -1719,22 +1719,22 @@ def saml2_attrib_map_format(dict): # AttributeError: Problem installing fixture '/app/dojo/fixtures/defect_dojo_sample_data.json': 'Settings' object has no attribute 'AUDITLOG_DISABLE_ON_RAW_SAVE' AUDITLOG_DISABLE_ON_RAW_SAVE = False # You can set extra Jira headers by suppling a dictionary in header: value format (pass as env var like "headr_name=value,another_header=anohter_value") -ADDITIONAL_HEADERS = env('DD_ADDITIONAL_HEADERS') +ADDITIONAL_HEADERS = env("DD_ADDITIONAL_HEADERS") # Dictates whether cloud banner is created or not -CREATE_CLOUD_BANNER = env('DD_CREATE_CLOUD_BANNER') +CREATE_CLOUD_BANNER = env("DD_CREATE_CLOUD_BANNER") # ------------------------------------------------------------------------------ # Auditlog # ------------------------------------------------------------------------------ -AUDITLOG_FLUSH_RETENTION_PERIOD = env('DD_AUDITLOG_FLUSH_RETENTION_PERIOD') -ENABLE_AUDITLOG = env('DD_ENABLE_AUDITLOG') -USE_FIRST_SEEN = env('DD_USE_FIRST_SEEN') -USE_QUALYS_LEGACY_SEVERITY_PARSING = env('DD_QUALYS_LEGACY_SEVERITY_PARSING') +AUDITLOG_FLUSH_RETENTION_PERIOD = 
env("DD_AUDITLOG_FLUSH_RETENTION_PERIOD") +ENABLE_AUDITLOG = env("DD_ENABLE_AUDITLOG") +USE_FIRST_SEEN = env("DD_USE_FIRST_SEEN") +USE_QUALYS_LEGACY_SEVERITY_PARSING = env("DD_QUALYS_LEGACY_SEVERITY_PARSING") # ------------------------------------------------------------------------------ # Notifications # ------------------------------------------------------------------------------ -NOTIFICATIONS_SYSTEM_LEVEL_TRUMP = env('DD_NOTIFICATIONS_SYSTEM_LEVEL_TRUMP') +NOTIFICATIONS_SYSTEM_LEVEL_TRUMP = env("DD_NOTIFICATIONS_SYSTEM_LEVEL_TRUMP") # ------------------------------------------------------------------------------ # Ignored Warnings diff --git a/dojo/settings/settings.py b/dojo/settings/settings.py index 2d378c742f..abb8168506 100644 --- a/dojo/settings/settings.py +++ b/dojo/settings/settings.py @@ -8,14 +8,14 @@ # how to tune the configuration to your needs. include( - 'settings.dist.py', - optional('local_settings.py'), + "settings.dist.py", + optional("local_settings.py"), ) -if not (DEBUG or ('collectstatic' in sys.argv)): - with (Path(__file__).parent / 'settings.dist.py').open('rb') as file: +if not (DEBUG or ("collectstatic" in sys.argv)): + with (Path(__file__).parent / "settings.dist.py").open("rb") as file: real_hash = hashlib.sha256(file.read()).hexdigest() - with (Path(__file__).parent / '.settings.dist.py.sha256sum').open('rb') as file: + with (Path(__file__).parent / ".settings.dist.py.sha256sum").open("rb") as file: expected_hash = file.read().decode().strip() if real_hash != expected_hash: msg = "Change of 'settings.dist.py' file was detected. It is not allowed to edit this file. " \ diff --git a/dojo/settings/unittest.py b/dojo/settings/unittest.py index 7132d3b928..62f37afcdb 100644 --- a/dojo/settings/unittest.py +++ b/dojo/settings/unittest.py @@ -7,8 +7,8 @@ DEBUG = True DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': 'unittest.sqlite', + "default": { + "ENGINE": "django.db.backends.sqlite3", + "NAME": "unittest.sqlite", }, } diff --git a/dojo/sla_config/urls.py b/dojo/sla_config/urls.py index c0a72f6d5c..764da20eaa 100644 --- a/dojo/sla_config/urls.py +++ b/dojo/sla_config/urls.py @@ -3,7 +3,7 @@ from . 
import views urlpatterns = [ - re_path(r'^sla_config/add', views.new_sla_config, name='new_sla_config'), - re_path(r'^sla_config/(?P\d+)/edit$', views.edit_sla_config, name='edit_sla_config'), - re_path(r'^sla_config$', views.sla_config, name='sla_config'), + re_path(r"^sla_config/add", views.new_sla_config, name="new_sla_config"), + re_path(r"^sla_config/(?P\d+)/edit$", views.edit_sla_config, name="edit_sla_config"), + re_path(r"^sla_config$", views.sla_config, name="sla_config"), ] diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py index 28aefd0c3b..f95461283f 100644 --- a/dojo/sla_config/views.py +++ b/dojo/sla_config/views.py @@ -14,64 +14,64 @@ logger = logging.getLogger(__name__) -@user_is_configuration_authorized('dojo.add_sla_configuration') +@user_is_configuration_authorized("dojo.add_sla_configuration") def new_sla_config(request): - if request.method == 'POST': + if request.method == "POST": tform = SLAConfigForm(request.POST, instance=SLA_Configuration()) if tform.is_valid(): tform.save() messages.add_message(request, messages.SUCCESS, - 'SLA configuration Successfully Created.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('sla_config')) + "SLA configuration Successfully Created.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("sla_config")) else: tform = SLAConfigForm() add_breadcrumb( title="New SLA configuration", top_level=False, request=request) - return render(request, 'dojo/new_sla_config.html', - {'form': tform}) + return render(request, "dojo/new_sla_config.html", + {"form": tform}) -@user_is_configuration_authorized('dojo.change_sla_configuration') +@user_is_configuration_authorized("dojo.change_sla_configuration") def edit_sla_config(request, slaid): sla_config = SLA_Configuration.objects.get(pk=slaid) - if request.method == 'POST' and request.POST.get('delete'): + if request.method == "POST" and request.POST.get("delete"): if sla_config.id != 1: if Product.objects.filter(sla_configuration=sla_config).count(): msg = f'The "{sla_config}" SLA configuration could not be deleted, as it is currently in use by one or more products.' messages.add_message(request, messages.ERROR, msg, - extra_tags='alert-warning') + extra_tags="alert-warning") else: user_has_configuration_permission_or_403( - request.user, 'dojo.delete_sla_configuration') + request.user, "dojo.delete_sla_configuration") sla_config.delete() messages.add_message(request, messages.SUCCESS, - 'SLA Configuration Deleted.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('sla_config')) + "SLA Configuration Deleted.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("sla_config")) else: messages.add_message(request, messages.ERROR, - 'The Default SLA Configuration cannot be deleted.', - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('sla_config')) + "The Default SLA Configuration cannot be deleted.", + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("sla_config")) - elif request.method == 'POST': + elif request.method == "POST": form = SLAConfigForm(request.POST, instance=sla_config) if form.is_valid(): form.save(commit=True) messages.add_message(request, messages.SUCCESS, - 'SLA configuration successfully updated. All SLA expiration dates for findings within this SLA configuration will be recalculated asynchronously.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('sla_config')) + "SLA configuration successfully updated. 
All SLA expiration dates for findings within this SLA configuration will be recalculated asynchronously.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("sla_config")) else: form = SLAConfigForm(instance=sla_config) @@ -81,24 +81,24 @@ def edit_sla_config(request, slaid): request=request) return render(request, - 'dojo/edit_sla_config.html', + "dojo/edit_sla_config.html", { - 'form': form, + "form": form, }) -@user_is_configuration_authorized('dojo.view_sla_configuration') +@user_is_configuration_authorized("dojo.view_sla_configuration") def sla_config(request): settings = System_Settings.objects.all() - confs = SLA_Configuration.objects.all().order_by('name') + confs = SLA_Configuration.objects.all().order_by("name") add_breadcrumb( title="SLA Configurations", top_level=not len( request.GET), request=request) return render(request, - 'dojo/sla_config.html', - {'confs': confs, - 'settings': settings, + "dojo/sla_config.html", + {"confs": confs, + "settings": settings, }) diff --git a/dojo/survey/urls.py b/dojo/survey/urls.py index 2286b83226..d5c4dbd9e5 100644 --- a/dojo/survey/urls.py +++ b/dojo/survey/urls.py @@ -15,66 +15,66 @@ admin.autodiscover() urlpatterns = [ - re_path(r'^questionnaire$', + re_path(r"^questionnaire$", views.questionnaire, - name='questionnaire'), - re_path(r'^questionnaire/create$', + name="questionnaire"), + re_path(r"^questionnaire/create$", views.create_questionnaire, - name='create_questionnaire'), - re_path(r'^questionnaire/(?P\d+)/edit$', + name="create_questionnaire"), + re_path(r"^questionnaire/(?P\d+)/edit$", views.edit_questionnaire, - name='edit_questionnaire'), - re_path(r'^questionnaire/(?P\d+)/delete', + name="edit_questionnaire"), + re_path(r"^questionnaire/(?P\d+)/delete", views.delete_questionnaire, - name='delete_questionnaire'), - re_path(r'^questionnaire/(?P\d+)/edit/questions$', + name="delete_questionnaire"), + re_path(r"^questionnaire/(?P\d+)/edit/questions$", views.edit_questionnaire_questions, - name='edit_questionnaire_questions'), - re_path(r'^questions$', + name="edit_questionnaire_questions"), + re_path(r"^questions$", views.questions, - name='questions'), - re_path(r'^questions/add$', + name="questions"), + re_path(r"^questions/add$", views.create_question, - name='create_question'), - re_path(r'^questions/(?P\d+)/edit$', + name="create_question"), + re_path(r"^questions/(?P\d+)/edit$", views.edit_question, - name='edit_question'), - re_path(r'^choices/add$', + name="edit_question"), + re_path(r"^choices/add$", views.add_choices, - name='add_choices'), - re_path(r'^engagement/(?P\d+)/add_questionnaire$', + name="add_choices"), + re_path(r"^engagement/(?P\d+)/add_questionnaire$", views.add_questionnaire, - name='add_questionnaire'), - re_path(r'^engagement/(?P\d+)/questionnaire/(?P\d+)/answer', + name="add_questionnaire"), + re_path(r"^engagement/(?P\d+)/questionnaire/(?P\d+)/answer", views.answer_questionnaire, - name='answer_questionnaire'), - re_path(r'^engagement/(?P\d+)/questionnaire/(?P\d+)/delete', + name="answer_questionnaire"), + re_path(r"^engagement/(?P\d+)/questionnaire/(?P\d+)/delete", views.delete_engagement_survey, - name='delete_engagement_survey'), - re_path(r'^engagement/(?P\d+)/questionnaire/(?P\d+)$', + name="delete_engagement_survey"), + re_path(r"^engagement/(?P\d+)/questionnaire/(?P\d+)$", views.view_questionnaire, - name='view_questionnaire'), - re_path(r'^engagement/(?P\d+)/questionnaire/(?P\d+)/assign', + name="view_questionnaire"), + 
re_path(r"^engagement/(?P\d+)/questionnaire/(?P\d+)/assign", views.assign_questionnaire, - name='assign_questionnaire'), + name="assign_questionnaire"), # Questionnaires without an engagemnet - re_path(r'^empty_questionnaire$', + re_path(r"^empty_questionnaire$", views.add_empty_questionnaire, - name='add_empty_questionnaire'), - re_path(r'^empty_questionnaire/(?P\d+)$', + name="add_empty_questionnaire"), + re_path(r"^empty_questionnaire/(?P\d+)$", views.view_empty_survey, - name='view_empty_survey'), - re_path(r'^empty_questionnaire/(?P\d+)/delete$', + name="view_empty_survey"), + re_path(r"^empty_questionnaire/(?P\d+)/delete$", views.delete_empty_questionnaire, - name='delete_empty_questionnaire'), - re_path(r'^general_questionnaire/(?P\d+)/delete$', + name="delete_empty_questionnaire"), + re_path(r"^general_questionnaire/(?P\d+)/delete$", views.delete_general_questionnaire, - name='delete_general_questionnaire'), - re_path(r'^empty_questionnaire/(?P\d+)/answer$', + name="delete_general_questionnaire"), + re_path(r"^empty_questionnaire/(?P\d+)/answer$", views.answer_empty_survey, - name='answer_empty_survey'), - re_path(r'^empty_questionnaire/(?P\d+)/new_engagement$', + name="answer_empty_survey"), + re_path(r"^empty_questionnaire/(?P\d+)/new_engagement$", views.engagement_empty_survey, - name='engagement_empty_survey'), + name="engagement_empty_survey"), ] diff --git a/dojo/survey/views.py b/dojo/survey/views.py index 5e036c6856..422fe93498 100644 --- a/dojo/survey/views.py +++ b/dojo/survey/views.py @@ -52,14 +52,14 @@ from dojo.utils import add_breadcrumb, get_page_items -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def delete_engagement_survey(request, eid, sid): engagement = get_object_or_404(Engagement, id=eid) survey = get_object_or_404(Answered_Survey, id=sid) questions = get_answered_questions(survey=survey, read_only=True) form = Delete_Questionnaire_Form(instance=survey) - if request.method == 'POST': + if request.method == "POST": form = Delete_Questionnaire_Form(request.POST, instance=survey) if form.is_valid(): answers = Answer.polymorphic.filter( @@ -72,25 +72,25 @@ def delete_engagement_survey(request, eid, sid): messages.add_message( request, messages.SUCCESS, - 'Questionnaire deleted successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_engagement', args=(engagement.id, ))) + "Questionnaire deleted successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, ))) else: messages.add_message( request, messages.ERROR, - 'Unable to delete Questionnaire.', - extra_tags='alert-danger') + "Unable to delete Questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/delete_questionnaire.html', { - 'survey': survey, - 'form': form, - 'engagement': engagement, - 'questions': questions, + return render(request, "defectDojo-engagement-survey/delete_questionnaire.html", { + "survey": survey, + "form": form, + "engagement": engagement, + "questions": questions, }) @@ -108,13 +108,13 @@ def answer_questionnaire(request, eid, sid): messages.add_message( request, messages.ERROR, - 'You must be authorized to answer questionnaire. 
diff --git a/dojo/survey/views.py b/dojo/survey/views.py
index 5e036c6856..422fe93498 100644
--- a/dojo/survey/views.py
+++ b/dojo/survey/views.py
@@ -52,14 +52,14 @@
 from dojo.utils import add_breadcrumb, get_page_items


-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
 def delete_engagement_survey(request, eid, sid):
     engagement = get_object_or_404(Engagement, id=eid)
     survey = get_object_or_404(Answered_Survey, id=sid)
     questions = get_answered_questions(survey=survey, read_only=True)
     form = Delete_Questionnaire_Form(instance=survey)
-    if request.method == 'POST':
+    if request.method == "POST":
         form = Delete_Questionnaire_Form(request.POST, instance=survey)
         if form.is_valid():
             answers = Answer.polymorphic.filter(
@@ -72,25 +72,25 @@ def delete_engagement_survey(request, eid, sid):
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                'Questionnaire deleted successfully.',
-                extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('view_engagement', args=(engagement.id, )))
+                "Questionnaire deleted successfully.",
+                extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
         else:
             messages.add_message(
                 request,
                 messages.ERROR,
-                'Unable to delete Questionnaire.',
-                extra_tags='alert-danger')
+                "Unable to delete Questionnaire.",
+                extra_tags="alert-danger")
     add_breadcrumb(
         title="Delete " + survey.survey.name + " Questionnaire",
         top_level=False,
         request=request)
-    return render(request, 'defectDojo-engagement-survey/delete_questionnaire.html', {
-        'survey': survey,
-        'form': form,
-        'engagement': engagement,
-        'questions': questions,
+    return render(request, "defectDojo-engagement-survey/delete_questionnaire.html", {
+        "survey": survey,
+        "form": form,
+        "engagement": engagement,
+        "questions": questions,
     })
@@ -108,13 +108,13 @@ def answer_questionnaire(request, eid, sid):
         messages.add_message(
             request,
             messages.ERROR,
-            'You must be authorized to answer questionnaire. Otherwise, enable anonymous response in system settings.',
-            extra_tags='alert-danger')
+            "You must be authorized to answer questionnaire. Otherwise, enable anonymous response in system settings.",
+            extra_tags="alert-danger")
         raise PermissionDenied
     questions = get_answered_questions(survey=survey, read_only=False)
-    if request.method == 'POST':
+    if request.method == "POST":
         questions = [
             q.get_form()(
                 request.POST or None,
@@ -140,48 +140,48 @@ def answer_questionnaire(request, eid, sid):
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                'Successfully answered, all answers valid.',
-                extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('view_engagement', args=(engagement.id, )))
+                "Successfully answered, all answers valid.",
+                extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
         else:
             messages.add_message(
                 request,
                 messages.ERROR,
-                'Questionnaire has errors, please correct.',
-                extra_tags='alert-danger')
+                "Questionnaire has errors, please correct.",
+                extra_tags="alert-danger")
     add_breadcrumb(
         title="Answer " + survey.survey.name + " Survey",
         top_level=False,
         request=request)
-    return render(request, 'defectDojo-engagement-survey/answer_survey.html', {
-        'survey': survey,
-        'engagement': engagement,
-        'questions': questions,
+    return render(request, "defectDojo-engagement-survey/answer_survey.html", {
+        "survey": survey,
+        "engagement": engagement,
+        "questions": questions,
     })


-@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid")
 def assign_questionnaire(request, eid, sid):
     survey = get_object_or_404(Answered_Survey, id=sid)
     engagement = get_object_or_404(Engagement, id=eid)
     form = AssignUserForm(instance=survey)
-    if request.method == 'POST':
+    if request.method == "POST":
         form = AssignUserForm(request.POST)
         if form.is_valid():
-            user = form.cleaned_data['assignee']
+            user = form.cleaned_data["assignee"]
             survey.assignee = user
             survey.save()
-            return HttpResponseRedirect(reverse('view_engagement', args=(engagement.id,)))
+            return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,)))
     add_breadcrumb(title="Assign Questionnaire", top_level=False, request=request)
-    return render(request, 'defectDojo-engagement-survey/assign_survey.html', {
-        'survey': survey,
-        'form': form,
+    return render(request, "defectDojo-engagement-survey/assign_survey.html", {
+        "survey": survey,
+        "form": form,
     })


-@user_is_authorized(Engagement, Permissions.Engagement_View, 'eid')
+@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
 def view_questionnaire(request, eid, sid):
     survey = get_object_or_404(Answered_Survey, id=sid)
     engagement = get_object_or_404(Engagement, id=eid)
@@ -191,12 +191,12 @@ def view_questionnaire(request, eid, sid):
         title=survey.survey.name + " Questionnaire Responses",
         top_level=False,
         request=request)
-    return render(request, 'defectDojo-engagement-survey/view_survey.html', {
-        'survey': survey,
-        'user': request.user,
-        'engagement': engagement,
-        'questions': questions,
-        'name': survey.survey.name + " Questionnaire Responses",
+    return render(request, "defectDojo-engagement-survey/view_survey.html", {
+        "survey": survey,
+        "user": request.user,
+        "engagement": engagement,
+        "questions": questions,
+        "name": survey.survey.name + " Questionnaire Responses",
     })
@@ -214,12 +214,12 @@ def get_answered_questions(survey=None, read_only=False):

     if read_only:
         for question in questions:
-
question.fields['answer'].widget.attrs = {"readonly": "readonly", "disabled": "disabled"} + question.fields["answer"].widget.attrs = {"readonly": "readonly", "disabled": "disabled"} return questions -@user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def add_questionnaire(request, eid): user = request.user engagement = get_object_or_404(Engagement, id=eid) @@ -227,7 +227,7 @@ def add_questionnaire(request, eid): surveys = Engagement_Survey.objects.exclude(id__in=ids) form = Add_Questionnaire_Form() - if request.method == 'POST': + if request.method == "POST": form = Add_Questionnaire_Form(request.POST) if form.is_valid(): survey = form.save(commit=False) @@ -236,29 +236,29 @@ def add_questionnaire(request, eid): messages.add_message( request, messages.SUCCESS, - 'Questionnaire successfully added, answers pending.', - extra_tags='alert-success') - if 'respond_survey' in request.POST: - return HttpResponseRedirect(reverse('answer_questionnaire', args=(eid, survey.id))) - return HttpResponseRedirect(reverse('view_engagement', args=(eid,))) + "Questionnaire successfully added, answers pending.", + extra_tags="alert-success") + if "respond_survey" in request.POST: + return HttpResponseRedirect(reverse("answer_questionnaire", args=(eid, survey.id))) + return HttpResponseRedirect(reverse("view_engagement", args=(eid,))) else: messages.add_message( request, messages.ERROR, - 'Questionnaire could not be added.', - extra_tags='alert-danger') + "Questionnaire could not be added.", + extra_tags="alert-danger") form.fields["survey"].queryset = surveys add_breadcrumb(title="Add Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/add_survey.html', { - 'surveys': surveys, - 'user': user, - 'form': form, - 'engagement': engagement, + return render(request, "defectDojo-engagement-survey/add_survey.html", { + "surveys": surveys, + "user": user, + "form": form, + "engagement": engagement, }) -@user_is_configuration_authorized('dojo.change_engagement_survey') +@user_is_configuration_authorized("dojo.change_engagement_survey") def edit_questionnaire(request, sid): survey = get_object_or_404(Engagement_Survey, id=sid) old_name = survey.name @@ -271,10 +271,10 @@ def edit_questionnaire(request, sid): messages.add_message( request, messages.ERROR, - 'This questionnaire already has answered instances. If you change it, the responses may no longer be valid.', - extra_tags='alert-info') + "This questionnaire already has answered instances. 
If you change it, the responses may no longer be valid.", + extra_tags="alert-info") - if request.method == 'POST': + if request.method == "POST": form = CreateQuestionnaireForm(request.POST, instance=survey) if form.is_valid(): if survey.name != old_name or \ @@ -285,33 +285,33 @@ def edit_questionnaire(request, sid): messages.add_message( request, messages.SUCCESS, - 'Questionnaire successfully updated, you may now add/edit questions.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('edit_questionnaire', args=(survey.id,))) + "Questionnaire successfully updated, you may now add/edit questions.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("edit_questionnaire", args=(survey.id,))) else: messages.add_message( request, messages.SUCCESS, - 'No changes detected, questionnaire not updated.', - extra_tags='alert-warning') - if 'add_questions' in request.POST: - return HttpResponseRedirect(reverse('edit_questionnaire_questions', args=(survey.id,))) + "No changes detected, questionnaire not updated.", + extra_tags="alert-warning") + if "add_questions" in request.POST: + return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,))) else: messages.add_message( request, messages.ERROR, - 'Please correct any errors displayed below.', - extra_tags='alert-danger') + "Please correct any errors displayed below.", + extra_tags="alert-danger") add_breadcrumb(title="Edit Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/create_questionnaire.html', { + return render(request, "defectDojo-engagement-survey/create_questionnaire.html", { "survey": survey, "form": form, "name": "Edit Questionnaire", }) -@user_is_configuration_authorized('dojo.delete_engagement_survey') +@user_is_configuration_authorized("dojo.delete_engagement_survey") def delete_questionnaire(request, sid): survey = get_object_or_404(Engagement_Survey, id=sid) form = Delete_Eng_Survey_Form(instance=survey) @@ -319,53 +319,53 @@ def delete_questionnaire(request, sid): collector.collect([survey]) rels = collector.nested() - if request.method == 'POST': - if 'id' in request.POST and str(survey.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(survey.id) == request.POST["id"]: form = Delete_Eng_Survey_Form(request.POST, instance=survey) if form.is_valid(): survey.delete() messages.add_message( request, messages.SUCCESS, - 'Questionnaire and relationships removed.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questionnaire')) + "Questionnaire and relationships removed.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questionnaire")) add_breadcrumb(title="Delete Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/delete_questionnaire.html', { - 'survey': survey, - 'form': form, - 'rels': rels, + return render(request, "defectDojo-engagement-survey/delete_questionnaire.html", { + "survey": survey, + "form": form, + "rels": rels, }) -@user_is_configuration_authorized('dojo.add_engagement_survey') +@user_is_configuration_authorized("dojo.add_engagement_survey") def create_questionnaire(request): form = CreateQuestionnaireForm() survey = None - if request.method == 'POST': + if request.method == "POST": form = CreateQuestionnaireForm(request.POST) if form.is_valid(): survey = form.save() messages.add_message( request, messages.SUCCESS, - 'Questionnaire successfully created, you may now add 
questions.', - extra_tags='alert-success') - if 'add_questions' in request.POST: - return HttpResponseRedirect(reverse('edit_questionnaire_questions', args=(survey.id,))) + "Questionnaire successfully created, you may now add questions.", + extra_tags="alert-success") + if "add_questions" in request.POST: + return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,))) else: - return HttpResponseRedirect(reverse('questionnaire')) + return HttpResponseRedirect(reverse("questionnaire")) else: messages.add_message( request, messages.ERROR, - 'Please correct any errors displayed below.', - extra_tags='alert-danger') + "Please correct any errors displayed below.", + extra_tags="alert-danger") add_breadcrumb(title="Create Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/create_questionnaire.html', { + return render(request, "defectDojo-engagement-survey/create_questionnaire.html", { "survey": survey, "form": form, "name": "Create Survey", @@ -375,15 +375,15 @@ def create_questionnaire(request): # complex permission check inside the function def edit_questionnaire_questions(request, sid): survey = get_object_or_404(Engagement_Survey, id=sid) - if not user_has_configuration_permission(request.user, 'dojo.add_engagement_survey') and \ - not user_has_configuration_permission(request.user, 'dojo.change_engagement_survey'): + if not user_has_configuration_permission(request.user, "dojo.add_engagement_survey") and \ + not user_has_configuration_permission(request.user, "dojo.change_engagement_survey"): raise PermissionDenied answered_surveys = Answered_Survey.objects.filter(survey=survey) reverted = False form = EditQuestionnaireQuestionsForm(instance=survey) - if request.method == 'POST': + if request.method == "POST": form = EditQuestionnaireQuestionsForm(request.POST, instance=survey) if form.is_valid(): @@ -398,30 +398,30 @@ def edit_questionnaire_questions(request, sid): messages.add_message( request, messages.SUCCESS, - 'Answered questionnaires associated with this survey have been set to uncompleted.', - extra_tags='alert-warning') + "Answered questionnaires associated with this survey have been set to uncompleted.", + extra_tags="alert-warning") messages.add_message( request, messages.SUCCESS, - 'Questionnaire questions successfully saved.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questionnaire')) + "Questionnaire questions successfully saved.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questionnaire")) else: messages.add_message( request, messages.ERROR, - 'Questionnaire questions not saved, please correct any errors displayed below.', - extra_tags='alert-success') + "Questionnaire questions not saved, please correct any errors displayed below.", + extra_tags="alert-success") add_breadcrumb(title="Update Questionnaire Questions", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/edit_survey_questions.html', { + return render(request, "defectDojo-engagement-survey/edit_survey_questions.html", { "survey": survey, "form": form, "name": "Update Survey Questions", }) -@user_is_configuration_authorized('dojo.view_engagement_survey') +@user_is_configuration_authorized("dojo.view_engagement_survey") def questionnaire(request): surveys = Engagement_Survey.objects.all() surveys = QuestionnaireFilter(request.GET, queryset=surveys) @@ -432,7 +432,7 @@ def questionnaire(request): survey.delete() add_breadcrumb(title="Questionnaires", 
top_level=True, request=request) - return render(request, 'defectDojo-engagement-survey/list_surveys.html', { + return render(request, "defectDojo-engagement-survey/list_surveys.html", { "surveys": paged_surveys, "filtered": surveys, "general": general_surveys, @@ -440,20 +440,20 @@ def questionnaire(request): }) -@user_is_configuration_authorized('dojo.view_question') +@user_is_configuration_authorized("dojo.view_question") def questions(request): questions = Question.polymorphic.all() questions = QuestionFilter(request.GET, queryset=questions) paged_questions = get_page_items(request, questions.qs, 25) add_breadcrumb(title="Questions", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/list_questions.html', { + return render(request, "defectDojo-engagement-survey/list_questions.html", { "questions": paged_questions, "filtered": questions, "name": "Questions", }) -@user_is_configuration_authorized('dojo.add_question') +@user_is_configuration_authorized("dojo.add_question") def create_question(request): error = False form = CreateQuestionForm() @@ -461,39 +461,39 @@ def create_question(request): choiceQuestionFrom = CreateChoiceQuestionForm() created_question = None - if 'return' in request.GET: - return HttpResponseRedirect(reverse('questionnaire')) + if "return" in request.GET: + return HttpResponseRedirect(reverse("questionnaire")) - if request.method == 'POST': + if request.method == "POST": form = CreateQuestionForm(request.POST) textQuestionForm = CreateTextQuestionForm(request.POST) choiceQuestionFrom = CreateChoiceQuestionForm(request.POST) if form.is_valid(): - type = form.cleaned_data['type'] - if type == 'text': + type = form.cleaned_data["type"] + if type == "text": if textQuestionForm.is_valid(): created_question = TextQuestion.objects.create( - optional=form.cleaned_data['optional'], - order=form.cleaned_data['order'], - text=form.cleaned_data['text']) + optional=form.cleaned_data["optional"], + order=form.cleaned_data["order"], + text=form.cleaned_data["text"]) messages.add_message( request, messages.SUCCESS, - 'Text Question added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questions')) + "Text Question added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questions")) else: error = True - elif type == 'choice': + elif type == "choice": if choiceQuestionFrom.is_valid(): created_question = ChoiceQuestion.objects.create( - optional=form.cleaned_data['optional'], - order=form.cleaned_data['order'], - text=form.cleaned_data['text'], - multichoice=choiceQuestionFrom.cleaned_data['multichoice']) - choices_to_process = pickle.loads(choiceQuestionFrom.cleaned_data['answer_choices']) + optional=form.cleaned_data["optional"], + order=form.cleaned_data["order"], + text=form.cleaned_data["text"], + multichoice=choiceQuestionFrom.cleaned_data["multichoice"]) + choices_to_process = pickle.loads(choiceQuestionFrom.cleaned_data["answer_choices"]) for c in choices_to_process: if c is not None and len(c) > 0: @@ -503,27 +503,27 @@ def create_question(request): messages.add_message( request, messages.SUCCESS, - 'Choice Question added successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questions')) + "Choice Question added successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questions")) else: error = True - if '_popup' in request.GET and not error: + if "_popup" in request.GET and not error: resp = f'' resp += '' 
return HttpResponse(resp) add_breadcrumb(title="Add Question", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/create_related_question.html', { - 'name': 'Add Question', - 'form': form, - 'textForm': textQuestionForm, - 'choiceForm': choiceQuestionFrom, + return render(request, "defectDojo-engagement-survey/create_related_question.html", { + "name": "Add Question", + "form": form, + "textForm": textQuestionForm, + "choiceForm": choiceQuestionFrom, }) -@user_is_configuration_authorized('dojo.change_question') +@user_is_configuration_authorized("dojo.change_question") def edit_question(request, qid): try: question = Question.polymorphic.get(id=qid) @@ -538,22 +538,22 @@ def edit_question(request, qid): messages.add_message( request, messages.ERROR, - 'This question is part of an already answered survey. If you change it, the responses ' - 'may no longer be valid.', - extra_tags='alert-info') + "This question is part of an already answered survey. If you change it, the responses " + "may no longer be valid.", + extra_tags="alert-info") type = str(ContentType.objects.get_for_model(question)) - if type == 'dojo | text question': + if type == "dojo | text question": form = EditTextQuestionForm(instance=question) - elif type == 'dojo | choice question': + elif type == "dojo | choice question": form = EditChoiceQuestionForm(instance=question) else: raise Http404 - if request.method == 'POST': - if type == 'dojo | text question': + if request.method == "POST": + if type == "dojo | text question": form = EditTextQuestionForm(request.POST, instance=question) - elif type == 'dojo | choice question': + elif type == "dojo | choice question": form = EditChoiceQuestionForm(request.POST, instance=question) else: raise Http404 @@ -569,27 +569,27 @@ def edit_question(request, qid): messages.add_message( request, messages.SUCCESS, - 'Answered surveys associated with this survey have been set to uncompleted.', - extra_tags='alert-warning') + "Answered surveys associated with this survey have been set to uncompleted.", + extra_tags="alert-warning") messages.add_message( request, messages.SUCCESS, - 'Question updated successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questions')) + "Question updated successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questions")) add_breadcrumb(title="Edit Question", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/edit_question.html', { - 'name': 'Edit Question', - 'question': question, - 'form': form, + return render(request, "defectDojo-engagement-survey/edit_question.html", { + "name": "Edit Question", + "question": question, + "form": form, }) -@user_is_configuration_authorized('dojo.change_question') +@user_is_configuration_authorized("dojo.change_question") def add_choices(request): form = AddChoicesForm() - if request.method == 'POST': + if request.method == "POST": form = AddChoicesForm(request.POST) if form.is_valid(): choice, created = Choice.objects.get_or_create(**form.cleaned_data) @@ -597,29 +597,29 @@ def add_choices(request): messages.add_message( request, messages.SUCCESS, - 'Choice added successfully.', - extra_tags='alert-success') - if '_popup' in request.GET: - resp = '' + "Choice added successfully.", + extra_tags="alert-success") + if "_popup" in request.GET: + resp = "" if created: resp = f'' resp += '' return HttpResponse(resp) add_breadcrumb(title="Add Choice", top_level=False, request=request) - return 
render(request, 'defectDojo-engagement-survey/add_choices.html', { - 'name': 'Add Choice', - 'form': form, + return render(request, "defectDojo-engagement-survey/add_choices.html", { + "name": "Add Choice", + "form": form, }) # Empty questionnaire functions -@user_is_configuration_authorized('dojo.add_engagement_survey') +@user_is_configuration_authorized("dojo.add_engagement_survey") def add_empty_questionnaire(request): user = request.user surveys = Engagement_Survey.objects.all() form = AddGeneralQuestionnaireForm() engagement = None - if request.method == 'POST': + if request.method == "POST": form = AddGeneralQuestionnaireForm(request.POST) if form.is_valid(): survey = form.save(commit=False) @@ -628,29 +628,29 @@ def add_empty_questionnaire(request): messages.add_message( request, messages.SUCCESS, - 'Engagement Created, Questionnaire successfully added, answers pending.', - extra_tags='alert-success') - if 'respond_survey' in request.POST: - return HttpResponseRedirect(reverse('dashboard')) - return HttpResponseRedirect(reverse('questionnaire')) + "Engagement Created, Questionnaire successfully added, answers pending.", + extra_tags="alert-success") + if "respond_survey" in request.POST: + return HttpResponseRedirect(reverse("dashboard")) + return HttpResponseRedirect(reverse("questionnaire")) else: messages.add_message( request, messages.ERROR, - 'Questionnaire could not be added.', - extra_tags='alert-danger') + "Questionnaire could not be added.", + extra_tags="alert-danger") form.fields["survey"].queryset = surveys add_breadcrumb(title="Add Empty Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/add_survey.html', { - 'surveys': surveys, - 'user': user, - 'form': form, - 'engagement': engagement, + return render(request, "defectDojo-engagement-survey/add_survey.html", { + "surveys": surveys, + "user": user, + "form": form, + "engagement": engagement, }) -@user_is_configuration_authorized('dojo.view_engagement_survey') +@user_is_configuration_authorized("dojo.view_engagement_survey") def view_empty_survey(request, esid): survey = get_object_or_404(Answered_Survey, id=esid) engagement = None @@ -659,23 +659,23 @@ def view_empty_survey(request, esid): title=survey.survey.name + " Questionnaire Responses", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/view_survey.html', { - 'survey': survey, - 'user': request.user, - 'engagement': engagement, - 'questions': questions, - 'name': survey.survey.name + " Questionnaire Responses", + return render(request, "defectDojo-engagement-survey/view_survey.html", { + "survey": survey, + "user": request.user, + "engagement": engagement, + "questions": questions, + "name": survey.survey.name + " Questionnaire Responses", }) -@user_is_configuration_authorized('dojo.delete_engagement_survey') +@user_is_configuration_authorized("dojo.delete_engagement_survey") def delete_empty_questionnaire(request, esid): engagement = None survey = get_object_or_404(Answered_Survey, id=esid) questions = get_answered_questions(survey=survey, read_only=True) form = Delete_Questionnaire_Form(instance=survey) - if request.method == 'POST': + if request.method == "POST": form = Delete_Questionnaire_Form(request.POST, instance=survey) if form.is_valid(): answers = Answer.objects.filter( @@ -687,61 +687,61 @@ def delete_empty_questionnaire(request, esid): messages.add_message( request, messages.SUCCESS, - 'Questionnaire deleted successfully.', - extra_tags='alert-success') - 
return HttpResponseRedirect(reverse('survey')) + "Questionnaire deleted successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("survey")) else: messages.add_message( request, messages.ERROR, - 'Unable to delete Questionnaire.', - extra_tags='alert-danger') + "Unable to delete Questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/delete_questionnaire.html', { - 'survey': survey, - 'form': form, - 'engagement': engagement, - 'questions': questions, + return render(request, "defectDojo-engagement-survey/delete_questionnaire.html", { + "survey": survey, + "form": form, + "engagement": engagement, + "questions": questions, }) -@user_is_configuration_authorized('dojo.delete_engagement_survey') +@user_is_configuration_authorized("dojo.delete_engagement_survey") def delete_general_questionnaire(request, esid): engagement = None questions = None survey = get_object_or_404(General_Survey, id=esid) form = DeleteGeneralQuestionnaireForm(instance=survey) - if request.method == 'POST': + if request.method == "POST": form = DeleteGeneralQuestionnaireForm(request.POST, instance=survey) if form.is_valid(): survey.delete() messages.add_message( request, messages.SUCCESS, - 'Questionnaire deleted successfully.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('questionnaire')) + "Questionnaire deleted successfully.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("questionnaire")) else: messages.add_message( request, messages.ERROR, - 'Unable to delete questionnaire.', - extra_tags='alert-danger') + "Unable to delete questionnaire.", + extra_tags="alert-danger") add_breadcrumb( title="Delete " + survey.survey.name + " Questionnaire", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/delete_questionnaire.html', { - 'survey': survey, - 'form': form, - 'engagement': engagement, - 'questions': questions, + return render(request, "defectDojo-engagement-survey/delete_questionnaire.html", { + "survey": survey, + "form": form, + "engagement": engagement, + "questions": questions, }) @@ -756,8 +756,8 @@ def answer_empty_survey(request, esid): messages.add_message( request, messages.ERROR, - 'You must be logged in to answer questionnaire. Otherwise, enable anonymous response in system settings.', - extra_tags='alert-danger') + "You must be logged in to answer questionnaire. Otherwise, enable anonymous response in system settings.", + extra_tags="alert-danger") # will render 403 raise PermissionDenied @@ -770,7 +770,7 @@ def answer_empty_survey(request, esid): for q in Question.polymorphic.filter(engagement_survey=engagement_survey) ] - if request.method == 'POST': + if request.method == "POST": survey = Answered_Survey(survey=engagement_survey) survey.save() questions = [ @@ -799,33 +799,33 @@ def answer_empty_survey(request, esid): general_survey.num_responses = general_survey.num_responses + 1 general_survey.save() if request.user.is_anonymous: - message = 'Your responses have been recorded.' + message = "Your responses have been recorded." else: - message = 'Successfully answered, all answers valid.' + message = "Successfully answered, all answers valid." 
messages.add_message( request, messages.SUCCESS, message, - extra_tags='alert-success') + extra_tags="alert-success") return HttpResponseRedirect( - reverse('dashboard')) + reverse("dashboard")) else: messages.add_message( request, messages.ERROR, - 'Questionnaire has errors, please correct.', - extra_tags='alert-danger') + "Questionnaire has errors, please correct.", + extra_tags="alert-danger") add_breadcrumb( title="Answer Empty " + engagement_survey.name + " Questionnaire", top_level=False, request=request) if survey is None: survey = engagement_survey - return render(request, 'defectDojo-engagement-survey/answer_survey.html', { - 'survey': survey, - 'engagement': engagement, - 'questions': questions, + return render(request, "defectDojo-engagement-survey/answer_survey.html", { + "survey": survey, + "engagement": engagement, + "questions": questions, }) @@ -834,10 +834,10 @@ def engagement_empty_survey(request, esid): engagement = None form = AddEngagementForm() - if request.method == 'POST': + if request.method == "POST": form = AddEngagementForm(request.POST) if form.is_valid(): - product = form.cleaned_data.get('product') + product = form.cleaned_data.get("product") user_has_permission_or_403(request.user, product, Permissions.Engagement_Add) engagement = Engagement( product_id=product.id, @@ -849,17 +849,17 @@ def engagement_empty_survey(request, esid): messages.add_message( request, messages.SUCCESS, - 'Engagement created and questionnaire successfully linked.', - extra_tags='alert-success') - return HttpResponseRedirect(reverse('edit_engagement', args=(engagement.id, ))) + "Engagement created and questionnaire successfully linked.", + extra_tags="alert-success") + return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id, ))) else: messages.add_message( request, messages.ERROR, - 'Questionnaire could not be added.', - extra_tags='alert-danger') + "Questionnaire could not be added.", + extra_tags="alert-danger") add_breadcrumb( title="Link Questionnaire to new Engagement", top_level=False, request=request) - return render(request, 'defectDojo-engagement-survey/add_engagement.html', {'form': form}) + return render(request, "defectDojo-engagement-survey/add_engagement.html", {"form": form}) diff --git a/dojo/system_settings/urls.py b/dojo/system_settings/urls.py index da5788bc81..1452493095 100644 --- a/dojo/system_settings/urls.py +++ b/dojo/system_settings/urls.py @@ -4,8 +4,8 @@ urlpatterns = [ re_path( - r'^system_settings$', + r"^system_settings$", views.SystemSettingsView.as_view(), - name='system_settings', + name="system_settings", ), ] diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py index 991fe46ca2..3690201a05 100644 --- a/dojo/system_settings/views.py +++ b/dojo/system_settings/views.py @@ -61,38 +61,38 @@ def validate_form( context: dict, ) -> Tuple[HttpRequest, bool]: if context["form"].is_valid(): - if (context["form"].cleaned_data['default_group'] is None and context["form"].cleaned_data['default_group_role'] is not None) or \ - (context["form"].cleaned_data['default_group'] is not None and context["form"].cleaned_data['default_group_role'] is None): + if (context["form"].cleaned_data["default_group"] is None and context["form"].cleaned_data["default_group_role"] is not None) or \ + (context["form"].cleaned_data["default_group"] is not None and context["form"].cleaned_data["default_group_role"] is None): messages.add_message( request, messages.WARNING, - 'Settings cannot be saved: Default group and Default group role must 
either both be set or both be empty.', - extra_tags='alert-warning') - elif context["form"].cleaned_data['minimum_password_length'] >= context["form"].cleaned_data['maximum_password_length']: + "Settings cannot be saved: Default group and Default group role must either both be set or both be empty.", + extra_tags="alert-warning") + elif context["form"].cleaned_data["minimum_password_length"] >= context["form"].cleaned_data["maximum_password_length"]: messages.add_message( request, messages.WARNING, - 'Settings cannot be saved: Minimum required password length must be less than maximum required password length.', - extra_tags='alert-warning') - elif context["form"].cleaned_data['enable_deduplication'] is True and context["form"].cleaned_data['false_positive_history'] is True: + "Settings cannot be saved: Minimum required password length must be less than maximum required password length.", + extra_tags="alert-warning") + elif context["form"].cleaned_data["enable_deduplication"] is True and context["form"].cleaned_data["false_positive_history"] is True: messages.add_message( request, messages.WARNING, - 'Settings cannot be saved: Deduplicate findings and False positive history can not be set at the same time.', - extra_tags='alert-warning') - elif context["form"].cleaned_data['retroactive_false_positive_history'] is True and context["form"].cleaned_data['false_positive_history'] is False: + "Settings cannot be saved: Deduplicate findings and False positive history can not be set at the same time.", + extra_tags="alert-warning") + elif context["form"].cleaned_data["retroactive_false_positive_history"] is True and context["form"].cleaned_data["false_positive_history"] is False: messages.add_message( request, messages.WARNING, - 'Settings cannot be saved: Retroactive false positive history can not be set without False positive history.', - extra_tags='alert-warning') + "Settings cannot be saved: Retroactive false positive history can not be set without False positive history.", + extra_tags="alert-warning") else: context["form"].save() messages.add_message( request, messages.SUCCESS, - 'Settings saved.', - extra_tags='alert-success') + "Settings saved.", + extra_tags="alert-success") return request, True return request, False @@ -101,7 +101,7 @@ def get_celery_status( context: dict, ) -> None: # Celery needs to be set with the setting: CELERY_RESULT_BACKEND = 'db+sqlite:///dojo.celeryresults.sqlite' - if hasattr(settings, 'CELERY_RESULT_BACKEND'): + if hasattr(settings, "CELERY_RESULT_BACKEND"): # Check the status of Celery by sending calling a celery task context["celery_bool"] = get_celery_worker_status() diff --git a/dojo/tags_signals.py b/dojo/tags_signals.py index 10869a5f28..f7e09fa9b0 100644 --- a/dojo/tags_signals.py +++ b/dojo/tags_signals.py @@ -63,7 +63,7 @@ def inherit_product_tags(instance) -> bool: if product and product.enable_product_tag_inheritance: return True - return get_system_setting('enable_product_tag_inheritance') + return get_system_setting("enable_product_tag_inheritance") def get_product(instance): diff --git a/dojo/tasks.py b/dojo/tasks.py index 8f4941bbe9..9e96c25860 100644 --- a/dojo/tasks.py +++ b/dojo/tasks.py @@ -20,31 +20,31 @@ # Logs the error to the alerts table, which appears in the notification toolbar def log_generic_alert(source, title, description): - create_notification(event='other', title=title, description=description, - icon='bullseye', source=source) + create_notification(event="other", title=title, description=description, + icon="bullseye", 
source=source) @app.task(bind=True) def add_alerts(self, runinterval): now = timezone.now() - upcoming_engagements = Engagement.objects.filter(target_start__gt=now + timedelta(days=3), target_start__lt=now + timedelta(days=3) + runinterval).order_by('target_start') + upcoming_engagements = Engagement.objects.filter(target_start__gt=now + timedelta(days=3), target_start__lt=now + timedelta(days=3) + runinterval).order_by("target_start") for engagement in upcoming_engagements: - create_notification(event='upcoming_engagement', - title=f'Upcoming engagement: {engagement.name}', + create_notification(event="upcoming_engagement", + title=f"Upcoming engagement: {engagement.name}", engagement=engagement, recipients=[engagement.lead], - url=reverse('view_engagement', args=(engagement.id,))) + url=reverse("view_engagement", args=(engagement.id,))) stale_engagements = Engagement.objects.filter( target_start__gt=now - runinterval, target_end__lt=now, - status='In Progress').order_by('-target_end') + status="In Progress").order_by("-target_end") for eng in stale_engagements: - create_notification(event='stale_engagement', - title=f'Stale Engagement: {eng.name}', + create_notification(event="stale_engagement", + title=f"Stale Engagement: {eng.name}", description='The engagement "{}" is stale. Target end was {}.'.format(eng.name, eng.target_end.strftime("%b. %d, %Y")), - url=reverse('view_engagement', args=(eng.id,)), + url=reverse("view_engagement", args=(eng.id,)), recipients=[eng.lead]) system_settings = System_Settings.objects.get() @@ -52,13 +52,13 @@ def add_alerts(self, runinterval): # Close Engagements older than user defined days close_days = system_settings.engagement_auto_close_days unclosed_engagements = Engagement.objects.filter(target_end__lte=now - timedelta(days=close_days), - status='In Progress').order_by('target_end') + status="In Progress").order_by("target_end") for eng in unclosed_engagements: - create_notification(event='auto_close_engagement', + create_notification(event="auto_close_engagement", title=eng.name, description='The engagement "{}" has auto-closed. Target end was {}.'.format(eng.name, eng.target_end.strftime("%b. 
%d, %Y")), - url=reverse('view_engagement', args=(eng.id,)), + url=reverse("view_engagement", args=(eng.id,)), recipients=[eng.lead]) unclosed_engagements.update(status="Completed", active=False, updated=timezone.now()) @@ -79,13 +79,13 @@ def cleanup_alerts(*args, **kwargs): if max_alerts_per_user > -1: total_deleted_count = 0 - logger.info('start deleting oldest alerts if a user has more than %s alerts', max_alerts_per_user) + logger.info("start deleting oldest alerts if a user has more than %s alerts", max_alerts_per_user) users = User.objects.all() for user in users: - alerts_to_delete = Alerts.objects.filter(user_id=user.id).order_by('-created')[max_alerts_per_user:].values_list("id", flat=True) + alerts_to_delete = Alerts.objects.filter(user_id=user.id).order_by("-created")[max_alerts_per_user:].values_list("id", flat=True) total_deleted_count += len(alerts_to_delete) Alerts.objects.filter(pk__in=list(alerts_to_delete)).delete() - logger.info('total number of alerts deleted: %s', total_deleted_count) + logger.info("total number of alerts deleted: %s", total_deleted_count) @app.task(bind=True) @@ -103,9 +103,9 @@ def flush_auditlog(*args, **kwargs): logger.debug("Initially received %d Logentries", event_count) if event_count > 0: subset._raw_delete(subset.db) - logger.debug('Total number of audit log entries deleted: %s', event_count) + logger.debug("Total number of audit log entries deleted: %s", event_count) else: - logger.debug('No outdated Logentries found') + logger.debug("No outdated Logentries found") @app.task(bind=True) @@ -119,7 +119,7 @@ def async_dupe_delete(*args, **kwargs): enabled = False if enabled and dupe_max is None: - logger.info('skipping deletion of excess duplicates: max_dupes not configured') + logger.info("skipping deletion of excess duplicates: max_dupes not configured") return if enabled: @@ -130,17 +130,17 @@ def async_dupe_delete(*args, **kwargs): results = Finding.objects \ .filter(duplicate=True) \ .order_by() \ - .values('duplicate_finding') \ - .annotate(num_dupes=Count('id')) \ + .values("duplicate_finding") \ + .annotate(num_dupes=Count("id")) \ .filter(num_dupes__gt=dupe_max)[:total_duplicate_delete_count_max_per_run] - originals_with_too_many_duplicates_ids = [result['duplicate_finding'] for result in results] + originals_with_too_many_duplicates_ids = [result["duplicate_finding"] for result in results] - originals_with_too_many_duplicates = Finding.objects.filter(id__in=originals_with_too_many_duplicates_ids).order_by('id') + originals_with_too_many_duplicates = Finding.objects.filter(id__in=originals_with_too_many_duplicates_ids).order_by("id") # prefetch to make it faster originals_with_too_many_duplicates = originals_with_too_many_duplicates.prefetch_related(Prefetch("original_finding", - queryset=Finding.objects.filter(duplicate=True).order_by('date'))) + queryset=Finding.objects.filter(duplicate=True).order_by("date"))) total_deleted_count = 0 for original in originals_with_too_many_duplicates: @@ -148,7 +148,7 @@ def async_dupe_delete(*args, **kwargs): dupe_count = len(duplicate_list) - dupe_max for finding in duplicate_list: - deduplicationLogger.debug(f'deleting finding {finding.id}:{finding.title} ({finding.hash_code}))') + deduplicationLogger.debug(f"deleting finding {finding.id}:{finding.title} ({finding.hash_code}))") finding.delete() total_deleted_count += 1 dupe_count -= 1 @@ -160,7 +160,7 @@ def async_dupe_delete(*args, **kwargs): if total_deleted_count >= total_duplicate_delete_count_max_per_run: break - logger.info('total number 
of excess duplicates deleted: %s', total_deleted_count) + logger.info("total number of excess duplicates deleted: %s", total_deleted_count) @app.task(ignore_result=False) diff --git a/dojo/templatetags/announcement_banner_tags.py b/dojo/templatetags/announcement_banner_tags.py index 616cd40196..8600bb3b79 100644 --- a/dojo/templatetags/announcement_banner_tags.py +++ b/dojo/templatetags/announcement_banner_tags.py @@ -9,8 +9,8 @@ @register.filter def bleach_announcement_message(message): allowed_attributes = bleach.ALLOWED_ATTRIBUTES - allowed_attributes['a'] = allowed_attributes['a'] + ['style', 'target'] + allowed_attributes["a"] = allowed_attributes["a"] + ["style", "target"] return mark_safe(bleach.clean( message, attributes=allowed_attributes, - css_sanitizer=CSSSanitizer(allowed_css_properties=['color', 'font-weight']))) + css_sanitizer=CSSSanitizer(allowed_css_properties=["color", "font-weight"]))) diff --git a/dojo/templatetags/dict_key.py b/dojo/templatetags/dict_key.py index 2724e8fa84..71bdefcfd4 100644 --- a/dojo/templatetags/dict_key.py +++ b/dojo/templatetags/dict_key.py @@ -1,6 +1,6 @@ from django.template.defaultfilters import register -@register.filter(name='dict_key') +@register.filter(name="dict_key") def dict_key(d, key): return d.get(key) diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 42b82dd085..0efa8936a4 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -57,21 +57,21 @@ ] finding_related_action_classes_dict = { - 'reset_finding_duplicate_status': 'fa-solid fa-eraser', - 'set_finding_as_original': 'fa-brands fa-superpowers', - 'mark_finding_duplicate': 'fa-solid fa-copy', + "reset_finding_duplicate_status": "fa-solid fa-eraser", + "set_finding_as_original": "fa-brands fa-superpowers", + "mark_finding_duplicate": "fa-solid fa-copy", } finding_related_action_title_dict = { - 'reset_finding_duplicate_status': 'Reset duplicate status', - 'set_finding_as_original': 'Set as original', - 'mark_finding_duplicate': 'Mark as duplicate', + "reset_finding_duplicate_status": "Reset duplicate status", + "set_finding_as_original": "Set as original", + "mark_finding_duplicate": "Mark as duplicate", } supported_file_formats = [ - 'apng', 'avif', 'gif', 'jpg', - 'jpeg', 'jfif', 'pjpeg', 'pjp', - 'png', 'svg', 'webp', 'pdf', + "apng", "avif", "gif", "jpg", + "jpeg", "jfif", "pjpeg", "pjp", + "png", "svg", "webp", "pdf", ] @@ -79,12 +79,12 @@ def markdown_render(value): if value: markdown_text = markdown.markdown(value, - extensions=['markdown.extensions.nl2br', - 'markdown.extensions.sane_lists', - 'markdown.extensions.codehilite', - 'markdown.extensions.fenced_code', - 'markdown.extensions.toc', - 'markdown.extensions.tables']) + extensions=["markdown.extensions.nl2br", + "markdown.extensions.sane_lists", + "markdown.extensions.codehilite", + "markdown.extensions.fenced_code", + "markdown.extensions.toc", + "markdown.extensions.tables"]) return mark_safe(bleach.clean(markdown_text, tags=markdown_tags, attributes=markdown_attrs, css_sanitizer=markdown_styles)) @@ -95,22 +95,22 @@ def text_shortener(value, length): return return_value -@register.filter(name='url_shortener') +@register.filter(name="url_shortener") def url_shortener(value): return text_shortener(value, 80) -@register.filter(name='breadcrumb_shortener') +@register.filter(name="breadcrumb_shortener") def breadcrumb_shortener(value): return text_shortener(value, 15) -@register.filter(name='get_pwd') +@register.filter(name="get_pwd") def 
get_pwd(value): return prepare_for_view(value) -@register.filter(name='checklist_status') +@register.filter(name="checklist_status") def checklist_status(value): return Check_List.get_status(value) @@ -127,7 +127,7 @@ def linebreaksasciidocbr(value, autoescape=None): if autoescape: value = escape(value) - return mark_safe(value.replace('\n', ' +
<br/>'))
+    return mark_safe(value.replace("\n", " +<br/>"))


 @register.simple_tag
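The replacement target in linebreaksasciidocbr above is the AsciiDoc hard line break (a trailing " +") combined with an HTML <br/>; the tag is inferred from the filter's name and the surrounding escape()/mark_safe() calls, so treat it as a best guess rather than the verbatim source. A rough usage sketch, assuming a configured Django project with dojo.templatetags.display_tags available to the template engine:

    from django.template import Context, Template

    # Each "\n" in the value should come back as " +<br/>" so the text
    # renders as separate lines in both AsciiDoc and HTML output.
    template = Template("{% load display_tags %}{{ note|linebreaksasciidocbr }}")
    rendered = template.render(Context({"note": "first line\nsecond line"}))
    # expected: "first line +<br/>second line"
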
")) @register.simple_tag @@ -174,14 +174,14 @@ def content_type_str(obj): return ContentType.objects.get_for_model(obj) -@register.filter(name='remove_string') +@register.filter(name="remove_string") def remove_string(string, value): - return string.replace(value, '') + return string.replace(value, "") @register.filter def percentage(fraction, value): - return_value = '' + return_value = "" if int(value) > 0: try: return_value = "%.1f%%" % ((float(fraction) / float(value)) * 100) @@ -193,9 +193,9 @@ def percentage(fraction, value): @register.filter def format_epss(value): try: - return f'{value:.2%}' + return f"{value:.2%}" except (ValueError, TypeError): - return 'N.A.' + return "N.A." def asvs_calc_level(benchmark_score): @@ -215,7 +215,7 @@ def asvs_calc_level(benchmark_score): elif benchmark_score.desired_level == "Level 3": benchmarks = benchmarks.filter(control__level_3=True) - noted_benchmarks = benchmarks.filter(notes__isnull=False).order_by('id').distinct() + noted_benchmarks = benchmarks.filter(notes__isnull=False).order_by("id").distinct() noted_benchmarks_ids = [b.id for b in noted_benchmarks] total = len(benchmarks) @@ -235,13 +235,13 @@ def asvs_level(benchmark_score): level = percentage(total_viewed, total) return _("Checklist is %(level)s full (pass: %(total_viewed)s, total: %(total)s)") % { - 'level': level, - 'total_viewed': total_viewed, - 'total': total, + "level": level, + "total_viewed": total_viewed, + "total": total, } -@register.filter(name='version_num') +@register.filter(name="version_num") def version_num(value): version = "" if value: @@ -250,9 +250,9 @@ def version_num(value): return version -@register.filter(name='group_sla') +@register.filter(name="group_sla") def group_sla(group): - if not get_system_setting('enable_finding_sla'): + if not get_system_setting("enable_finding_sla"): return "" if not group.findings.all(): @@ -260,13 +260,13 @@ def group_sla(group): # if there is at least 1 finding, there will be date, severity etc to calculate sla # Get the first finding with the highests severity - finding = group.findings.all().order_by('severity').first() + finding = group.findings.all().order_by("severity").first() return finding_sla(finding) -@register.filter(name='finding_sla') +@register.filter(name="finding_sla") def finding_sla(finding): - if not get_system_setting('enable_finding_sla'): + if not get_system_setting("enable_finding_sla"): return "" sla_age, enforce_sla = finding.get_sla_period() @@ -278,28 +278,28 @@ def finding_sla(finding): find_sla = finding.sla_days_remaining() if finding.mitigated: status = "blue" - status_text = 'Remediated within SLA for ' + severity.lower() + ' findings (' + str(sla_age) + ' days since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')' + status_text = "Remediated within SLA for " + severity.lower() + " findings (" + str(sla_age) + " days since " + finding.get_sla_start_date().strftime("%b %d, %Y") + ")" if find_sla and find_sla < 0: status = "orange" find_sla = abs(find_sla) - status_text = 'Out of SLA: Remediated ' + str( - find_sla) + ' days past SLA for ' + severity.lower() + ' findings (' + str(sla_age) + ' days since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')' + status_text = "Out of SLA: Remediated " + str( + find_sla) + " days past SLA for " + severity.lower() + " findings (" + str(sla_age) + " days since " + finding.get_sla_start_date().strftime("%b %d, %Y") + ")" else: status = "green" - status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' 
days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + status_text = "Remediation for " + severity.lower() + " findings in " + str(sla_age) + " days or less since " + finding.get_sla_start_date().strftime("%b %d, %Y") if find_sla and find_sla < 0: status = "red" - status_text = 'Overdue: Remediation for ' + severity.lower() + ' findings in ' + str( - sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + status_text = "Overdue: Remediation for " + severity.lower() + " findings in " + str( + sla_age) + " days or less since " + finding.get_sla_start_date().strftime("%b %d, %Y") if find_sla is not None: title = '' \ - '' + str(find_sla) + '' + '' + str(find_sla) + "" return mark_safe(title) -@register.filter(name='product_grade') +@register.filter(name="product_grade") def product_grade(product): grade = "" system_settings = System_Settings.objects.get() @@ -311,15 +311,15 @@ def product_grade(product): calculate_grade(product) if prod_numeric_grade: if prod_numeric_grade >= system_settings.product_grade_a: - grade = 'A' + grade = "A" elif prod_numeric_grade < system_settings.product_grade_a and prod_numeric_grade >= system_settings.product_grade_b: - grade = 'B' + grade = "B" elif prod_numeric_grade < system_settings.product_grade_b and prod_numeric_grade >= system_settings.product_grade_c: - grade = 'C' + grade = "C" elif prod_numeric_grade < system_settings.product_grade_c and prod_numeric_grade >= system_settings.product_grade_d: - grade = 'D' + grade = "D" elif prod_numeric_grade <= system_settings.product_grade_f: - grade = 'F' + grade = "F" return grade @@ -334,7 +334,7 @@ def display_index(data, index): def action_log_entry(value, autoescape=None): import json history = json.loads(value) - text = '' + text = "" for k in history.keys(): text += k.capitalize() + ' changed from "' + \ history[k][0] + '" to "' + history[k][1] + '"\n' @@ -343,16 +343,16 @@ def action_log_entry(value, autoescape=None): @register.simple_tag(takes_context=True) def dojo_body_class(context): - request = context['request'] - return request.COOKIES.get('dojo-sidebar', 'min') + request = context["request"] + return request.COOKIES.get("dojo-sidebar", "min") -@register.filter(name='datediff_time') +@register.filter(name="datediff_time") def datediff_time(date1, date2): date_str = "" diff = dateutil.relativedelta.relativedelta(date2, date1) - attrs = ['years', 'months', 'days'] - human_date = ['%d %s' % (getattr(diff, attr), getattr(diff, attr) > 1 and attr or attr[:-1]) + attrs = ["years", "months", "days"] + human_date = ["%d %s" % (getattr(diff, attr), getattr(diff, attr) > 1 and attr or attr[:-1]) for attr in attrs if getattr(diff, attr)] for date_part in human_date: date_str = date_str + date_part + " " @@ -364,7 +364,7 @@ def datediff_time(date1, date2): return date_str -@register.filter(name='overdue') +@register.filter(name="overdue") def overdue(date1): date_str = "" if date1 < datetime.datetime.now().date(): @@ -373,7 +373,7 @@ def overdue(date1): return date_str -@register.filter(name='notspecified') +@register.filter(name="notspecified") def notspecified(text): if text: return text @@ -416,7 +416,7 @@ def render(self, context): num_cols = self.num_cols context[self.varname] = zip( *[chain(iterable, [None] * (num_cols - 1))] * num_cols) - return '' + return "" try: _, iterable, _, num_cols, _, _, varname = token.split_contents() @@ -429,11 +429,11 @@ def render(self, context): @register.simple_tag(takes_context=True) def pic_token(context, image, 
size): - user_id = context['user_id'] + user_id = context["user_id"] user = User.objects.get(id=user_id) token = FileAccessToken(user=user, file=image, size=size) token.save() - return reverse('download_finding_pic', args=[token.token]) + return reverse("download_finding_pic", args=[token.token]) @register.filter @@ -489,24 +489,24 @@ def stars(filled, total, tooltip): code += '" return code @register.filter def business_criticality_icon(value): if value == Product.VERY_HIGH_CRITICALITY: - return mark_safe(stars(5, 5, 'Very High')) + return mark_safe(stars(5, 5, "Very High")) if value == Product.HIGH_CRITICALITY: - return mark_safe(stars(4, 5, 'High')) + return mark_safe(stars(4, 5, "High")) if value == Product.MEDIUM_CRITICALITY: - return mark_safe(stars(3, 5, 'Medium')) + return mark_safe(stars(3, 5, "Medium")) if value == Product.LOW_CRITICALITY: - return mark_safe(stars(2, 5, 'Low')) + return mark_safe(stars(2, 5, "Low")) if value == Product.VERY_LOW_CRITICALITY: - return mark_safe(stars(1, 5, 'Very Low')) + return mark_safe(stars(1, 5, "Very Low")) if value == Product.NONE_CRITICALITY: - return mark_safe(stars(0, 5, 'None')) + return mark_safe(stars(0, 5, "None")) else: return "" # mark_safe(not_specified_icon('Business Criticality Not Specified')) @@ -522,15 +522,15 @@ def last_value(value): @register.filter def platform_icon(value): if value == Product.WEB_PLATFORM: - return mark_safe(icon('list-alt', 'Web')) + return mark_safe(icon("list-alt", "Web")) elif value == Product.DESKTOP_PLATFORM: - return mark_safe(icon('desktop', 'Desktop')) + return mark_safe(icon("desktop", "Desktop")) elif value == Product.MOBILE_PLATFORM: - return mark_safe(icon('mobile', 'Mobile')) + return mark_safe(icon("mobile", "Mobile")) elif value == Product.WEB_SERVICE_PLATFORM: - return mark_safe(icon('plug', 'Web Service')) + return mark_safe(icon("plug", "Web Service")) elif value == Product.IOT: - return mark_safe(icon('random', 'Internet of Things')) + return mark_safe(icon("random", "Internet of Things")) else: return "" # mark_safe(not_specified_icon('Platform Not Specified')) @@ -538,11 +538,11 @@ def platform_icon(value): @register.filter def lifecycle_icon(value): if value == Product.CONSTRUCTION: - return mark_safe(icon('compass', 'Explore')) + return mark_safe(icon("compass", "Explore")) if value == Product.PRODUCTION: - return mark_safe(icon('ship', 'Sustain')) + return mark_safe(icon("ship", "Sustain")) if value == Product.RETIREMENT: - return mark_safe(icon('moon-o', 'Retire')) + return mark_safe(icon("moon-o", "Retire")) else: return "" # mark_safe(not_specified_icon('Lifecycle Not Specified')) @@ -550,17 +550,17 @@ def lifecycle_icon(value): @register.filter def origin_icon(value): if value == Product.THIRD_PARTY_LIBRARY_ORIGIN: - return mark_safe(icon('book', 'Third-Party Library')) + return mark_safe(icon("book", "Third-Party Library")) if value == Product.PURCHASED_ORIGIN: - return mark_safe(icon('money', 'Purchased')) + return mark_safe(icon("money", "Purchased")) if value == Product.CONTRACTOR_ORIGIN: - return mark_safe(icon('suitcase', 'Contractor Developed')) + return mark_safe(icon("suitcase", "Contractor Developed")) if value == Product.INTERNALLY_DEVELOPED_ORIGIN: - return mark_safe(icon('home', 'Internally Developed')) + return mark_safe(icon("home", "Internally Developed")) if value == Product.OPEN_SOURCE_ORIGIN: - return mark_safe(icon('code', 'Open Source')) + return mark_safe(icon("code", "Open Source")) if value == Product.OUTSOURCED_ORIGIN: - return mark_safe(icon('globe', 
'Outsourced')) + return mark_safe(icon("globe", "Outsourced")) else: return "" # mark_safe(not_specified_icon('Origin Not Specified')) @@ -568,102 +568,102 @@ def origin_icon(value): @register.filter def external_audience_icon(value): if value: - return mark_safe(icon('users', 'External Audience')) + return mark_safe(icon("users", "External Audience")) else: - return '' + return "" @register.filter def internet_accessible_icon(value): if value: - return mark_safe(icon('cloud', 'Internet Accessible')) + return mark_safe(icon("cloud", "Internet Accessible")) else: - return '' + return "" @register.filter def get_severity_count(id, table): if table == "test": counts = Finding.objects.filter(test=id). \ - prefetch_related('test__engagement__product').aggregate( + prefetch_related("test__engagement__product").aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), output_field=IntegerField())), critical=Sum( - Case(When(severity='Critical', + Case(When(severity="Critical", then=Value(1)), output_field=IntegerField())), high=Sum( - Case(When(severity='High', + Case(When(severity="High", then=Value(1)), output_field=IntegerField())), medium=Sum( - Case(When(severity='Medium', + Case(When(severity="Medium", then=Value(1)), output_field=IntegerField())), low=Sum( - Case(When(severity='Low', + Case(When(severity="Low", then=Value(1)), output_field=IntegerField())), info=Sum( - Case(When(severity='Info', + Case(When(severity="Info", then=Value(1)), output_field=IntegerField())), ) elif table == "engagement": counts = Finding.objects.filter(test__engagement=id, active=True, duplicate=False). \ - prefetch_related('test__engagement__product').aggregate( + prefetch_related("test__engagement__product").aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), output_field=IntegerField())), critical=Sum( - Case(When(severity='Critical', + Case(When(severity="Critical", then=Value(1)), output_field=IntegerField())), high=Sum( - Case(When(severity='High', + Case(When(severity="High", then=Value(1)), output_field=IntegerField())), medium=Sum( - Case(When(severity='Medium', + Case(When(severity="Medium", then=Value(1)), output_field=IntegerField())), low=Sum( - Case(When(severity='Low', + Case(When(severity="Low", then=Value(1)), output_field=IntegerField())), info=Sum( - Case(When(severity='Info', + Case(When(severity="Info", then=Value(1)), output_field=IntegerField())), ) elif table == "product": counts = Finding.objects.filter(test__engagement__product=id). 
\ - prefetch_related('test__engagement__product').aggregate( + prefetch_related("test__engagement__product").aggregate( total=Sum( - Case(When(severity__in=('Critical', 'High', 'Medium', 'Low'), + Case(When(severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), output_field=IntegerField())), critical=Sum( - Case(When(severity='Critical', + Case(When(severity="Critical", then=Value(1)), output_field=IntegerField())), high=Sum( - Case(When(severity='High', + Case(When(severity="High", then=Value(1)), output_field=IntegerField())), medium=Sum( - Case(When(severity='Medium', + Case(When(severity="Medium", then=Value(1)), output_field=IntegerField())), low=Sum( - Case(When(severity='Low', + Case(When(severity="Low", then=Value(1)), output_field=IntegerField())), info=Sum( - Case(When(severity='Info', + Case(When(severity="Info", then=Value(1)), output_field=IntegerField())), ) @@ -731,30 +731,30 @@ def finding_display_status(finding): # add urls for some statuses # outputs html, so make sure to escape user provided fields display_status = finding.status() - if 'Risk Accepted' in display_status: + if "Risk Accepted" in display_status: ra = finding.risk_acceptance if ra: - url = reverse('view_risk_acceptance', args=(finding.test.engagement.id, ra.id)) + url = reverse("view_risk_acceptance", args=(finding.test.engagement.id, ra.id)) info = ra.name_and_expiration_info link = 'Risk Accepted' - display_status = display_status.replace('Risk Accepted', link) + display_status = display_status.replace("Risk Accepted", link) if finding.under_review: - url = reverse('defect_finding_review', args=(finding.id, )) + url = reverse("defect_finding_review", args=(finding.id, )) link = 'Under Review' - display_status = display_status.replace('Under Review', link) + display_status = display_status.replace("Under Review", link) if finding.duplicate: - url = '#' - name = 'unknown' + url = "#" + name = "unknown" if finding.duplicate_finding: - url = reverse('view_finding', args=(finding.duplicate_finding.id,)) - name = finding.duplicate_finding.title + ', ' + \ - finding.duplicate_finding.created.strftime('%b %d, %Y, %H:%M:%S') + url = reverse("view_finding", args=(finding.duplicate_finding.id,)) + name = finding.duplicate_finding.title + ", " + \ + finding.duplicate_finding.created.strftime("%b %d, %Y, %H:%M:%S") link = 'Duplicate' - display_status = display_status.replace('Duplicate', link) + display_status = display_status.replace("Duplicate", link) return display_status @@ -762,8 +762,8 @@ def finding_display_status(finding): @register.filter def cwe_url(cwe): if not cwe: - return '' - return 'https://cwe.mitre.org/data/definitions/' + str(cwe) + '.html' + return "" + return "https://cwe.mitre.org/data/definitions/" + str(cwe) + ".html" @register.filter @@ -785,7 +785,7 @@ def vulnerability_url(vulnerability_id): for key in settings.VULNERABILITY_URLS: if vulnerability_id.upper().startswith(key): return settings.VULNERABILITY_URLS[key] + str(vulnerability_id) - return '' + return "" @register.filter @@ -866,15 +866,15 @@ def get_thumbnail(file): @register.filter def finding_extended_title(finding): if not finding: - return '' + return "" result = finding.title vulnerability_ids = finding.vulnerability_ids if vulnerability_ids: - result += ' (' + vulnerability_ids[0] + ')' + result += " (" + vulnerability_ids[0] + ")" if finding.cwe: - result += ' (CWE-' + str(finding.cwe) + ')' + result += " (CWE-" + str(finding.cwe) + ")" return result @@ -886,17 +886,17 @@ def 
finding_duplicate_cluster_size(finding): @register.filter def finding_related_action_classes(related_action): - return finding_related_action_classes_dict.get(related_action, '') + return finding_related_action_classes_dict.get(related_action, "") @register.filter def finding_related_action_title(related_action): - return finding_related_action_title_dict.get(related_action, '') + return finding_related_action_title_dict.get(related_action, "") @register.filter def product_findings(product, findings): - return findings.filter(test__engagement__product=product).order_by('numerical_severity') + return findings.filter(test__engagement__product=product).order_by("numerical_severity") @register.filter @@ -915,7 +915,7 @@ def esc(x): jira_project = jira_helper.get_jira_project(product_or_engagement) if not jira_project: - return '' + return "" html = """ """ - icon = 'fa-info-circle' - color = '' + icon = "fa-info-circle" + color = "" return mark_safe(html % (icon, color, icon, esc(test_import.id), - esc(test_import.import_settings.get('active', None)), - esc(test_import.import_settings.get('verified', None)), - esc(test_import.import_settings.get('minimum_severity', None)), - esc(test_import.import_settings.get('close_old_findings', None)), - esc(test_import.import_settings.get('push_to_jira', None)), - esc(test_import.import_settings.get('tags', None)), - esc(test_import.import_settings.get('endpoints', test_import.import_settings.get('endpoint', None))))) + esc(test_import.import_settings.get("active", None)), + esc(test_import.import_settings.get("verified", None)), + esc(test_import.import_settings.get("minimum_severity", None)), + esc(test_import.import_settings.get("close_old_findings", None)), + esc(test_import.import_settings.get("push_to_jira", None)), + esc(test_import.import_settings.get("tags", None)), + esc(test_import.import_settings.get("endpoints", test_import.import_settings.get("endpoint", None))))) @register.filter(needs_autoescape=True) def import_history(finding, autoescape=True): if not finding or not settings.TRACK_IMPORT_HISTORY: - return '' + return "" if autoescape: conditional_escape @@ -1016,7 +1016,7 @@ def import_history(finding, autoescape=True): if not status_changes or len(status_changes) < 2: # assumption is that the first status_change is the initial import - return '' + return "" html = """ @@ -1026,8 +1026,8 @@ def import_history(finding, autoescape=True): """ - list_of_status_changes = '' + list_of_status_changes = "" for status_change in status_changes: - list_of_status_changes += '' + status_change.created.strftime('%b %d, %Y, %H:%M:%S') + ': ' + status_change.get_action_display() + '
' + list_of_status_changes += "" + status_change.created.strftime("%b %d, %Y, %H:%M:%S") + ": " + status_change.get_action_display() + "
" return mark_safe(html % (list_of_status_changes)) diff --git a/dojo/templatetags/event_tags.py b/dojo/templatetags/event_tags.py index 1c69ab8d8f..2b40868a04 100644 --- a/dojo/templatetags/event_tags.py +++ b/dojo/templatetags/event_tags.py @@ -7,9 +7,9 @@ def _process_field_attributes(field, attr, process): # split attribute name and value from 'attr:value' string - params = attr.split(':', 1) + params = attr.split(":", 1) attribute = params[0] - value = params[1] if len(params) == 2 else '' + value = params[1] if len(params) == 2 else "" # decorate field.as_widget method with updated attributes old_as_widget = field.as_widget @@ -31,9 +31,9 @@ def as_widget(self, widget=None, attrs=None, only_initial=False): def addcss(field, attr): def process(widget, attrs, attribute, value): if attrs.get(attribute): - attrs[attribute] += ' ' + value + attrs[attribute] += " " + value elif widget.attrs.get(attribute): - attrs[attribute] = widget.attrs[attribute] + ' ' + value + attrs[attribute] = widget.attrs[attribute] + " " + value else: attrs[attribute] = value @@ -77,7 +77,7 @@ def sum_dict(d): @register.filter def nice_title(title): - pat = re.compile(r'Finding [0-9][0-9][0-9]:*') + pat = re.compile(r"Finding [0-9][0-9][0-9]:*") s = pat.split(title, 2) try: ret = s[1] diff --git a/dojo/templatetags/get_attribute.py b/dojo/templatetags/get_attribute.py index 625a722c4f..49f98941df 100644 --- a/dojo/templatetags/get_attribute.py +++ b/dojo/templatetags/get_attribute.py @@ -9,4 +9,4 @@ def get_attribute(obj, name): if hasattr(obj, name): return getattr(obj, name) else: - return '' + return "" diff --git a/dojo/templatetags/get_banner.py b/dojo/templatetags/get_banner.py index da1a0f2e01..26ab7d3bbe 100644 --- a/dojo/templatetags/get_banner.py +++ b/dojo/templatetags/get_banner.py @@ -14,14 +14,14 @@ def get_banner_conf(attribute): banner_config = BannerConf.objects.get() value = getattr(banner_config, attribute, None) if value: - if attribute == 'banner_message': + if attribute == "banner_message": # only admin can edit login banner, so we allow html, but still bleach it allowed_attributes = bleach.ALLOWED_ATTRIBUTES - allowed_attributes['a'] = allowed_attributes['a'] + ['style', 'target'] + allowed_attributes["a"] = allowed_attributes["a"] + ["style", "target"] return mark_safe(bleach.clean( value, attributes=allowed_attributes, - css_sanitizer=CSSSanitizer(allowed_css_properties=['color', 'font-weight']))) + css_sanitizer=CSSSanitizer(allowed_css_properties=["color", "font-weight"]))) else: return value else: diff --git a/dojo/templatetags/get_endpoint_status.py b/dojo/templatetags/get_endpoint_status.py index c3dbfd9cea..2d9f09d8d1 100644 --- a/dojo/templatetags/get_endpoint_status.py +++ b/dojo/templatetags/get_endpoint_status.py @@ -6,12 +6,12 @@ register = template.Library() -@register.filter(name='has_endpoints') +@register.filter(name="has_endpoints") def has_endpoints(finding): return True if finding.endpoints.all() else False -@register.filter(name='get_vulnerable_endpoints') +@register.filter(name="get_vulnerable_endpoints") def get_vulnerable_endpoints(finding): return finding.endpoints.filter( status_endpoint__mitigated=False, @@ -20,7 +20,7 @@ def get_vulnerable_endpoints(finding): status_endpoint__risk_accepted=False) -@register.filter(name='get_mitigated_endpoints') +@register.filter(name="get_mitigated_endpoints") def get_mitigated_endpoints(finding): return finding.endpoints.filter( Q(status_endpoint__mitigated=True) @@ -42,7 +42,7 @@ def endpoint_display_status(endpoint, finding): 
diff --git a/dojo/templatetags/get_endpoint_status.py b/dojo/templatetags/get_endpoint_status.py
index c3dbfd9cea..2d9f09d8d1 100644
--- a/dojo/templatetags/get_endpoint_status.py
+++ b/dojo/templatetags/get_endpoint_status.py
@@ -6,12 +6,12 @@ register = template.Library()
 
-@register.filter(name='has_endpoints')
+@register.filter(name="has_endpoints")
 def has_endpoints(finding):
     return True if finding.endpoints.all() else False
 
-@register.filter(name='get_vulnerable_endpoints')
+@register.filter(name="get_vulnerable_endpoints")
 def get_vulnerable_endpoints(finding):
     return finding.endpoints.filter(
         status_endpoint__mitigated=False,
@@ -20,7 +20,7 @@ def get_vulnerable_endpoints(finding):
         status_endpoint__risk_accepted=False)
 
-@register.filter(name='get_mitigated_endpoints')
+@register.filter(name="get_mitigated_endpoints")
 def get_mitigated_endpoints(finding):
     return finding.endpoints.filter(
         Q(status_endpoint__mitigated=True)
@@ -42,7 +42,7 @@ def endpoint_display_status(endpoint, finding):
         if status.mitigated:
             statuses.append("Mitigated")
     if statuses:
-        return ', '.join(statuses)
+        return ", ".join(statuses)
     else:
         return "Active"
diff --git a/dojo/templatetags/get_note_status.py b/dojo/templatetags/get_note_status.py
index 17aff8a8f0..ab5b648585 100644
--- a/dojo/templatetags/get_note_status.py
+++ b/dojo/templatetags/get_note_status.py
@@ -3,7 +3,7 @@ register = template.Library()
 
-@register.filter(name='get_public_notes')
+@register.filter(name="get_public_notes")
 def get_public_notes(notes):
     if notes:
         return notes.filter(private=False)
diff --git a/dojo/templatetags/get_notetype_availability.py b/dojo/templatetags/get_notetype_availability.py
index e3529ab97d..59673b3a4e 100644
--- a/dojo/templatetags/get_notetype_availability.py
+++ b/dojo/templatetags/get_notetype_availability.py
@@ -3,7 +3,7 @@ register = template.Library()
 
-@register.filter(name='get_notetype_notes_count')
+@register.filter(name="get_notetype_notes_count")
 def get_notetype_notes_count(notes):
     notes_without_type = notes.filter(note_type=None).count()
     notes_count = notes.count()
diff --git a/dojo/templatetags/navigation_tags.py b/dojo/templatetags/navigation_tags.py
index 3a1363af33..d5848af1f0 100644
--- a/dojo/templatetags/navigation_tags.py
+++ b/dojo/templatetags/navigation_tags.py
@@ -11,13 +11,13 @@
 @register.simple_tag(takes_context=True)
 def query_string_as_hidden(context):
-    request = context['request']
-    query_string = request.META['QUERY_STRING']
-    inputs = ''
+    request = context["request"]
+    query_string = request.META["QUERY_STRING"]
+    inputs = ""
     if query_string:
-        parameters = query_string.split('&')
+        parameters = query_string.split("&")
         for param in parameters:
-            parts = param.split('=')
+            parts = param.split("=")
             if len(parts) == 2:
                 inputs += f""
             else:
@@ -26,39 +26,39 @@ def query_string_as_hidden(context):
 
 @register.simple_tag
-def url_replace(request, field='page', value=1):
-    if field is None or field == '':
-        field = 'page'
+def url_replace(request, field="page", value=1):
+    if field is None or field == "":
+        field = "page"
     dict_ = request.GET.copy()
     dict_[field] = value
     return dict_.urlencode()
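A quick illustration of what url_replace returns (hypothetical querystring values, not part of the change):

    # for a request whose querystring is ?page=3&severity=High
    url_replace(request, "page", 4)   # -> "page=4&severity=High"
    # template usage takes the same shape: {% url_replace request 'page' 4 %}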
 
 @register.simple_tag
-def dojo_sort(request, display='Name', value='title', default=None):
-    field = 'o'
    icon = '\d+)$',
+        r"^test/(?P<tid>\d+)$",
         views.ViewTest.as_view(),
-        name='view_test',
+        name="view_test",
     ),
-    re_path(r'^test/(?P<tid>\d+)/ics$', views.test_ics,
-            name='test_ics'),
-    re_path(r'^test/(?P<tid>\d+)/edit$', views.edit_test,
-            name='edit_test'),
-    re_path(r'^test/(?P<tid>\d+)/delete$', views.delete_test,
-            name='delete_test'),
-    re_path(r'^test/(?P<tid>\d+)/copy$', views.copy_test,
-            name='copy_test'),
+    re_path(r"^test/(?P<tid>\d+)/ics$", views.test_ics,
+            name="test_ics"),
+    re_path(r"^test/(?P<tid>\d+)/edit$", views.edit_test,
+            name="edit_test"),
+    re_path(r"^test/(?P<tid>\d+)/delete$", views.delete_test,
+            name="delete_test"),
+    re_path(r"^test/(?P<tid>\d+)/copy$", views.copy_test,
+            name="copy_test"),
     re_path(
-        r'^test/(?P<tid>\d+)/add_findings$',
+        r"^test/(?P<tid>\d+)/add_findings$",
         views.AddFindingView.as_view(),
-        name='add_findings'),
+        name="add_findings"),
-    re_path(r'^test/(?P<tid>\d+)/add_findings/(?P<fid>\d+)$',
-            views.add_temp_finding, name='add_temp_finding'),
+    re_path(r"^test/(?P<tid>\d+)/add_findings/(?P<fid>\d+)$",
+            views.add_temp_finding, name="add_temp_finding"),
-    re_path(r'^test/(?P<tid>\d+)/search$', views.search, name='search'),
+    re_path(r"^test/(?P<tid>\d+)/search$", views.search, name="search"),
     re_path(
-        r'^test/(?P<tid>\d+)/re_import_scan_results',
+        r"^test/(?P<tid>\d+)/re_import_scan_results",
         views.ReImportScanResultsView.as_view(),
-        name='re_import_scan_results'),
+        name="re_import_scan_results"),
 ]
diff --git a/dojo/test/views.py b/dojo/test/views.py
index 202247ad33..1fe004fccd 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -80,36 +80,36 @@
 )
 
 logger = logging.getLogger(__name__)
-parse_logger = logging.getLogger('dojo')
+parse_logger = logging.getLogger("dojo")
 deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
 
 
 def prefetch_for_findings(findings):
     prefetched_findings = findings
     if isinstance(findings, QuerySet):  # old code can arrive here with prods being a list because the query was already executed
-        prefetched_findings = prefetched_findings.select_related('reporter')
-        prefetched_findings = prefetched_findings.prefetch_related('jira_issue__jira_project__jira_instance')
-        prefetched_findings = prefetched_findings.prefetch_related('test__test_type')
-        prefetched_findings = prefetched_findings.prefetch_related('test__engagement__jira_project__jira_instance')
-        prefetched_findings = prefetched_findings.prefetch_related('test__engagement__product__jira_project_set__jira_instance')
-        prefetched_findings = prefetched_findings.prefetch_related('found_by')
-        prefetched_findings = prefetched_findings.prefetch_related('risk_acceptance_set')
+        prefetched_findings = prefetched_findings.select_related("reporter")
+        prefetched_findings = prefetched_findings.prefetch_related("jira_issue__jira_project__jira_instance")
+        prefetched_findings = prefetched_findings.prefetch_related("test__test_type")
+        prefetched_findings = prefetched_findings.prefetch_related("test__engagement__jira_project__jira_instance")
+        prefetched_findings = prefetched_findings.prefetch_related("test__engagement__product__jira_project_set__jira_instance")
+        prefetched_findings = prefetched_findings.prefetch_related("found_by")
+        prefetched_findings = prefetched_findings.prefetch_related("risk_acceptance_set")
         # we could try to prefetch only the latest note with SubQuery and OuterRef, but I'm getting that MySql doesn't support limits in subqueries.
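A rough sketch of the SubQuery/OuterRef approach the comment above alludes to (illustrative only; it assumes Notes has a date field and that the reverse lookup from Notes back to Finding is named finding):

    from django.db.models import OuterRef, Prefetch, Subquery

    # id of the newest note belonging to the same finding as the outer note
    newest = Notes.objects.filter(finding=OuterRef("finding")).order_by("-date").values("pk")[:1]
    # keep only each finding's newest note in the prefetch
    prefetched_findings = prefetched_findings.prefetch_related(
        Prefetch("notes", queryset=Notes.objects.filter(pk__in=Subquery(newest))))
    # MySQL does not support LIMIT inside an IN () subquery, so the sliced
    # subquery fails there, hence the plain prefetch_related("notes") that follows.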
- prefetched_findings = prefetched_findings.prefetch_related('notes') - prefetched_findings = prefetched_findings.prefetch_related('tags') + prefetched_findings = prefetched_findings.prefetch_related("notes") + prefetched_findings = prefetched_findings.prefetch_related("tags") # filter out noop reimport actions from finding status history - prefetched_findings = prefetched_findings.prefetch_related(Prefetch('test_import_finding_action_set', + prefetched_findings = prefetched_findings.prefetch_related(Prefetch("test_import_finding_action_set", queryset=Test_Import_Finding_Action.objects.exclude(action=IMPORT_UNTOUCHED_FINDING))) - prefetched_findings = prefetched_findings.prefetch_related('endpoints') - prefetched_findings = prefetched_findings.prefetch_related('status_finding') - prefetched_findings = prefetched_findings.annotate(active_endpoint_count=Count('status_finding__id', filter=Q(status_finding__mitigated=False))) - prefetched_findings = prefetched_findings.annotate(mitigated_endpoint_count=Count('status_finding__id', filter=Q(status_finding__mitigated=True))) - prefetched_findings = prefetched_findings.prefetch_related('finding_group_set__jira_issue') - prefetched_findings = prefetched_findings.prefetch_related('duplicate_finding') - prefetched_findings = prefetched_findings.prefetch_related('vulnerability_id_set') + prefetched_findings = prefetched_findings.prefetch_related("endpoints") + prefetched_findings = prefetched_findings.prefetch_related("status_finding") + prefetched_findings = prefetched_findings.annotate(active_endpoint_count=Count("status_finding__id", filter=Q(status_finding__mitigated=False))) + prefetched_findings = prefetched_findings.annotate(mitigated_endpoint_count=Count("status_finding__id", filter=Q(status_finding__mitigated=True))) + prefetched_findings = prefetched_findings.prefetch_related("finding_group_set__jira_issue") + prefetched_findings = prefetched_findings.prefetch_related("duplicate_finding") + prefetched_findings = prefetched_findings.prefetch_related("vulnerability_id_set") else: - logger.debug('unable to prefetch because query was already executed') + logger.debug("unable to prefetch because query was already executed") return prefetched_findings @@ -117,7 +117,7 @@ def prefetch_for_findings(findings): class ViewTest(View): def get_test(self, test_id: int): test_prefetched = get_authorized_tests(Permissions.Test_View) - test_prefetched = test_prefetched.annotate(total_reimport_count=Count('test_import__id', distinct=True)) + test_prefetched = test_prefetched.annotate(total_reimport_count=Count("test_import__id", distinct=True)) return get_object_or_404(test_prefetched, pk=test_id) def get_test_import_data(self, request: HttpRequest, test: Test): @@ -145,7 +145,7 @@ def get_findings(self, request: HttpRequest, test: Test): filter_string_matching = get_system_setting("filter_string_matching", False) finding_filter_class = FindingFilterWithoutObjectLookups if filter_string_matching else FindingFilter findings = finding_filter_class(request.GET, queryset=findings) - paged_findings = get_page_items_and_count(request, prefetch_for_findings(findings.qs), 25, prefix='findings') + paged_findings = get_page_items_and_count(request, prefetch_for_findings(findings.qs), 25, prefix="findings") return { "findings": paged_findings, @@ -192,8 +192,8 @@ def get_initial_context(self, request: HttpRequest, test: Test): "test": test, "prod": test.engagement.product, "product_tab": product_tab, - "title_words": get_words_for_field(Finding, 'title'), - 
"component_words": get_words_for_field(Finding, 'component_name'), + "title_words": get_words_for_field(Finding, "title"), + "component_words": get_words_for_field(Finding, "component_name"), "notes": notes, "note_type_activation": note_type_activation, "available_note_types": available_note_types, @@ -205,8 +205,8 @@ def get_initial_context(self, request: HttpRequest, test: Test): "cred_test": Cred_Mapping.objects.filter(test=test).select_related("cred_id").order_by("cred_id"), "jira_project": jira_helper.get_jira_project(test), "bulk_edit_form": FindingBulkUpdateForm(request.GET), - 'finding_groups': test.finding_group_set.all().prefetch_related("findings", "jira_issue", "creator", "findings__vulnerability_id_set"), - 'finding_group_by_options': Finding_Group.GROUP_BY_OPTIONS, + "finding_groups": test.finding_group_set.all().prefetch_related("findings", "jira_issue", "creator", "findings__vulnerability_id_set"), + "finding_group_by_options": Finding_Group.GROUP_BY_OPTIONS, } # Set the form using the context, and then update the context @@ -234,8 +234,8 @@ def process_form(self, request: HttpRequest, test: Test, context: dict): messages.add_message( request, messages.SUCCESS, - _('Note added successfully.'), - extra_tags='alert-success') + _("Note added successfully."), + extra_tags="alert-success") return request, True return request, False @@ -285,58 +285,58 @@ def post(self, request: HttpRequest, test_id: int): # return prefetch_for_test_imports -@user_is_authorized(Test, Permissions.Test_Edit, 'tid') +@user_is_authorized(Test, Permissions.Test_Edit, "tid") def edit_test(request, tid): test = get_object_or_404(Test, pk=tid) form = TestForm(instance=test) - if request.method == 'POST': + if request.method == "POST": form = TestForm(request.POST, instance=test) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - _('Test saved.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_engagement', args=(test.engagement.id,))) + _("Test saved."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_engagement", args=(test.engagement.id,))) - form.initial['target_start'] = test.target_start.date() - form.initial['target_end'] = test.target_end.date() - form.initial['description'] = test.description + form.initial["target_start"] = test.target_start.date() + form.initial["target_end"] = test.target_end.date() + form.initial["description"] = test.description product_tab = Product_Tab(test.engagement.product, title=_("Edit Test"), tab="engagements") product_tab.setEngagement(test.engagement) - return render(request, 'dojo/edit_test.html', - {'test': test, - 'product_tab': product_tab, - 'form': form, + return render(request, "dojo/edit_test.html", + {"test": test, + "product_tab": product_tab, + "form": form, }) -@user_is_authorized(Test, Permissions.Test_Delete, 'tid') +@user_is_authorized(Test, Permissions.Test_Delete, "tid") def delete_test(request, tid): test = get_object_or_404(Test, pk=tid) eng = test.engagement form = DeleteTestForm(instance=test) - if request.method == 'POST': - if 'id' in request.POST and str(test.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(test.id) == request.POST["id"]: form = DeleteTestForm(request.POST, instance=test) if form.is_valid(): if get_setting("ASYNC_OBJECT_DELETE"): async_del = async_delete() async_del.delete(test) - message = _('Test and relationships will be removed in the background.') + message = _("Test and relationships will be 
removed in the background.") else: - message = _('Test and relationships removed.') + message = _("Test and relationships removed.") test.delete() messages.add_message(request, messages.SUCCESS, message, - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_engagement', args=(eng.id,))) + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_engagement", args=(eng.id,))) - rels = ['Previewing the relationships has been disabled.', ''] - display_preview = get_setting('DELETE_PREVIEW') + rels = ["Previewing the relationships has been disabled.", ""] + display_preview = get_setting("DELETE_PREVIEW") if display_preview: collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([test]) @@ -344,56 +344,56 @@ def delete_test(request, tid): product_tab = Product_Tab(test.engagement.product, title=_("Delete Test"), tab="engagements") product_tab.setEngagement(test.engagement) - return render(request, 'dojo/delete_test.html', - {'test': test, - 'product_tab': product_tab, - 'form': form, - 'rels': rels, - 'deletable_objects': rels, + return render(request, "dojo/delete_test.html", + {"test": test, + "product_tab": product_tab, + "form": form, + "rels": rels, + "deletable_objects": rels, }) -@user_is_authorized(Test, Permissions.Test_Edit, 'tid') +@user_is_authorized(Test, Permissions.Test_Edit, "tid") def copy_test(request, tid): test = get_object_or_404(Test, id=tid) product = test.engagement.product engagement_list = get_authorized_engagements(Permissions.Engagement_Edit).filter(product=product) form = CopyTestForm(engagements=engagement_list) - if request.method == 'POST': + if request.method == "POST": form = CopyTestForm(request.POST, engagements=engagement_list) if form.is_valid(): - engagement = form.cleaned_data.get('engagement') + engagement = form.cleaned_data.get("engagement") product = test.engagement.product test_copy = test.copy(engagement=engagement) calculate_grade(product) messages.add_message( request, messages.SUCCESS, - 'Test Copied successfully.', - extra_tags='alert-success') - create_notification(event='test_copied', # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces - title=f'Copying of {test.title}', + "Test Copied successfully.", + extra_tags="alert-success") + create_notification(event="test_copied", # TODO - if 'copy' functionality will be supported by API as well, 'create_notification' needs to be migrated to place where it will be able to cover actions from both interfaces + title=f"Copying of {test.title}", description=f'The test "{test.title}" was copied by {request.user} to {engagement.name}', product=product, - url=request.build_absolute_uri(reverse('view_test', args=(test_copy.id,))), + url=request.build_absolute_uri(reverse("view_test", args=(test_copy.id,))), recipients=[test.engagement.lead], icon="exclamation-triangle") - return redirect_to_return_url_or_else(request, reverse('view_engagement', args=(engagement.id, ))) + return redirect_to_return_url_or_else(request, reverse("view_engagement", args=(engagement.id, ))) else: messages.add_message( request, messages.ERROR, - 'Unable to copy test, please try again.', - extra_tags='alert-danger') + "Unable to copy test, please try again.", + extra_tags="alert-danger") product_tab = Product_Tab(product, title="Copy Test", tab="engagements") - return render(request, 'dojo/copy_object.html', { - 'source': test, - 'source_label': 'Test', - 
'destination_label': 'Engagement', - 'product_tab': product_tab, - 'form': form, + return render(request, "dojo/copy_object.html", { + "source": test, + "source_label": "Test", + "destination_label": "Engagement", + "product_tab": product_tab, + "form": form, }) @@ -401,31 +401,31 @@ def copy_test(request, tid): @vary_on_cookie def test_calendar(request): - if not get_system_setting('enable_calendar'): + if not get_system_setting("enable_calendar"): raise Resolver404 - if 'lead' not in request.GET or '0' in request.GET.getlist('lead'): + if "lead" not in request.GET or "0" in request.GET.getlist("lead"): tests = get_authorized_tests(Permissions.Test_View) else: filters = [] - leads = request.GET.getlist('lead', '') - if '-1' in request.GET.getlist('lead'): - leads.remove('-1') + leads = request.GET.getlist("lead", "") + if "-1" in request.GET.getlist("lead"): + leads.remove("-1") filters.append(Q(lead__isnull=True)) filters.append(Q(lead__in=leads)) tests = get_authorized_tests(Permissions.Test_View).filter(reduce(operator.or_, filters)) - tests = tests.prefetch_related('test_type', 'lead', 'engagement__product') + tests = tests.prefetch_related("test_type", "lead", "engagement__product") add_breadcrumb(title=_("Test Calendar"), top_level=True, request=request) - return render(request, 'dojo/calendar.html', { - 'caltype': 'tests', - 'leads': request.GET.getlist('lead', ''), - 'tests': tests, - 'users': get_authorized_users(Permissions.Test_View)}) + return render(request, "dojo/calendar.html", { + "caltype": "tests", + "leads": request.GET.getlist("lead", ""), + "tests": tests, + "users": get_authorized_users(Permissions.Test_View)}) -@user_is_authorized(Test, Permissions.Test_View, 'tid') +@user_is_authorized(Test, Permissions.Test_View, "tid") def test_ics(request, tid): test = get_object_or_404(Test, id=tid) start_date = datetime.combine(test.target_start, datetime.min.time()) @@ -443,8 +443,8 @@ def test_ics(request, tid): ) output = cal.serialize() response = HttpResponse(content=output) - response['Content-Type'] = 'text/calendar' - response['Content-Disposition'] = f'attachment; filename={test.test_type.name}.ics' + response["Content-Type"] = "text/calendar" + response["Content-Disposition"] = f"attachment; filename={test.test_type.name}.ics" return response @@ -461,7 +461,7 @@ def get_initial_context(self, request: HttpRequest, test: Test): "form": finding_form, "product_tab": product_tab, "temp": False, - 'test': test, + "test": test, "tid": test.id, "pid": test.engagement.product.id, "form_error": False, @@ -473,7 +473,7 @@ def get_finding_form(self, request: HttpRequest, test: Test): args = [request.POST] if request.method == "POST" else [] # Set the initial form args kwargs = { - "initial": {'date': timezone.now().date(), 'verified': True, 'dynamic_finding': False}, + "initial": {"date": timezone.now().date(), "verified": True, "dynamic_finding": False}, "req_resp": None, "product": test.engagement.product, } @@ -500,27 +500,27 @@ def get_jira_form(self, request: HttpRequest, test: Test, finding_form: AddFindi return None def validate_status_change(self, request: HttpRequest, context: dict): - if ((context["form"]['active'].value() is False - or context["form"]['false_p'].value()) - and context["form"]['duplicate'].value() is False): + if ((context["form"]["active"].value() is False + or context["form"]["false_p"].value()) + and context["form"]["duplicate"].value() is False): closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count() if 
closing_disabled != 0: error_inactive = ValidationError( - _('Can not set a finding as inactive without adding all mandatory notes'), - code='inactive_without_mandatory_notes') + _("Can not set a finding as inactive without adding all mandatory notes"), + code="inactive_without_mandatory_notes") error_false_p = ValidationError( - _('Can not set a finding as false positive without adding all mandatory notes'), - code='false_p_without_mandatory_notes') - if context["form"]['active'].value() is False: - context["form"].add_error('active', error_inactive) - if context["form"]['false_p'].value(): - context["form"].add_error('false_p', error_false_p) + _("Can not set a finding as false positive without adding all mandatory notes"), + code="false_p_without_mandatory_notes") + if context["form"]["active"].value() is False: + context["form"].add_error("active", error_inactive) + if context["form"]["false_p"].value(): + context["form"].add_error("false_p", error_false_p) messages.add_message( request, messages.ERROR, - _('Can not set a finding as inactive or false positive without adding all mandatory notes'), - extra_tags='alert-danger') + _("Can not set a finding as inactive or false positive without adding all mandatory notes"), + extra_tags="alert-danger") return request @@ -531,7 +531,7 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict): finding.test = test finding.reporter = request.user finding.numerical_severity = Finding.get_numerical_severity(finding.severity) - finding.tags = context["form"].cleaned_data['tags'] + finding.tags = context["form"].cleaned_data["tags"] finding.save() # Save and add new endpoints finding_helper.add_endpoints(finding, context["form"]) @@ -553,10 +553,10 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic if context["jform"] and context["jform"].is_valid(): # can't use helper as when push_all_jira_issues is True, the checkbox gets disabled and is always false # push_to_jira = jira_helper.is_push_to_jira(finding, jform.cleaned_data.get('push_to_jira')) - push_to_jira = jira_helper.is_push_all_issues(finding) or context["jform"].cleaned_data.get('push_to_jira') + push_to_jira = jira_helper.is_push_all_issues(finding) or context["jform"].cleaned_data.get("push_to_jira") jira_message = None # if the jira issue key was changed, update database - new_jira_issue_key = context["jform"].cleaned_data.get('jira_issue') + new_jira_issue_key = context["jform"].cleaned_data.get("jira_issue") if finding.has_jira_issue: # everything in DD around JIRA integration is based on the internal id of the issue in JIRA # instead of on the public jira issue key. @@ -564,18 +564,18 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic # we can assume the issue exist, which is already checked in the validation of the jform if not new_jira_issue_key: jira_helper.finding_unlink_jira(request, finding) - jira_message = 'Link to JIRA issue removed successfully.' + jira_message = "Link to JIRA issue removed successfully." elif new_jira_issue_key != finding.jira_issue.jira_key: jira_helper.finding_unlink_jira(request, finding) jira_helper.finding_link_jira(request, finding, new_jira_issue_key) - jira_message = 'Changed JIRA link successfully.' + jira_message = "Changed JIRA link successfully." else: - logger.debug('finding has no jira issue yet') + logger.debug("finding has no jira issue yet") if new_jira_issue_key: - logger.debug('finding has no jira issue yet, but jira issue specified in request. 
trying to link.') + logger.debug("finding has no jira issue yet, but jira issue specified in request. trying to link.") jira_helper.finding_link_jira(request, finding, new_jira_issue_key) - jira_message = 'Linked a JIRA issue successfully.' + jira_message = "Linked a JIRA issue successfully." # Determine if a message should be added if jira_message: messages.add_message( @@ -623,8 +623,8 @@ def process_forms(self, request: HttpRequest, test: Test, context: dict): # Create a notification create_notification( - event='finding_added', - title=_(f'Addition of {finding.title}'), + event="finding_added", + title=_(f"Addition of {finding.title}"), finding=finding, description=_(f'Finding "{finding.title}" was added by {request.user}'), url=reverse("view_finding", args=(finding.id,)), @@ -633,8 +633,8 @@ def process_forms(self, request: HttpRequest, test: Test, context: dict): messages.add_message( request, messages.SUCCESS, - _('Finding added successfully.'), - extra_tags='alert-success') + _("Finding added successfully."), + extra_tags="alert-success") return finding, request, all_forms_valid @@ -662,17 +662,17 @@ def post(self, request: HttpRequest, test_id: int): _, request, success = self.process_forms(request, test, context) # Handle the case of a successful form if success: - if '_Finished' in request.POST: - return HttpResponseRedirect(reverse('view_test', args=(test.id,))) + if "_Finished" in request.POST: + return HttpResponseRedirect(reverse("view_test", args=(test.id,))) else: - return HttpResponseRedirect(reverse('add_findings', args=(test.id,))) + return HttpResponseRedirect(reverse("add_findings", args=(test.id,))) else: context["form_error"] = True # Render the form return render(request, self.get_template(), context) -@user_is_authorized(Test, Permissions.Finding_Add, 'tid') +@user_is_authorized(Test, Permissions.Finding_Add, "tid") def add_temp_finding(request, tid, fid): jform = None test = get_object_or_404(Test, id=tid) @@ -680,30 +680,30 @@ def add_temp_finding(request, tid, fid): findings = Finding_Template.objects.all() push_all_jira_issues = jira_helper.is_push_all_issues(finding) - if request.method == 'POST': + if request.method == "POST": form = AddFindingForm(request.POST, req_resp=None, product=test.engagement.product) if jira_helper.get_jira_project(test): - jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform', jira_project=jira_helper.get_jira_project(test), finding_form=form) - logger.debug(f'jform valid: {jform.is_valid()}') + jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix="jiraform", jira_project=jira_helper.get_jira_project(test), finding_form=form) + logger.debug(f"jform valid: {jform.is_valid()}") - if (form['active'].value() is False or form['false_p'].value()) and form['duplicate'].value() is False: + if (form["active"].value() is False or form["false_p"].value()) and form["duplicate"].value() is False: closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count() if closing_disabled != 0: error_inactive = ValidationError( - _('Can not set a finding as inactive without adding all mandatory notes'), - code='not_active_or_false_p_true') + _("Can not set a finding as inactive without adding all mandatory notes"), + code="not_active_or_false_p_true") error_false_p = ValidationError( - _('Can not set a finding as false positive without adding all mandatory notes'), - code='not_active_or_false_p_true') - if form['active'].value() is False: - form.add_error('active', 
error_inactive) - if form['false_p'].value(): - form.add_error('false_p', error_false_p) + _("Can not set a finding as false positive without adding all mandatory notes"), + code="not_active_or_false_p_true") + if form["active"].value() is False: + form.add_error("active", error_inactive) + if form["false_p"].value(): + form.add_error("false_p", error_false_p) messages.add_message(request, messages.ERROR, - _('Can not set a finding as inactive or false positive without adding all mandatory notes'), - extra_tags='alert-danger') + _("Can not set a finding as inactive or false positive without adding all mandatory notes"), + extra_tags="alert-danger") if form.is_valid(): finding.last_used = timezone.now() finding.save() @@ -713,9 +713,9 @@ def add_temp_finding(request, tid, fid): new_finding.numerical_severity = Finding.get_numerical_severity( new_finding.severity) - new_finding.tags = form.cleaned_data['tags'] + new_finding.tags = form.cleaned_data["tags"] new_finding.cvssv3 = finding.cvssv3 - new_finding.date = form.cleaned_data['date'] or datetime.today() + new_finding.date = form.cleaned_data["date"] or datetime.today() finding_helper.update_finding_status(new_finding, request.user) @@ -725,82 +725,82 @@ def add_temp_finding(request, tid, fid): finding_helper.add_endpoints(new_finding, form) new_finding.save() - if 'jiraform-push_to_jira' in request.POST: - jform = JIRAFindingForm(request.POST, prefix='jiraform', instance=new_finding, push_all=push_all_jira_issues, jira_project=jira_helper.get_jira_project(test), finding_form=form) + if "jiraform-push_to_jira" in request.POST: + jform = JIRAFindingForm(request.POST, prefix="jiraform", instance=new_finding, push_all=push_all_jira_issues, jira_project=jira_helper.get_jira_project(test), finding_form=form) if jform.is_valid(): - if jform.cleaned_data.get('push_to_jira'): + if jform.cleaned_data.get("push_to_jira"): jira_helper.push_to_jira(new_finding) else: - add_error_message_to_response(f'jira form validation failed: {jform.errors}') - if 'request' in form.cleaned_data or 'response' in form.cleaned_data: + add_error_message_to_response(f"jira form validation failed: {jform.errors}") + if "request" in form.cleaned_data or "response" in form.cleaned_data: burp_rr = BurpRawRequestResponse( finding=new_finding, - burpRequestBase64=base64.b64encode(form.cleaned_data.get('request', '').encode("utf-8")), - burpResponseBase64=base64.b64encode(form.cleaned_data.get('response', '').encode("utf-8")), + burpRequestBase64=base64.b64encode(form.cleaned_data.get("request", "").encode("utf-8")), + burpResponseBase64=base64.b64encode(form.cleaned_data.get("response", "").encode("utf-8")), ) burp_rr.clean() burp_rr.save() messages.add_message(request, messages.SUCCESS, - _('Finding from template added successfully.'), - extra_tags='alert-success') + _("Finding from template added successfully."), + extra_tags="alert-success") - return HttpResponseRedirect(reverse('view_test', args=(test.id,))) + return HttpResponseRedirect(reverse("view_test", args=(test.id,))) else: messages.add_message(request, messages.ERROR, - _('The form has errors, please correct them below.'), - extra_tags='alert-danger') + _("The form has errors, please correct them below."), + extra_tags="alert-danger") else: - form = AddFindingForm(req_resp=None, product=test.engagement.product, initial={'active': False, - 'date': timezone.now().date(), - 'verified': False, - 'false_p': False, - 'duplicate': False, - 'out_of_scope': False, - 'title': finding.title, - 'description': 
finding.description, - 'cwe': finding.cwe, - 'severity': finding.severity, - 'mitigation': finding.mitigation, - 'impact': finding.impact, - 'references': finding.references, - 'numerical_severity': finding.numerical_severity}) + form = AddFindingForm(req_resp=None, product=test.engagement.product, initial={"active": False, + "date": timezone.now().date(), + "verified": False, + "false_p": False, + "duplicate": False, + "out_of_scope": False, + "title": finding.title, + "description": finding.description, + "cwe": finding.cwe, + "severity": finding.severity, + "mitigation": finding.mitigation, + "impact": finding.impact, + "references": finding.references, + "numerical_severity": finding.numerical_severity}) if jira_helper.get_jira_project(test): - jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform', jira_project=jira_helper.get_jira_project(test), finding_form=form) + jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix="jiraform", jira_project=jira_helper.get_jira_project(test), finding_form=form) product_tab = Product_Tab(test.engagement.product, title=_("Add Finding"), tab="engagements") product_tab.setEngagement(test.engagement) - return render(request, 'dojo/add_findings.html', - {'form': form, - 'product_tab': product_tab, - 'jform': jform, - 'findings': findings, - 'temp': True, - 'fid': finding.id, - 'tid': test.id, - 'test': test, + return render(request, "dojo/add_findings.html", + {"form": form, + "product_tab": product_tab, + "jform": jform, + "findings": findings, + "temp": True, + "fid": finding.id, + "tid": test.id, + "test": test, }) -@user_is_authorized(Test, Permissions.Test_View, 'tid') +@user_is_authorized(Test, Permissions.Test_View, "tid") def search(request, tid): test = get_object_or_404(Test, id=tid) templates = Finding_Template.objects.all() templates = TemplateFindingFilter(request.GET, queryset=templates) paged_templates = get_page_items(request, templates.qs, 25) - title_words = get_words_for_field(Finding_Template, 'title') + title_words = get_words_for_field(Finding_Template, "title") add_breadcrumb(parent=test, title=_("Add From Template"), top_level=False, request=request) - return render(request, 'dojo/templates.html', - {'templates': paged_templates, - 'filtered': templates, - 'title_words': title_words, - 'tid': tid, - 'add_from_template': True, + return render(request, "dojo/templates.html", + {"templates": paged_templates, + "filtered": templates, + "title_words": title_words, + "tid": tid, + "add_from_template": True, }) @@ -836,7 +836,7 @@ def get_jira_form( jira_form = None push_all_jira_issues = False # Decide if we need to present the Push to JIRA form - if get_system_setting('enable_jira'): + if get_system_setting("enable_jira"): # Determine if jira issues should be pushed automatically push_all_jira_issues = jira_helper.is_push_all_issues(test) # Only return the form if the jira is enabled on this engagement or product @@ -845,12 +845,12 @@ def get_jira_form( jira_form = JIRAImportScanForm( request.POST, push_all=push_all_jira_issues, - prefix='jiraform', + prefix="jiraform", ) else: jira_form = JIRAImportScanForm( push_all=push_all_jira_issues, - prefix='jiraform', + prefix="jiraform", ) return jira_form, push_all_jira_issues @@ -948,15 +948,15 @@ def process_form( "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings"), }) # Override the form values of active and verified - if activeChoice := form.cleaned_data.get('active', None): - 
if activeChoice == 'force_to_true':
+        if activeChoice := form.cleaned_data.get("active", None):
+            if activeChoice == "force_to_true":
                 context["active"] = True
-            elif activeChoice == 'force_to_false':
+            elif activeChoice == "force_to_false":
                 context["active"] = False
-        if verifiedChoice := form.cleaned_data.get('verified', None):
-            if verifiedChoice == 'force_to_true':
+        if verifiedChoice := form.cleaned_data.get("verified", None):
+            if verifiedChoice == "force_to_true":
                 context["verified"] = True
-            elif verifiedChoice == 'force_to_false':
+            elif verifiedChoice == "force_to_false":
                 context["verified"] = False
         # Override the tags and version
         context.get("test").tags = context.get("tags")
diff --git a/dojo/test_type/urls.py b/dojo/test_type/urls.py
index 3e25e75124..5deedc20bd 100644
--- a/dojo/test_type/urls.py
+++ b/dojo/test_type/urls.py
@@ -4,9 +4,9 @@ urlpatterns = [
     # test types
-    re_path(r'^test_type$', views.test_type, name='test_type'),
-    re_path(r'^test_type/add$', views.add_test_type,
-            name='add_test_type'),
-    re_path(r'^test_type/(?P<ptid>\d+)/edit$',
-            views.edit_test_type, name='edit_test_type'),
+    re_path(r"^test_type$", views.test_type, name="test_type"),
+    re_path(r"^test_type/add$", views.add_test_type,
+            name="add_test_type"),
+    re_path(r"^test_type/(?P<ptid>\d+)/edit$",
+            views.edit_test_type, name="edit_test_type"),
 ]
diff --git a/dojo/test_type/views.py b/dojo/test_type/views.py
index 3b17556b29..d7a2b9739b 100644
--- a/dojo/test_type/views.py
+++ b/dojo/test_type/views.py
@@ -24,59 +24,59 @@
 @login_required
 def test_type(request):
-    initial_queryset = Test_Type.objects.all().order_by('name')
-    name_words = initial_queryset.values_list('name', flat=True)
+    initial_queryset = Test_Type.objects.all().order_by("name")
+    name_words = initial_queryset.values_list("name", flat=True)
     test_types = TestTypeFilter(request.GET, queryset=initial_queryset)
     tts = get_page_items(request, test_types.qs, 25)
     add_breadcrumb(title="Test Type List", top_level=True, request=request)
-    return render(request, 'dojo/test_type.html', {
-        'name': 'Test Type List',
-        'metric': False,
-        'user': request.user,
-        'tts': tts,
-        'test_types': test_types,
-        'name_words': name_words})
+    return render(request, "dojo/test_type.html", {
+        "name": "Test Type List",
+        "metric": False,
+        "user": request.user,
+        "tts": tts,
+        "test_types": test_types,
+        "name_words": name_words})
 
 
-@user_is_configuration_authorized('dojo.add_test_type')
+@user_is_configuration_authorized("dojo.add_test_type")
 def add_test_type(request):
     form = Test_TypeForm()
-    if request.method == 'POST':
+    if request.method == "POST":
         form = Test_TypeForm(request.POST)
         if form.is_valid():
             form.save()
             messages.add_message(request,
                                  messages.SUCCESS,
-                                 'Test type added successfully.',
-                                 extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('test_type'))
+                                 "Test type added successfully.",
+                                 extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("test_type"))
     add_breadcrumb(title="Add Test Type", top_level=False, request=request)
-    return render(request, 'dojo/new_test_type.html', {
-        'name': 'Add Test Type',
-        'metric': False,
-        'user': request.user,
-        'form': form,
+    return render(request, "dojo/new_test_type.html", {
+        "name": "Add Test Type",
+        "metric": False,
+        "user": request.user,
+        "form": form,
     })
 
 
-@user_is_configuration_authorized('dojo.change_test_type')
+@user_is_configuration_authorized("dojo.change_test_type")
 def edit_test_type(request, ptid):
     tt = get_object_or_404(Test_Type, pk=ptid)
     form = Test_TypeForm(instance=tt)
-    if request.method == 'POST':
+    if request.method == "POST":
         form = Test_TypeForm(request.POST, instance=tt)
         if form.is_valid():
             tt = form.save()
             messages.add_message(request,
                                  messages.SUCCESS,
-                                 'Test type updated successfully.',
-                                 extra_tags='alert-success')
-            return HttpResponseRedirect(reverse('test_type'))
+                                 "Test type updated successfully.",
+                                 extra_tags="alert-success")
+            return HttpResponseRedirect(reverse("test_type"))
     add_breadcrumb(title="Edit Test Type", top_level=False, request=request)
-    return render(request, 'dojo/edit_test_type.html', {
-        'name': 'Edit Test Type',
-        'metric': False,
-        'user': request.user,
-        'form': form,
-        'pt': tt})
+    return render(request, "dojo/edit_test_type.html", {
+        "name": "Edit Test Type",
+        "metric": False,
+        "user": request.user,
+        "form": form,
+        "pt": tt})
diff --git a/dojo/tool_config/factory.py b/dojo/tool_config/factory.py
index 8a1bb203f4..61fce9caa5 100644
--- a/dojo/tool_config/factory.py
+++ b/dojo/tool_config/factory.py
@@ -6,12 +6,12 @@
 from dojo.tools.api_vulners.api_client import VulnersAPI
 
 SCAN_APIS = {
-    'Bugcrowd API': BugcrowdAPI,
-    'BlackDuck API': BlackduckAPI,
-    'Cobalt.io': CobaltAPI,
-    'Edgescan': EdgescanAPI,
-    'SonarQube': SonarQubeAPI,
-    'Vulners': VulnersAPI,
+    "Bugcrowd API": BugcrowdAPI,
+    "BlackDuck API": BlackduckAPI,
+    "Cobalt.io": CobaltAPI,
+    "Edgescan": EdgescanAPI,
+    "SonarQube": SonarQubeAPI,
+    "Vulners": VulnersAPI,
 }
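For orientation, the create_API() helper used by the views below presumably dispatches on the configured tool type through this SCAN_APIS mapping; a minimal sketch under that assumption:

    def create_API(tool_configuration):
        # e.g. a Tool_Configuration whose tool_type.name is "SonarQube" -> SonarQubeAPI
        api_class = SCAN_APIS.get(tool_configuration.tool_type.name)
        return api_class(tool_configuration) if api_class else None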
diff --git a/dojo/tool_config/urls.py b/dojo/tool_config/urls.py
index 0c7c16c9b9..263142742e 100644
--- a/dojo/tool_config/urls.py
+++ b/dojo/tool_config/urls.py
@@ -3,8 +3,8 @@
 from . import views
 
 urlpatterns = [
-    re_path(r'^tool_config/add', views.new_tool_config, name='add_tool_config'),
-    re_path(r'^tool_config/(?P<ttid>\d+)/edit$', views.edit_tool_config,
-            name='edit_tool_config'),
-    re_path(r'^tool_config$', views.tool_config, name='tool_config'),
+    re_path(r"^tool_config/add", views.new_tool_config, name="add_tool_config"),
+    re_path(r"^tool_config/(?P<ttid>\d+)/edit$", views.edit_tool_config,
+            name="edit_tool_config"),
+    re_path(r"^tool_config$", views.tool_config, name="tool_config"),
 ]
diff --git a/dojo/tool_config/views.py b/dojo/tool_config/views.py
index 4744a260c6..c6c514d09d 100644
--- a/dojo/tool_config/views.py
+++ b/dojo/tool_config/views.py
@@ -15,70 +15,70 @@
 logger = logging.getLogger(__name__)
 
 
-@user_is_configuration_authorized('dojo.add_tool_configuration')
+@user_is_configuration_authorized("dojo.add_tool_configuration")
 def new_tool_config(request):
-    if request.method == 'POST':
+    if request.method == "POST":
         tform = ToolConfigForm(request.POST)
         if tform.is_valid():
             form_copy = tform.save(commit=False)
             try:
                 api = create_API(form_copy)
-                if api and hasattr(api, 'test_connection'):
+                if api and hasattr(api, "test_connection"):
                     result = api.test_connection()
                     messages.add_message(request,
                                          messages.SUCCESS,
-                                         f'API connection successful with message: {result}.',
-                                         extra_tags='alert-success')
+                                         f"API connection successful with message: {result}.",
+                                         extra_tags="alert-success")
                 form_copy.save()
                 messages.add_message(request,
                                      messages.SUCCESS,
-                                     'Tool Configuration successfully updated.',
-                                     extra_tags='alert-success')
-                return HttpResponseRedirect(reverse('tool_config'))
+                                     "Tool Configuration successfully updated.",
+                                     extra_tags="alert-success")
+                return HttpResponseRedirect(reverse("tool_config"))
             except Exception as e:
                 logger.exception(e)
                 messages.add_message(request,
                                      messages.ERROR,
                                      str(e),
-                                     extra_tags='alert-danger')
+                                     extra_tags="alert-danger")
     else:
         tform = ToolConfigForm()
-        if 'tool_type' in request.GET:
-            tform.fields['tool_type'].initial = request.GET.get('tool_type')
+        if "tool_type" in request.GET:
+            tform.fields["tool_type"].initial = request.GET.get("tool_type")
     add_breadcrumb(title="New Tool Configuration", top_level=False, request=request)
-    return render(request, 'dojo/new_tool_config.html',
-                  {'tform': tform})
+    return render(request, "dojo/new_tool_config.html",
+                  {"tform": tform})
 
 
-@user_is_configuration_authorized('dojo.change_tool_configuration')
+@user_is_configuration_authorized("dojo.change_tool_configuration")
 def edit_tool_config(request, ttid):
     tool_config = Tool_Configuration.objects.get(pk=ttid)
-    if request.method == 'POST':
+    if request.method == "POST":
         tform = ToolConfigForm(request.POST, instance=tool_config)
         if tform.is_valid():
             form_copy = tform.save(commit=False)
-            form_copy.password = dojo_crypto_encrypt(tform.cleaned_data['password'])
-            form_copy.ssh = dojo_crypto_encrypt(tform.cleaned_data['ssh'])
+            form_copy.password = dojo_crypto_encrypt(tform.cleaned_data["password"])
+            form_copy.ssh = dojo_crypto_encrypt(tform.cleaned_data["ssh"])
             try:
                 api = create_API(form_copy)
-                if api and hasattr(api, 'test_connection'):
+                if api and hasattr(api, "test_connection"):
                     result = api.test_connection()
                     messages.add_message(request,
                                          messages.SUCCESS,
-                                         f'API connection successful with message: {result}.',
-                                         extra_tags='alert-success')
+                                         f"API connection successful with message: {result}.",
+                                         extra_tags="alert-success")
                 form_copy.save()
                 messages.add_message(request,
                                      messages.SUCCESS,
-                                     'Tool Configuration successfully updated.',
-                                     extra_tags='alert-success')
-                return HttpResponseRedirect(reverse('tool_config'))
+                                     "Tool Configuration successfully updated.",
+                                     extra_tags="alert-success")
+                return HttpResponseRedirect(reverse("tool_config"))
             except Exception as e:
                 logger.info(e)
                 messages.add_message(request,
                                      messages.ERROR,
                                      str(e),
-                                     extra_tags='alert-danger')
+                                     extra_tags="alert-danger")
     else:
         tool_config.password = prepare_for_view(tool_config.password)
         tool_config.ssh = prepare_for_view(tool_config.ssh)
@@ -86,17 +86,17 @@ def edit_tool_config(request, ttid):
     add_breadcrumb(title="Edit Tool Configuration", top_level=False, request=request)
     return render(request,
-                  'dojo/edit_tool_config.html',
+                  "dojo/edit_tool_config.html",
                   {
-                      'tform': tform,
+                      "tform": tform,
                   })
 
 
-@user_is_configuration_authorized('dojo.view_tool_configuration')
+@user_is_configuration_authorized("dojo.view_tool_configuration")
 def tool_config(request):
-    confs = Tool_Configuration.objects.all().order_by('name')
+    confs = Tool_Configuration.objects.all().order_by("name")
     add_breadcrumb(title="Tool Configuration List", top_level=not len(request.GET), request=request)
     return render(request,
-                  'dojo/tool_config.html',
-                  {'confs': confs,
+                  "dojo/tool_config.html",
+                  {"confs": confs,
                   })
diff --git a/dojo/tool_product/queries.py b/dojo/tool_product/queries.py
index b098ef050a..6bc23bdb98 100644
--- a/dojo/tool_product/queries.py
+++ b/dojo/tool_product/queries.py
@@ -12,33 +12,33 @@ def get_authorized_tool_product_settings(permission):
         return Tool_Product_Settings.objects.none()
 
     if user.is_superuser:
-        return Tool_Product_Settings.objects.all()
+        return Tool_Product_Settings.objects.all().order_by("id")
 
     if user_has_global_permission(user, permission):
-        return Tool_Product_Settings.objects.all()
+        return Tool_Product_Settings.objects.all().order_by("id")
 
     roles = get_roles_for_permission(permission)
     authorized_product_type_roles = Product_Type_Member.objects.filter(
-        product_type=OuterRef('product__prod_type_id'),
+        product_type=OuterRef("product__prod_type_id"),
         user=user,
         role__in=roles)
     authorized_product_roles = Product_Member.objects.filter(
-        product=OuterRef('product_id'),
+        product=OuterRef("product_id"),
         user=user,
         role__in=roles)
     authorized_product_type_groups = Product_Type_Group.objects.filter(
-        product_type=OuterRef('product__prod_type_id'),
+        product_type=OuterRef("product__prod_type_id"),
         group__users=user,
         role__in=roles)
     authorized_product_groups = Product_Group.objects.filter(
-        product=OuterRef('product_id'),
+        product=OuterRef("product_id"),
         group__users=user,
         role__in=roles)
     tool_product_settings = Tool_Product_Settings.objects.annotate(
         product__prod_type__member=Exists(authorized_product_type_roles),
         product__member=Exists(authorized_product_roles),
         product__prod_type__authorized_group=Exists(authorized_product_type_groups),
-        product__authorized_group=Exists(authorized_product_groups))
+        product__authorized_group=Exists(authorized_product_groups)).order_by("id")
     tool_product_settings = tool_product_settings.filter(
         Q(product__prod_type__member=True)
         | Q(product__member=True)
         | Q(product__prod_type__authorized_group=True)
         | Q(product__authorized_group=True))
diff --git a/dojo/tool_product/urls.py b/dojo/tool_product/urls.py
index cb26295adf..9acc6cdb13 100644
--- a/dojo/tool_product/urls.py
+++ b/dojo/tool_product/urls.py
@@ -3,9 +3,9 @@
 from . import views
 
 urlpatterns = [
-    re_path(r'^product/(?P<pid>\d+)/tool_product/add$', views.new_tool_product, name='new_tool_product'),
-    re_path(r'^product/(?P<pid>\d+)/tool_product/all$', views.all_tool_product, name='all_tool_product'),
-    re_path(r'^product/(?P<pid>\d+)/tool_product/(?P<ttid>\d+)/edit$', views.edit_tool_product, name='edit_tool_product'),
-    re_path(r'^product/(?P<pid>\d+)/tool_product/(?P<ttid>\d+)/delete$', views.delete_tool_product,
-            name='delete_tool_product'),
+    re_path(r"^product/(?P<pid>\d+)/tool_product/add$", views.new_tool_product, name="new_tool_product"),
+    re_path(r"^product/(?P<pid>\d+)/tool_product/all$", views.all_tool_product, name="all_tool_product"),
+    re_path(r"^product/(?P<pid>\d+)/tool_product/(?P<ttid>\d+)/edit$", views.edit_tool_product, name="edit_tool_product"),
+    re_path(r"^product/(?P<pid>\d+)/tool_product/(?P<ttid>\d+)/delete$", views.delete_tool_product,
+            name="delete_tool_product"),
 ]
diff --git a/dojo/tool_product/views.py b/dojo/tool_product/views.py
index ff24442d5f..2e606956b8 100644
--- a/dojo/tool_product/views.py
+++ b/dojo/tool_product/views.py
@@ -17,10 +17,10 @@
 logger = logging.getLogger(__name__)
 
 
-@user_is_authorized(Product, Permissions.Product_Edit, 'pid')
+@user_is_authorized(Product, Permissions.Product_Edit, "pid")
 def new_tool_product(request, pid):
     prod = get_object_or_404(Product, id=pid)
-    if request.method == 'POST':
+    if request.method == "POST":
         tform = ToolProductSettingsForm(request.POST)
         if tform.is_valid():
             # form.tool_type = tool_type
@@ -31,83 +31,83 @@ def new_tool_product(request, pid):
             messages.add_message(
                 request,
                 messages.SUCCESS,
-                _('Product Tool Configuration Successfully Created.'),
-                extra_tags='alert-success')
+                _("Product Tool Configuration Successfully Created."),
+                extra_tags="alert-success")
             return HttpResponseRedirect(
-                reverse('all_tool_product', args=(pid, )))
+                reverse("all_tool_product", args=(pid, )))
     else:
         tform = ToolProductSettingsForm()
     product_tab = Product_Tab(prod, title=_("Tool Configurations"), tab="settings")
-    return render(request, 'dojo/new_tool_product.html', {
-        'tform': tform,
-        'product_tab': product_tab,
-        'pid': pid,
+    return
render(request, "dojo/new_tool_product.html", { + "tform": tform, + "product_tab": product_tab, + "pid": pid, }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def all_tool_product(request, pid): prod = get_object_or_404(Product, id=pid) - tools = Tool_Product_Settings.objects.filter(product=prod).order_by('name') + tools = Tool_Product_Settings.objects.filter(product=prod).order_by("name") product_tab = Product_Tab(prod, title=_("Tool Configurations"), tab="settings") - return render(request, 'dojo/view_tool_product_all.html', { - 'prod': prod, - 'tools': tools, - 'product_tab': product_tab, + return render(request, "dojo/view_tool_product_all.html", { + "prod": prod, + "tools": tools, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def edit_tool_product(request, pid, ttid): product = get_object_or_404(Product, id=pid) tool_product = Tool_Product_Settings.objects.get(pk=ttid) if tool_product.product != product: - msg = f'Product {pid} does not fit to product of Tool_Product {tool_product.product.id}' + msg = f"Product {pid} does not fit to product of Tool_Product {tool_product.product.id}" raise BadRequest(msg) - if request.method == 'POST': + if request.method == "POST": tform = ToolProductSettingsForm(request.POST, instance=tool_product) if tform.is_valid(): tform.save() messages.add_message( request, messages.SUCCESS, - _('Tool Product Configuration Successfully Updated.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('all_tool_product', args=(pid, ))) + _("Tool Product Configuration Successfully Updated."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("all_tool_product", args=(pid, ))) else: tform = ToolProductSettingsForm(instance=tool_product) product_tab = Product_Tab(product, title=_("Edit Product Tool Configuration"), tab="settings") - return render(request, 'dojo/edit_tool_product.html', { - 'tform': tform, - 'product_tab': product_tab, + return render(request, "dojo/edit_tool_product.html", { + "tform": tform, + "product_tab": product_tab, }) -@user_is_authorized(Product, Permissions.Product_Edit, 'pid') +@user_is_authorized(Product, Permissions.Product_Edit, "pid") def delete_tool_product(request, pid, ttid): tool_product = Tool_Product_Settings.objects.get(pk=ttid) product = get_object_or_404(Product, id=pid) if tool_product.product != product: - msg = f'Product {pid} does not fit to product of Tool_Product {tool_product.product.id}' + msg = f"Product {pid} does not fit to product of Tool_Product {tool_product.product.id}" raise BadRequest(msg) - if request.method == 'POST': + if request.method == "POST": DeleteToolProductSettingsForm(request.POST, instance=tool_product) tool_product.delete() messages.add_message( request, messages.SUCCESS, - _('Tool Product Successfully Deleted.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('all_tool_product', args=(pid, ))) + _("Tool Product Successfully Deleted."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("all_tool_product", args=(pid, ))) else: tform = ToolProductSettingsForm(instance=tool_product) product_tab = Product_Tab(product, title=_("Delete Product Tool Configuration"), tab="settings") - return render(request, 'dojo/delete_tool_product.html', { - 'tform': tform, - 'product_tab': product_tab, + return render(request, "dojo/delete_tool_product.html", 
{ + "tform": tform, + "product_tab": product_tab, }) diff --git a/dojo/tool_type/urls.py b/dojo/tool_type/urls.py index 8d40e00d77..3b79b58d1b 100644 --- a/dojo/tool_type/urls.py +++ b/dojo/tool_type/urls.py @@ -3,8 +3,8 @@ from . import views urlpatterns = [ - re_path(r'^tool_type/add', views.new_tool_type, name='add_tool_type'), - re_path(r'^tool_type/(?P\d+)/edit$', views.edit_tool_type, - name='edit_tool_type'), - re_path(r'^tool_type$', views.tool_type, name='tool_type'), + re_path(r"^tool_type/add", views.new_tool_type, name="add_tool_type"), + re_path(r"^tool_type/(?P\d+)/edit$", views.edit_tool_type, + name="edit_tool_type"), + re_path(r"^tool_type$", views.tool_type, name="tool_type"), ] diff --git a/dojo/tool_type/views.py b/dojo/tool_type/views.py index 75683718c4..8886701209 100644 --- a/dojo/tool_type/views.py +++ b/dojo/tool_type/views.py @@ -15,49 +15,49 @@ logger = logging.getLogger(__name__) -@user_is_configuration_authorized('dojo.add_tool_type') +@user_is_configuration_authorized("dojo.add_tool_type") def new_tool_type(request): - if request.method == 'POST': + if request.method == "POST": tform = ToolTypeForm(request.POST, instance=Tool_Type()) if tform.is_valid(): tform.save() messages.add_message(request, messages.SUCCESS, - _('Tool Type Configuration Successfully Created.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('tool_type')) + _("Tool Type Configuration Successfully Created."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("tool_type")) else: tform = ToolTypeForm() - if 'name' in request.GET: - tform.fields['name'].initial = request.GET.get('name') + if "name" in request.GET: + tform.fields["name"].initial = request.GET.get("name") add_breadcrumb(title=_("New Tool Type Configuration"), top_level=False, request=request) - return render(request, 'dojo/new_tool_type.html', {'tform': tform}) + return render(request, "dojo/new_tool_type.html", {"tform": tform}) -@user_is_configuration_authorized('dojo.change_tool_type') +@user_is_configuration_authorized("dojo.change_tool_type") def edit_tool_type(request, ttid): tool_type = Tool_Type.objects.get(pk=ttid) - if request.method == 'POST': + if request.method == "POST": tform = ToolTypeForm(request.POST, instance=tool_type) if tform.is_valid(): tform.save() messages.add_message(request, messages.SUCCESS, - _('Tool Type successfully updated.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('tool_type')) + _("Tool Type successfully updated."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("tool_type")) else: tform = ToolTypeForm(instance=tool_type) add_breadcrumb(title=_("Edit Tool Type"), top_level=False, request=request) - return render(request, 'dojo/edit_tool_type.html', {'tform': tform}) + return render(request, "dojo/edit_tool_type.html", {"tform": tform}) -@user_is_configuration_authorized('dojo.view_tool_type') +@user_is_configuration_authorized("dojo.view_tool_type") def tool_type(request): - confs = Tool_Type.objects.all().order_by('name') + confs = Tool_Type.objects.all().order_by("name") add_breadcrumb(title=_("Tool Type List"), top_level=not len(request.GET), request=request) - return render(request, 'dojo/tool_type.html', {'confs': confs}) + return render(request, "dojo/tool_type.html", {"confs": confs}) diff --git a/dojo/tools/acunetix/parser.py b/dojo/tools/acunetix/parser.py index 272f295acf..289496a03f 100644 --- a/dojo/tools/acunetix/parser.py +++ b/dojo/tools/acunetix/parser.py @@ -15,7 +15,7 @@ def 
get_description_for_scan_types(self, scan_type): return "Acunetix Scanner in XML format or Acunetix 360 Scanner in JSON format" def get_findings(self, filename, test): - if '.xml' in str(filename): + if ".xml" in str(filename): return AcunetixXMLParser().get_findings(filename, test) - elif '.json' in str(filename): + elif ".json" in str(filename): return AcunetixJSONParser().get_findings(filename, test) diff --git a/dojo/tools/anchorectl_policies/parser.py b/dojo/tools/anchorectl_policies/parser.py index 30dd42e32b..818b21d1c2 100644 --- a/dojo/tools/anchorectl_policies/parser.py +++ b/dojo/tools/anchorectl_policies/parser.py @@ -30,7 +30,7 @@ def get_findings(self, filename, test): items = [] try: for image in data: - if image['detail'] is not None: + if image["detail"] is not None: for result in image["detail"]: try: gate = result["gate"] diff --git a/dojo/tools/aws_prowler_v3plus/parser.py b/dojo/tools/aws_prowler_v3plus/parser.py index 8e62047ac8..5d550dcf5c 100644 --- a/dojo/tools/aws_prowler_v3plus/parser.py +++ b/dojo/tools/aws_prowler_v3plus/parser.py @@ -15,10 +15,10 @@ def get_description_for_scan_types(self, scan_type): return "Exports from AWS Prowler v3 in JSON format or from Prowler v4 in OCSF-JSON format." def get_findings(self, file, test): - if file.name.lower().endswith('.ocsf.json'): + if file.name.lower().endswith(".ocsf.json"): return AWSProwlerV4Parser().process_ocsf_json(file, test) - elif file.name.lower().endswith('.json'): + elif file.name.lower().endswith(".json"): return AWSProwlerV3Parser().process_json(file, test) else: - msg = 'Unknown file format' + msg = "Unknown file format" raise ValueError(msg) diff --git a/dojo/tools/aws_prowler_v3plus/prowler_v3.py b/dojo/tools/aws_prowler_v3plus/prowler_v3.py index cce0472b67..67bed2497c 100644 --- a/dojo/tools/aws_prowler_v3plus/prowler_v3.py +++ b/dojo/tools/aws_prowler_v3plus/prowler_v3.py @@ -13,30 +13,30 @@ def process_json(self, file, test): data = json.load(file) for deserialized in data: - status = deserialized.get('Status') - if status.upper() != 'FAIL': + status = deserialized.get("Status") + if status.upper() != "FAIL": continue - account = deserialized.get('AccountId') - region = deserialized.get('Region') - provider = deserialized.get('Provider') - compliance = str(deserialized.get('Compliance')) - result_extended = deserialized.get('StatusExtended') - general_description = deserialized.get('Description') - asff_compliance_type = " / ".join(deserialized.get('CheckType')) - severity = deserialized.get('Severity', 'Info').capitalize() - aws_service_name = deserialized.get('ServiceName') - impact = deserialized.get('Risk') - mitigation = deserialized.get('Remediation', {}).get('Recommendation', {}).get("Text", '') - mitigation = str(mitigation) + "\n" + str(deserialized.get('Remediation', {}).get('Code')) - documentation = deserialized.get('Remediation', {}).get('Recommendation', {}).get("Url") - documentation = str(documentation) + "\n" + str(deserialized.get('RelatedUrl')) - security_domain = deserialized.get('ResourceType') - timestamp = deserialized.get('AssessmentStartTime') - resource_arn = deserialized.get('ResourceArn') - account_id = deserialized.get('AccountId') - resource_id = deserialized.get('ResourceId') - unique_id_from_tool = deserialized.get('FindingUniqueId') + account = deserialized.get("AccountId") + region = deserialized.get("Region") + provider = deserialized.get("Provider") + compliance = str(deserialized.get("Compliance")) + result_extended = deserialized.get("StatusExtended") + 
general_description = deserialized.get("Description") + asff_compliance_type = " / ".join(deserialized.get("CheckType")) + severity = deserialized.get("Severity", "Info").capitalize() + aws_service_name = deserialized.get("ServiceName") + impact = deserialized.get("Risk") + mitigation = deserialized.get("Remediation", {}).get("Recommendation", {}).get("Text", "") + mitigation = str(mitigation) + "\n" + str(deserialized.get("Remediation", {}).get("Code")) + documentation = deserialized.get("Remediation", {}).get("Recommendation", {}).get("Url") + documentation = str(documentation) + "\n" + str(deserialized.get("RelatedUrl")) + security_domain = deserialized.get("ResourceType") + timestamp = deserialized.get("AssessmentStartTime") + resource_arn = deserialized.get("ResourceArn") + account_id = deserialized.get("AccountId") + resource_id = deserialized.get("ResourceId") + unique_id_from_tool = deserialized.get("FindingUniqueId") if not resource_arn or resource_arn == "": component_name = str(provider) + "-" + str(account_id) + "-" + str(region) + "-" + str(resource_id) else: @@ -51,7 +51,7 @@ def process_json(self, file, test): "\n**Security Domain:** " + str(security_domain) # improving key to get duplicates - dupe_key = hashlib.sha256(unique_id_from_tool.encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256(unique_id_from_tool.encode("utf-8")).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] if description is not None: diff --git a/dojo/tools/aws_prowler_v3plus/prowler_v4.py b/dojo/tools/aws_prowler_v3plus/prowler_v4.py index ac3c4a99e6..71b63c368b 100644 --- a/dojo/tools/aws_prowler_v3plus/prowler_v4.py +++ b/dojo/tools/aws_prowler_v3plus/prowler_v4.py @@ -15,31 +15,31 @@ def process_ocsf_json(self, file, test): # https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/reporting/#json for deserialized in data: - status = deserialized.get('status_code') - if status.upper() != 'FAIL': + status = deserialized.get("status_code") + if status.upper() != "FAIL": continue - account_id = deserialized.get('cloud', {}).get('account', {}).get("uid", '') - region = deserialized.get('resources', [{}])[0].get('region', '') - provider = deserialized.get('cloud', {}).get('provider', '') - compliance = '' - compliance_field = deserialized.get('unmapped', {}).get("compliance", {}) + account_id = deserialized.get("cloud", {}).get("account", {}).get("uid", "") + region = deserialized.get("resources", [{}])[0].get("region", "") + provider = deserialized.get("cloud", {}).get("provider", "") + compliance = "" + compliance_field = deserialized.get("unmapped", {}).get("compliance", {}) if compliance_field: - compliance = ' | '.join([f"{key}:{','.join(value)}" for key, value in compliance_field.items()]) - result_extended = deserialized.get('status_detail') - general_description = deserialized.get('finding_info', {}).get('desc', '') - asff_compliance_type = deserialized.get('unmapped', {}).get('check_type', '') - severity = deserialized.get('severity', 'Info').capitalize() - aws_service_name = deserialized.get('resources', [{}])[0].get('group', {}).get('name', '') - impact = deserialized.get('risk_details') - mitigation = deserialized.get('remediation', {}).get("desc", '') - documentation = deserialized.get('remediation', {}).get("references", '') - documentation = str(documentation) + "\n" + str(deserialized.get('unmapped', {}).get('related_url', '')) - security_domain = deserialized.get('resources', [{}])[0].get('type', '') + compliance = " | ".join([f"{key}:{','.join(value)}" for key, 
value in compliance_field.items()]) + result_extended = deserialized.get("status_detail") + general_description = deserialized.get("finding_info", {}).get("desc", "") + asff_compliance_type = deserialized.get("unmapped", {}).get("check_type", "") + severity = deserialized.get("severity", "Info").capitalize() + aws_service_name = deserialized.get("resources", [{}])[0].get("group", {}).get("name", "") + impact = deserialized.get("risk_details") + mitigation = deserialized.get("remediation", {}).get("desc", "") + documentation = deserialized.get("remediation", {}).get("references", "") + documentation = str(documentation) + "\n" + str(deserialized.get("unmapped", {}).get("related_url", "")) + security_domain = deserialized.get("resources", [{}])[0].get("type", "") timestamp = deserialized.get("event_time") - resource_arn = deserialized.get('resources', [{}])[0].get('uid', '') - resource_id = deserialized.get('resources', [{}])[0].get('name', '') - unique_id_from_tool = deserialized.get('finding_info', {}).get('uid', '') + resource_arn = deserialized.get("resources", [{}])[0].get("uid", "") + resource_id = deserialized.get("resources", [{}])[0].get("name", "") + unique_id_from_tool = deserialized.get("finding_info", {}).get("uid", "") if not resource_arn or resource_arn == "": component_name = str(provider) + "-" + str(account_id) + "-" + str(region) + "-" + str(resource_id) else: @@ -55,7 +55,7 @@ def process_ocsf_json(self, file, test): "\n**ASFF Compliance Type:** " + str(asff_compliance_type) # improving key to get duplicates - dupe_key = hashlib.sha256(unique_id_from_tool.encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256(unique_id_from_tool.encode("utf-8")).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] if description is not None: diff --git a/dojo/tools/awssecurityhub/compliance.py b/dojo/tools/awssecurityhub/compliance.py index 8f12016ff8..bd1eadcd3b 100644 --- a/dojo/tools/awssecurityhub/compliance.py +++ b/dojo/tools/awssecurityhub/compliance.py @@ -23,7 +23,7 @@ def get_item(self, finding: dict, test): description += f"**AWS Finding ARN:** {finding_id}\n" description += f"**Resource IDs:** {', '.join(set(resource_arns))}\n" description += f"**AwsAccountId:** {finding.get('AwsAccountId', '')}\n" - if finding.get('Region'): + if finding.get("Region"): description += f"**Region:** {finding.get('Region', '')}\n" description += f"**Generator ID:** {finding.get('GeneratorId', '')}\n" if finding.get("Compliance", {}).get("Status", "PASSED") == "PASSED": diff --git a/dojo/tools/awssecurityhub/guardduty.py b/dojo/tools/awssecurityhub/guardduty.py index 19987d0ddf..a789a9740d 100644 --- a/dojo/tools/awssecurityhub/guardduty.py +++ b/dojo/tools/awssecurityhub/guardduty.py @@ -32,8 +32,8 @@ def get_item(self, finding: dict, test): mitigated = datetime.utcnow() description = f"This is a GuardDuty Finding\n{finding.get('Description', '')}" + "\n" description += f"**AWS Finding ARN:** {finding_id}\n" - if finding.get('SourceUrl'): - sourceurl = "[" + finding.get('SourceUrl') + "](" + finding.get('SourceUrl') + ")" + if finding.get("SourceUrl"): + sourceurl = "[" + finding.get("SourceUrl") + "](" + finding.get("SourceUrl") + ")" description += f"**SourceURL:** {sourceurl}\n" description += f"**AwsAccountId:** {finding.get('AwsAccountId', '')}\n" description += f"**Region:** {finding.get('Region', '')}\n" diff --git a/dojo/tools/awssecurityhub/parser.py b/dojo/tools/awssecurityhub/parser.py index 2bc71c2e91..3d07d2554c 100644 --- a/dojo/tools/awssecurityhub/parser.py +++ 
b/dojo/tools/awssecurityhub/parser.py @@ -33,8 +33,8 @@ def get_tests(self, scan_type, scan): test = ParserTest( name=self.ID, type=self.ID, version="", ) - test.description = "**AWS Accounts:** " + ', '.join(set(aws_acc)) + "\n" - test.description += "**Finding Origins:** " + ', '.join(set(prod)) + "\n" + test.description = "**AWS Accounts:** " + ", ".join(set(aws_acc)) + "\n" + test.description += "**Finding Origins:** " + ", ".join(set(prod)) + "\n" test.findings = self.get_items(data, report_date) return [test] diff --git a/dojo/tools/blackduck_binary_analysis/importer.py b/dojo/tools/blackduck_binary_analysis/importer.py index 5b54321129..14f72ae6f7 100644 --- a/dojo/tools/blackduck_binary_analysis/importer.py +++ b/dojo/tools/blackduck_binary_analysis/importer.py @@ -76,7 +76,7 @@ def _process_vuln_results( ) def __partition_by_key(self, csv_file): - csv_results = csv.DictReader(csv_file, delimiter=',', quotechar='"') + csv_results = csv.DictReader(csv_file, delimiter=",", quotechar='"') vulnerabilities = defaultdict(set) key = "Object SHA1" diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py index b69fad5d91..2c34787a9c 100644 --- a/dojo/tools/blackduck_binary_analysis/parser.py +++ b/dojo/tools/blackduck_binary_analysis/parser.py @@ -54,7 +54,7 @@ def ingest_findings(self, sorted_findings, test): # Some of the CVSSv2 vectors have a trailing # colon that needs to be removed cvss_v3 = False - cvss_vectors = i.cvss_vector_v2.replace(':/', '/') + cvss_vectors = i.cvss_vector_v2.replace(":/", "/") cvss_obj = CVSS2(cvss_vectors) else: cvss_vectors = "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N" diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py index 1ec4c6b62d..ec801d8e28 100644 --- a/dojo/tools/burp_api/parser.py +++ b/dojo/tools/burp_api/parser.py @@ -111,7 +111,7 @@ def get_clean_base64(self, value): output += data.decode() except UnicodeDecodeError: output += "Decoding of the DataSegment failed. Thus, decoded with `latin1`. 
The result is the following one:\n" - output += data.decode('latin1') + output += data.decode("latin1") elif segment["type"] == "SnipSegment": output += f"\n<...> ({segment['length']} bytes)" elif segment["type"] == "HighlightSegment": diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py index aa6e1b4a11..6db92a1e1f 100644 --- a/dojo/tools/checkmarx_one/parser.py +++ b/dojo/tools/checkmarx_one/parser.py @@ -193,7 +193,7 @@ def parse_vulnerabilities( for result in results: id = result.get("identifiers")[0].get("value") cwe = None - if 'vulnerabilityDetails' in result: + if "vulnerabilityDetails" in result: cwe = result.get("vulnerabilites").get("cweId") severity = result.get("severity") locations_uri = result.get("location").get("file") diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py index 22b7cdfd0a..06769d4d03 100644 --- a/dojo/tools/chefinspect/parser.py +++ b/dojo/tools/chefinspect/parser.py @@ -35,19 +35,19 @@ def get_findings(self, file, test): for line in loglines: if len(line) != 0: json_object = json.loads(line) - description = str(json_object.get('description')) + "\n\n" - description += "batch_runtime: " + str(json_object.get('batch_runtime')) + "\n" - description += "application_group: " + str(json_object.get('application_group')) + "\n" - description += "zone: " + str(json_object.get('zone')) + "\n" - description += "office: " + str(json_object.get('office')) + "\n" - description += "dc: " + str(json_object.get('dc')) + "\n" - description += "environment: " + str(json_object.get('environment')) + "\n" - description += "id: " + str(json_object.get('id')) + "\n" - description += "control_tags: " + str(json_object.get('control_tags')) + "\n" - description += "platform: " + str(json_object.get('platform')) + "\n" - description += "profile: " + str(json_object.get('profile')) + "\n" - description += "group: " + str(json_object.get('group')) + "\n" - description += "results: " + str(json_object.get('results')) + "\n" + description = str(json_object.get("description")) + "\n\n" + description += "batch_runtime: " + str(json_object.get("batch_runtime")) + "\n" + description += "application_group: " + str(json_object.get("application_group")) + "\n" + description += "zone: " + str(json_object.get("zone")) + "\n" + description += "office: " + str(json_object.get("office")) + "\n" + description += "dc: " + str(json_object.get("dc")) + "\n" + description += "environment: " + str(json_object.get("environment")) + "\n" + description += "id: " + str(json_object.get("id")) + "\n" + description += "control_tags: " + str(json_object.get("control_tags")) + "\n" + description += "platform: " + str(json_object.get("platform")) + "\n" + description += "profile: " + str(json_object.get("profile")) + "\n" + description += "group: " + str(json_object.get("group")) + "\n" + description += "results: " + str(json_object.get("results")) + "\n" result.append( Finding( title=json_object.get("title"), diff --git a/dojo/tools/clair/parser.py b/dojo/tools/clair/parser.py index dd03c49d7c..8b82aa8ec6 100644 --- a/dojo/tools/clair/parser.py +++ b/dojo/tools/clair/parser.py @@ -31,10 +31,10 @@ def parse_json(self, json_output): tree = json.loads(str(data, "utf-8")) except BaseException: tree = json.loads(data) - if tree.get('image'): + if tree.get("image"): self.scanner = "clair" subtree = tree.get("vulnerabilities") - elif tree.get('LayerCount'): + elif tree.get("LayerCount"): self.scanner = "clairklar" subtree = tree.get("Vulnerabilities") except 
BaseException: diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py index 10917ca84b..02868e45b5 100644 --- a/dojo/tools/crunch42/parser.py +++ b/dojo/tools/crunch42/parser.py @@ -58,7 +58,7 @@ def get_items(self, tree, test): def get_item(self, issue, title, test): fingerprint = issue["fingerprint"] pointer = issue["pointer"] - message = issue["specificDescription"] if 'specificDescription' in issue else title + message = issue["specificDescription"] if "specificDescription" in issue else title score = issue["score"] criticality = issue["criticality"] if criticality == 1: diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py index e7a39ea4bd..2e3467623f 100644 --- a/dojo/tools/dependency_track/parser.py +++ b/dojo/tools/dependency_track/parser.py @@ -112,47 +112,47 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin :return: A DefectDojo Finding model """ # Validation of required fields - if 'vulnerability' not in dependency_track_finding: + if "vulnerability" not in dependency_track_finding: msg = "Missing 'vulnerability' node from finding!" raise ValueError(msg) - if 'vulnId' not in dependency_track_finding['vulnerability']: + if "vulnId" not in dependency_track_finding["vulnerability"]: msg = "Missing 'vulnId' node from vulnerability!" raise ValueError(msg) - vuln_id = dependency_track_finding['vulnerability']['vulnId'] - if 'source' not in dependency_track_finding['vulnerability']: + vuln_id = dependency_track_finding["vulnerability"]["vulnId"] + if "source" not in dependency_track_finding["vulnerability"]: msg = "Missing 'source' node from vulnerability!" raise ValueError(msg) - source = dependency_track_finding['vulnerability']['source'] - if 'component' not in dependency_track_finding: + source = dependency_track_finding["vulnerability"]["source"] + if "component" not in dependency_track_finding: msg = "Missing 'component' node from finding!" raise ValueError(msg) - if 'name' not in dependency_track_finding['component']: + if "name" not in dependency_track_finding["component"]: msg = "Missing 'name' node from component!" raise ValueError(msg) - component_name = dependency_track_finding['component']['name'] + component_name = dependency_track_finding["component"]["name"] # Build the title of the Dojo finding # Note: the 'version' of a component is not a requirement in the Dependency Track data model. # As such we only add in version information if it is present. 
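# --- editor's note: illustrative only, not part of the patch ---------------
# The guarded field access above and below is easier to follow against a
# minimal Dependency-Track finding; the field names are the ones this parser
# reads, the values are hypothetical:
#
#     sample_finding = {
#         "component": {
#             "name": "jackson-databind",
#             "version": "2.9.9",        # optional, hence the check below
#             "purl": "pkg:maven/com.fasterxml.jackson.core/jackson-databind@2.9.9",
#         },
#         "vulnerability": {
#             "vulnId": "CVE-2019-12384",
#             "source": "NVD",
#             "severity": "HIGH",
#             "cweId": 502,              # optional; parser falls back to CWE-1035
#             "aliases": [{"cveId": "CVE-2019-12384"}],
#         },
#         "analysis": {"state": "NOT_AFFECTED"},
#     }
# ----------------------------------------------------------------------------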
- if 'version' in dependency_track_finding['component'] and dependency_track_finding['component']['version'] is not None: - component_version = dependency_track_finding['component']['version'] + if "version" in dependency_track_finding["component"] and dependency_track_finding["component"]["version"] is not None: + component_version = dependency_track_finding["component"]["version"] else: component_version = None if component_version is not None: version_description = component_version else: - version_description = '' + version_description = "" title = f"{component_name}:{version_description} affected by: {vuln_id} ({source})" # We should collect all the vulnerability ids, the FPF format can add additional IDs as aliases # we add these aliases in the vulnerability_id list making sure duplicate findings get correctly deduplicated # older version of Dependency-track might not include these field therefore lets check first - if dependency_track_finding['vulnerability'].get('aliases'): + if dependency_track_finding["vulnerability"].get("aliases"): # There can be multiple alias entries set_of_ids = set() - set_of_sources = {'cveId', 'sonatypeId', 'ghsaId', 'osvId', 'snykId', 'gsdId', 'vulnDbId'} - for alias in dependency_track_finding['vulnerability']['aliases']: + set_of_sources = {"cveId", "sonatypeId", "ghsaId", "osvId", "snykId", "gsdId", "vulnDbId"} + for alias in dependency_track_finding["vulnerability"]["aliases"]: for source in set_of_sources: if source in alias: set_of_ids.add(alias[source]) @@ -161,11 +161,11 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin # The vulnId is not always a CVE (e.g. if the vulnerability is not from the NVD source) # So here we set the cve for the DefectDojo finding to null unless the source of the # Dependency Track vulnerability is NVD - vulnerability_id = [vuln_id] if source is not None and source.upper() == 'NVD' else None + vulnerability_id = [vuln_id] if source is not None and source.upper() == "NVD" else None # Default CWE to CWE-1035 Using Components with Known Vulnerabilities if there is no CWE - if 'cweId' in dependency_track_finding['vulnerability'] and dependency_track_finding['vulnerability']['cweId'] is not None: - cwe = dependency_track_finding['vulnerability']['cweId'] + if "cweId" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["cweId"] is not None: + cwe = dependency_track_finding["vulnerability"]["cweId"] else: cwe = 1035 @@ -179,8 +179,8 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin f"{component_description} is affected by the vulnerability with an id of {vuln_id} as " \ f"identified by {source}." # Append purl info if it is present - if 'purl' in dependency_track_finding['component'] and dependency_track_finding['component']['purl'] is not None: - component_purl = dependency_track_finding['component']['purl'] + if "purl" in dependency_track_finding["component"] and dependency_track_finding["component"]["purl"] is not None: + component_purl = dependency_track_finding["component"]["purl"] vulnerability_description = vulnerability_description + f"\nThe purl of the affected component is: {component_purl}." 
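# --- editor's note: illustrative sketch, not part of the patch --------------
# The purl-to-file_path fallback that follows reduces to a few lines; the
# helper name here is hypothetical:
#
#     def dedupe_file_path(component: dict) -> str:
#         # Prefer the package URL so hash-based deduplication keys on the
#         # exact component; fall back to a constant when "purl" is absent.
#         purl = component.get("purl")
#         return purl if purl is not None else "unknown"
#
#     assert dedupe_file_path({"purl": "pkg:pypi/django@4.2"}) == "pkg:pypi/django@4.2"
#     assert dedupe_file_path({}) == "unknown"
# ----------------------------------------------------------------------------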
# there is no file_path in the report, but defect dojo needs it otherwise it skips deduplication: # see https://github.com/DefectDojo/django-DefectDojo/issues/3647 @@ -188,40 +188,40 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin # hash code dedupe config for this parser file_path = component_purl else: - file_path = 'unknown' + file_path = "unknown" # Append other info about vulnerability description info if it is present - if 'title' in dependency_track_finding['vulnerability'] and dependency_track_finding['vulnerability']['title'] is not None: - vulnerability_description = vulnerability_description + "\nVulnerability Title: {title}".format(title=dependency_track_finding['vulnerability']['title']) - if 'subtitle' in dependency_track_finding['vulnerability'] and dependency_track_finding['vulnerability']['subtitle'] is not None: - vulnerability_description = vulnerability_description + "\nVulnerability Subtitle: {subtitle}".format(subtitle=dependency_track_finding['vulnerability']['subtitle']) - if 'description' in dependency_track_finding['vulnerability'] and dependency_track_finding['vulnerability']['description'] is not None: - vulnerability_description = vulnerability_description + "\nVulnerability Description: {description}".format(description=dependency_track_finding['vulnerability']['description']) - if 'uuid' in dependency_track_finding['vulnerability'] and dependency_track_finding['vulnerability']['uuid'] is not None: - vuln_id_from_tool = dependency_track_finding['vulnerability']['uuid'] + if "title" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["title"] is not None: + vulnerability_description = vulnerability_description + "\nVulnerability Title: {title}".format(title=dependency_track_finding["vulnerability"]["title"]) + if "subtitle" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["subtitle"] is not None: + vulnerability_description = vulnerability_description + "\nVulnerability Subtitle: {subtitle}".format(subtitle=dependency_track_finding["vulnerability"]["subtitle"]) + if "description" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["description"] is not None: + vulnerability_description = vulnerability_description + "\nVulnerability Description: {description}".format(description=dependency_track_finding["vulnerability"]["description"]) + if "uuid" in dependency_track_finding["vulnerability"] and dependency_track_finding["vulnerability"]["uuid"] is not None: + vuln_id_from_tool = dependency_track_finding["vulnerability"]["uuid"] # Get severity according to Dependency Track and convert it to a severity DefectDojo understands - dependency_track_severity = dependency_track_finding['vulnerability']['severity'] + dependency_track_severity = dependency_track_finding["vulnerability"]["severity"] vulnerability_severity = self._convert_dependency_track_severity_to_dojo_severity(dependency_track_severity) if vulnerability_severity is None: logger.warning("Detected severity of %s that could not be mapped for %s. 
Defaulting to Informational!", dependency_track_severity, title) vulnerability_severity = "Informational" # Get the cvss score of the vulnerabililty - cvss_score = dependency_track_finding['vulnerability'].get("cvssV3BaseScore") + cvss_score = dependency_track_finding["vulnerability"].get("cvssV3BaseScore") # Use the analysis state from Dependency Track to determine if the finding has already been marked as a false positive upstream - analysis = dependency_track_finding.get('analysis') - is_false_positive = True if analysis is not None and analysis.get('state') == 'FALSE_POSITIVE' else False + analysis = dependency_track_finding.get("analysis") + is_false_positive = True if analysis is not None and analysis.get("state") == "FALSE_POSITIVE" else False # Get the EPSS details - if 'epssPercentile' in dependency_track_finding['vulnerability']: - epss_percentile = dependency_track_finding['vulnerability']['epssPercentile'] + if "epssPercentile" in dependency_track_finding["vulnerability"]: + epss_percentile = dependency_track_finding["vulnerability"]["epssPercentile"] else: epss_percentile = None - if 'epssScore' in dependency_track_finding['vulnerability']: - epss_score = dependency_track_finding['vulnerability']['epssScore'] + if "epssScore" in dependency_track_finding["vulnerability"]: + epss_score = dependency_track_finding["vulnerability"]["epssScore"] else: epss_score = None @@ -275,7 +275,7 @@ def get_findings(self, file, test): # Load the contents of the JSON file into a dictionary data = file.read() try: - findings_export_dict = json.loads(str(data, 'utf-8')) + findings_export_dict = json.loads(str(data, "utf-8")) except: findings_export_dict = json.loads(data) @@ -285,7 +285,7 @@ def get_findings(self, file, test): # Make sure the findings key exists in the dictionary and that it is not null or an empty list # If it is null or an empty list then exit - if 'findings' not in findings_export_dict or not findings_export_dict['findings']: + if "findings" not in findings_export_dict or not findings_export_dict["findings"]: return [] # Start with an empty list of findings @@ -293,7 +293,7 @@ def get_findings(self, file, test): # If we have gotten this far then there should be one or more findings # Loop through each finding from Dependency Track - for dependency_track_finding in findings_export_dict['findings']: + for dependency_track_finding in findings_export_dict["findings"]: # Convert a Dependency Track finding to a DefectDojo finding dojo_finding = self._convert_dependency_track_finding_to_dojo_finding(dependency_track_finding, test) diff --git a/dojo/tools/factory.py b/dojo/tools/factory.py index 2ec6f7c036..ca9e8cf392 100644 --- a/dojo/tools/factory.py +++ b/dojo/tools/factory.py @@ -80,14 +80,14 @@ def get_api_scan_configuration_hints(): for scan_type in scan_types: tool_type = parser.requires_tool_type(scan_type) res.append({ - 'name': name, - 'id': name.lower().replace(' ', '_').replace('.', ''), - 'tool_type_name': tool_type, - 'tool_types': Tool_Type.objects.filter(name=tool_type), - 'tool_configurations': Tool_Configuration.objects.filter(tool_type__name=tool_type), - 'hint': parser.api_scan_configuration_hint(), + "name": name, + "id": name.lower().replace(" ", "_").replace(".", ""), + "tool_type_name": tool_type, + "tool_types": Tool_Type.objects.filter(name=tool_type), + "tool_configurations": Tool_Configuration.objects.filter(tool_type__name=tool_type), + "hint": parser.api_scan_configuration_hint(), }) - return sorted(res, key=lambda x: x['name'].lower()) + return 
sorted(res, key=lambda x: x["name"].lower()) def requires_tool_type(scan_type): diff --git a/dojo/tools/fortify/fpr_parser.py b/dojo/tools/fortify/fpr_parser.py index b2fb90474e..d0d62e2aa9 100644 --- a/dojo/tools/fortify/fpr_parser.py +++ b/dojo/tools/fortify/fpr_parser.py @@ -9,11 +9,11 @@ class FortifyFPRParser: def parse_fpr(self, filename, test): if str(filename.__class__) == "": - input_zip = zipfile.ZipFile(filename.name, 'r') + input_zip = zipfile.ZipFile(filename.name, "r") else: - input_zip = zipfile.ZipFile(filename, 'r') + input_zip = zipfile.ZipFile(filename, "r") zipdata = {name: input_zip.read(name) for name in input_zip.namelist()} - root = ElementTree.fromstring(zipdata["audit.fvdl"].decode('utf-8')) + root = ElementTree.fromstring(zipdata["audit.fvdl"].decode("utf-8")) regex = r"{.*}" matches = re.match(regex, root.tag) try: diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py index 85fdc6d33e..2b1f3e21e3 100644 --- a/dojo/tools/fortify/parser.py +++ b/dojo/tools/fortify/parser.py @@ -13,7 +13,7 @@ def get_description_for_scan_types(self, scan_type): return "Import Findings in FPR or XML file format." def get_findings(self, filename, test): - if str(filename.name).endswith('.xml'): + if str(filename.name).endswith(".xml"): return FortifyXMLParser().parse_xml(filename, test) - elif str(filename.name).endswith('.fpr'): + elif str(filename.name).endswith(".fpr"): return FortifyFPRParser().parse_fpr(filename, test) diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py index 68dcfc9fd6..f4d169b205 100644 --- a/dojo/tools/gitlab_sast/parser.py +++ b/dojo/tools/gitlab_sast/parser.py @@ -26,11 +26,11 @@ def get_findings(self, json_output, test): def get_tests(self, scan_type, handle): tree = self.parse_json(handle) - scan = tree.get('scan') + scan = tree.get("scan") if scan: - scanner_name = scan['scanner']['name'] - scanner_type = scan['scanner']['name'] - scanner_version = scan['scanner']['version'] + scanner_name = scan["scanner"]["name"] + scanner_type = scan["scanner"]["name"] + scanner_version = scan["scanner"]["version"] else: scanner_name = scanner_type = scanner_version = None @@ -45,7 +45,7 @@ def get_tests(self, scan_type, handle): def parse_json(self, json_output): data = json_output.read() try: - tree = json.loads(str(data, 'utf-8')) + tree = json.loads(str(data, "utf-8")) except: tree = json.loads(data) @@ -54,7 +54,7 @@ def parse_json(self, json_output): def get_items(self, tree): items = {} scanner = tree.get("scan", {}).get("scanner", {}) - for node in tree['vulnerabilities']: + for node in tree["vulnerabilities"]: item = self.get_item(node, scanner) if item: items[item.unique_id_from_tool] = item @@ -63,83 +63,83 @@ def get_items(self, tree): def get_confidence_numeric(self, argument): switcher = { - 'Confirmed': 1, # Certain - 'High': 3, # Firm - 'Medium': 4, # Firm - 'Low': 6, # Tentative - 'Experimental': 7, # Tentative + "Confirmed": 1, # Certain + "High": 3, # Firm + "Medium": 4, # Firm + "Low": 6, # Tentative + "Experimental": 7, # Tentative } return switcher.get(argument, None) def get_item(self, vuln, scanner): - unique_id_from_tool = vuln['id'] if 'id' in vuln else vuln['cve'] - title = '' - if 'name' in vuln: - title = vuln['name'] - elif 'message' in vuln: - title = vuln['message'] - elif 'description' in vuln: - title = vuln['description'] + unique_id_from_tool = vuln["id"] if "id" in vuln else vuln["cve"] + title = "" + if "name" in vuln: + title = vuln["name"] + elif "message" in vuln: + title = 
vuln["message"] + elif "description" in vuln: + title = vuln["description"] else: # All other fields are optional, if none of them has a value, fall back on the unique id title = unique_id_from_tool description = f"Scanner: {scanner.get('name', 'Could not be determined')}\n" - if 'message' in vuln: + if "message" in vuln: description += f"{vuln['message']}\n" - if 'description' in vuln: + if "description" in vuln: description += f"{vuln['description']}\n" - location = vuln['location'] - file_path = location['file'] if 'file' in location else None + location = vuln["location"] + file_path = location["file"] if "file" in location else None - line = location['start_line'] if 'start_line' in location else None + line = location["start_line"] if "start_line" in location else None sast_object = None sast_source_file_path = None sast_source_line = None - if 'class' in location and 'method' in location: + if "class" in location and "method" in location: sast_object = f"{location['class']}#{location['method']}" - elif 'class' in location: - sast_object = location['class'] - elif 'method' in location: - sast_object = location['method'] + elif "class" in location: + sast_object = location["class"] + elif "method" in location: + sast_object = location["method"] if sast_object is not None: sast_source_file_path = file_path sast_source_line = line - if 'end_line' in location: - line = location['end_line'] + if "end_line" in location: + line = location["end_line"] - severity = vuln.get('severity') - if severity is None or severity == 'Undefined' or severity == 'Unknown': + severity = vuln.get("severity") + if severity is None or severity == "Undefined" or severity == "Unknown": # Severity can be "Undefined" or "Unknown" in SAST report # In that case we set it as Info and specify the initial severity in the title - title = f'[{severity} severity] {title}' - severity = 'Info' - scanner_confidence = self.get_confidence_numeric(vuln.get('confidence', 'Unkown')) + title = f"[{severity} severity] {title}" + severity = "Info" + scanner_confidence = self.get_confidence_numeric(vuln.get("confidence", "Unkown")) - mitigation = vuln['solution'] if 'solution' in vuln else '' + mitigation = vuln["solution"] if "solution" in vuln else "" cwe = None vulnerability_id = None - references = '' - if 'identifiers' in vuln: - for identifier in vuln['identifiers']: - if identifier['type'].lower() == 'cwe': - if isinstance(identifier['value'], int): - cwe = identifier['value'] - elif identifier['value'].isdigit(): - cwe = int(identifier['value']) - elif identifier['type'].lower() == 'cve': - vulnerability_id = identifier['value'] + references = "" + if "identifiers" in vuln: + for identifier in vuln["identifiers"]: + if identifier["type"].lower() == "cwe": + if isinstance(identifier["value"], int): + cwe = identifier["value"] + elif identifier["value"].isdigit(): + cwe = int(identifier["value"]) + elif identifier["type"].lower() == "cve": + vulnerability_id = identifier["value"] else: references += f"Identifier type: {identifier['type']}\n" references += f"Name: {identifier['name']}\n" references += f"Value: {identifier['value']}\n" - if 'url' in identifier: + if "url" in identifier: references += f"URL: {identifier['url']}\n" - references += '\n' + references += "\n" finding = Finding( title=title, diff --git a/dojo/tools/govulncheck/parser.py b/dojo/tools/govulncheck/parser.py index 6e76330e68..0c5bb4191b 100644 --- a/dojo/tools/govulncheck/parser.py +++ b/dojo/tools/govulncheck/parser.py @@ -41,7 +41,7 @@ def 
get_finding_trace_info(self, data, osv_id): # Browse the findings to look for matching OSV-id. If the OSV-id is matching, extract traces. trace_info_strs = [] for elem in data: - if 'finding' in elem.keys(): + if "finding" in elem.keys(): finding = elem["finding"] if finding.get("osv") == osv_id: trace_info = finding.get("trace", []) @@ -59,12 +59,12 @@ def get_finding_trace_info(self, data, osv_id): def get_affected_version(self, data, osv_id): # Browse the findings to look for matching OSV-id. If the OSV-id is matching, extract the first affected version. for elem in data: - if 'finding' in elem.keys(): + if "finding" in elem.keys(): finding = elem["finding"] if finding.get("osv") == osv_id: trace_info = finding.get("trace", []) for trace in trace_info: - if 'version' in trace.keys(): + if "version" in trace.keys(): return trace.get("version") return "" @@ -127,25 +127,25 @@ def get_findings(self, scan_file, test): elif isinstance(data, list): # Parsing for new govulncheck output format for elem in data: - if 'osv' in elem.keys(): + if "osv" in elem.keys(): cve = elem["osv"]["aliases"][0] osv_data = elem["osv"] affected_package = osv_data["affected"][0]["package"] affected_ranges = osv_data["affected"][0]["ranges"] affected_ecosystem = affected_package.get("ecosystem", "Unknown") - impact = osv_data.get('details', 'Unknown') + impact = osv_data.get("details", "Unknown") formatted_ranges = [] - summary = osv_data.get('summary', 'Unknown') + summary = osv_data.get("summary", "Unknown") component_name = affected_package["name"] id = osv_data["id"] for r in affected_ranges: - events = r['events'] + events = r["events"] event_pairs = [] for i in range(0, len(events), 2): # Events come in pairs: introduced, then fixed - introduced = events[i].get('introduced', 'Unknown') - fixed = events[i + 1].get('fixed', 'Unknown') if i + 1 < len(events) else 'Unknown' + introduced = events[i].get("introduced", "Unknown") + fixed = events[i + 1].get("fixed", "Unknown") if i + 1 < len(events) else "Unknown" event_pairs.append(f"\n\t\tIntroduced in {introduced}, fixed in {fixed}") formatted_ranges.append(f"type {r['type']}: {'. 
'.join(event_pairs)}") range_info = "\n ".join(formatted_ranges) @@ -177,9 +177,9 @@ def get_findings(self, scan_file, test): else: title = f"{osv_data['id']} - {affected_package['name']}" - affected_version = self.get_affected_version(data, osv_data['id']) + affected_version = self.get_affected_version(data, osv_data["id"]) - if 'severity' in elem["osv"].keys(): + if "severity" in elem["osv"].keys(): severity = elem["osv"]["severity"] else: severity = SEVERITY diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py index 4186544b21..c70c7031a5 100644 --- a/dojo/tools/harbor_vulnerability/parser.py +++ b/dojo/tools/harbor_vulnerability/parser.py @@ -38,7 +38,7 @@ def get_findings(self, filename, test): pass # Early exit if empty - if 'vulnerability' not in locals() or vulnerability is None: + if "vulnerability" not in locals() or vulnerability is None: return [] for item in vulnerability: diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py index fbf1a49b25..00124b3f6c 100644 --- a/dojo/tools/hcl_appscan/parser.py +++ b/dojo/tools/hcl_appscan/parser.py @@ -40,66 +40,66 @@ def get_findings(self, file, test): description = "" for item in finding: match item.tag: - case 'severity': + case "severity": output = self.xmltreehelper(item) if output is None: severity = "Info" else: severity = output.strip(" ").capitalize() - case 'cwe': + case "cwe": cwe = int(self.xmltreehelper(item)) - case 'remediation': + case "remediation": remediation = self.xmltreehelper(item) - case 'advisory': + case "advisory": advisory = self.xmltreehelper(item) - case 'issue-type': + case "issue-type": title = self.xmltreehelper(item).strip() description = description + "Issue-Type:" + title + "\n" - case 'issue-type-name': + case "issue-type-name": title = self.xmltreehelper(item).strip() description = description + "Issue-Type-Name:" + title + "\n" - case 'location': + case "location": location = self.xmltreehelper(item) description = description + "Location:" + location + "\n" - case 'domain': + case "domain": domain = self.xmltreehelper(item) title += "_" + domain.strip() description = description + "Domain:" + domain + "\n" - case 'threat-class': + case "threat-class": threatclass = self.xmltreehelper(item) description = description + "Threat-Class:" + threatclass + "\n" - case 'entity': + case "entity": entity = self.xmltreehelper(item) title += "_" + entity.strip() description = description + "Entity:" + entity + "\n" - case 'security-risks': + case "security-risks": security_risks = self.xmltreehelper(item) description = description + "Security-Risks:" + security_risks + "\n" - case 'cause-id': + case "cause-id": causeid = self.xmltreehelper(item) title += "_" + causeid.strip() description = description + "Cause-Id:" + causeid + "\n" - case 'url-name': + case "url-name": urlname = self.xmltreehelper(item) title += "_" + urlname.strip() description = description + "Url-Name:" + urlname + "\n" - case 'element': + case "element": element = self.xmltreehelper(item) description = description + "Element:" + element + "\n" - case 'element-type': + case "element-type": elementtype = self.xmltreehelper(item) description = description + "ElementType:" + elementtype + "\n" - case 'path': + case "path": path = self.xmltreehelper(item) title += "_" + path.strip() description = description + "Path:" + path + "\n" - case 'scheme': + case "scheme": scheme = self.xmltreehelper(item) description = description + "Scheme:" + scheme + "\n" - case 'host': + case 
"host": host = self.xmltreehelper(item) description = description + "Host:" + host + "\n" - case 'port': + case "port": port = self.xmltreehelper(item) description = description + "Port:" + port + "\n" finding = Finding( diff --git a/dojo/tools/humble/parser.py b/dojo/tools/humble/parser.py index f9044c0210..ddab4b7430 100644 --- a/dojo/tools/humble/parser.py +++ b/dojo/tools/humble/parser.py @@ -22,8 +22,8 @@ def get_findings(self, filename, test): except ValueError: data = {} if data != {}: - url = data['[0. Info]']['URL'] - for content in data['[1. Missing HTTP Security Headers]']: + url = data["[0. Info]"]["URL"] + for content in data["[1. Missing HTTP Security Headers]"]: if content != "Nothing to report, all seems OK!": finding = Finding(title="Missing header: " + str(content), description="This security Header is missing: " + content, @@ -32,7 +32,7 @@ def get_findings(self, filename, test): dynamic_finding=True) items.append(finding) finding.unsaved_endpoints = [Endpoint.from_uri(url)] - for content in data['[2. Fingerprint HTTP Response Headers]']: + for content in data["[2. Fingerprint HTTP Response Headers]"]: if content != "Nothing to report, all seems OK!": finding = Finding(title="Available fingerprint:" + str(content), description="This fingerprint HTTP Response Header is available. Please remove it: " + content, @@ -41,7 +41,7 @@ def get_findings(self, filename, test): dynamic_finding=True) items.append(finding) finding.unsaved_endpoints = [Endpoint.from_uri(url)] - for content in data['[3. Deprecated HTTP Response Headers/Protocols and Insecure Values]']: + for content in data["[3. Deprecated HTTP Response Headers/Protocols and Insecure Values]"]: if content != "Nothing to report, all seems OK!": finding = Finding(title="Deprecated header: " + str(content), description="This deprecated HTTP Response Header is available. Please remove it: " + content, @@ -50,7 +50,7 @@ def get_findings(self, filename, test): dynamic_finding=True) items.append(finding) finding.unsaved_endpoints = [Endpoint.from_uri(url)] - for content in data['[4. Empty HTTP Response Headers Values]']: + for content in data["[4. Empty HTTP Response Headers Values]"]: if content != "Nothing to report, all seems OK!": finding = Finding(title="Empty HTTP response header: " + str(content), description="This empty HTTP Response Header value is available. 
Please remove it: " + content,
diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
index 786635b3ff..053df04aa0 100644
--- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
+++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -139,8 +139,8 @@ def get_vuln_id_from_tool(vulnerability):
 def clean_title(title):
     if title.startswith("Issue summary: "):
         title = title[len("Issue summary: "):]
-    if '\n' in title:
-        title = title[:title.index('\n')]
+    if "\n" in title:
+        title = title[:title.index("\n")]
     return title
diff --git a/dojo/tools/kubeaudit/parser.py b/dojo/tools/kubeaudit/parser.py
index 065eb45427..3349afa1d8 100644
--- a/dojo/tools/kubeaudit/parser.py
+++ b/dojo/tools/kubeaudit/parser.py
@@ -32,19 +32,19 @@ def get_findings(self, filename, test):
                 tree = json.loads(str(line, "utf-8"))
             except BaseException:
                 tree = json.loads(line)
-            AuditResultName = tree.get('AuditResultName', None)
-            DeprecatedMajor = tree.get('DeprecatedMajor', None)
-            DeprecatedMinor = tree.get('DeprecatedMinor', None)
-            IntroducedMajor = tree.get('IntroducedMajor', None)
-            IntroducedMinor = tree.get('IntroducedMinor', None)
-            ResourceApiVersion = tree.get('ResourceApiVersion', None)
-            ResourceKind = tree.get('ResourceKind', None)
-            ResourceName = tree.get('ResourceName', None)
-            level = tree.get('level', None)
-            msg = tree.get('msg', None)
-            Container = tree.get('Container', None)
-            MissingAnnotation = tree.get('MissingAnnotation', None)
-            ResourceNamespace = tree.get('ResourceNamespace', None)
+            AuditResultName = tree.get("AuditResultName", None)
+            DeprecatedMajor = tree.get("DeprecatedMajor", None)
+            DeprecatedMinor = tree.get("DeprecatedMinor", None)
+            IntroducedMajor = tree.get("IntroducedMajor", None)
+            IntroducedMinor = tree.get("IntroducedMinor", None)
+            ResourceApiVersion = tree.get("ResourceApiVersion", None)
+            ResourceKind = tree.get("ResourceKind", None)
+            ResourceName = tree.get("ResourceName", None)
+            level = tree.get("level", None)
+            msg = tree.get("msg", None)
+            Container = tree.get("Container", None)
+            MissingAnnotation = tree.get("MissingAnnotation", None)
+            ResourceNamespace = tree.get("ResourceNamespace", None)
             description = ""
             if AuditResultName:
                 description += "AuditResultName: " + AuditResultName + "\n"
diff --git a/dojo/tools/kubehunter/parser.py b/dojo/tools/kubehunter/parser.py
index ef9abf25c8..61d8a8a052 100644
--- a/dojo/tools/kubehunter/parser.py
+++ b/dojo/tools/kubehunter/parser.py
@@ -24,31 +24,31 @@ def get_findings(self, file, test):
             dupes = {}

             # Find any missing attribute
-            vulnerabilities = data['vulnerabilities']
+            vulnerabilities = data["vulnerabilities"]
             check_required_attributes(vulnerabilities)

             for item in vulnerabilities:
-                vulnerability_id = item.get('vid')
-                title = item['vulnerability']
+                vulnerability_id = item.get("vid")
+                title = item["vulnerability"]

                 # Finding details information
-                findingdetail = '**Hunter**: ' + item.get('hunter') + '\n\n'
-                findingdetail += '**Category**: ' + item.get('category') + '\n\n'
-                findingdetail += '**Location**: ' + item.get('location') + '\n\n'
-                findingdetail += '**Description**:\n' + item.get('description') + '\n\n'
+                findingdetail = "**Hunter**: " + item.get("hunter") + "\n\n"
+                findingdetail += "**Category**: " + item.get("category") + "\n\n"
+                findingdetail += "**Location**: " + item.get("location") + "\n\n"
+                findingdetail += "**Description**:\n" + item.get("description") + "\n\n"

                 # Finding severity
-                severity = item.get('severity', 'info')
-                allowed_severity = ['info', 'low', 'medium', 'high', "critical"]
+                severity = item.get("severity", "info")
+                allowed_severity = ["info", "low", "medium", "high", "critical"]
                 if severity.lower() in allowed_severity:
                     severity = severity.capitalize()
                 else:
-                    severity = 'Info'
+                    severity = "Info"

                 # Finding mitigation and reference
-                avd_reference = item.get('avd_reference')
+                avd_reference = item.get("avd_reference")

-                if avd_reference and avd_reference != '' and vulnerability_id != 'None':
+                if avd_reference and avd_reference != "" and vulnerability_id != "None":
                     mitigation = f"Further details can be found in kube-hunter documentation available at : {avd_reference}"
                     references = "**Kube-hunter AVD reference**: " + avd_reference
                 else:
@@ -56,9 +56,9 @@ def get_findings(self, file, test):
                     references = None

                 # Finding evidence
-                evidence = item.get('evidence')
-                if evidence and evidence != '' and evidence != 'none':
-                    steps_to_reproduce = '**Evidence**: ' + item.get('evidence')
+                evidence = item.get("evidence")
+                if evidence and evidence != "" and evidence != "none":
+                    steps_to_reproduce = "**Evidence**: " + item.get("evidence")
                 else:
                     steps_to_reproduce = None
@@ -79,8 +79,8 @@ def get_findings(self, file, test):
                 # internal de-duplication
                 if finding.steps_to_reproduce is None:
-                    finding.steps_to_reproduce = ''
-                dupe_key = hashlib.sha256(str(finding.description + finding.title + finding.steps_to_reproduce + finding.vuln_id_from_tool).encode('utf-8')).hexdigest()
+                    finding.steps_to_reproduce = ""
+                dupe_key = hashlib.sha256(str(finding.description + finding.title + finding.steps_to_reproduce + finding.vuln_id_from_tool).encode("utf-8")).hexdigest()
                 if dupe_key not in dupes:
                     dupes[dupe_key] = finding
diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py
index d26031a6a7..877a903db1 100644
--- a/dojo/tools/kubescape/parser.py
+++ b/dojo/tools/kubescape/parser.py
@@ -54,19 +54,19 @@ def get_findings(self, filename, test):
         for resource in data["resources"]:
             resourceid = resource["resourceID"]
             resource_type, resource_name = self.parse_resource_id(resourceid)
-            results = ([each for each in data["results"] if each.get('resourceID') == resourceid])
+            results = ([each for each in data["results"] if each.get("resourceID") == resourceid])
             controls = results[0].get("controls", [])

             for control in controls:
                 # This condition is true if the result doesn't contain the status for each control (old format)
-                retrocompatibility_condition = 'status' not in control or 'status' not in control['status']
+                retrocompatibility_condition = "status" not in control or "status" not in control["status"]
                 if retrocompatibility_condition or control["status"]["status"] == "failed":
                     control_name = control["name"]
                     if resource_type and resource_name and control_name:
                         title = f"{control_name} - {resource_type} {resource_name}"
                     else:
                         title = f"{control_name} - {resourceid}"
-                    controlID = control['controlID']
+                    controlID = control["controlID"]

                     # Find control details
                     controlSummary = self.find_control_summary_by_id(data, controlID)
diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py
index 6b1669ffaf..6f3cbb947f 100644
--- a/dojo/tools/microfocus_webinspect/parser.py
+++ b/dojo/tools/microfocus_webinspect/parser.py
@@ -55,11 +55,11 @@ def get_findings(self, file, test):
             description = ""
             classifications = issue.find("Classifications")
             if classifications is not None:
-                for content in classifications.findall('Classification'):
+                for content in classifications.findall("Classification"):
                     # detect CWE number
                     # TODO support more than one CWE number
                     if "kind" in content.attrib and "CWE" == content.attrib["kind"]:
-                        cwe = MicrofocusWebinspectParser.get_cwe(content.attrib['identifier'])
+                        cwe = MicrofocusWebinspectParser.get_cwe(content.attrib["identifier"])
                         description += "\n\n" + content.text + "\n"
             finding = Finding(
diff --git a/dojo/tools/mobsf/__init__.py b/dojo/tools/mobsf/__init__.py
index 16eb15eddb..20f78a98a5 100644
--- a/dojo/tools/mobsf/__init__.py
+++ b/dojo/tools/mobsf/__init__.py
@@ -1 +1 @@
-__author__ = 'Aaron Weaver'
+__author__ = "Aaron Weaver"
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index 6567c69b82..9e8ccf9102 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -21,7 +21,7 @@ def get_description_for_scan_types(self, scan_type):
     def get_findings(self, filename, test):
         tree = filename.read()
         try:
-            data = json.loads(str(tree, 'utf-8'))
+            data = json.loads(str(tree, "utf-8"))
         except:
             data = json.loads(tree)
         find_date = datetime.now()
@@ -113,7 +113,7 @@ def get_findings(self, filename, test):
         if "insecure_connections" in data:
             for details in data["insecure_connections"]:
                 insecure_urls = ""
-                for url in details.split(','):
+                for url in details.split(","):
                     insecure_urls = insecure_urls + url + "\n"

                 mobsf_item = {
diff --git a/dojo/tools/ms_defender/parser.py b/dojo/tools/ms_defender/parser.py
index 6eed53b7dc..07cf6de404 100644
--- a/dojo/tools/ms_defender/parser.py
+++ b/dojo/tools/ms_defender/parser.py
@@ -21,18 +21,18 @@ def get_description_for_scan_types(self, scan_type):
         return ("MSDefender findings can be retrieved using the REST API")

     def get_findings(self, file, test):
-        if str(file.name).endswith('.json'):
+        if str(file.name).endswith(".json"):
             vulnerabilityfile = json.load(file)
-            vulnerabilitydata = vulnerabilityfile['value']
+            vulnerabilitydata = vulnerabilityfile["value"]
             for vulnerability in vulnerabilitydata:
                 self.process_json(vulnerability)
-        elif str(file.name).endswith('.zip'):
+        elif str(file.name).endswith(".zip"):
             if str(file.__class__) == "":
-                input_zip = zipfile.ZipFile(file.name, 'r')
+                input_zip = zipfile.ZipFile(file.name, "r")
             else:
-                input_zip = zipfile.ZipFile(file, 'r')
+                input_zip = zipfile.ZipFile(file, "r")
             zipdata = {name: input_zip.read(name) for name in input_zip.namelist()}
-            if zipdata.get('machines/') is None or zipdata.get('vulnerabilities/') is None:
+            if zipdata.get("machines/") is None or zipdata.get("vulnerabilities/") is None:
                 return []
             else:
                 vulnerabilityfiles = []
@@ -45,16 +45,16 @@ def get_findings(self, file, test):
                 vulnerabilities = []
                 machines = {}
                 for vulnerabilityfile in vulnerabilityfiles:
-                    output = json.loads(zipdata[vulnerabilityfile].decode('ascii'))['value']
+                    output = json.loads(zipdata[vulnerabilityfile].decode("ascii"))["value"]
                     for data in output:
                         vulnerabilities.append(data)
                 for machinefile in machinefiles:
-                    output = json.loads(zipdata[machinefile].decode('ascii'))['value']
+                    output = json.loads(zipdata[machinefile].decode("ascii"))["value"]
                     for data in output:
-                        machines[data.get('id')] = data
+                        machines[data.get("id")] = data
                 for vulnerability in vulnerabilities:
                     try:
-                        self.process_zip(vulnerability, machines[vulnerability['machineId']])
+                        self.process_zip(vulnerability, machines[vulnerability["machineId"]])
                     except (IndexError, KeyError):
                         self.process_json(vulnerability)
         else:
@@ -63,83 +63,83 @@ def get_findings(self, file, test):

     def process_json(self, vulnerability):
         description = ""
-        description += "cveId: " + str(vulnerability['cveId']) + "\n"
-        description += "machineId: " + str(vulnerability['machineId']) + "\n"
-        description += "fixingKbId: " + str(vulnerability['fixingKbId']) + "\n"
-        description += "productName: " + str(vulnerability['productName']) + "\n"
-        description += "productVendor: " + str(vulnerability['productVendor']) + "\n"
-        description += "productVersion: " + str(vulnerability['productVersion']) + "\n"
-        title = str(vulnerability['cveId'])
+        description += "cveId: " + str(vulnerability["cveId"]) + "\n"
+        description += "machineId: " + str(vulnerability["machineId"]) + "\n"
+        description += "fixingKbId: " + str(vulnerability["fixingKbId"]) + "\n"
+        description += "productName: " + str(vulnerability["productName"]) + "\n"
+        description += "productVendor: " + str(vulnerability["productVendor"]) + "\n"
+        description += "productVersion: " + str(vulnerability["productVersion"]) + "\n"
+        title = str(vulnerability["cveId"])
         finding = Finding(
             title=title + "_" + vulnerability["machineId"],
-            severity=self.severity_check(vulnerability['severity']),
+            severity=self.severity_check(vulnerability["severity"]),
             description=description,
             static_finding=False,
             dynamic_finding=True,
         )
-        if vulnerability['fixingKbId'] is not None:
-            finding.mitigation = vulnerability['fixingKbId']
-        if vulnerability['cveId'] is not None:
+        if vulnerability["fixingKbId"] is not None:
+            finding.mitigation = vulnerability["fixingKbId"]
+        if vulnerability["cveId"] is not None:
             finding.unsaved_vulnerability_ids = []
-            finding.unsaved_vulnerability_ids.append(vulnerability['cveId'])
+            finding.unsaved_vulnerability_ids.append(vulnerability["cveId"])
         self.findings.append(finding)
         finding.unsaved_endpoints = []

     def process_zip(self, vulnerability, machine):
         description = ""
-        description += "cveId: " + str(vulnerability['cveId']) + "\n"
-        description += "machineId: " + str(vulnerability['machineId']) + "\n"
-        description += "fixingKbId: " + str(vulnerability['fixingKbId']) + "\n"
-        description += "productName: " + str(vulnerability['productName']) + "\n"
-        description += "productVendor: " + str(vulnerability['productVendor']) + "\n"
-        description += "productVersion: " + str(vulnerability['productVersion']) + "\n"
-        description += "machine Info: id: " + str(machine['id']) + "\n"
-        description += "machine Info: osPlatform: " + str(machine['osPlatform']) + "\n"
-        description += "machine Info: osVersion: " + str(machine['osVersion']) + "\n"
-        description += "machine Info: osProcessor: " + str(machine['osProcessor']) + "\n"
-        description += "machine Info: version: " + str(machine['version']) + "\n"
-        description += "machine Info: agentVersion: " + str(machine['agentVersion']) + "\n"
-        description += "machine Info: osBuild: " + str(machine['osBuild']) + "\n"
-        description += "machine Info: healthStatus: " + str(machine['healthStatus']) + "\n"
-        description += "machine Info: deviceValue: " + str(machine['deviceValue']) + "\n"
-        description += "machine Info: rbacGroupId: " + str(machine['rbacGroupId']) + "\n"
-        description += "machine Info: rbacGroupName: " + str(machine['rbacGroupName']) + "\n"
-        description += "machine Info: riskScore: " + str(machine['riskScore']) + "\n"
-        description += "machine Info: exposureLevel: " + str(machine['exposureLevel']) + "\n"
-        description += "machine Info: isAadJoined: " + str(machine['isAadJoined']) + "\n"
-        description += "machine Info: aadDeviceId: " + str(machine['aadDeviceId']) + "\n"
-        description += "machine Info: defenderAvStatus: " + str(machine['defenderAvStatus']) + "\n"
-        description += "machine Info: onboardingStatus: " + str(machine['onboardingStatus']) + "\n"
-        description += "machine Info: osArchitecture: " + str(machine['osArchitecture']) + "\n"
-        description += "machine Info: managedBy: " + str(machine['managedBy']) + "\n"
-        title = str(vulnerability['cveId'])
-        if str(machine['computerDnsName']) != "null":
-            title = title + "_" + str(machine['computerDnsName'])
-        if str(machine['osPlatform']) != "null":
-            title = title + "_" + str(machine['osPlatform'])
+        description += "cveId: " + str(vulnerability["cveId"]) + "\n"
+        description += "machineId: " + str(vulnerability["machineId"]) + "\n"
+        description += "fixingKbId: " + str(vulnerability["fixingKbId"]) + "\n"
+        description += "productName: " + str(vulnerability["productName"]) + "\n"
+        description += "productVendor: " + str(vulnerability["productVendor"]) + "\n"
+        description += "productVersion: " + str(vulnerability["productVersion"]) + "\n"
+        description += "machine Info: id: " + str(machine["id"]) + "\n"
+        description += "machine Info: osPlatform: " + str(machine["osPlatform"]) + "\n"
+        description += "machine Info: osVersion: " + str(machine["osVersion"]) + "\n"
+        description += "machine Info: osProcessor: " + str(machine["osProcessor"]) + "\n"
+        description += "machine Info: version: " + str(machine["version"]) + "\n"
+        description += "machine Info: agentVersion: " + str(machine["agentVersion"]) + "\n"
+        description += "machine Info: osBuild: " + str(machine["osBuild"]) + "\n"
+        description += "machine Info: healthStatus: " + str(machine["healthStatus"]) + "\n"
+        description += "machine Info: deviceValue: " + str(machine["deviceValue"]) + "\n"
+        description += "machine Info: rbacGroupId: " + str(machine["rbacGroupId"]) + "\n"
+        description += "machine Info: rbacGroupName: " + str(machine["rbacGroupName"]) + "\n"
+        description += "machine Info: riskScore: " + str(machine["riskScore"]) + "\n"
+        description += "machine Info: exposureLevel: " + str(machine["exposureLevel"]) + "\n"
+        description += "machine Info: isAadJoined: " + str(machine["isAadJoined"]) + "\n"
+        description += "machine Info: aadDeviceId: " + str(machine["aadDeviceId"]) + "\n"
+        description += "machine Info: defenderAvStatus: " + str(machine["defenderAvStatus"]) + "\n"
+        description += "machine Info: onboardingStatus: " + str(machine["onboardingStatus"]) + "\n"
+        description += "machine Info: osArchitecture: " + str(machine["osArchitecture"]) + "\n"
+        description += "machine Info: managedBy: " + str(machine["managedBy"]) + "\n"
+        title = str(vulnerability["cveId"])
+        if str(machine["computerDnsName"]) != "null":
+            title = title + "_" + str(machine["computerDnsName"])
+        if str(machine["osPlatform"]) != "null":
+            title = title + "_" + str(machine["osPlatform"])
         finding = Finding(
             title=title + "_" + vulnerability["machineId"],
-            severity=self.severity_check(vulnerability['severity']),
+            severity=self.severity_check(vulnerability["severity"]),
             description=description,
             static_finding=False,
             dynamic_finding=True,
         )
-        if vulnerability['fixingKbId'] is not None:
-            finding.mitigation = vulnerability['fixingKbId']
-        if vulnerability['cveId'] is not None:
+        if vulnerability["fixingKbId"] is not None:
+            finding.mitigation = vulnerability["fixingKbId"]
+        if vulnerability["cveId"] is not None:
             finding.unsaved_vulnerability_ids = []
-            finding.unsaved_vulnerability_ids.append(vulnerability['cveId'])
+            finding.unsaved_vulnerability_ids.append(vulnerability["cveId"])
         self.findings.append(finding)
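# A minimal sketch of the append-then-populate pattern used just above, assuming
# only standard Python object semantics (the attribute names mirror the parser):
# self.findings stores a reference, so attributes assigned after the append are
# still visible through the list.
#
#     finding = Finding(...)
#     self.findings.append(finding)
#     finding.unsaved_endpoints = []        # same object as self.findings[-1]
#     finding.unsaved_endpoints.append(Endpoint(host="host.example"))  # illustrative host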
         finding.unsaved_endpoints = []
-        if machine['computerDnsName'] is not None:
-            finding.unsaved_endpoints.append(Endpoint(host=str(machine['computerDnsName'])))
-        if machine['lastIpAddress'] is not None:
-            finding.unsaved_endpoints.append(Endpoint(host=str(machine['lastIpAddress'])))
-        if machine['lastExternalIpAddress'] is not None:
-            finding.unsaved_endpoints.append(Endpoint(host=str(machine['lastExternalIpAddress'])))
+        if machine["computerDnsName"] is not None:
+            finding.unsaved_endpoints.append(Endpoint(host=str(machine["computerDnsName"])))
+        if machine["lastIpAddress"] is not None:
+            finding.unsaved_endpoints.append(Endpoint(host=str(machine["lastIpAddress"])))
+        if machine["lastExternalIpAddress"] is not None:
+            finding.unsaved_endpoints.append(Endpoint(host=str(machine["lastExternalIpAddress"])))

     def severity_check(self, input):
-        if input in ['Informational', 'Low', 'Medium', 'High', 'Critical']:
+        if input in ["Informational", "Low", "Medium", "High", "Critical"]:
             return input
         else:
             return "Informational"
diff --git a/dojo/tools/nancy/parser.py b/dojo/tools/nancy/parser.py
index 19534728c8..52b45bc52e 100644
--- a/dojo/tools/nancy/parser.py
+++ b/dojo/tools/nancy/parser.py
@@ -37,24 +37,24 @@ def get_items(self, vulnerable, test):
         findings = []
         for vuln in vulnerable:
             finding = None
-            severity = 'Info'
+            severity = "Info"
             # the tool does not define severity, however it
             # provides CVSSv3 vector which will calculate
             # severity dynamically on save()
             references = []
-            if vuln['Vulnerabilities']:
-                comp_name = vuln['Coordinates'].split(':')[1].split('@')[0]
-                comp_version = vuln['Coordinates'].split(':')[1].split('@')[1]
+            if vuln["Vulnerabilities"]:
+                comp_name = vuln["Coordinates"].split(":")[1].split("@")[0]
+                comp_version = vuln["Coordinates"].split(":")[1].split("@")[1]

-                references.append(vuln['Reference'])
+                references.append(vuln["Reference"])

-                for associated_vuln in vuln['Vulnerabilities']:
+                for associated_vuln in vuln["Vulnerabilities"]:
                     # create the finding object(s)
-                    references.append(associated_vuln['Reference'])
-                    vulnerability_ids = [associated_vuln['Cve']]
+                    references.append(associated_vuln["Reference"])
+                    vulnerability_ids = [associated_vuln["Cve"]]
                     finding = Finding(
-                        title=associated_vuln['Title'],
-                        description=associated_vuln['Description'],
+                        title=associated_vuln["Title"],
+                        description=associated_vuln["Description"],
                         test=test,
                         severity=severity,
                         component_name=comp_name,
@@ -71,14 +71,14 @@ def get_items(self, vulnerable, test):
                     finding.unsaved_vulnerability_ids = vulnerability_ids

                     # CVSSv3 vector
-                    if associated_vuln['CvssVector']:
+                    if associated_vuln["CvssVector"]:
                         finding.cvssv3 = CVSS3(
-                            associated_vuln['CvssVector']).clean_vector()
+                            associated_vuln["CvssVector"]).clean_vector()

                     # do we have a CWE?
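# Worked example for the split logic below, using an illustrative title of the
# form the tool emits:
#     "CWE-400: Uncontrolled Resource Consumption"
#     .split(":")[0]   -> "CWE-400"
#     .split("-")[1]   -> "400"
#     int("400")       -> 400, stored on finding.cwe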
-                    if associated_vuln['Title'].startswith('CWE-'):
-                        cwe = (associated_vuln['Title']
-                               .split(':')[0].split('-')[1])
+                    if associated_vuln["Title"].startswith("CWE-"):
+                        cwe = (associated_vuln["Title"]
+                               .split(":")[0].split("-")[1])
                         finding.cwe = int(cwe)
                     findings.append(finding)
diff --git a/dojo/tools/nikto/json_parser.py b/dojo/tools/nikto/json_parser.py
index a51deafce6..4a4c24cc53 100644
--- a/dojo/tools/nikto/json_parser.py
+++ b/dojo/tools/nikto/json_parser.py
@@ -19,7 +19,7 @@ def process_json(self, file, test):
                 f"**msg:** `{vulnerability.get('msg')}`",
                 f"**HTTP Method:** `{vulnerability.get('method')}`",
             ])
-            if vulnerability.get('OSVDB') is not None:
+            if vulnerability.get("OSVDB") is not None:
                 description += "\n" + f"**OSVDB:** `{vulnerability.get('OSVDB')}`"
             finding = Finding(
                 title=vulnerability.get("msg"),
diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py
index 787d696f46..8c4a80190d 100644
--- a/dojo/tools/noseyparker/parser.py
+++ b/dojo/tools/noseyparker/parser.py
@@ -42,21 +42,21 @@ def get_findings(self, file, test):
         for line in data:
             # Set rule to the current secret type (e.g. AWS S3 Bucket)
             try:
-                rule_name = line['rule_name']
-                secret = line['match_content']
+                rule_name = line["rule_name"]
+                secret = line["match_content"]
             except Exception:
                 msg = "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0"
                 raise ValueError(msg)

             # Set Finding details
-            for match in line['matches']:
+            for match in line["matches"]:
                 # The following path is to account for the variability in the JSON lines output
-                num_elements = len(match['provenance']) - 1
-                json_path = match['provenance'][num_elements]
+                num_elements = len(match["provenance"]) - 1
+                json_path = match["provenance"][num_elements]
                 title = f"Secret(s) Found in Repository with Commit ID {json_path['commit_provenance']['commit_metadata']['commit_id']}"
-                filepath = json_path['commit_provenance']['blob_path']
-                line_num = match['location']['source_span']['start']['line']
+                filepath = json_path["commit_provenance"]["blob_path"]
+                line_num = match["location"]["source_span"]["start"]["line"]
                 description = f"Secret found of type: {rule_name} \n" \
                               f"SECRET starts with: '{secret[:3]}' \n" \
                               f"Committer Name: {json_path['commit_provenance']['commit_metadata']['committer_name']} \n" \
@@ -81,7 +81,7 @@ def get_findings(self, file, test):
                     cwe=798,
                     title=title,
                     description=description,
-                    severity='High',
+                    severity="High",
                     mitigation="Reset the account/token and remove from source code. Store secrets/tokens/passwords in secret managers or secure vaults.",
                     date=datetime.today().strftime("%Y-%m-%d"),
                     verified=False,
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index 4ce0d22b74..cbded9e013 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, filename, test):
                 for template in content:
                     data.append(template)
             elif filecontent[0] == "{":
-                file = filecontent.split('\n')
+                file = filecontent.split("\n")
                 for line in file:
                     if line != "":
                         data.append(json.loads(line))
diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py
index 1a5cc9a056..4d3011d82f 100644
--- a/dojo/tools/openvas/csv_parser.py
+++ b/dojo/tools/openvas/csv_parser.py
@@ -95,7 +95,7 @@ def __init__(self):
         super().__init__()

     def map_column_value(self, finding, column_value):
-        cve_pattern = r'CVE-\d{4}-\d{4,7}'
+        cve_pattern = r"CVE-\d{4}-\d{4,7}"
         cves = re.findall(cve_pattern, column_value)
         for cve in cves:
             finding.unsaved_vulnerability_ids.append(cve)
diff --git a/dojo/tools/openvas/parser.py b/dojo/tools/openvas/parser.py
index ce548db587..a103a4d892 100644
--- a/dojo/tools/openvas/parser.py
+++ b/dojo/tools/openvas/parser.py
@@ -13,7 +13,7 @@ def get_description_for_scan_types(self, scan_type):
         return "Import CSV or XML output of Greenbone OpenVAS report."

     def get_findings(self, filename, test):
-        if str(filename.name).endswith('.csv'):
+        if str(filename.name).endswith(".csv"):
             return OpenVASCSVParser().get_findings(filename, test)
-        elif str(filename.name).endswith('.xml'):
+        elif str(filename.name).endswith(".xml"):
             return OpenVASXMLParser().get_findings(filename, test)
diff --git a/dojo/tools/redhatsatellite/parser.py b/dojo/tools/redhatsatellite/parser.py
index 6ea077f976..102f47876f 100644
--- a/dojo/tools/redhatsatellite/parser.py
+++ b/dojo/tools/redhatsatellite/parser.py
@@ -65,7 +65,7 @@ def get_findings(self, filename, test):
                 description += "**bugs:** " + str(bugs) + "\n"
             if module_streams != []:
                 description += "**module_streams:** " + str(module_streams) + "\n"
-            description += "**packages:** " + ', '.join(packages)
+            description += "**packages:** " + ", ".join(packages)
             find = Finding(
                 title=title,
                 test=test,
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py
index f311d03463..701dd78723 100644
--- a/dojo/tools/sarif/parser.py
+++ b/dojo/tools/sarif/parser.py
@@ -221,15 +221,15 @@ def get_snippet(result):
 def get_codeFlowsDescription(codeFlows):
     description = ""
     for codeFlow in codeFlows:
-        for threadFlow in codeFlow.get('threadFlows', []):
+        for threadFlow in codeFlow.get("threadFlows", []):
             if "locations" not in threadFlow:
                 continue

             description = f"**{_('Code flow')}:**\n"
             line = 1

-            for location in threadFlow.get('locations', []):
-                physicalLocation = location.get('location', {}).get('physicalLocation', {})
+            for location in threadFlow.get("locations", []):
+                physicalLocation = location.get("location", {}).get("physicalLocation", {})
                 region = physicalLocation.get("region", {})
                 uri = physicalLocation.get("artifactLocation").get("uri")
@@ -248,12 +248,12 @@ def get_codeFlowsDescription(codeFlows):
                 description += f"{line}. {uri}{start_line}{start_column}{snippet}\n"

-                if 'message' in location.get('location', {}):
-                    message_field = location.get('location', {}).get('message', {})
-                    if 'markdown' in message_field:
-                        message = message_field.get('markdown', '')
+                if "message" in location.get("location", {}):
+                    message_field = location.get("location", {}).get("message", {})
+                    if "markdown" in message_field:
+                        message = message_field.get("markdown", "")
                     else:
-                        message = message_field.get('text', '')
+                        message = message_field.get("text", "")

                     description += f"\t{message}\n"
@@ -443,7 +443,7 @@ def get_item(result, rules, artifacts, run_date):
     # manage tags provided in the report and rule and remove duplicated
     tags = list(set(get_properties_tags(rule) + get_properties_tags(result)))
-    tags = [s.removeprefix('external/cwe/') for s in tags]
+    tags = [s.removeprefix("external/cwe/") for s in tags]
     finding.tags = tags

     # manage fingerprints
diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py
index 9d92c6e3e9..b06a7e83fa 100644
--- a/dojo/tools/sonarqube/parser.py
+++ b/dojo/tools/sonarqube/parser.py
@@ -41,9 +41,9 @@ def get_findings(self, file, test):
             return []
         if file.name.endswith(".zip"):
             if str(file.__class__) == "":
-                input_zip = zipfile.ZipFile(file.name, 'r')
+                input_zip = zipfile.ZipFile(file.name, "r")
             else:
-                input_zip = zipfile.ZipFile(file, 'r')
+                input_zip = zipfile.ZipFile(file, "r")
             zipdata = {name: input_zip.read(name) for name in input_zip.namelist()}
             return SonarQubeRESTAPIZIP().get_items(zipdata, test, self.mode)
         else:
diff --git a/dojo/tools/sonarqube/sonarqube_restapi_json.py b/dojo/tools/sonarqube/sonarqube_restapi_json.py
index 409007e7b8..bb735f038c 100644
--- a/dojo/tools/sonarqube/sonarqube_restapi_json.py
+++ b/dojo/tools/sonarqube/sonarqube_restapi_json.py
@@ -62,20 +62,20 @@ def get_json_items(self, json_content, test, mode):
                 message = issue.get("message")
                 cwe = None
                 if "Category: CWE-" in message:
-                    cwe_pattern = r'Category: CWE-\d{1,5}'
+                    cwe_pattern = r"Category: CWE-\d{1,5}"
                     cwes = re.findall(cwe_pattern, message)
                     if cwes:
                         cwe = cwes[0].split("Category: CWE-")[1]
                 cvss = None
                 if "CVSS Score: " in message:
-                    cvss_pattern = r'CVSS Score: \d{1}.\d{1}'
+                    cvss_pattern = r"CVSS Score: \d{1}.\d{1}"
                     cvsss = re.findall(cvss_pattern, message)
                     if cvsss:
                         cvss = cvsss[0].split("CVSS Score: ")[1]
                 component_name = None
                 component_version = None
                 if "Filename: " in message and " | " in message:
-                    component_pattern = r'Filename: .* \| '
+                    component_pattern = r"Filename: .* \| "
                     comp = re.findall(component_pattern, message)
                     if comp:
                         component_result = comp[0].split("Filename: ")[1].split(" | ")[0]
@@ -119,22 +119,22 @@ def get_json_items(self, json_content, test, mode):
                 )
                 vulnids = []
                 if "Reference: CVE" in message:
-                    cve_pattern = r'Reference: CVE-\d{4}-\d{4,7}'
+                    cve_pattern = r"Reference: CVE-\d{4}-\d{4,7}"
                     cves = re.findall(cve_pattern, message)
                     for cve in cves:
                         vulnids.append(cve.split("Reference: ")[1])
                 if "References: CVE" in message:
-                    cve_pattern = r'References: CVE-\d{4}-\d{4,7}'
+                    cve_pattern = r"References: CVE-\d{4}-\d{4,7}"
                     cves = re.findall(cve_pattern, message)
                     for cve in cves:
                         vulnids.append(cve.split("References: ")[1])
                 if "Reference: GHSA" in message:
-                    cve_pattern = r'Reference: GHSA-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}'
+                    cve_pattern = r"Reference: GHSA-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}"
                     cves = re.findall(cve_pattern, message)
                     for cve in cves:
                         vulnids.append(cve.split("Reference: ")[1])
                 if "References: GHSA" in message:
-                    cve_pattern = r'References: GHSA-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}'
+                    cve_pattern = r"References: GHSA-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}-[23456789cfghjmpqrvwx]{4}"
                     cves = re.findall(cve_pattern, message)
                     for cve in cves:
                         vulnids.append(cve.split("References: ")[1])
diff --git a/dojo/tools/sonarqube/sonarqube_restapi_zip.py b/dojo/tools/sonarqube/sonarqube_restapi_zip.py
index 983678423a..3ad7ba0a9c 100644
--- a/dojo/tools/sonarqube/sonarqube_restapi_zip.py
+++ b/dojo/tools/sonarqube/sonarqube_restapi_zip.py
@@ -8,6 +8,6 @@ def get_items(self, files, test, mode):
         total_findings_per_file = []
         for dictkey in files.keys():
             if dictkey.endswith(".json"):
-                json_content = json.loads(files[dictkey].decode('ascii'))
+                json_content = json.loads(files[dictkey].decode("ascii"))
                 total_findings_per_file += SonarQubeRESTAPIJSON().get_json_items(json_content, test, mode)
         return total_findings_per_file
diff --git a/dojo/tools/ssh_audit/parser.py b/dojo/tools/ssh_audit/parser.py
index 4e3ddb4b36..02e4381263 100644
--- a/dojo/tools/ssh_audit/parser.py
+++ b/dojo/tools/ssh_audit/parser.py
@@ -39,13 +39,13 @@ def get_findings(self, filename, test):
         except ValueError:
             data = {}
         if data != {}:
-            title = data['banner']['raw']
-            for cve in data['cves']:
-                cvename = cve['name']
+            title = data["banner"]["raw"]
+            for cve in data["cves"]:
+                cvename = cve["name"]
                 description = [f"**CVE**: {cvename}"]
                 description.append(f"**Description**: {cve['description']}")
                 description.append(f"**Banner**: {title}")
-                severity = self.convert_cvss_score(raw_value=cve['cvssv2'])
+                severity = self.convert_cvss_score(raw_value=cve["cvssv2"])
                 finding = Finding(title=str(title) + "_" + str(cvename),
                                   test=test,
                                   description="\n".join(description),
@@ -53,15 +53,15 @@ def get_findings(self, filename, test):
                                   static_finding=False)
                 items.append(finding)
                 finding.unsaved_endpoints = []
-                endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                 finding.unsaved_endpoints.append(endpoint)
-            for kex in data['kex']:
-                if 'fail' in kex['notes'] and 'warn' in kex['notes']:
-                    kexname = kex['algorithm']
+            for kex in data["kex"]:
+                if "fail" in kex["notes"] and "warn" in kex["notes"]:
+                    kexname = kex["algorithm"]
                     description = [f"**Algorithm**: {kexname}"]
                     description.append(f"**Description Failure**: {kex['notes']['fail']}")
                     description.append(f"**Description Warning**: {kex['notes']['warn']}")
-                    if kex['notes'].get('info'):
+                    if kex["notes"].get("info"):
                         description.append(f"**Info**: {kex['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(kexname),
@@ -71,13 +71,13 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'fail' in kex['notes']:
-                    kexname = kex['algorithm']
+                elif "fail" in kex["notes"]:
+                    kexname = kex["algorithm"]
                     description = [f"**Algorithm**: {kexname}"]
                     description.append(f"**Description Failure**: {kex['notes']['fail']}")
-                    if kex['notes'].get('info'):
+                    if kex["notes"].get("info"):
                         description.append(f"**Info**: {kex['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(kexname),
@@ -87,13 +87,13 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'warn' in kex['notes']:
-                    kexname = kex['algorithm']
+                elif "warn" in kex["notes"]:
+                    kexname = kex["algorithm"]
                     description = [f"**Algorithm**: {kexname}"]
                     description.append(f"**Description Warning**: {kex['notes']['warn']}")
-                    if kex['notes'].get('info'):
+                    if kex["notes"].get("info"):
                         description.append(f"**Info**: {kex['notes']['info']}")
                     severity = "Medium"
                     finding = Finding(title=str(title) + "_" + str(kexname),
@@ -103,17 +103,17 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-            for key in data['key']:
-                if 'fail' in key['notes'] and 'warn' in key['notes']:
-                    keyname = key['algorithm']
+            for key in data["key"]:
+                if "fail" in key["notes"] and "warn" in key["notes"]:
+                    keyname = key["algorithm"]
                     description = [f"**Algorithm**: {keyname}"]
                     description.append(f"**Description Failure**: {key['notes']['fail']}")
                     description.append(f"**Description Warning**: {key['notes']['warn']}")
-                    if 'keysize' in key:
+                    if "keysize" in key:
                         description.append(f"**KeySize**: {key['keysize']}")
-                    if key['notes'].get('info'):
+                    if key["notes"].get("info"):
                         description.append(f"**Info**: {key['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(keyname),
@@ -123,15 +123,15 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'fail' in key['notes']:
-                    keyname = key['algorithm']
+                elif "fail" in key["notes"]:
+                    keyname = key["algorithm"]
                     description = [f"**Algorithm**: {keyname}"]
                     description.append(f"**Description Failure**: {key['notes']['fail']}")
-                    if 'keysize' in key:
+                    if "keysize" in key:
                         description.append(f"**KeySize**: {key['keysize']}")
-                    if key['notes'].get('info'):
+                    if key["notes"].get("info"):
                         description.append(f"**Info**: {key['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(keyname),
@@ -141,15 +141,15 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'warn' in key['notes']:
-                    keyname = key['algorithm']
+                elif "warn" in key["notes"]:
+                    keyname = key["algorithm"]
                     description = [f"**Algorithm**: {keyname}"]
                     description.append(f"**Description Warning**: {key['notes']['warn']}")
-                    if 'keysize' in key:
+                    if "keysize" in key:
                         description.append(f"**KeySize**: {key['keysize']}")
-                    if key['notes'].get('info'):
+                    if key["notes"].get("info"):
                         description.append(f"**Info**: {key['notes']['info']}")
                     severity = "Medium"
                     finding = Finding(title=str(title) + "_" + str(keyname),
@@ -159,15 +159,15 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-            for mac in data['mac']:
-                if 'fail' in mac['notes'] and 'warn' in mac['notes']:
-                    macname = mac['algorithm']
+            for mac in data["mac"]:
+                if "fail" in mac["notes"] and "warn" in mac["notes"]:
+                    macname = mac["algorithm"]
                     description = [f"**Algorithm**: {macname}"]
                     description.append(f"**Description Failure**: {mac['notes']['fail']}")
                     description.append(f"**Description Warning**: {mac['notes']['warn']}")
-                    if mac['notes'].get('info'):
+                    if mac["notes"].get("info"):
                         description.append(f"**Info**: {mac['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(macname),
@@ -177,13 +177,13 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'fail' in mac['notes']:
-                    macname = mac['algorithm']
+                elif "fail" in mac["notes"]:
+                    macname = mac["algorithm"]
                     description = [f"**Algorithm**: {macname}"]
                     description.append(f"**Description Failure**: {mac['notes']['fail']}")
-                    if mac['notes'].get('info'):
+                    if mac["notes"].get("info"):
                         description.append(f"**Info**: {mac['notes']['info']}")
                     severity = "High"
                     finding = Finding(title=str(title) + "_" + str(macname),
@@ -193,13 +193,13 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
-                elif 'warn' in mac['notes']:
-                    macname = mac['algorithm']
+                elif "warn" in mac["notes"]:
+                    macname = mac["algorithm"]
                     description = [f"**Algorithm**: {macname}"]
                     description.append(f"**Description Warning**: {mac['notes']['warn']}")
-                    if mac['notes'].get('info'):
+                    if mac["notes"].get("info"):
                         description.append(f"**Info**: {mac['notes']['info']}")
                     severity = "Medium"
                     finding = Finding(title=str(title) + "_" + str(macname),
@@ -209,6 +209,6 @@ def get_findings(self, filename, test):
                                       static_finding=False)
                     items.append(finding)
                     finding.unsaved_endpoints = []
-                    endpoint = Endpoint(host=data['target'].split(':')[0], port=data['target'].split(':')[1])
+                    endpoint = Endpoint(host=data["target"].split(":")[0], port=data["target"].split(":")[1])
                     finding.unsaved_endpoints.append(endpoint)
         return items
diff --git a/dojo/tools/sysdig_reports/parser.py b/dojo/tools/sysdig_reports/parser.py
index 7d1ad6dc89..bc2ebea455 100644
--- a/dojo/tools/sysdig_reports/parser.py
+++ b/dojo/tools/sysdig_reports/parser.py
@@ -24,10 +24,10 @@ def get_description_for_scan_types(self, scan_type):
     def get_findings(self, filename, test):
         if filename is None:
             return ()
-        if filename.name.lower().endswith('.csv'):
+        if filename.name.lower().endswith(".csv"):
             arr_data = CSVParser().parse(filename=filename)
             return self.parse_csv(arr_data=arr_data, test=test)
-        elif filename.name.lower().endswith('.json'):
+        elif filename.name.lower().endswith(".json"):
             scan_data = filename.read()
             try:
                 data = json.loads(str(scan_data, "utf-8"))
@@ -43,31 +43,31 @@ def parse_json(self, data, test):
             return []
         findings = []
         for item in vulnerability:
-            imageId = item.get('imageId', '')
-            imagePullString = item.get('imagePullString', '')
-            osName = item.get('osName', '')
-            k8sClusterName = item.get('k8sClusterName', '')
-            k8sNamespaceName = item.get('k8sNamespaceName', '')
-            k8sWorkloadType = item.get('k8sWorkloadType', '')
-            k8sWorkloadName = item.get('k8sWorkloadName', '')
-            k8sPodContainerName = item.get('k8sPodContainerName', '')
-            vulnName = item.get('vulnName', '')
-            vulnSeverity = item.get('vulnSeverity', '')
-            vulnLink = item.get('vulnLink', '')
-            vulnCvssVersion = item.get('vulnCvssVersion', '')
-            vulnCvssScore = item.get('vulnCvssScore', '')
-            vulnCvssVector = item.get('vulnCvssVector', '')
-            vulnDisclosureDate = item.get('vulnDisclosureDate', '')
-            vulnSolutionDate = item.get('vulnSolutionDate', '')
-            vulnExploitable = item.get('vulnExploitable', '')
-            vulnFixAvailable = item.get('vulnFixAvailable', '')
-            vulnFixVersion = item.get('vulnFixVersion', '')
-            packageName = item.get('packageName', '')
-            packageType = item.get('packageType', '')
-            packagePath = item.get('packagePath', '')
-            packageVersion = item.get('packageVersion', '')
-            packageSuggestedFix = item.get('packageSuggestedFix', '')
-            k8sPodCount = item.get('k8sPodCount', '')
+            imageId = item.get("imageId", "")
+            imagePullString = item.get("imagePullString", "")
+            osName = item.get("osName", "")
+            k8sClusterName = item.get("k8sClusterName", "")
+            k8sNamespaceName = item.get("k8sNamespaceName", "")
+            k8sWorkloadType = item.get("k8sWorkloadType", "")
+            k8sWorkloadName = item.get("k8sWorkloadName", "")
+            k8sPodContainerName = item.get("k8sPodContainerName", "")
+            vulnName = item.get("vulnName", "")
+            vulnSeverity = item.get("vulnSeverity", "")
+            vulnLink = item.get("vulnLink", "")
+            vulnCvssVersion = item.get("vulnCvssVersion", "")
+            vulnCvssScore = item.get("vulnCvssScore", "")
+            vulnCvssVector = item.get("vulnCvssVector", "")
+            vulnDisclosureDate = item.get("vulnDisclosureDate", "")
+            vulnSolutionDate = item.get("vulnSolutionDate", "")
+            vulnExploitable = item.get("vulnExploitable", "")
+            vulnFixAvailable = item.get("vulnFixAvailable", "")
+            vulnFixVersion = item.get("vulnFixVersion", "")
+            packageName = item.get("packageName", "")
+            packageType = item.get("packageType", "")
+            packagePath = item.get("packagePath", "")
+            packageVersion = item.get("packageVersion", "")
+            packageSuggestedFix = item.get("packageSuggestedFix", "")
+            k8sPodCount = item.get("k8sPodCount", "")
             description = ""
             description += "imageId: " + imageId + "\n"
             description += "imagePullString: " + imagePullString + "\n"
@@ -103,7 +103,7 @@ def parse_json(self, data, test):
                 component_name=packageName,
                 component_version=packageVersion,
             )
-            if vulnName != '':
+            if vulnName != "":
                 find.unsaved_vulnerability_ids = []
                 find.unsaved_vulnerability_ids.append(vulnName)
             findings.append(find)
@@ -178,7 +178,7 @@ def parse_csv(self, arr_data, test):
             finding.description += f"\n - **Publish Date:** {row.vuln_publish_date}"
             finding.description += f"\n - **CVSS Version:** {row.cvss_version}"
             finding.description += f"\n - **CVSS Vector:** {row.cvss_vector}"
-            if row.public_exploit != '':
+            if row.public_exploit != "":
                 finding.description += f"\n - **Public Exploit:** {row.public_exploit}"
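# The row attributes read here (public_exploit, package_path, and the rest) are
# produced by the CSV parser further down, which lower-cases every column header
# and copies the cell values onto a SysdigData record; empty cells arrive as ""
# rather than None, which is why these guards compare against the empty string.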
finding.description += "\n\n###Package Details" if row.package_type == "os": @@ -188,10 +188,10 @@ def parse_csv(self, arr_data, test): finding.description += f"\n - **Package Name:** {row.package_name}" finding.description += f"\n - **Package Version:** {row.package_version}" finding.description += f"\n - **In-Use:** {row.in_use}" - if row.package_path != '': + if row.package_path != "": finding.description += f"\n - **Package Path:** {row.package_path}" finding.file_path = row.package_path - if row.package_suggested_fix != '': + if row.package_suggested_fix != "": finding.mitigation = f"Package suggested fix version: {row.package_suggested_fix}" finding.description += f"\n - **Package suggested fix version:** {row.package_suggested_fix}" if row.package_type == "os": diff --git a/dojo/tools/sysdig_reports/sysdig_csv_parser.py b/dojo/tools/sysdig_reports/sysdig_csv_parser.py index 199baa6f9f..534b30c7e2 100644 --- a/dojo/tools/sysdig_reports/sysdig_csv_parser.py +++ b/dojo/tools/sysdig_reports/sysdig_csv_parser.py @@ -16,8 +16,8 @@ def parse(self, filename) -> SysdigData: content = filename.read() if isinstance(content, bytes): - content = content.decode('utf-8') - reader = csv.DictReader(io.StringIO(content), delimiter=',', quotechar='"') + content = content.decode("utf-8") + reader = csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"') # normalise on lower case for consistency reader.fieldnames = [name.lower() for name in reader.fieldnames] @@ -43,38 +43,38 @@ def parse(self, filename) -> SysdigData: csv_data_record = SysdigData() - csv_data_record.vulnerability_id = row.get('vulnerability id', '') - csv_data_record.severity = csv_data_record._map_severity(row.get('severity').upper()) - csv_data_record.package_name = row.get('package name', '') - csv_data_record.package_version = row.get('package version', '') - csv_data_record.package_type = row.get('package type', '') - csv_data_record.package_path = row.get('package path', '') - csv_data_record.image = row.get('image', '') - csv_data_record.os_name = row.get('os name', '') - csv_data_record.cvss_version = row.get('cvss version', '') - csv_data_record.cvss_score = row.get('cvss score', '') - csv_data_record.cvss_vector = row.get('cvss vector', '') - csv_data_record.vuln_link = row.get('vuln link', '') - csv_data_record.vuln_publish_date = row.get('vuln publish date', '') - csv_data_record.vuln_fix_date = row.get('vuln fix date', '') - csv_data_record.vuln_fix_version = row.get('fix version', '') - csv_data_record.public_exploit = row.get('public exploit', '') - csv_data_record.k8s_cluster_name = row.get('k8s cluster name', '') - csv_data_record.k8s_namespace_name = row.get('k8s namespace name', '') - csv_data_record.k8s_workload_type = row.get('k8s workload type', '') - csv_data_record.k8s_workload_name = row.get('k8s workload name', '') - csv_data_record.k8s_container_name = row.get('k8s container name', '') - csv_data_record.image_id = row.get('image id', '') - csv_data_record.k8s_pod_count = row.get('k8s pod count', '') - csv_data_record.package_suggested_fix = row.get('package suggested fix', '') - csv_data_record.in_use = row.get('in use', '') == 'TRUE' - csv_data_record.risk_accepted = row.get('risk accepted', '') == 'TRUE' - csv_data_record.registry_name = row.get('registry name', '') - csv_data_record.registry_image_repository = row.get('registry image repository', '') - csv_data_record.cloud_provider_name = row.get('cloud provider name', '') - csv_data_record.cloud_provider_account_id = row.get('cloud provider 
account ID', '') - csv_data_record.cloud_provider_region = row.get('cloud provider region', '') - csv_data_record.registry_vendor = row.get('registry vendor', '') + csv_data_record.vulnerability_id = row.get("vulnerability id", "") + csv_data_record.severity = csv_data_record._map_severity(row.get("severity").upper()) + csv_data_record.package_name = row.get("package name", "") + csv_data_record.package_version = row.get("package version", "") + csv_data_record.package_type = row.get("package type", "") + csv_data_record.package_path = row.get("package path", "") + csv_data_record.image = row.get("image", "") + csv_data_record.os_name = row.get("os name", "") + csv_data_record.cvss_version = row.get("cvss version", "") + csv_data_record.cvss_score = row.get("cvss score", "") + csv_data_record.cvss_vector = row.get("cvss vector", "") + csv_data_record.vuln_link = row.get("vuln link", "") + csv_data_record.vuln_publish_date = row.get("vuln publish date", "") + csv_data_record.vuln_fix_date = row.get("vuln fix date", "") + csv_data_record.vuln_fix_version = row.get("fix version", "") + csv_data_record.public_exploit = row.get("public exploit", "") + csv_data_record.k8s_cluster_name = row.get("k8s cluster name", "") + csv_data_record.k8s_namespace_name = row.get("k8s namespace name", "") + csv_data_record.k8s_workload_type = row.get("k8s workload type", "") + csv_data_record.k8s_workload_name = row.get("k8s workload name", "") + csv_data_record.k8s_container_name = row.get("k8s container name", "") + csv_data_record.image_id = row.get("image id", "") + csv_data_record.k8s_pod_count = row.get("k8s pod count", "") + csv_data_record.package_suggested_fix = row.get("package suggested fix", "") + csv_data_record.in_use = row.get("in use", "") == "TRUE" + csv_data_record.risk_accepted = row.get("risk accepted", "") == "TRUE" + csv_data_record.registry_name = row.get("registry name", "") + csv_data_record.registry_image_repository = row.get("registry image repository", "") + csv_data_record.cloud_provider_name = row.get("cloud provider name", "") + csv_data_record.cloud_provider_account_id = row.get("cloud provider account ID", "") + csv_data_record.cloud_provider_region = row.get("cloud provider region", "") + csv_data_record.registry_vendor = row.get("registry vendor", "") arr_csv_data.append(csv_data_record) diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index e4a3cd9cd8..208a6cd62d 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -67,11 +67,11 @@ def detect_delimiter(self, content: str): """Detect the delimiter of the CSV file""" if isinstance(content, bytes): content = content.decode("utf-8") - first_line = content.split('\n')[0] - if ';' in first_line: - return ';' + first_line = content.split("\n")[0] + if ";" in first_line: + return ";" else: - return ',' # default to comma if no semicolon found + return "," # default to comma if no semicolon found def get_findings(self, filename: str, test: Test): # Read the CSV diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py index e50ce0963b..defc54a922 100644 --- a/dojo/tools/trivy/parser.py +++ b/dojo/tools/trivy/parser.py @@ -363,7 +363,7 @@ def get_lines_as_string_table(self, lines): # Create the table string table_string = f"{header_row}\n" for item in lines: - row = "\t".join(str(item.get(header, '')) for header in headers) + row = "\t".join(str(item.get(header, "")) for header in headers) table_string += f"{row}\n" return table_string diff --git 
a/dojo/tools/utils.py b/dojo/tools/utils.py index 4820382ef8..addb8c8549 100644 --- a/dojo/tools/utils.py +++ b/dojo/tools/utils.py @@ -12,15 +12,15 @@ def get_npm_cwe(item_node): "cwe": "CWE-1234" "cwe": "[\"CWE-173\",\"CWE-200\",\"CWE-601\"]" (or "[]") """ - cwe_node = item_node.get('cwe') + cwe_node = item_node.get("cwe") if cwe_node: if isinstance(cwe_node, list): return int(cwe_node[0][4:]) - elif cwe_node.startswith('CWE-'): + elif cwe_node.startswith("CWE-"): cwe_string = cwe_node[4:] if cwe_string: return int(cwe_string) - elif cwe_node.startswith('['): + elif cwe_node.startswith("["): cwe = json.loads(cwe_node) if cwe: return int(cwe[0][4:]) diff --git a/dojo/tools/wazuh/parser.py b/dojo/tools/wazuh/parser.py index dcdf42effa..ae4bf98c22 100644 --- a/dojo/tools/wazuh/parser.py +++ b/dojo/tools/wazuh/parser.py @@ -61,7 +61,7 @@ def get_findings(self, file, test): dupe_key = title + cve + agent_name + package_name + package_version else: dupe_key = title + cve + package_name + package_version - dupe_key = hashlib.sha256(dupe_key.encode('utf-8')).hexdigest() + dupe_key = hashlib.sha256(dupe_key.encode("utf-8")).hexdigest() if dupe_key in dupes: find = dupes[dupe_key] diff --git a/dojo/tools/yarn_audit/parser.py b/dojo/tools/yarn_audit/parser.py index ff26ba37b4..b13c2ffd68 100644 --- a/dojo/tools/yarn_audit/parser.py +++ b/dojo/tools/yarn_audit/parser.py @@ -22,11 +22,11 @@ def get_findings(self, json_output, test): if isinstance(lines, bytes): lines = lines.decode("utf-8") # passes in unittests, but would fail in production if '"type"' in lines: - lines = lines.split('\n') + lines = lines.split("\n") tree = (json.loads(line) for line in lines if "{" in line) return self.get_items_yarn(tree, test) elif '"value"' in lines: - lines = lines.split('\n') + lines = lines.split("\n") tree = (json.loads(line) for line in lines if "{" in line) return self.get_items_yarn2(tree, test) else: @@ -57,8 +57,8 @@ def get_items_yarn2(self, tree, test): childissue = child.get("Issue") childseverity = child.get("Severity") child_vuln_version = child.get("Vulnerable Versions") - child_tree_versions = ', '.join(set(child.get("Tree Versions"))) - child_dependents = ', '.join(set(child.get("Dependents"))) + child_tree_versions = ", ".join(set(child.get("Tree Versions"))) + child_dependents = ", ".join(set(child.get("Dependents"))) description += childissue + "\n" description += "**Vulnerable Versions:** " + child_vuln_version + "\n" description += "**Dependents:** " + child_dependents + "\n" diff --git a/dojo/urls.py b/dojo/urls.py index dd438c8f72..6845887860 100644 --- a/dojo/urls.py +++ b/dojo/urls.py @@ -111,70 +111,70 @@ admin.autodiscover() # custom handlers -handler500 = 'dojo.views.custom_error_view' -handler400 = 'dojo.views.custom_bad_request_view' +handler500 = "dojo.views.custom_error_view" +handler400 = "dojo.views.custom_bad_request_view" # v2 api written in django-rest-framework v2_api = DefaultRouter() -v2_api.register(r'technologies', AppAnalysisViewSet) -v2_api.register(r'configuration_permissions', ConfigurationPermissionViewSet) -v2_api.register(r'credentials', CredentialsViewSet) -v2_api.register(r'credential_mappings', CredentialsMappingViewSet) -v2_api.register(r'endpoints', EndPointViewSet) -v2_api.register(r'endpoint_meta_import', EndpointMetaImporterView, basename='endpointmetaimport') -v2_api.register(r'endpoint_status', EndpointStatusViewSet) -v2_api.register(r'engagements', EngagementViewSet) -v2_api.register(r'development_environments', DevelopmentEnvironmentViewSet) 
-v2_api.register(r'finding_templates', FindingTemplatesViewSet) -v2_api.register(r'findings', FindingViewSet, basename='finding') -v2_api.register(r'jira_configurations', JiraInstanceViewSet) # backwards compatibility -v2_api.register(r'jira_instances', JiraInstanceViewSet) -v2_api.register(r'jira_finding_mappings', JiraIssuesViewSet) -v2_api.register(r'jira_product_configurations', JiraProjectViewSet) # backwards compatibility -v2_api.register(r'jira_projects', JiraProjectViewSet) -v2_api.register(r'products', ProductViewSet) -v2_api.register(r'product_types', ProductTypeViewSet) -v2_api.register(r'dojo_groups', DojoGroupViewSet) -v2_api.register(r'dojo_group_members', DojoGroupMemberViewSet) -v2_api.register(r'product_type_members', ProductTypeMemberViewSet) -v2_api.register(r'product_members', ProductMemberViewSet) -v2_api.register(r'product_type_groups', ProductTypeGroupViewSet) -v2_api.register(r'product_groups', ProductGroupViewSet) -v2_api.register(r'roles', RoleViewSet) -v2_api.register(r'global_roles', GlobalRoleViewSet) -v2_api.register(r'sla_configurations', SLAConfigurationViewset) -v2_api.register(r'sonarqube_issues', SonarqubeIssueViewSet) -v2_api.register(r'sonarqube_transitions', SonarqubeIssueTransitionViewSet) -v2_api.register(r'product_api_scan_configurations', ProductAPIScanConfigurationViewSet) -v2_api.register(r'stub_findings', StubFindingsViewSet) -v2_api.register(r'tests', TestsViewSet) -v2_api.register(r'test_types', TestTypesViewSet) -v2_api.register(r'test_imports', TestImportViewSet) -v2_api.register(r'tool_configurations', ToolConfigurationsViewSet) -v2_api.register(r'tool_product_settings', ToolProductSettingsViewSet) -v2_api.register(r'tool_types', ToolTypesViewSet) -v2_api.register(r'users', UsersViewSet) -v2_api.register(r'user_contact_infos', UserContactInfoViewSet) -v2_api.register(r'import-scan', ImportScanView, basename='importscan') -v2_api.register(r'reimport-scan', ReImportScanView, basename='reimportscan') -v2_api.register(r'metadata', DojoMetaViewSet, basename='metadata') -v2_api.register(r'notes', NotesViewSet) -v2_api.register(r'note_type', NoteTypeViewSet) -v2_api.register(r'system_settings', SystemSettingsViewSet) -v2_api.register(r'regulations', RegulationsViewSet) -v2_api.register(r'risk_acceptance', RiskAcceptanceViewSet) -v2_api.register(r'language_types', LanguageTypeViewSet) -v2_api.register(r'languages', LanguageViewSet) -v2_api.register(r'import-languages', ImportLanguagesView, basename='importlanguages') -v2_api.register(r'notifications', NotificationsViewSet, basename='notifications') -v2_api.register(r'engagement_presets', EngagementPresetsViewset) -v2_api.register(r'network_locations', NetworkLocationsViewset) -v2_api.register(r'questionnaire_answers', QuestionnaireAnswerViewSet) -v2_api.register(r'questionnaire_answered_questionnaires', QuestionnaireAnsweredSurveyViewSet) -v2_api.register(r'questionnaire_engagement_questionnaires', QuestionnaireEngagementSurveyViewSet) -v2_api.register(r'questionnaire_general_questionnaires', QuestionnaireGeneralSurveyViewSet) -v2_api.register(r'questionnaire_questions', QuestionnaireQuestionViewSet) -v2_api.register(r'announcements', AnnouncementViewSet) +v2_api.register(r"announcements", AnnouncementViewSet, basename="announcement") +v2_api.register(r"configuration_permissions", ConfigurationPermissionViewSet, basename="permission") +v2_api.register(r"credential_mappings", CredentialsMappingViewSet, basename="cred_mapping") +v2_api.register(r"credentials", CredentialsViewSet, 
basename="cred_user") +v2_api.register(r"development_environments", DevelopmentEnvironmentViewSet, basename="development_environment") +v2_api.register(r"dojo_groups", DojoGroupViewSet, basename="dojo_group") +v2_api.register(r"dojo_group_members", DojoGroupMemberViewSet, basename="dojo_group_member") +v2_api.register(r"endpoints", EndPointViewSet, basename="endpoint") +v2_api.register(r"endpoint_meta_import", EndpointMetaImporterView, basename="endpointmetaimport") +v2_api.register(r"endpoint_status", EndpointStatusViewSet, basename="endpoint_status") +v2_api.register(r"engagements", EngagementViewSet, basename="engagement") +v2_api.register(r"engagement_presets", EngagementPresetsViewset, basename="engagement_presets") +v2_api.register(r"finding_templates", FindingTemplatesViewSet, basename="finding_template") +v2_api.register(r"findings", FindingViewSet, basename="finding") +v2_api.register(r"global_roles", GlobalRoleViewSet, basename="global_role") +v2_api.register(r"import-languages", ImportLanguagesView, basename="importlanguages") +v2_api.register(r"import-scan", ImportScanView, basename="importscan") +v2_api.register(r"jira_instances", JiraInstanceViewSet, basename="jira_instance") +v2_api.register(r"jira_configurations", JiraInstanceViewSet, basename="jira_configurations") # backwards compatibility +v2_api.register(r"jira_finding_mappings", JiraIssuesViewSet, basename="jira_issue") +v2_api.register(r"jira_product_configurations", JiraProjectViewSet, basename="jira_product_configurations") # backwards compatibility +v2_api.register(r"jira_projects", JiraProjectViewSet, basename="jira_project") +v2_api.register(r"languages", LanguageViewSet, basename="languages") +v2_api.register(r"language_types", LanguageTypeViewSet, basename="language_type") +v2_api.register(r"metadata", DojoMetaViewSet, basename="metadata") +v2_api.register(r"network_locations", NetworkLocationsViewset, basename="network_locations") +v2_api.register(r"notes", NotesViewSet, basename="notes") +v2_api.register(r"note_type", NoteTypeViewSet, basename="note_type") +v2_api.register(r"notifications", NotificationsViewSet, basename="notifications") +v2_api.register(r"products", ProductViewSet, basename="product") +v2_api.register(r"product_api_scan_configurations", ProductAPIScanConfigurationViewSet, basename="product_api_scan_configuration") +v2_api.register(r"product_groups", ProductGroupViewSet, basename="product_group") +v2_api.register(r"product_members", ProductMemberViewSet, basename="product_member") +v2_api.register(r"product_types", ProductTypeViewSet, basename="product_type") +v2_api.register(r"product_type_members", ProductTypeMemberViewSet, basename="product_type_member") +v2_api.register(r"product_type_groups", ProductTypeGroupViewSet, basename="product_type_group") +v2_api.register(r"regulations", RegulationsViewSet, basename="regulations") +v2_api.register(r"reimport-scan", ReImportScanView, basename="reimportscan") +v2_api.register(r"risk_acceptance", RiskAcceptanceViewSet, basename="risk_acceptance") +v2_api.register(r"roles", RoleViewSet, basename="role") +v2_api.register(r"sla_configurations", SLAConfigurationViewset, basename="sla_configurations") +v2_api.register(r"sonarqube_issues", SonarqubeIssueViewSet, basename="sonarqube_issue") +v2_api.register(r"sonarqube_transitions", SonarqubeIssueTransitionViewSet, basename="sonarqube_issue_transition") +v2_api.register(r"stub_findings", StubFindingsViewSet, basename="stub_finding") +v2_api.register(r"system_settings", SystemSettingsViewSet, 
basename="system_settings") +v2_api.register(r"technologies", AppAnalysisViewSet, basename="app_analysis") +v2_api.register(r"tests", TestsViewSet, basename="test") +v2_api.register(r"test_types", TestTypesViewSet, basename="test_type") +v2_api.register(r"test_imports", TestImportViewSet, basename="test_imports") +v2_api.register(r"tool_configurations", ToolConfigurationsViewSet, basename="tool_configuration") +v2_api.register(r"tool_product_settings", ToolProductSettingsViewSet, basename="tool_product_settings") +v2_api.register(r"tool_types", ToolTypesViewSet, basename="tool_type") +v2_api.register(r"users", UsersViewSet, basename="user") +v2_api.register(r"user_contact_infos", UserContactInfoViewSet, basename="usercontactinfo") +v2_api.register(r"questionnaire_answers", QuestionnaireAnswerViewSet, basename="answer") +v2_api.register(r"questionnaire_answered_questionnaires", QuestionnaireAnsweredSurveyViewSet, basename="answered_survey") +v2_api.register(r"questionnaire_engagement_questionnaires", QuestionnaireEngagementSurveyViewSet, basename="engagement_survey") +v2_api.register(r"questionnaire_general_questionnaires", QuestionnaireGeneralSurveyViewSet, basename="general_survey") +v2_api.register(r"questionnaire_questions", QuestionnaireQuestionViewSet, basename="question") ur = [] ur += dev_env_urls ur += endpoint_urls @@ -211,60 +211,60 @@ api_v2_urls = [ # Django Rest Framework API v2 - re_path(r'^{}api/v2/'.format(get_system_setting('url_prefix')), include(v2_api.urls)), - re_path(r'^{}api/v2/user_profile/'.format(get_system_setting('url_prefix')), UserProfileView.as_view(), name='user_profile'), + re_path(r"^{}api/v2/".format(get_system_setting("url_prefix")), include(v2_api.urls)), + re_path(r"^{}api/v2/user_profile/".format(get_system_setting("url_prefix")), UserProfileView.as_view(), name="user_profile"), ] -if hasattr(settings, 'API_TOKENS_ENABLED'): +if hasattr(settings, "API_TOKENS_ENABLED"): if settings.API_TOKENS_ENABLED: api_v2_urls += [ re_path( f"^{get_system_setting('url_prefix')}api/v2/api-token-auth/", tokenviews.obtain_auth_token, - name='api-token-auth', + name="api-token-auth", ), ] urlpatterns = [] # sometimes urlpatterns needed be added from local_settings.py before other URLs of core dojo -if hasattr(settings, 'PRELOAD_URL_PATTERNS'): +if hasattr(settings, "PRELOAD_URL_PATTERNS"): urlpatterns += settings.PRELOAD_URL_PATTERNS urlpatterns += [ # action history - re_path(r'^{}history/(?P\d+)/(?P\d+)$'.format(get_system_setting('url_prefix')), views.action_history, name='action_history'), - re_path(r'^{}'.format(get_system_setting('url_prefix')), include(ur)), + re_path(r"^{}history/(?P\d+)/(?P\d+)$".format(get_system_setting("url_prefix")), views.action_history, name="action_history"), + re_path(r"^{}".format(get_system_setting("url_prefix")), include(ur)), # drf-spectacular = OpenAPI3 - re_path(r'^{}api/v2/oa3/schema/'.format(get_system_setting('url_prefix')), SpectacularAPIView.as_view(), name='schema_oa3'), - re_path(r'^{}api/v2/oa3/swagger-ui/'.format(get_system_setting('url_prefix')), SpectacularSwaggerView.as_view(url=get_system_setting('url_prefix') + '/api/v2/oa3/schema/?format=json'), name='swagger-ui_oa3'), + re_path(r"^{}api/v2/oa3/schema/".format(get_system_setting("url_prefix")), SpectacularAPIView.as_view(), name="schema_oa3"), + re_path(r"^{}api/v2/oa3/swagger-ui/".format(get_system_setting("url_prefix")), SpectacularSwaggerView.as_view(url=get_system_setting("url_prefix") + "/api/v2/oa3/schema/?format=json"), name="swagger-ui_oa3"), - 
re_path(r'^robots.txt', lambda x: HttpResponse("User-Agent: *\nDisallow: /", content_type="text/plain"), name="robots_file"), - re_path(r'^manage_files/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.manage_files, name='manage_files'), - re_path(r'^access_file/(?P<fid>\d+)/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.access_file, name='access_file'), - re_path(r'^{}/(?P<path>.*)$'.format(settings.MEDIA_URL.strip('/')), views.protected_serve, {'document_root': settings.MEDIA_ROOT}), + re_path(r"^robots.txt", lambda x: HttpResponse("User-Agent: *\nDisallow: /", content_type="text/plain"), name="robots_file"), + re_path(r"^manage_files/(?P<oid>\d+)/(?P<obj_type>\w+)$", views.manage_files, name="manage_files"), + re_path(r"^access_file/(?P<fid>\d+)/(?P<oid>\d+)/(?P<obj_type>\w+)$", views.access_file, name="access_file"), + re_path(r"^{}/(?P<path>.*)$".format(settings.MEDIA_URL.strip("/")), views.protected_serve, {"document_root": settings.MEDIA_ROOT}), ] urlpatterns += api_v2_urls urlpatterns += survey_urls -if hasattr(settings, 'DJANGO_METRICS_ENABLED'): +if hasattr(settings, "DJANGO_METRICS_ENABLED"): if settings.DJANGO_METRICS_ENABLED: - urlpatterns += [re_path(r'^{}django_metrics/'.format(get_system_setting('url_prefix')), include('django_prometheus.urls'))] + urlpatterns += [re_path(r"^{}django_metrics/".format(get_system_setting("url_prefix")), include("django_prometheus.urls"))] -if hasattr(settings, 'SAML2_ENABLED'): +if hasattr(settings, "SAML2_ENABLED"): if settings.SAML2_ENABLED: # django saml2 - urlpatterns += [re_path(r'^saml2/', include('djangosaml2.urls'))] + urlpatterns += [re_path(r"^saml2/", include("djangosaml2.urls"))] -if hasattr(settings, 'DJANGO_ADMIN_ENABLED'): +if hasattr(settings, "DJANGO_ADMIN_ENABLED"): if settings.DJANGO_ADMIN_ENABLED: # django admin - urlpatterns += [re_path(r'^{}admin/'.format(get_system_setting('url_prefix')), admin.site.urls)] + urlpatterns += [re_path(r"^{}admin/".format(get_system_setting("url_prefix")), admin.site.urls)] # sometimes urlpatterns need to be added from local_settings.py to avoid having to modify core defect dojo files -if hasattr(settings, 'EXTRA_URL_PATTERNS'): +if hasattr(settings, "EXTRA_URL_PATTERNS"): urlpatterns += settings.EXTRA_URL_PATTERNS diff --git a/dojo/user/queries.py b/dojo/user/queries.py index 1242dbb7dd..0a6b414388 100644 --- a/dojo/user/queries.py +++ b/dojo/user/queries.py @@ -19,12 +19,12 @@ def get_authorized_users_for_product_type(users, product_type, permission): roles = get_roles_for_permission(permission) product_type_members = Product_Type_Member.objects \ .filter(product_type=product_type, role__in=roles) \ - .select_related('user') + .select_related("user") product_type_groups = Product_Type_Group.objects \ .filter(product_type=product_type, role__in=roles) group_members = Dojo_Group_Member.objects \ .filter(group__in=[ptg.group for ptg in product_type_groups]) \ - .select_related('user') + .select_related("user") return users.filter(Q(id__in=[ptm.user.id for ptm in product_type_members]) | Q(id__in=[gm.user.id for gm in group_members]) | Q(global_role__role__in=roles) @@ -38,10 +38,10 @@ def get_authorized_users_for_product_and_product_type(users, product, permission roles = get_roles_for_permission(permission) product_members = Product_Member.objects \ .filter(product=product, role__in=roles) \ - .select_related('user') + .select_related("user") product_type_members = Product_Type_Member.objects \ .filter(product_type=product.prod_type, role__in=roles) \ - .select_related('user') + .select_related("user") product_groups = Product_Group.objects \ .filter(product=product, role__in=roles)
product_type_groups = Product_Type_Group.objects \ @@ -50,7 +50,7 @@ def get_authorized_users_for_product_and_product_type(users, product, permission .filter( Q(group__in=[pg.group for pg in product_groups]) | Q(group__in=[ptg.group for ptg in product_type_groups])) \ - .select_related('user') + .select_related("user") return users.filter(Q(id__in=[pm.user.id for pm in product_members]) | Q(id__in=[ptm.user.id for ptm in product_type_members]) | Q(id__in=[gm.user.id for gm in group_members]) @@ -70,21 +70,21 @@ def get_authorized_users(permission, user=None): if user.is_anonymous: return Dojo_User.objects.none() - users = Dojo_User.objects.all().order_by('first_name', 'last_name', 'username') + users = Dojo_User.objects.all().order_by("first_name", "last_name", "username") if user.is_superuser or user_has_global_permission(user, permission): return users - authorized_products = get_authorized_products(permission).values('id') - authorized_product_types = get_authorized_product_types(permission).values('id') + authorized_products = get_authorized_products(permission).values("id") + authorized_product_types = get_authorized_product_types(permission).values("id") roles = get_roles_for_permission(permission) product_members = Product_Member.objects \ .filter(product_id__in=authorized_products, role__in=roles) \ - .select_related('user') + .select_related("user") product_type_members = Product_Type_Member.objects \ .filter(product_type_id__in=authorized_product_types, role__in=roles) \ - .select_related('user') + .select_related("user") product_groups = Product_Group.objects \ .filter(product_id__in=authorized_products, role__in=roles) product_type_groups = Product_Type_Group.objects \ @@ -93,7 +93,7 @@ def get_authorized_users(permission, user=None): .filter( Q(group__in=[pg.group for pg in product_groups]) | Q(group__in=[ptg.group for ptg in product_type_groups])) \ - .select_related('user') + .select_related("user") return users.filter(Q(id__in=[pm.user.id for pm in product_members]) | Q(id__in=[ptm.user.id for ptm in product_type_members]) | Q(id__in=[gm.user.id for gm in group_members]) diff --git a/dojo/user/urls.py b/dojo/user/urls.py index 8dbf0b0686..01971b808d 100644 --- a/dojo/user/urls.py +++ b/dojo/user/urls.py @@ -7,52 +7,52 @@ urlpatterns = [ # social-auth-django required url package - re_path('', include('social_django.urls', namespace='social')), + re_path("", include("social_django.urls", namespace="social")), # user specific - re_path(r'^login$', views.login_view, name='login'), - re_path(r'^logout$', views.logout_view, name='logout'), - re_path(r'^alerts$', views.alerts, name='alerts'), - re_path(r'^alerts/json$', views.alerts_json, name='alerts_json'), - re_path(r'^alerts/count$', views.alertcount, name='alertcount'), - re_path(r'^delete_alerts$', views.delete_alerts, name='delete_alerts'), - re_path(r'^profile$', views.view_profile, name='view_profile'), - re_path(r'^change_password$', views.change_password, name='change_password'), - re_path(r'^user$', views.user, name='users'), - re_path(r'^user/add$', views.add_user, name='add_user'), - re_path(r'^user/(?P<uid>\d+)$', views.view_user, name='view_user'), - re_path(r'^user/(?P<uid>\d+)/edit$', views.edit_user, name='edit_user'), - re_path(r'^user/(?P<uid>\d+)/delete', views.delete_user, name='delete_user'), - re_path(r'^user/(?P<uid>\d+)/add_product_type_member$', views.add_product_type_member, name='add_product_type_member_user'), - re_path(r'^user/(?P<uid>\d+)/add_product_member$', views.add_product_member, name='add_product_member_user'), -
re_path(r'^user/(?P<uid>\d+)/add_group_member$', views.add_group_member, name='add_group_member_user'), - re_path(r'^user/(?P<uid>\d+)/edit_permissions$', views.edit_permissions, name='edit_user_permissions'), + re_path(r"^login$", views.login_view, name="login"), + re_path(r"^logout$", views.logout_view, name="logout"), + re_path(r"^alerts$", views.alerts, name="alerts"), + re_path(r"^alerts/json$", views.alerts_json, name="alerts_json"), + re_path(r"^alerts/count$", views.alertcount, name="alertcount"), + re_path(r"^delete_alerts$", views.delete_alerts, name="delete_alerts"), + re_path(r"^profile$", views.view_profile, name="view_profile"), + re_path(r"^change_password$", views.change_password, name="change_password"), + re_path(r"^user$", views.user, name="users"), + re_path(r"^user/add$", views.add_user, name="add_user"), + re_path(r"^user/(?P<uid>\d+)$", views.view_user, name="view_user"), + re_path(r"^user/(?P<uid>\d+)/edit$", views.edit_user, name="edit_user"), + re_path(r"^user/(?P<uid>\d+)/delete", views.delete_user, name="delete_user"), + re_path(r"^user/(?P<uid>\d+)/add_product_type_member$", views.add_product_type_member, name="add_product_type_member_user"), + re_path(r"^user/(?P<uid>\d+)/add_product_member$", views.add_product_member, name="add_product_member_user"), + re_path(r"^user/(?P<uid>\d+)/add_group_member$", views.add_group_member, name="add_group_member_user"), + re_path(r"^user/(?P<uid>\d+)/edit_permissions$", views.edit_permissions, name="edit_user_permissions"), ] if settings.FORGOT_PASSWORD: urlpatterns.extend([ - re_path(r'^password_reset/$', views.DojoPasswordResetView.as_view( - template_name='login/password_reset.html', + re_path(r"^password_reset/$", views.DojoPasswordResetView.as_view( + template_name="login/password_reset.html", ), name="password_reset"), - re_path(r'^password_reset/done/$', auth_views.PasswordResetDoneView.as_view( - template_name='login/password_reset_done.html', - ), name='password_reset_done'), - re_path(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,40})/$', auth_views.PasswordResetConfirmView.as_view( - template_name='login/password_reset_confirm.html', - ), name='password_reset_confirm'), - re_path(r'^reset/done/$', auth_views.PasswordResetCompleteView.as_view( - template_name='login/password_reset_complete.html', - ), name='password_reset_complete'), + re_path(r"^password_reset/done/$", auth_views.PasswordResetDoneView.as_view( + template_name="login/password_reset_done.html", + ), name="password_reset_done"), + re_path(r"^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,40})/$", auth_views.PasswordResetConfirmView.as_view( + template_name="login/password_reset_confirm.html", + ), name="password_reset_confirm"), + re_path(r"^reset/done/$", auth_views.PasswordResetCompleteView.as_view( + template_name="login/password_reset_complete.html", + ), name="password_reset_complete"), ]) if settings.FORGOT_USERNAME: urlpatterns.extend([ - re_path(r'^forgot_username_done/$', auth_views.PasswordResetDoneView.as_view( - template_name='login/forgot_username_done.html', + re_path(r"^forgot_username_done/$", auth_views.PasswordResetDoneView.as_view( + template_name="login/forgot_username_done.html", ), name="forgot_username_done"), - re_path(r'^forgot_username/$', views.DojoForgotUsernameView.as_view( - template_name='login/forgot_username.html', + re_path(r"^forgot_username/$", views.DojoForgotUsernameView.as_view( + template_name="login/forgot_username.html", success_url=reverse_lazy("forgot_username_done"), ), name="forgot_username"), ]) if
settings.API_TOKENS_ENABLED: - urlpatterns += [re_path(r'^api/key-v2$', views.api_v2_key, name='api_v2_key')] + urlpatterns += [re_path(r"^api/key-v2$", views.api_v2_key, name="api_v2_key")] diff --git a/dojo/user/utils.py b/dojo/user/utils.py index 1c48859dba..b4c1dd7b38 100644 --- a/dojo/user/utils.py +++ b/dojo/user/utils.py @@ -3,28 +3,28 @@ class Permission_Helper: def __init__(self, *args, **kwargs): - self.name = kwargs.pop('name') - self.app = kwargs.pop('app') - self.view = kwargs.pop('view', False) - self.add = kwargs.pop('add', False) - self.change = kwargs.pop('change', False) - self.delete = kwargs.pop('delete', False) + self.name = kwargs.pop("name") + self.app = kwargs.pop("app") + self.view = kwargs.pop("view", False) + self.add = kwargs.pop("add", False) + self.change = kwargs.pop("change", False) + self.delete = kwargs.pop("delete", False) def display_name(self): - if self.name == 'bannerconf': - return 'Login Banner' - elif self.name == 'cred user': - return 'Credentials' - elif self.name == 'github conf': - return 'GitHub Configurations' - elif self.name == 'engagement survey': - return 'Questionnaires' - elif self.name == 'permission': - return 'Configuration Permissions' - elif self.name == 'sla configuration': - return 'SLA Configurations' + if self.name == "bannerconf": + return "Login Banner" + elif self.name == "cred user": + return "Credentials" + elif self.name == "github conf": + return "GitHub Configurations" + elif self.name == "engagement survey": + return "Questionnaires" + elif self.name == "permission": + return "Configuration Permissions" + elif self.name == "sla configuration": + return "SLA Configurations" else: - return self.name.title() + 's' + return self.name.title() + "s" def view_codename(self): if self.view: @@ -65,57 +65,57 @@ def codenames(self): def get_configuration_permissions_fields(): - if get_system_setting('enable_github'): + if get_system_setting("enable_github"): github_permissions = [ - Permission_Helper(name='github conf', app='dojo', view=True, add=True, delete=True), + Permission_Helper(name="github conf", app="dojo", view=True, add=True, delete=True), ] else: github_permissions = [] - if get_system_setting('enable_google_sheets'): + if get_system_setting("enable_google_sheets"): google_sheet_permissions = [ - Permission_Helper(name='google sheet', app='dojo', change=True), + Permission_Helper(name="google sheet", app="dojo", change=True), ] else: google_sheet_permissions = [] - if get_system_setting('enable_jira'): + if get_system_setting("enable_jira"): jira_permissions = [ - Permission_Helper(name='jira instance', app='dojo', view=True, add=True, change=True, delete=True), + Permission_Helper(name="jira instance", app="dojo", view=True, add=True, change=True, delete=True), ] else: jira_permissions = [] - if get_system_setting('enable_questionnaires'): + if get_system_setting("enable_questionnaires"): questionnaire_permissions = [ - Permission_Helper(name='engagement survey', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='question', app='dojo', view=True, add=True, change=True), + Permission_Helper(name="engagement survey", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="question", app="dojo", view=True, add=True, change=True), ] else: questionnaire_permissions = [] rules_permissions = [] permission_fields = [ - Permission_Helper(name='cred user', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='development 
environment', app='dojo', add=True, change=True, delete=True), - Permission_Helper(name='finding template', app='dojo', view=True, add=True, change=True, delete=True)] + \ + Permission_Helper(name="cred user", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="development environment", app="dojo", add=True, change=True, delete=True), + Permission_Helper(name="finding template", app="dojo", view=True, add=True, change=True, delete=True)] + \ github_permissions + \ google_sheet_permissions + [ - Permission_Helper(name='group', app='auth', view=True, add=True)] + \ + Permission_Helper(name="group", app="auth", view=True, add=True)] + \ jira_permissions + [ - Permission_Helper(name='language type', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='bannerconf', app='dojo', change=True), - Permission_Helper(name='announcement', app='dojo', change=True), - Permission_Helper(name='note type', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='product type', app='dojo', add=True)] + \ + Permission_Helper(name="language type", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="bannerconf", app="dojo", change=True), + Permission_Helper(name="announcement", app="dojo", change=True), + Permission_Helper(name="note type", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="product type", app="dojo", add=True)] + \ questionnaire_permissions + [ - Permission_Helper(name='regulation', app='dojo', add=True, change=True, delete=True)] + \ + Permission_Helper(name="regulation", app="dojo", add=True, change=True, delete=True)] + \ rules_permissions + [ - Permission_Helper(name='sla configuration', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='test type', app='dojo', add=True, change=True), - Permission_Helper(name='tool configuration', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='tool type', app='dojo', view=True, add=True, change=True, delete=True), - Permission_Helper(name='user', app='auth', view=True, add=True, change=True, delete=True), + Permission_Helper(name="sla configuration", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="test type", app="dojo", add=True, change=True), + Permission_Helper(name="tool configuration", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="tool type", app="dojo", view=True, add=True, change=True, delete=True), + Permission_Helper(name="user", app="auth", view=True, add=True, change=True, delete=True), ] return permission_fields diff --git a/dojo/user/validators.py b/dojo/user/validators.py index 17e35c781b..c97cc9214e 100644 --- a/dojo/user/validators.py +++ b/dojo/user/validators.py @@ -9,89 +9,89 @@ class MinLengthValidator: def validate(self, password, user=None): - if len(password) < get_system_setting('minimum_password_length'): + if len(password) < get_system_setting("minimum_password_length"): raise ValidationError( self.get_help_text(), - code='password_too_short') + code="password_too_short") else: return None def get_help_text(self): - return gettext('Password must be at least {minimum_length} characters long.'.format( - minimum_length=get_system_setting('minimum_password_length'))) + return gettext("Password must be at least {minimum_length} characters long.".format( + minimum_length=get_system_setting("minimum_password_length"))) class 
MaxLengthValidator: def validate(self, password, user=None): - if len(password) > get_system_setting('maximum_password_length'): + if len(password) > get_system_setting("maximum_password_length"): raise ValidationError( self.get_help_text(), - code='password_too_short') + code="password_too_short") else: return None def get_help_text(self): - return gettext('Password must be less than {maximum_length} characters long.'.format( - maximum_length=get_system_setting('maximum_password_length'))) + return gettext("Password must be less than {maximum_length} characters long.".format( + maximum_length=get_system_setting("maximum_password_length"))) class NumberValidator: def validate(self, password, user=None): - if not re.findall(r'\d', password) and get_system_setting('number_character_required'): + if not re.findall(r"\d", password) and get_system_setting("number_character_required"): raise ValidationError( self.get_help_text(), - code='password_no_number') + code="password_no_number") else: return None def get_help_text(self): - return gettext('Password must contain at least 1 digit, 0-9.') + return gettext("Password must contain at least 1 digit, 0-9.") class UppercaseValidator: def validate(self, password, user=None): - if not re.findall('[A-Z]', password) and get_system_setting('uppercase_character_required'): + if not re.findall("[A-Z]", password) and get_system_setting("uppercase_character_required"): raise ValidationError( self.get_help_text(), - code='password_no_upper') + code="password_no_upper") else: return None def get_help_text(self): - return gettext('Password must contain at least 1 uppercase letter, A-Z.') + return gettext("Password must contain at least 1 uppercase letter, A-Z.") class LowercaseValidator: def validate(self, password, user=None): - if not re.findall('[a-z]', password) and get_system_setting('lowercase_character_required'): + if not re.findall("[a-z]", password) and get_system_setting("lowercase_character_required"): raise ValidationError( self.get_help_text(), - code='password_no_lower') + code="password_no_lower") else: return None def get_help_text(self): - return gettext('Password must contain at least 1 lowercase letter, a-z.') + return gettext("Password must contain at least 1 lowercase letter, a-z.") class SymbolValidator: def validate(self, password, user=None): contains_special_character = re.findall(r'[(){}\[\]|~!@#$%^&*_\-+=;:\'",\`<>\./?]', password) - if not contains_special_character and get_system_setting('special_character_required'): + if not contains_special_character and get_system_setting("special_character_required"): raise ValidationError( self.get_help_text(), - code='password_no_symbol') + code="password_no_symbol") else: return None def get_help_text(self): - return gettext('The password must contain at least 1 special character, ' + return gettext("The password must contain at least 1 special character, " + """()[]{}|`~!@#$%^&*_-+=;:'",<>./?.""") class DojoCommonPasswordValidator(CommonPasswordValidator): def validate(self, password, user=None): - if get_system_setting('non_common_password_required'): + if get_system_setting("non_common_password_required"): return super().validate(password, user) else: return None diff --git a/dojo/user/views.py b/dojo/user/views.py index 25d4692ea9..91661db691 100644 --- a/dojo/user/views.py +++ b/dojo/user/views.py @@ -55,13 +55,13 @@ class DojoLoginView(LoginView): - template_name = 'dojo/login.html' + template_name = "dojo/login.html" authentication_form = AuthenticationForm def form_valid(self, 
form): last_login = None with contextlib.suppress(Exception): - username = form.cleaned_data.get('username') + username = form.cleaned_data.get("username") user = Dojo_User.objects.get(username=username) last_login = user.last_login response = super().form_valid(form) @@ -71,7 +71,7 @@ def form_valid(self, form): self.request, messages.SUCCESS, _(f'Hello {name}! Your last login was {naturaltime(last_login)} ({last_login.strftime("%Y-%m-%d %I:%M:%S %p")})'), - extra_tags='alert-success') + extra_tags="alert-success") return response @@ -81,11 +81,11 @@ def api_v2_key(request): # This check should not be necessary because url should not be in 'urlpatterns' but we never know if not settings.API_TOKENS_ENABLED: raise PermissionDenied - api_key = '' + api_key = "" form = APIKeyForm(instance=request.user) - if request.method == 'POST': # new key requested + if request.method == "POST": # new key requested form = APIKeyForm(request.POST, instance=request.user) - if form.is_valid() and form.cleaned_data['id'] == request.user.id: + if form.is_valid() and form.cleaned_data["id"] == request.user.id: try: api_key = Token.objects.get(user=request.user) api_key.delete() @@ -94,8 +94,8 @@ def api_v2_key(request): api_key = Token.objects.create(user=request.user) messages.add_message(request, messages.SUCCESS, - _('API Key generated successfully.'), - extra_tags='alert-success') + _("API Key generated successfully."), + extra_tags="alert-success") else: raise PermissionDenied else: @@ -105,18 +105,18 @@ def api_v2_key(request): api_key = Token.objects.create(user=request.user) add_breadcrumb(title=_("API Key"), top_level=True, request=request) - return render(request, 'dojo/api_v2_key.html', - {'name': _('API v2 Key'), - 'metric': False, - 'user': request.user, - 'key': api_key, - 'form': form, + return render(request, "dojo/api_v2_key.html", + {"name": _("API v2 Key"), + "metric": False, + "user": request.user, + "key": api_key, + "form": form, }) # # user specific -@dojo_ratelimit(key='post:username') -@dojo_ratelimit(key='post:password') +@dojo_ratelimit(key="post:username") +@dojo_ratelimit(key="post:password") def login_view(request): if not settings.SHOW_LOGIN_FORM and settings.SOCIAL_LOGIN_AUTO_REDIRECT and sum([ settings.GOOGLE_OAUTH_ENABLED, @@ -127,30 +127,30 @@ def login_view(request): settings.KEYCLOAK_OAUTH2_ENABLED, settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED, settings.SAML2_ENABLED, - ]) == 1 and 'force_login_form' not in request.GET: + ]) == 1 and "force_login_form" not in request.GET: if settings.GOOGLE_OAUTH_ENABLED: - social_auth = 'google-oauth2' + social_auth = "google-oauth2" elif settings.OKTA_OAUTH_ENABLED: - social_auth = 'okta-oauth2' + social_auth = "okta-oauth2" elif settings.AZUREAD_TENANT_OAUTH2_ENABLED: - social_auth = 'azuread-tenant-oauth2' + social_auth = "azuread-tenant-oauth2" elif settings.GITLAB_OAUTH2_ENABLED: - social_auth = 'gitlab' + social_auth = "gitlab" elif settings.KEYCLOAK_OAUTH2_ENABLED: - social_auth = 'keycloak' + social_auth = "keycloak" elif settings.AUTH0_OAUTH2_ENABLED: - social_auth = 'auth0' + social_auth = "auth0" elif settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED: - social_auth = 'github-enterprise' + social_auth = "github-enterprise" else: - return HttpResponseRedirect('/saml2/login') + return HttpResponseRedirect("/saml2/login") try: - return HttpResponseRedirect('{}?{}'.format(reverse('social:begin', args=[social_auth]), - urlencode({'next': request.GET.get('next')}))) + return HttpResponseRedirect("{}?{}".format(reverse("social:begin", 
args=[social_auth]), + urlencode({"next": request.GET.get("next")}))) except: - return HttpResponseRedirect(reverse('social:begin', args=[social_auth])) + return HttpResponseRedirect(reverse("social:begin", args=[social_auth])) else: - return DojoLoginView.as_view(template_name='dojo/login.html', authentication_form=AuthenticationForm)(request) + return DojoLoginView.as_view(template_name="dojo/login.html", authentication_form=AuthenticationForm)(request) def logout_view(request): @@ -161,18 +161,18 @@ def logout_view(request): else: messages.add_message(request, messages.SUCCESS, - _('You have logged out successfully.'), - extra_tags='alert-success') + _("You have logged out successfully."), + extra_tags="alert-success") - return HttpResponseRedirect(reverse('login')) + return HttpResponseRedirect(reverse("login")) @user_passes_test(lambda u: u.is_active) def alerts(request): alerts = Alerts.objects.filter(user_id=request.user) - if request.method == 'POST': - removed_alerts = request.POST.getlist('alert_select') + if request.method == "POST": + removed_alerts = request.POST.getlist("alert_select") alerts.filter(id__in=removed_alerts).delete() alerts = alerts.filter(~Q(id__in=removed_alerts)) @@ -183,42 +183,42 @@ def alerts(request): add_breadcrumb(title=alert_title, top_level=True, request=request) return render(request, - 'dojo/alerts.html', - {'alerts': paged_alerts}) + "dojo/alerts.html", + {"alerts": paged_alerts}) def delete_alerts(request): alerts = Alerts.objects.filter(user_id=request.user) - if request.method == 'POST': + if request.method == "POST": alerts.filter().delete() messages.add_message( request, messages.SUCCESS, - _('Alerts removed.'), - extra_tags='alert-success') - return HttpResponseRedirect('alerts') + _("Alerts removed."), + extra_tags="alert-success") + return HttpResponseRedirect("alerts") return render(request, - 'dojo/delete_alerts.html', - {'alerts': alerts}) + "dojo/delete_alerts.html", + {"alerts": alerts}) @login_required def alerts_json(request, limit=None): - limit = request.GET.get('limit') + limit = request.GET.get("limit") if limit: - alerts = serializers.serialize('json', Alerts.objects.filter(user_id=request.user)[:int(limit)]) + alerts = serializers.serialize("json", Alerts.objects.filter(user_id=request.user)[:int(limit)]) else: - alerts = serializers.serialize('json', Alerts.objects.filter(user_id=request.user)) - return HttpResponse(alerts, content_type='application/json') + alerts = serializers.serialize("json", Alerts.objects.filter(user_id=request.user)) + return HttpResponse(alerts, content_type="application/json") def alertcount(request): if not settings.DISABLE_ALERT_COUNTER: count = Alerts.objects.filter(user_id=request.user).count() - return JsonResponse({'count': count}) - return JsonResponse({'count': 0}) + return JsonResponse({"count": count}) + return JsonResponse({"count": 0}) def view_profile(request): @@ -226,13 +226,13 @@ def view_profile(request): form = DojoUserForm(instance=user) group_members = get_authorized_group_members_for_user(user) - user_contact = user.usercontactinfo if hasattr(user, 'usercontactinfo') else None + user_contact = user.usercontactinfo if hasattr(user, "usercontactinfo") else None if user_contact is None: contact_form = UserContactInfoForm() else: contact_form = UserContactInfoForm(instance=user_contact) - global_role = user.global_role if hasattr(user, 'global_role') else None + global_role = user.global_role if hasattr(user, "global_role") else None if global_role is None: previous_global_role = None 
global_role_form = GlobalRoleForm() @@ -240,7 +240,7 @@ def view_profile(request): previous_global_role = global_role.role global_role_form = GlobalRoleForm(instance=global_role) - if request.method == 'POST': + if request.method == "POST": form = DojoUserForm(request.POST, instance=user) contact_form = UserContactInfoForm(request.POST, instance=user_contact) global_role_form = GlobalRoleForm(request.POST, instance=global_role) @@ -255,32 +255,32 @@ def view_profile(request): global_role.role = previous_global_role messages.add_message(request, messages.WARNING, - _('Only superusers are allowed to change their global role.'), - extra_tags='alert-warning') + _("Only superusers are allowed to change their global role."), + extra_tags="alert-warning") global_role.user = user global_role.save() messages.add_message(request, messages.SUCCESS, - _('Profile updated successfully.'), - extra_tags='alert-success') - add_breadcrumb(title=_("User Profile - %(user_full_name)s") % {'user_full_name': user.get_full_name()}, top_level=True, request=request) - return render(request, 'dojo/profile.html', { - 'user': user, - 'form': form, - 'contact_form': contact_form, - 'global_role_form': global_role_form, - 'group_members': group_members}) + _("Profile updated successfully."), + extra_tags="alert-success") + add_breadcrumb(title=_("User Profile - %(user_full_name)s") % {"user_full_name": user.get_full_name()}, top_level=True, request=request) + return render(request, "dojo/profile.html", { + "user": user, + "form": form, + "contact_form": contact_form, + "global_role_form": global_role_form, + "group_members": group_members}) def change_password(request): user = get_object_or_404(Dojo_User, pk=request.user.id) form = ChangePasswordForm(user=user) - if request.method == 'POST': + if request.method == "POST": form = ChangePasswordForm(request.POST, user=user) if form.is_valid(): - new_password = form.cleaned_data['new_password'] + new_password = form.cleaned_data["new_password"] user.set_password(new_password) Dojo_User.disable_force_password_reset(user) @@ -288,31 +288,31 @@ def change_password(request): messages.add_message(request, messages.SUCCESS, - _('Your password has been changed.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_profile')) + _("Your password has been changed."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_profile")) add_breadcrumb(title=_("Change Password"), top_level=False, request=request) - return render(request, 'dojo/change_pwd.html', {'form': form}) + return render(request, "dojo/change_pwd.html", {"form": form}) -@user_is_configuration_authorized('auth.view_user') +@user_is_configuration_authorized("auth.view_user") def user(request): page_name = _("All Users") users = Dojo_User.objects.all() \ - .select_related('usercontactinfo', 'global_role') \ - .order_by('username', 'last_name', 'first_name') + .select_related("usercontactinfo", "global_role") \ + .order_by("username", "last_name", "first_name") users = UserFilter(request.GET, queryset=users) paged_users = get_page_items(request, users.qs, 25) add_breadcrumb(title=page_name, top_level=True, request=request) - return render(request, 'dojo/users.html', { + return render(request, "dojo/users.html", { "users": paged_users, "filtered": users, "name": page_name, }) -@user_is_configuration_authorized('auth.add_user') +@user_is_configuration_authorized("auth.add_user") def add_user(request): page_name = _("Add User") form = AddDojoUserForm() @@ -320,24 +320,24 @@ def 
add_user(request): global_role_form = GlobalRoleForm() user = None - if request.method == 'POST': + if request.method == "POST": form = AddDojoUserForm(request.POST) contact_form = UserContactInfoForm(request.POST) global_role_form = GlobalRoleForm(request.POST) if form.is_valid() and contact_form.is_valid() and global_role_form.is_valid(): - if not request.user.is_superuser and form.cleaned_data['is_superuser']: + if not request.user.is_superuser and form.cleaned_data["is_superuser"]: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to add superusers. User was not saved.'), - extra_tags='alert-danger') - elif not request.user.is_superuser and global_role_form.cleaned_data['role']: + _("Only superusers are allowed to add superusers. User was not saved."), + extra_tags="alert-danger") + elif not request.user.is_superuser and global_role_form.cleaned_data["role"]: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to add users with a global role. User was not saved.'), - extra_tags='alert-danger') + _("Only superusers are allowed to add users with a global role. User was not saved."), + extra_tags="alert-danger") else: user = form.save(commit=False) - password = request.POST['password'] + password = request.POST["password"] if password: user.set_password(password) else: @@ -352,24 +352,24 @@ def add_user(request): global_role.save() messages.add_message(request, messages.SUCCESS, - _('User added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_user', args=(user.id,))) + _("User added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_user", args=(user.id,))) else: messages.add_message(request, messages.ERROR, - _('User was not added successfully.'), - extra_tags='alert-danger') + _("User was not added successfully."), + extra_tags="alert-danger") add_breadcrumb(title=page_name, top_level=False, request=request) return render(request, "dojo/add_user.html", { - 'name': page_name, - 'form': form, - 'contact_form': contact_form, - 'global_role_form': global_role_form, - 'to_add': True}) + "name": page_name, + "form": form, + "contact_form": contact_form, + "global_role_form": global_role_form, + "to_add": True}) -@user_is_configuration_authorized('auth.view_user') +@user_is_configuration_authorized("auth.view_user") def view_user(request, uid): user = get_object_or_404(Dojo_User, id=uid) product_members = get_authorized_product_members_for_user(user, Permissions.Product_View) @@ -378,33 +378,33 @@ def view_user(request, uid): configuration_permission_form = ConfigurationPermissionsForm(user=user) add_breadcrumb(title=_("View User"), top_level=False, request=request) - return render(request, 'dojo/view_user.html', { - 'user': user, - 'product_members': product_members, - 'product_type_members': product_type_members, - 'group_members': group_members, - 'configuration_permission_form': configuration_permission_form}) + return render(request, "dojo/view_user.html", { + "user": user, + "product_members": product_members, + "product_type_members": product_type_members, + "group_members": group_members, + "configuration_permission_form": configuration_permission_form}) -@user_is_configuration_authorized('auth.change_user') +@user_is_configuration_authorized("auth.change_user") def edit_user(request, uid): page_name = _("Edit User") user = get_object_or_404(Dojo_User, id=uid) form = EditDojoUserForm(instance=user) - user_contact = user.usercontactinfo if 
hasattr(user, 'usercontactinfo') else None + user_contact = user.usercontactinfo if hasattr(user, "usercontactinfo") else None if user_contact is None: contact_form = UserContactInfoForm() else: contact_form = UserContactInfoForm(instance=user_contact) - global_role = user.global_role if hasattr(user, 'global_role') else None + global_role = user.global_role if hasattr(user, "global_role") else None if global_role is None: global_role_form = GlobalRoleForm() else: global_role_form = GlobalRoleForm(instance=global_role) - if request.method == 'POST': + if request.method == "POST": form = EditDojoUserForm(request.POST, instance=user) if user_contact is None: contact_form = UserContactInfoForm(request.POST) @@ -417,16 +417,16 @@ def edit_user(request, uid): global_role_form = GlobalRoleForm(request.POST, instance=global_role) if form.is_valid() and contact_form.is_valid() and global_role_form.is_valid(): - if not request.user.is_superuser and form.cleaned_data['is_superuser']: + if not request.user.is_superuser and form.cleaned_data["is_superuser"]: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to edit superusers. User was not saved.'), - extra_tags='alert-danger') - elif not request.user.is_superuser and global_role_form.cleaned_data['role']: + _("Only superusers are allowed to edit superusers. User was not saved."), + extra_tags="alert-danger") + elif not request.user.is_superuser and global_role_form.cleaned_data["role"]: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to edit users with a global role. User was not saved.'), - extra_tags='alert-danger') + _("Only superusers are allowed to edit users with a global role. User was not saved."), + extra_tags="alert-danger") else: form.save() contact = contact_form.save(commit=False) @@ -437,23 +437,23 @@ def edit_user(request, uid): global_role.save() messages.add_message(request, messages.SUCCESS, - _('User saved successfully.'), - extra_tags='alert-success') + _("User saved successfully."), + extra_tags="alert-success") else: messages.add_message(request, messages.ERROR, - _('User was not saved successfully.'), - extra_tags='alert-danger') + _("User was not saved successfully."), + extra_tags="alert-danger") add_breadcrumb(title=page_name, top_level=False, request=request) return render(request, "dojo/add_user.html", { - 'name': page_name, - 'form': form, - 'contact_form': contact_form, - 'global_role_form': global_role_form, - 'to_edit': user}) + "name": page_name, + "form": form, + "contact_form": contact_form, + "global_role_form": global_role_form, + "to_edit": user}) -@user_is_configuration_authorized('auth.delete_user') +@user_is_configuration_authorized("auth.delete_user") def delete_user(request, uid): user = get_object_or_404(Dojo_User, id=uid) form = DeleteUserForm(instance=user) @@ -461,162 +461,162 @@ def delete_user(request, uid): if user.id == request.user.id: messages.add_message(request, messages.ERROR, - _('You may not delete yourself.'), - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('edit_user', args=(user.id,))) + _("You may not delete yourself."), + extra_tags="alert-danger") + return HttpResponseRedirect(reverse("edit_user", args=(user.id,))) - if request.method == 'POST': - if 'id' in request.POST and str(user.id) == request.POST['id']: + if request.method == "POST": + if "id" in request.POST and str(user.id) == request.POST["id"]: form = DeleteUserForm(request.POST, instance=user) if form.is_valid(): if not request.user.is_superuser 
and user.is_superuser: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to delete superusers. User was not removed.'), - extra_tags='alert-danger') - elif not request.user.is_superuser and hasattr(user, 'global_role') and user.global_role.role: + _("Only superusers are allowed to delete superusers. User was not removed."), + extra_tags="alert-danger") + elif not request.user.is_superuser and hasattr(user, "global_role") and user.global_role.role: messages.add_message(request, messages.ERROR, - _('Only superusers are allowed to delete users with a global role. User was not removed.'), - extra_tags='alert-danger') + _("Only superusers are allowed to delete users with a global role. User was not removed."), + extra_tags="alert-danger") else: try: user.delete() messages.add_message(request, messages.SUCCESS, - _('User and relationships removed.'), - extra_tags='alert-success') + _("User and relationships removed."), + extra_tags="alert-success") except RestrictedError as err: messages.add_message(request, messages.WARNING, - _('User cannot be deleted: %(error)s') % {'error': err}, - extra_tags='alert-warning') - return HttpResponseRedirect(reverse('users')) + _("User cannot be deleted: %(error)s") % {"error": err}, + extra_tags="alert-warning") + return HttpResponseRedirect(reverse("users")) collector = NestedObjects(using=DEFAULT_DB_ALIAS) collector.collect([user]) rels = collector.nested() add_breadcrumb(title=_("Delete User"), top_level=False, request=request) - return render(request, 'dojo/delete_user.html', - {'to_delete': user, - 'form': form, - 'rels': rels, + return render(request, "dojo/delete_user.html", + {"to_delete": user, + "form": form, + "rels": rels, }) @user_passes_test(lambda u: u.is_superuser) def add_product_type_member(request, uid): user = get_object_or_404(Dojo_User, id=uid) - memberform = Add_Product_Type_Member_UserForm(initial={'user': user.id}) - if request.method == 'POST': - memberform = Add_Product_Type_Member_UserForm(request.POST, initial={'user': user.id}) + memberform = Add_Product_Type_Member_UserForm(initial={"user": user.id}) + if request.method == "POST": + memberform = Add_Product_Type_Member_UserForm(request.POST, initial={"user": user.id}) if memberform.is_valid(): - if 'product_types' in memberform.cleaned_data and len(memberform.cleaned_data['product_types']) > 0: - for product_type in memberform.cleaned_data['product_types']: + if "product_types" in memberform.cleaned_data and len(memberform.cleaned_data["product_types"]) > 0: + for product_type in memberform.cleaned_data["product_types"]: existing_members = Product_Type_Member.objects.filter(product_type=product_type, user=user) if existing_members.count() == 0: product_type_member = Product_Type_Member() product_type_member.product_type = product_type product_type_member.user = user - product_type_member.role = memberform.cleaned_data['role'] + product_type_member.role = memberform.cleaned_data["role"] product_type_member.save() messages.add_message(request, messages.SUCCESS, - _('Product type members added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_user', args=(uid, ))) + _("Product type members added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_user", args=(uid, ))) add_breadcrumb(title=_("Add Product Type Member"), top_level=False, request=request) - return render(request, 'dojo/new_product_type_member_user.html', { - 'user': user, - 'form': memberform, + return 
render(request, "dojo/new_product_type_member_user.html", { + "user": user, + "form": memberform, }) @user_passes_test(lambda u: u.is_superuser) def add_product_member(request, uid): user = get_object_or_404(Dojo_User, id=uid) - memberform = Add_Product_Member_UserForm(initial={'user': user.id}) - if request.method == 'POST': - memberform = Add_Product_Member_UserForm(request.POST, initial={'user': user.id}) + memberform = Add_Product_Member_UserForm(initial={"user": user.id}) + if request.method == "POST": + memberform = Add_Product_Member_UserForm(request.POST, initial={"user": user.id}) if memberform.is_valid(): - if 'products' in memberform.cleaned_data and len(memberform.cleaned_data['products']) > 0: - for product in memberform.cleaned_data['products']: + if "products" in memberform.cleaned_data and len(memberform.cleaned_data["products"]) > 0: + for product in memberform.cleaned_data["products"]: existing_members = Product_Member.objects.filter(product=product, user=user) if existing_members.count() == 0: product_member = Product_Member() product_member.product = product product_member.user = user - product_member.role = memberform.cleaned_data['role'] + product_member.role = memberform.cleaned_data["role"] product_member.save() messages.add_message(request, messages.SUCCESS, - _('Product members added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_user', args=(uid, ))) + _("Product members added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_user", args=(uid, ))) add_breadcrumb(title=_("Add Product Member"), top_level=False, request=request) - return render(request, 'dojo/new_product_member_user.html', { - 'user': user, - 'form': memberform, + return render(request, "dojo/new_product_member_user.html", { + "user": user, + "form": memberform, }) @user_passes_test(lambda u: u.is_superuser) def add_group_member(request, uid): user = get_object_or_404(Dojo_User, id=uid) - memberform = Add_Group_Member_UserForm(initial={'user': user.id}) + memberform = Add_Group_Member_UserForm(initial={"user": user.id}) - if request.method == 'POST': - memberform = Add_Group_Member_UserForm(request.POST, initial={'user': user.id}) + if request.method == "POST": + memberform = Add_Group_Member_UserForm(request.POST, initial={"user": user.id}) if memberform.is_valid(): - if 'groups' in memberform.cleaned_data and len(memberform.cleaned_data['groups']) > 0: - for group in memberform.cleaned_data['groups']: + if "groups" in memberform.cleaned_data and len(memberform.cleaned_data["groups"]) > 0: + for group in memberform.cleaned_data["groups"]: existing_groups = Dojo_Group_Member.objects.filter(user=user, group=group) if existing_groups.count() == 0: group_member = Dojo_Group_Member() group_member.group = group group_member.user = user - group_member.role = memberform.cleaned_data['role'] + group_member.role = memberform.cleaned_data["role"] group_member.save() messages.add_message(request, messages.SUCCESS, - _('Groups added successfully.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_user', args=(uid,))) + _("Groups added successfully."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_user", args=(uid,))) add_breadcrumb(title=_("Add Group Member"), top_level=False, request=request) - return render(request, 'dojo/new_group_member_user.html', { - 'user': user, - 'form': memberform, + return render(request, "dojo/new_group_member_user.html", { + "user": user, + 
"form": memberform, }) -@user_is_configuration_authorized('auth.change_permission') +@user_is_configuration_authorized("auth.change_permission") def edit_permissions(request, uid): user = get_object_or_404(Dojo_User, id=uid) - if request.method == 'POST': + if request.method == "POST": form = ConfigurationPermissionsForm(request.POST, user=user) if form.is_valid(): form.save() messages.add_message(request, messages.SUCCESS, - _('Permissions updated.'), - extra_tags='alert-success') - return HttpResponseRedirect(reverse('view_user', args=(uid,))) + _("Permissions updated."), + extra_tags="alert-success") + return HttpResponseRedirect(reverse("view_user", args=(uid,))) class DojoForgotUsernameForm(PasswordResetForm): def send_mail(self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): - from_email = get_system_setting('email_from') + from_email = get_system_setting("email_from") url = hyperlink.parse(settings.SITE_URL) - subject_template_name = 'login/forgot_username_subject.html' - email_template_name = 'login/forgot_username.tpl' - context['site_name'] = url.host - context['protocol'] = url.scheme - context['domain'] = settings.SITE_URL[len(f'{url.scheme}://'):] + subject_template_name = "login/forgot_username_subject.html" + email_template_name = "login/forgot_username.tpl" + context["site_name"] = url.host + context["protocol"] = url.scheme + context["domain"] = settings.SITE_URL[len(f"{url.scheme}://"):] super().send_mail(subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name) @@ -635,14 +635,14 @@ class DojoPasswordResetForm(PasswordResetForm): def send_mail(self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): - from_email = get_system_setting('email_from') + from_email = get_system_setting("email_from") url = hyperlink.parse(settings.SITE_URL) - email_template_name = 'login/forgot_password.tpl' - context['site_name'] = url.host - context['protocol'] = url.scheme - context['domain'] = settings.SITE_URL[len(f'{url.scheme}://'):] - context['link_expiration_date'] = naturaltime(now() + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT)) + email_template_name = "login/forgot_password.tpl" + context["site_name"] = url.host + context["protocol"] = url.scheme + context["domain"] = settings.SITE_URL[len(f"{url.scheme}://"):] + context["link_expiration_date"] = naturaltime(now() + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT)) super().send_mail(subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name) diff --git a/dojo/utils.py b/dojo/utils.py index 2bf51f60f1..12bb4e0175 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -113,8 +113,8 @@ def do_false_positive_history(finding, *args, **kwargs): # Remove the async user kwarg because save() really does not like it # Would rather not add anything to Finding.save() - if 'async_user' in kwargs: - kwargs.pop('async_user') + if "async_user" in kwargs: + kwargs.pop("async_user") for find in to_mark_as_fp: deduplicationLogger.debug( @@ -145,60 +145,60 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t test (:model:`dojo.Test`, optional): Test to filter findings by """ if product: - custom_filter_type = 'product' - custom_filter = {'test__engagement__product': product} + custom_filter_type = "product" + custom_filter = {"test__engagement__product": product} elif engagement: - custom_filter_type = 'engagement' - 
custom_filter = {'test__engagement': engagement} + custom_filter_type = "engagement" + custom_filter = {"test__engagement": engagement} elif test: - custom_filter_type = 'test' - custom_filter = {'test': test} + custom_filter_type = "test" + custom_filter = {"test": test} else: - msg = 'No product, engagement or test provided as argument.' + msg = "No product, engagement or test provided as argument." raise ValueError(msg) deduplication_algorithm = finding.test.deduplication_algorithm deduplicationLogger.debug( - 'Matching finding %i:%s to existing findings in %s %s using %s as deduplication algorithm.', + "Matching finding %i:%s to existing findings in %s %s using %s as deduplication algorithm.", finding.id, finding.title, custom_filter_type, list(custom_filter.values())[0], deduplication_algorithm, ) - if deduplication_algorithm == 'hash_code': + if deduplication_algorithm == "hash_code": return ( Finding.objects.filter( **custom_filter, hash_code=finding.hash_code, ).exclude(hash_code=None) .exclude(id=finding.id) - .order_by('id') + .order_by("id") ) - elif deduplication_algorithm == 'unique_id_from_tool': + elif deduplication_algorithm == "unique_id_from_tool": return ( Finding.objects.filter( **custom_filter, unique_id_from_tool=finding.unique_id_from_tool, ).exclude(unique_id_from_tool=None) .exclude(id=finding.id) - .order_by('id') + .order_by("id") ) - elif deduplication_algorithm == 'unique_id_from_tool_or_hash_code': + elif deduplication_algorithm == "unique_id_from_tool_or_hash_code": query = Finding.objects.filter( Q(**custom_filter), ( (Q(hash_code__isnull=False) & Q(hash_code=finding.hash_code)) | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=finding.unique_id_from_tool)) ), - ).exclude(id=finding.id).order_by('id') + ).exclude(id=finding.id).order_by("id") deduplicationLogger.debug(query.query) return query - elif deduplication_algorithm == 'legacy': + elif deduplication_algorithm == "legacy": # This is the legacy reimport behavior. Although it's pretty flawed and # doesn't match the legacy algorithm for deduplication, this is left as is for simplicity. # Re-writing the legacy deduplication here would be complicated and counter-productive. 
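
The "unique_id_from_tool_or_hash_code" branch above is the one case where matching is an OR of two guarded equality checks rather than a single field filter: a candidate finding matches when either identifier is non-null and equal, within the product/engagement/test scope. A minimal standalone sketch of that Q-object composition, for reviewers tracing the logic (the helper name build_uid_or_hash_query is hypothetical and not part of this diff):

from django.db.models import Q

def build_uid_or_hash_query(custom_filter, finding):
    # Mirrors the unique_id_from_tool_or_hash_code branch: scope by the
    # product/engagement/test filter, then accept a match on either
    # identifier, and exclude the finding being matched itself.
    return (
        Q(**custom_filter)
        & ((Q(hash_code__isnull=False) & Q(hash_code=finding.hash_code))
           | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=finding.unique_id_from_tool)))
        & ~Q(id=finding.id)
    )

# Usage sketch: Finding.objects.filter(build_uid_or_hash_query({"test": test}, finding)).order_by("id")
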
@@ -210,7 +210,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t title=finding.title, severity=finding.severity, numerical_severity=Finding.get_numerical_severity(finding.severity), - ).order_by('id') + ).order_by("id") ) else: @@ -235,32 +235,32 @@ def are_urls_equal(url1, url2, fields): # For a details description see https://hyperlink.readthedocs.io/en/latest/api.html#attributes deduplicationLogger.debug("Check if url %s and url %s are equal in terms of %s.", url1, url2, fields) for field in fields: - if field == 'scheme': + if field == "scheme": if url1.scheme != url2.scheme: return False - elif field == 'host': + elif field == "host": if url1.host != url2.host: return False - elif field == 'port': + elif field == "port": if url1.port != url2.port: return False - elif field == 'path': + elif field == "path": if url1.path != url2.path: return False - elif field == 'query': + elif field == "query": if url1.query != url2.query: return False - elif field == 'fragment': + elif field == "fragment": if url1.fragment != url2.fragment: return False - elif field == 'userinfo': + elif field == "userinfo": if url1.userinfo != url2.userinfo: return False - elif field == 'user': + elif field == "user": if url1.user != url2.user: return False else: - logger.warning('Field ' + field + ' is not supported by the endpoint dedupe algorithm, ignoring it.') + logger.warning("Field " + field + " is not supported by the endpoint dedupe algorithm, ignoring it.") return True @@ -300,10 +300,10 @@ def do_dedupe_finding(new_finding, *args, **kwargs): logger.warning("system settings not found") enabled = False if enabled: - deduplicationLogger.debug('dedupe for: ' + str(new_finding.id) + deduplicationLogger.debug("dedupe for: " + str(new_finding.id) + ":" + str(new_finding.title)) deduplicationAlgorithm = new_finding.test.deduplication_algorithm - deduplicationLogger.debug('deduplication algorithm: ' + deduplicationAlgorithm) + deduplicationLogger.debug("deduplication algorithm: " + deduplicationAlgorithm) if deduplicationAlgorithm == settings.DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL: deduplicate_unique_id_from_tool(new_finding) elif deduplicationAlgorithm == settings.DEDUPE_ALGO_HASH_CODE: @@ -328,32 +328,32 @@ def deduplicate_legacy(new_finding): if new_finding.test.engagement.deduplication_on_engagement: eng_findings_cwe = Finding.objects.filter( test__engagement=new_finding.test.engagement, - cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True).values('id') + cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True).values("id") eng_findings_title = Finding.objects.filter( test__engagement=new_finding.test.engagement, - title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True).values('id') + title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True).values("id") else: eng_findings_cwe = Finding.objects.filter( test__engagement__product=new_finding.test.engagement.product, - cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True).values('id') + cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True).values("id") eng_findings_title = Finding.objects.filter( test__engagement__product=new_finding.test.engagement.product, - title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True).values('id') + title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True).values("id") - total_findings = 
Finding.objects.filter(Q(id__in=eng_findings_cwe) | Q(id__in=eng_findings_title)).prefetch_related('endpoints', 'test', 'test__engagement', 'found_by', 'original_finding', 'test__test_type') + total_findings = Finding.objects.filter(Q(id__in=eng_findings_cwe) | Q(id__in=eng_findings_title)).prefetch_related("endpoints", "test", "test__engagement", "found_by", "original_finding", "test__test_type") deduplicationLogger.debug("Found " + str(len(eng_findings_cwe)) + " findings with same cwe, " + str(len(eng_findings_title)) + " findings with same title: " + str(len(total_findings)) + " findings with either same title or same cwe") # total_findings = total_findings.order_by('date') - for find in total_findings.order_by('id'): + for find in total_findings.order_by("id"): flag_endpoints = False flag_line_path = False flag_hash = False if is_deduplication_on_engagement_mismatch(new_finding, find): deduplicationLogger.debug( - 'deduplication_on_engagement_mismatch, skipping dedupe.') + "deduplication_on_engagement_mismatch, skipping dedupe.") continue # --------------------------------------------------------- @@ -376,10 +376,10 @@ def deduplicate_legacy(new_finding): else: deduplicationLogger.debug("no endpoints on one of the findings and file_path doesn't match; Deduplication will not occur") else: - deduplicationLogger.debug('find.static/dynamic: %s/%s', find.static_finding, find.dynamic_finding) - deduplicationLogger.debug('new_finding.static/dynamic: %s/%s', new_finding.static_finding, new_finding.dynamic_finding) - deduplicationLogger.debug('find.file_path: %s', find.file_path) - deduplicationLogger.debug('new_finding.file_path: %s', new_finding.file_path) + deduplicationLogger.debug("find.static/dynamic: %s/%s", find.static_finding, find.dynamic_finding) + deduplicationLogger.debug("new_finding.static/dynamic: %s/%s", new_finding.static_finding, new_finding.dynamic_finding) + deduplicationLogger.debug("find.file_path: %s", find.file_path) + deduplicationLogger.debug("new_finding.file_path: %s", new_finding.file_path) deduplicationLogger.debug("no endpoints on one of the findings and the new finding is either dynamic or doesn't have a file_path; Deduplication will not occur") @@ -387,8 +387,8 @@ def deduplicate_legacy(new_finding): flag_hash = True deduplicationLogger.debug( - 'deduplication flags for new finding (' + ('dynamic' if new_finding.dynamic_finding else 'static') + ') ' + str(new_finding.id) + ' and existing finding ' + str(find.id) - + ' flag_endpoints: ' + str(flag_endpoints) + ' flag_line_path:' + str(flag_line_path) + ' flag_hash:' + str(flag_hash)) + "deduplication flags for new finding (" + ("dynamic" if new_finding.dynamic_finding else "static") + ") " + str(new_finding.id) + " and existing finding " + str(find.id) + + " flag_endpoints: " + str(flag_endpoints) + " flag_line_path:" + str(flag_line_path) + " flag_hash:" + str(flag_hash)) # --------------------------------------------------------- # 3) Findings are duplicate if (cond1 is true) and they have the same: @@ -412,7 +412,7 @@ def deduplicate_unique_id_from_tool(new_finding): unique_id_from_tool=new_finding.unique_id_from_tool).exclude( id=new_finding.id).exclude( unique_id_from_tool=None).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") else: existing_findings = Finding.objects.filter( test__engagement__product=new_finding.test.engagement.product, @@ -421,14 +421,14 @@ def deduplicate_unique_id_from_tool(new_finding): unique_id_from_tool=new_finding.unique_id_from_tool).exclude( 
id=new_finding.id).exclude( unique_id_from_tool=None).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") deduplicationLogger.debug("Found " + str(len(existing_findings)) + " findings with same unique_id_from_tool") for find in existing_findings: if is_deduplication_on_engagement_mismatch(new_finding, find): deduplicationLogger.debug( - 'deduplication_on_engagement_mismatch, skipping dedupe.') + "deduplication_on_engagement_mismatch, skipping dedupe.") continue try: set_duplicate(new_finding, find) @@ -445,21 +445,21 @@ def deduplicate_hash_code(new_finding): hash_code=new_finding.hash_code).exclude( id=new_finding.id).exclude( hash_code=None).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") else: existing_findings = Finding.objects.filter( test__engagement__product=new_finding.test.engagement.product, hash_code=new_finding.hash_code).exclude( id=new_finding.id).exclude( hash_code=None).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") deduplicationLogger.debug("Found " + str(len(existing_findings)) + " findings with same hash_code") for find in existing_findings: if is_deduplication_on_engagement_mismatch(new_finding, find): deduplicationLogger.debug( - 'deduplication_on_engagement_mismatch, skipping dedupe.') + "deduplication_on_engagement_mismatch, skipping dedupe.") continue try: if are_endpoints_duplicates(new_finding, find): @@ -478,7 +478,7 @@ def deduplicate_uid_or_hash_code(new_finding): | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=new_finding.unique_id_from_tool) & Q(test__test_type=new_finding.test.test_type)), test__engagement=new_finding.test.engagement).exclude( id=new_finding.id).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") else: # same without "test__engagement=new_finding.test.engagement" condition existing_findings = Finding.objects.filter( @@ -486,13 +486,13 @@ def deduplicate_uid_or_hash_code(new_finding): | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=new_finding.unique_id_from_tool) & Q(test__test_type=new_finding.test.test_type)), test__engagement__product=new_finding.test.engagement.product).exclude( id=new_finding.id).exclude( - duplicate=True).order_by('id') + duplicate=True).order_by("id") deduplicationLogger.debug("Found " + str(len(existing_findings)) + " findings with either the same unique_id_from_tool or hash_code") for find in existing_findings: if is_deduplication_on_engagement_mismatch(new_finding, find): deduplicationLogger.debug( - 'deduplication_on_engagement_mismatch, skipping dedupe.') + "deduplication_on_engagement_mismatch, skipping dedupe.") continue try: if are_endpoints_duplicates(new_finding, find): @@ -507,7 +507,7 @@ def set_duplicate(new_finding, existing_finding): deduplicationLogger.debug(f"new_finding.status(): {new_finding.id} {new_finding.status()}") deduplicationLogger.debug(f"existing_finding.status(): {existing_finding.id} {existing_finding.status()}") if existing_finding.duplicate: - deduplicationLogger.debug('existing finding: %s:%s:duplicate=%s;duplicate_finding=%s', existing_finding.id, existing_finding.title, existing_finding.duplicate, existing_finding.duplicate_finding.id if existing_finding.duplicate_finding else 'None') + deduplicationLogger.debug("existing finding: %s:%s:duplicate=%s;duplicate_finding=%s", existing_finding.id, existing_finding.title, existing_finding.duplicate, existing_finding.duplicate_finding.id if existing_finding.duplicate_finding else "None") msg = "Existing 
finding is a duplicate" raise Exception(msg) if existing_finding.id == new_finding.id: @@ -520,7 +520,7 @@ def set_duplicate(new_finding, existing_finding): msg = "Skip this finding as we do not want to attach a new duplicate to a mitigated finding" raise Exception(msg) - deduplicationLogger.debug('Setting new finding ' + str(new_finding.id) + ' as a duplicate of existing finding ' + str(existing_finding.id)) + deduplicationLogger.debug("Setting new finding " + str(new_finding.id) + " as a duplicate of existing finding " + str(existing_finding.id)) new_finding.duplicate = True new_finding.active = False new_finding.verified = False @@ -529,13 +529,13 @@ def set_duplicate(new_finding, existing_finding): # Make sure transitive duplication is flattened # if A -> B and B is made a duplicate of C here, aferwards: # A -> C and B -> C should be true - for find in new_finding.original_finding.all().order_by('-id'): + for find in new_finding.original_finding.all().order_by("-id"): new_finding.original_finding.remove(find) set_duplicate(find, existing_finding) existing_finding.found_by.add(new_finding.test.test_type) - logger.debug('saving new finding: %d', new_finding.id) + logger.debug("saving new finding: %d", new_finding.id) super(Finding, new_finding).save() - logger.debug('saving existing finding: %d', existing_finding.id) + logger.debug("saving existing finding: %d", existing_finding.id) super(Finding, existing_finding).save() @@ -552,7 +552,7 @@ def finding_not_human_set_status(finding: Finding) -> bool: def set_duplicate_reopen(new_finding, existing_finding): - logger.debug('duplicate reopen existing finding') + logger.debug("duplicate reopen existing finding") existing_finding.mitigated = new_finding.mitigated existing_finding.is_mitigated = new_finding.is_mitigated existing_finding.active = new_finding.active @@ -564,38 +564,38 @@ def set_duplicate_reopen(new_finding, existing_finding): def count_findings(findings): product_count = {} - finding_count = {'low': 0, 'med': 0, 'high': 0, 'crit': 0} + finding_count = {"low": 0, "med": 0, "high": 0, "crit": 0} for f in findings: product = f.test.engagement.product if product in product_count: product_count[product][4] += 1 - if f.severity == 'Low': + if f.severity == "Low": product_count[product][3] += 1 - finding_count['low'] += 1 - if f.severity == 'Medium': + finding_count["low"] += 1 + if f.severity == "Medium": product_count[product][2] += 1 - finding_count['med'] += 1 - if f.severity == 'High': + finding_count["med"] += 1 + if f.severity == "High": product_count[product][1] += 1 - finding_count['high'] += 1 - if f.severity == 'Critical': + finding_count["high"] += 1 + if f.severity == "Critical": product_count[product][0] += 1 - finding_count['crit'] += 1 + finding_count["crit"] += 1 else: product_count[product] = [0, 0, 0, 0, 0] product_count[product][4] += 1 - if f.severity == 'Low': + if f.severity == "Low": product_count[product][3] += 1 - finding_count['low'] += 1 - if f.severity == 'Medium': + finding_count["low"] += 1 + if f.severity == "Medium": product_count[product][2] += 1 - finding_count['med'] += 1 - if f.severity == 'High': + finding_count["med"] += 1 + if f.severity == "High": product_count[product][1] += 1 - finding_count['high'] += 1 - if f.severity == 'Critical': + finding_count["high"] += 1 + if f.severity == "Critical": product_count[product][0] += 1 - finding_count['crit'] += 1 + finding_count["crit"] += 1 return product_count, finding_count @@ -620,60 +620,60 @@ def findings_this_period(findings, period_type, stuff, 
o_stuff, a_stuff): day=31, hour=23, minute=59, second=59) o_count = { - 'closed': 0, - 'zero': 0, - 'one': 0, - 'two': 0, - 'three': 0, - 'total': 0, + "closed": 0, + "zero": 0, + "one": 0, + "two": 0, + "three": 0, + "total": 0, } a_count = { - 'closed': 0, - 'zero': 0, - 'one': 0, - 'two': 0, - 'three': 0, - 'total': 0, + "closed": 0, + "zero": 0, + "one": 0, + "two": 0, + "three": 0, + "total": 0, } for f in findings: if f.mitigated is not None and end_of_period >= f.mitigated >= start_of_period: - o_count['closed'] += 1 + o_count["closed"] += 1 elif f.mitigated is not None and f.mitigated > end_of_period and f.date <= end_of_period.date(): - if f.severity == 'Critical': - o_count['zero'] += 1 - elif f.severity == 'High': - o_count['one'] += 1 - elif f.severity == 'Medium': - o_count['two'] += 1 - elif f.severity == 'Low': - o_count['three'] += 1 + if f.severity == "Critical": + o_count["zero"] += 1 + elif f.severity == "High": + o_count["one"] += 1 + elif f.severity == "Medium": + o_count["two"] += 1 + elif f.severity == "Low": + o_count["three"] += 1 elif f.mitigated is None and f.date <= end_of_period.date(): - if f.severity == 'Critical': - o_count['zero'] += 1 - a_count['zero'] += 1 - elif f.severity == 'High': - o_count['one'] += 1 - a_count['one'] += 1 - elif f.severity == 'Medium': - o_count['two'] += 1 - a_count['two'] += 1 - elif f.severity == 'Low': - o_count['three'] += 1 - a_count['three'] += 1 - - total = sum(o_count.values()) - o_count['closed'] + if f.severity == "Critical": + o_count["zero"] += 1 + a_count["zero"] += 1 + elif f.severity == "High": + o_count["one"] += 1 + a_count["one"] += 1 + elif f.severity == "Medium": + o_count["two"] += 1 + a_count["two"] += 1 + elif f.severity == "Low": + o_count["three"] += 1 + a_count["three"] += 1 + + total = sum(o_count.values()) - o_count["closed"] if period_type == 0: counts.append( start_of_period.strftime("%b %d") + " - " + end_of_period.strftime("%b %d")) else: counts.append(start_of_period.strftime("%b %Y")) - counts.append(o_count['zero']) - counts.append(o_count['one']) - counts.append(o_count['two']) - counts.append(o_count['three']) + counts.append(o_count["zero"]) + counts.append(o_count["one"]) + counts.append(o_count["two"]) + counts.append(o_count["three"]) counts.append(total) - counts.append(o_count['closed']) + counts.append(o_count["closed"]) stuff.append(counts) o_stuff.append(counts[:-1]) @@ -686,10 +686,10 @@ def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff): + end_of_period.strftime("%b %d")) else: a_counts.append(start_of_period.strftime("%b %Y")) - a_counts.append(a_count['zero']) - a_counts.append(a_count['one']) - a_counts.append(a_count['two']) - a_counts.append(a_count['three']) + a_counts.append(a_count["zero"]) + a_counts.append(a_count["one"]) + a_counts.append(a_count["two"]) + a_counts.append(a_count["three"]) a_counts.append(a_total) a_stuff.append(a_counts) @@ -701,24 +701,24 @@ def add_breadcrumb(parent=None, request=None, clear=False): if clear: - request.session['dojo_breadcrumbs'] = None + request.session["dojo_breadcrumbs"] = None return else: - crumbs = request.session.get('dojo_breadcrumbs', None) + crumbs = request.session.get("dojo_breadcrumbs", None) if top_level or crumbs is None: crumbs = [ { - 'title': _('Home'), - 'url': reverse('home'), + "title": _("Home"), + "url": reverse("home"), }, ] if parent is not None and getattr(parent, "get_breadcrumbs", None): crumbs += parent.get_breadcrumbs() else: crumbs += [{ - 'title': title, - 'url': 
request.get_full_path() if url is None else url, + "title": title, + "url": request.get_full_path() if url is None else url, }] else: resolver = get_resolver(None).resolve @@ -726,23 +726,23 @@ def add_breadcrumb(parent=None, obj_crumbs = parent.get_breadcrumbs() if title is not None: obj_crumbs += [{ - 'title': title, - 'url': request.get_full_path() if url is None else url, + "title": title, + "url": request.get_full_path() if url is None else url, }] else: obj_crumbs = [{ - 'title': title, - 'url': request.get_full_path() if url is None else url, + "title": title, + "url": request.get_full_path() if url is None else url, }] for crumb in crumbs: - crumb_to_resolve = crumb['url'] if '?' not in crumb[ - 'url'] else crumb['url'][:crumb['url'].index('?')] + crumb_to_resolve = crumb["url"] if "?" not in crumb[ + "url"] else crumb["url"][:crumb["url"].index("?")] crumb_view = resolver(crumb_to_resolve) for obj_crumb in obj_crumbs: obj_crumb_to_resolve = obj_crumb[ - 'url'] if '?' not in obj_crumb['url'] else obj_crumb[ - 'url'][:obj_crumb['url'].index('?')] + "url"] if "?" not in obj_crumb["url"] else obj_crumb[ + "url"][:obj_crumb["url"].index("?")] obj_crumb_view = resolver(obj_crumb_to_resolve) if crumb_view.view_name == obj_crumb_view.view_name: @@ -757,7 +757,7 @@ def add_breadcrumb(parent=None, crumbs += obj_crumbs - request.session['dojo_breadcrumbs'] = crumbs + request.session["dojo_breadcrumbs"] = crumbs def is_title_in_breadcrumbs(title): @@ -765,18 +765,18 @@ def is_title_in_breadcrumbs(title): if request is None: return False - breadcrumbs = request.session.get('dojo_breadcrumbs') + breadcrumbs = request.session.get("dojo_breadcrumbs") if breadcrumbs is None: return False for breadcrumb in breadcrumbs: - if breadcrumb.get('title') == title: + if breadcrumb.get("title") == title: return True return False -def get_punchcard_data(objs, start_date, weeks, view='Finding'): +def get_punchcard_data(objs, start_date, weeks, view="Finding"): # use try catch to make sure any teething bugs in the bunchcard don't break the dashboard try: # gather findings over past half year, make sure to start on a sunday @@ -785,16 +785,16 @@ def get_punchcard_data(objs, start_date, weeks, view='Finding'): # reminder: The first week of a year is the one that contains the year’s first Thursday # so we could have for 29/12/2019: week=1 and year=2019 :-D. 
So using week number from db is not practical - if view == 'Finding': + if view == "Finding": severities_by_day = objs.filter(created__date__gte=first_sunday).filter(created__date__lt=last_sunday) \ - .values('created__date') \ - .annotate(count=Count('id')) \ - .order_by('created__date') - elif view == 'Endpoint': + .values("created__date") \ + .annotate(count=Count("id")) \ + .order_by("created__date") + elif view == "Endpoint": severities_by_day = objs.filter(date__gte=first_sunday).filter(date__lt=last_sunday) \ - .values('date') \ - .annotate(count=Count('id')) \ - .order_by('date') + .values("date") \ + .annotate(count=Count("id")) \ + .order_by("date") # return empty stuff if no findings to be statted if severities_by_day.count() <= 0: return None, None @@ -822,16 +822,16 @@ def get_punchcard_data(objs, start_date, weeks, view='Finding'): start_of_next_week = start_of_week + relativedelta(weeks=1) for day in severities_by_day: - if view == 'Finding': - created = day['created__date'] - elif view == 'Endpoint': - created = day['date'] - day_count = day['count'] + if view == "Finding": + created = day["created__date"] + elif view == "Endpoint": + created = day["date"] + day_count = day["count"] created = timezone.make_aware(datetime.combine(created, datetime.min.time())) if created < start_of_week: - raise ValueError('date found outside supported range: ' + str(created)) + raise ValueError("date found outside supported range: " + str(created)) else: if created >= start_of_week and created < start_of_next_week: # add day count to current week data @@ -877,7 +877,7 @@ def get_punchcard_data(objs, start_date, weeks, view='Finding'): return punchcard, ticks except Exception as e: - logger.exception('Not showing punchcard graph due to exception gathering data', e) + logger.exception("Not showing punchcard graph due to exception gathering data", e) return None, None @@ -895,16 +895,16 @@ def get_period_counts_legacy(findings, accepted_findings, period_interval, start_date, - relative_delta='months'): + relative_delta="months"): opened_in_period = [] accepted_in_period = [] opened_in_period.append( - ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']) + ["Timestamp", "Date", "S0", "S1", "S2", "S3", "Total", "Closed"]) accepted_in_period.append( - ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']) + ["Timestamp", "Date", "S0", "S1", "S2", "S3", "Total", "Closed"]) for x in range(-1, period_interval): - if relative_delta == 'months': + if relative_delta == "months": # make interval the first through last of month end_date = (start_date + relativedelta(months=x)) + relativedelta( day=1, months=+1, days=-1) @@ -941,13 +941,13 @@ def get_period_counts_legacy(findings, for finding in findings: if new_date <= datetime.combine(finding.date, datetime.min.time( )).replace(tzinfo=timezone.get_current_timezone()) <= end_date: - if finding.severity == 'Critical': + if finding.severity == "Critical": crit_count += 1 - elif finding.severity == 'High': + elif finding.severity == "High": high_count += 1 - elif finding.severity == 'Medium': + elif finding.severity == "Medium": med_count += 1 - elif finding.severity == 'Low': + elif finding.severity == "Low": low_count += 1 total = crit_count + high_count + med_count + low_count @@ -960,13 +960,13 @@ def get_period_counts_legacy(findings, ] if risks_a is not None: for finding in risks_a: - if finding.severity == 'Critical': + if finding.severity == "Critical": crit_count += 1 - elif finding.severity == 'High': + elif 
finding.severity == "High": high_count += 1 - elif finding.severity == 'Medium': + elif finding.severity == "Medium": med_count += 1 - elif finding.severity == 'Low': + elif finding.severity == "Low": low_count += 1 total = crit_count + high_count + med_count + low_count @@ -975,8 +975,8 @@ def get_period_counts_legacy(findings, crit_count, high_count, med_count, low_count, total]) return { - 'opened_per_period': opened_in_period, - 'accepted_per_period': accepted_in_period, + "opened_per_period": opened_in_period, + "accepted_per_period": accepted_in_period, } @@ -985,7 +985,7 @@ def get_period_counts(findings, accepted_findings, period_interval, start_date, - relative_delta='months'): + relative_delta="months"): tz = timezone.get_current_timezone() @@ -995,14 +995,14 @@ def get_period_counts(findings, active_in_period = [] accepted_in_period = [] opened_in_period.append( - ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']) + ["Timestamp", "Date", "S0", "S1", "S2", "S3", "Total", "Closed"]) active_in_period.append( - ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']) + ["Timestamp", "Date", "S0", "S1", "S2", "S3", "Total", "Closed"]) accepted_in_period.append( - ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']) + ["Timestamp", "Date", "S0", "S1", "S2", "S3", "Total", "Closed"]) for x in range(-1, period_interval): - if relative_delta == 'months': + if relative_delta == "months": # make interval the first through last of month end_date = (start_date + relativedelta(months=x)) + relativedelta( day=1, months=+1, days=-1) @@ -1058,22 +1058,22 @@ def get_period_counts(findings, f_time = finding.date if f_time <= end_date: - if severity == 'Critical': + if severity == "Critical": if new_date <= f_time: f_crit_count += 1 if active: active_crit_count += 1 - elif severity == 'High': + elif severity == "High": if new_date <= f_time: f_high_count += 1 if active: active_high_count += 1 - elif severity == 'Medium': + elif severity == "Medium": if new_date <= f_time: f_med_count += 1 if active: active_med_count += 1 - elif severity == 'Low': + elif severity == "Low": if new_date <= f_time: f_low_count += 1 if active: @@ -1085,13 +1085,13 @@ def get_period_counts(findings, severity = finding.severity except: severity = finding.finding.severity - if severity == 'Critical': + if severity == "Critical": ra_crit_count += 1 - elif severity == 'High': + elif severity == "High": ra_high_count += 1 - elif severity == 'Medium': + elif severity == "Medium": ra_med_count += 1 - elif severity == 'Low': + elif severity == "Low": ra_low_count += 1 total = f_crit_count + f_high_count + f_med_count + f_low_count @@ -1111,9 +1111,9 @@ def get_period_counts(findings, active_crit_count, active_high_count, active_med_count, active_low_count, total]) return { - 'opened_per_period': opened_in_period, - 'accepted_per_period': accepted_in_period, - 'active_per_period': active_in_period, + "opened_per_period": opened_in_period, + "accepted_per_period": accepted_in_period, + "active_per_period": active_in_period, } @@ -1137,9 +1137,9 @@ def opened_in_period(start_date, end_date, **kwargs): out_of_scope=False, mitigated__isnull=True, severity__in=( - 'Critical', 'High', 'Medium', - 'Low')).values('numerical_severity').annotate( - Count('numerical_severity')).order_by('numerical_severity') + "Critical", "High", "Medium", + "Low")).values("numerical_severity").annotate( + Count("numerical_severity")).order_by("numerical_severity") total_opened_in_period = Finding.objects.filter( 
date__range=[start_date, end_date], **kwargs, @@ -1148,41 +1148,41 @@ def opened_in_period(start_date, end_date, **kwargs): duplicate=False, out_of_scope=False, mitigated__isnull=True, - severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate( + severity__in=("Critical", "High", "Medium", "Low")).aggregate( total=Sum( Case( When( - severity__in=('Critical', 'High', 'Medium', 'Low'), + severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'] + output_field=IntegerField())))["total"] oip = { - 'S0': + "S0": 0, - 'S1': + "S1": 0, - 'S2': + "S2": 0, - 'S3': + "S3": 0, - 'Total': + "Total": total_opened_in_period, - 'start_date': + "start_date": start_date, - 'end_date': + "end_date": end_date, - 'closed': + "closed": Finding.objects.filter( mitigated__date__range=[start_date, end_date], **kwargs, - severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate( + severity__in=("Critical", "High", "Medium", "Low")).aggregate( total=Sum( Case( When( - severity__in=('Critical', 'High', 'Medium', 'Low'), + severity__in=("Critical", "High", "Medium", "Low"), then=Value(1)), - output_field=IntegerField())))['total'], - 'to_date_total': + output_field=IntegerField())))["total"], + "to_date_total": Finding.objects.filter( date__lte=end_date.date(), verified=True, @@ -1191,11 +1191,11 @@ def opened_in_period(start_date, end_date, **kwargs): out_of_scope=False, mitigated__isnull=True, **kwargs, - severity__in=('Critical', 'High', 'Medium', 'Low')).count(), + severity__in=("Critical", "High", "Medium", "Low")).count(), } for o in opened_in_period: - oip[o['numerical_severity']] = o['numerical_severity__count'] + oip[o["numerical_severity"]] = o["numerical_severity__count"] return oip @@ -1218,14 +1218,14 @@ def __iter__(self): def get_cal_event(start_date, end_date, summary, description, uid): cal = vobject.iCalendar() - cal.add('vevent') - cal.vevent.add('summary').value = summary - cal.vevent.add('description').value = description - start = cal.vevent.add('dtstart') + cal.add("vevent") + cal.vevent.add("summary").value = summary + cal.vevent.add("description").value = description + start = cal.vevent.add("dtstart") start.value = start_date - end = cal.vevent.add('dtend') + end = cal.vevent.add("dtend") end.value = end_date - cal.vevent.add('uid').value = uid + cal.vevent.add("uid").value = uid return cal @@ -1238,9 +1238,9 @@ def named_month(month_number): def normalize_query(query_string, findterms=re.compile(r'"([^"]+)"|(\S+)').findall, - normspace=re.compile(r'\s{2,}').sub): + normspace=re.compile(r"\s{2,}").sub): return [ - normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string) + normspace(" ", (t[0] or t[1]).strip()) for t in findterms(query_string) ] @@ -1271,8 +1271,8 @@ def build_query(query_string, search_fields): def template_search_helper(fields=None, query_string=None): if not fields: fields = [ - 'title', - 'description', + "title", + "description", ] findings = Finding_Template.objects.all() @@ -1285,13 +1285,13 @@ def template_search_helper(fields=None, query_string=None): return found_entries -def get_page_items(request, items, page_size, prefix=''): +def get_page_items(request, items, page_size, prefix=""): return get_page_items_and_count(request, items, page_size, prefix=prefix, do_count=False) -def get_page_items_and_count(request, items, page_size, prefix='', do_count=True): - page_param = prefix + 'page' - page_size_param = prefix + 'page_size' +def get_page_items_and_count(request, items, page_size, 
prefix="", do_count=True): + page_param = prefix + "page" + page_size_param = prefix + "page_size" page = request.GET.get(page_param, 1) size = request.GET.get(page_size_param, page_size) @@ -1311,24 +1311,24 @@ def get_page_items_and_count(request, items, page_size, prefix='', do_count=True def handle_uploaded_threat(f, eng): _name, extension = os.path.splitext(f.name) # Check if threat folder exist. - if not os.path.isdir(settings.MEDIA_ROOT + '/threat/'): + if not os.path.isdir(settings.MEDIA_ROOT + "/threat/"): # Create the folder - os.mkdir(settings.MEDIA_ROOT + '/threat/') - with open(settings.MEDIA_ROOT + f'/threat/{eng.id}{extension}', - 'wb+') as destination: + os.mkdir(settings.MEDIA_ROOT + "/threat/") + with open(settings.MEDIA_ROOT + f"/threat/{eng.id}{extension}", + "wb+") as destination: for chunk in f.chunks(): destination.write(chunk) - eng.tmodel_path = settings.MEDIA_ROOT + f'/threat/{eng.id}{extension}' + eng.tmodel_path = settings.MEDIA_ROOT + f"/threat/{eng.id}{extension}" eng.save() def handle_uploaded_selenium(f, cred): _name, extension = os.path.splitext(f.name) - with open(settings.MEDIA_ROOT + f'/selenium/{cred.id}{extension}', - 'wb+') as destination: + with open(settings.MEDIA_ROOT + f"/selenium/{cred.id}{extension}", + "wb+") as destination: for chunk in f.chunks(): destination.write(chunk) - cred.selenium_script = settings.MEDIA_ROOT + f'/selenium/{cred.id}{extension}' + cred.selenium_script = settings.MEDIA_ROOT + f"/selenium/{cred.id}{extension}" cred.save() @@ -1339,9 +1339,9 @@ def handle_uploaded_selenium(f, cred): def add_external_issue(find, external_issue_provider, **kwargs): eng = Engagement.objects.get(test=find.test) prod = Product.objects.get(engagement=eng) - logger.debug('adding external issue with provider: ' + external_issue_provider) + logger.debug("adding external issue with provider: " + external_issue_provider) - if external_issue_provider == 'github': + if external_issue_provider == "github": add_external_issue_github(find, prod, eng) @@ -1353,7 +1353,7 @@ def update_external_issue(find, old_status, external_issue_provider, **kwargs): prod = Product.objects.get(engagement=Engagement.objects.get(test=find.test)) eng = Engagement.objects.get(test=find.test) - if external_issue_provider == 'github': + if external_issue_provider == "github": update_external_issue_github(find, prod, eng) @@ -1365,7 +1365,7 @@ def close_external_issue(find, note, external_issue_provider, **kwargs): prod = Product.objects.get(engagement=Engagement.objects.get(test=find.test)) eng = Engagement.objects.get(test=find.test) - if external_issue_provider == 'github': + if external_issue_provider == "github": close_external_issue_github(find, note, prod, eng) @@ -1377,12 +1377,12 @@ def reopen_external_issue(find, note, external_issue_provider, **kwargs): prod = Product.objects.get(engagement=Engagement.objects.get(test=find.test)) eng = Engagement.objects.get(test=find.test) - if external_issue_provider == 'github': + if external_issue_provider == "github": reopen_external_issue_github(find, note, prod, eng) def process_tag_notifications(request, note, parent_url, parent_title): - regex = re.compile(r'(?:\A|\s)@(\w+)\b') + regex = re.compile(r"(?:\A|\s)@(\w+)\b") usernames_to_check = set(un.lower() for un in regex.findall(note.entry)) # noqa: C401 @@ -1397,12 +1397,12 @@ def process_tag_notifications(request, note, parent_url, parent_title): note.entry += "..." 
create_notification( - event='user_mentioned', + event="user_mentioned", section=parent_title, note=note, - title=f'{request.user} jotted a note', + title=f"{request.user} jotted a note", url=parent_url, - icon='commenting', + icon="commenting", recipients=users_to_notify) @@ -1431,12 +1431,12 @@ def decrypt(key, iv, encrypted_text): def _pad_string(value): length = len(value) pad_size = 16 - (length % 16) - return value.ljust(length + pad_size, b'\x00') + return value.ljust(length + pad_size, b"\x00") def _unpad_string(value): if value and value is not None: - value = value.rstrip(b'\x00') + value = value.rstrip(b"\x00") return value @@ -1448,7 +1448,7 @@ def dojo_crypto_encrypt(plaintext): iv = os.urandom(16) data = prepare_for_save( - iv, encrypt(key, iv, plaintext.encode('utf-8'))) + iv, encrypt(key, iv, plaintext.encode("utf-8"))) return data @@ -1458,16 +1458,16 @@ def prepare_for_save(iv, encrypted_value): if encrypted_value and encrypted_value is not None: binascii.b2a_hex(encrypted_value).rstrip() - stored_value = "AES.1:" + binascii.b2a_hex(iv).decode('utf-8') + ":" + encrypted_value.decode('utf-8') + stored_value = "AES.1:" + binascii.b2a_hex(iv).decode("utf-8") + ":" + encrypted_value.decode("utf-8") return stored_value def get_db_key(): db_key = None - if hasattr(settings, 'DB_KEY'): + if hasattr(settings, "DB_KEY"): db_key = settings.DB_KEY db_key = binascii.b2a_hex( - hashlib.sha256(db_key.encode('utf-8')).digest().rstrip())[:32] + hashlib.sha256(db_key.encode("utf-8")).digest().rstrip())[:32] return db_key @@ -1486,7 +1486,7 @@ def prepare_for_view(encrypted_value): try: decrypted_value = decrypt(key, iv, value) - decrypted_value = decrypted_value.decode('utf-8') + decrypted_value = decrypted_value.decode("utf-8") except UnicodeDecodeError: decrypted_value = "" @@ -1509,33 +1509,33 @@ def get_setting(setting): def calculate_grade(product, *args, **kwargs): system_settings = System_Settings.objects.get() if not product: - logger.warning('ignoring calculate product for product None!') + logger.warning("ignoring calculate product for product None!") return if system_settings.enable_product_grade: - logger.debug('calculating product grade for %s:%s', product.id, product.name) + logger.debug("calculating product grade for %s:%s", product.id, product.name) severity_values = Finding.objects.filter( - ~Q(severity='Info'), + ~Q(severity="Info"), active=True, duplicate=False, verified=True, false_p=False, - test__engagement__product=product).values('severity').annotate( - Count('numerical_severity')).order_by() + test__engagement__product=product).values("severity").annotate( + Count("numerical_severity")).order_by() low = 0 medium = 0 high = 0 critical = 0 for severity_count in severity_values: - if severity_count['severity'] == "Critical": - critical = severity_count['numerical_severity__count'] - elif severity_count['severity'] == "High": - high = severity_count['numerical_severity__count'] - elif severity_count['severity'] == "Medium": - medium = severity_count['numerical_severity__count'] - elif severity_count['severity'] == "Low": - low = severity_count['numerical_severity__count'] + if severity_count["severity"] == "Critical": + critical = severity_count["numerical_severity__count"] + elif severity_count["severity"] == "High": + high = severity_count["numerical_severity__count"] + elif severity_count["severity"] == "Medium": + medium = severity_count["numerical_severity__count"] + elif severity_count["severity"] == "Low": + low = severity_count["numerical_severity__count"] 
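# The lines below feed these severity counts into asteval:
# system_settings.product_grade holds user-editable source text defining
# grade_product(), and the Interpreter evaluates a call expression against it.
# A standalone sketch of that pattern, with an illustrative grading formula
# (not DefectDojo's default):
from asteval import Interpreter

aeval = Interpreter()
aeval(
    "def grade_product(crit, high, med, low):\n"
    "    return max(0, 100 - 10 * crit - 5 * high - 3 * med - low)\n"
)
grade = aeval("grade_product(1, 2, 3, 4)")  # 100 - 10 - 10 - 9 - 4
assert grade == 67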
aeval = Interpreter() aeval(system_settings.product_grade) grade_product = f"grade_product({critical}, {high}, {medium}, {low})" @@ -1606,9 +1606,9 @@ def __init__(self, product, title=None, tab=None): status_endpoint__risk_accepted=False, ) self.endpoints_count = active_endpoints.distinct().count() - self.endpoint_hosts_count = active_endpoints.values('host').distinct().count() + self.endpoint_hosts_count = active_endpoints.values("host").distinct().count() self.benchmark_type = Benchmark_Type.objects.filter( - enabled=True).order_by('name') + enabled=True).order_by("name") self.engagement = None def setTab(self, tab): @@ -1662,7 +1662,7 @@ def tab_view_count(product_id): endpoints = Endpoint.objects.filter(product=product).count() # benchmarks = Benchmark_Product_Summary.objects.filter(product=product, publish=True, benchmark_type__enabled=True).order_by('benchmark_type__name') benchmark_type = Benchmark_Type.objects.filter( - enabled=True).order_by('name') + enabled=True).order_by("name") return product, engagements, open_findings, endpoints, benchmark_type @@ -1707,7 +1707,7 @@ def apply_cwe_to_template(finding, override=False): def truncate_with_dots(the_string, max_length_including_dots): if not the_string: return the_string - return (the_string[:max_length_including_dots - 3] + '...' if len(the_string) > max_length_including_dots else the_string) + return (the_string[:max_length_including_dots - 3] + "..." if len(the_string) > max_length_including_dots else the_string) def max_safe(list): @@ -1722,7 +1722,7 @@ def get_site_url(): if settings.SITE_URL: return settings.SITE_URL else: - logger.warning('SITE URL undefined in settings, full_url cannot be created') + logger.warning("SITE URL undefined in settings, full_url cannot be created") return "settings.SITE_URL" @@ -1739,10 +1739,10 @@ def user_post_save(sender, instance, created, **kwargs): notifications.pk = None notifications.template = False notifications.user = instance - logger.info('creating default set (from template) of notifications for: ' + str(instance)) + logger.info("creating default set (from template) of notifications for: " + str(instance)) except Exception: notifications = Notifications(user=instance) - logger.info('creating default set of notifications for: ' + str(instance)) + logger.info("creating default set of notifications for: " + str(instance)) notifications.save() @@ -1750,7 +1750,7 @@ def user_post_save(sender, instance, created, **kwargs): if system_settings.default_group and system_settings.default_group_role: if (system_settings.default_group_email_pattern and re.fullmatch(system_settings.default_group_email_pattern, instance.email)) or \ not system_settings.default_group_email_pattern: - logger.info('setting default group for: ' + str(instance)) + logger.info("setting default group for: " + str(instance)) dojo_group_member = Dojo_Group_Member( group=system_settings.default_group, user=instance, @@ -1775,11 +1775,11 @@ def is_safe_url(url): def get_return_url(request): - return_url = request.POST.get('return_url', None) + return_url = request.POST.get("return_url", None) # print('return_url from POST: ', return_url) if return_url is None or not return_url.strip(): # for some reason using request.GET.get('return_url') never works - return_url = request.GET['return_url'] if 'return_url' in request.GET else None + return_url = request.GET["return_url"] if "return_url" in request.GET else None # print('return_url from GET: ', return_url) return return_url if return_url else None @@ -1794,7 +1794,7 @@ def 
redirect_to_return_url_or_else(request, or_else): elif or_else: return redirect(request, or_else) else: - messages.add_message(request, messages.ERROR, 'Unable to redirect anywhere.', extra_tags='alert-danger') + messages.add_message(request, messages.ERROR, "Unable to redirect anywhere.", extra_tags="alert-danger") return redirect(request, request.get_full_path()) @@ -1802,7 +1802,7 @@ def redirect(request, redirect_to): """Only allow redirects to allowed_hosts to prevent open redirects""" if is_safe_url(redirect_to): return HttpResponseRedirect(redirect_to) - msg = 'invalid redirect, host and scheme not in allowed_hosts' + msg = "invalid redirect, host and scheme not in allowed_hosts" raise ValueError(msg) @@ -1870,15 +1870,15 @@ def _add_notification(finding, kind): def _notification_title_for_finding(finding, kind, sla_age): title = f"Finding {finding.id} - " - if kind == 'breached': + if kind == "breached": abs_sla_age = abs(sla_age) period = "day" if abs_sla_age > 1: period = "days" title += "SLA breached by %d %s! Overdue notice" % (abs_sla_age, period) - elif kind == 'prebreach': + elif kind == "prebreach": title += "SLA pre-breach warning - %d day(s) left" % (sla_age) - elif kind == 'breaching': + elif kind == "breaching": title += "SLA is breaching today" return title @@ -1898,10 +1898,10 @@ def _create_notifications(): title = _notification_title_for_finding(n.finding, kind, n.finding.sla_days_remaining()) create_notification( - event='sla_breach', + event="sla_breach", title=title, finding=n.finding, - url=reverse('view_finding', args=(n.finding.id,)), + url=reverse("view_finding", args=(n.finding.id,)), ) if n.do_jira_sla_comment: @@ -1914,7 +1914,7 @@ def _create_notifications(): title_combined = f"SLA alert ({kind}): product type '{pt}', product '{p}'" product = combined_notifications[pt][p][kind][0].finding.test.engagement.product create_notification( - event='sla_breach_combined', + event="sla_breach_combined", title=title_combined, product=product, findings=findings_list, @@ -1962,7 +1962,7 @@ def _create_notifications(): # A finding with 'Info' severity will not be considered for SLA notifications (not in model) findings = Finding.objects \ .filter(query) \ - .exclude(severity='Info') \ + .exclude(severity="Info") \ .exclude(id__in=no_jira_findings) for finding in findings: @@ -2011,19 +2011,19 @@ def _create_notifications(): logger.info(f"Finding {finding.id} has breached by {abs(sla_age)} days.") abs_sla_age = abs(sla_age) if not system_settings.enable_notify_sla_exponential_backoff or abs_sla_age == 1 or (abs_sla_age & (abs_sla_age - 1) == 0): - _add_notification(finding, 'breached') + _add_notification(finding, "breached") else: logger.info("Skipping notification as exponential backoff is enabled and the SLA is not a power of two") # The finding is within the pre-breach period elif (sla_age > 0) and (sla_age <= settings.SLA_NOTIFY_PRE_BREACH): pre_breach_count += 1 logger.info(f"Security SLA pre-breach warning for finding ID {finding.id}. Days remaining: {sla_age}") - _add_notification(finding, 'prebreach') + _add_notification(finding, "prebreach") # The finding breaches the SLA today elif (sla_age == 0): at_breach_count += 1 logger.info(f"Security SLA breach warning. 
Finding ID {finding.id} breaching today ({sla_age})") - _add_notification(finding, 'breaching') + _add_notification(finding, "breaching") _create_notifications() logger.info(f"SLA run results: Pre-breach: {pre_breach_count}, at-breach: {at_breach_count}, post-breach: {post_breach_count}, post-breach-no-notify: {post_breach_no_notify_count}, with-jira: {jira_count}, TOTAL: {total_count}") @@ -2033,7 +2033,7 @@ def _create_notifications(): def get_words_for_field(model, fieldname): - max_results = getattr(settings, 'MAX_AUTOCOMPLETE_WORDS', 20000) + max_results = getattr(settings, "MAX_AUTOCOMPLETE_WORDS", 20000) models = None if model == Finding: models = get_authorized_findings(Permissions.Finding_View, user=get_current_user()) @@ -2042,7 +2042,7 @@ def get_words_for_field(model, fieldname): if models is not None: words = [ - word for field_value in models.order_by().filter(**{f'{fieldname}__isnull': False}).values_list(fieldname, flat=True).distinct()[:max_results] for word in (field_value.split() if field_value else []) if len(word) > 2 + word for field_value in models.order_by().filter(**{f"{fieldname}__isnull": False}).values_list(fieldname, flat=True).distinct()[:max_results] for word in (field_value.split() if field_value else []) if len(word) > 2 ] else: words = [] @@ -2065,8 +2065,8 @@ def create_bleached_link(url, title): link += title link += '">' link += title - link += '' - return bleach.clean(link, tags={'a'}, attributes={'a': ['href', 'target', 'title']}) + link += "" + return bleach.clean(link, tags={"a"}, attributes={"a": ["href", "target", "title"]}) def get_object_or_none(klass, *args, **kwargs): @@ -2080,10 +2080,10 @@ def get_object_or_none(klass, *args, **kwargs): """ queryset = klass - if hasattr(klass, '_default_manager'): + if hasattr(klass, "_default_manager"): queryset = klass._default_manager.all() - if not hasattr(queryset, 'get'): + if not hasattr(queryset, "get"): klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__ msg = ( "First argument to get_object_or_None() must be a Model, Manager, " @@ -2107,10 +2107,10 @@ def get_last_object_or_none(klass, *args, **kwargs): """ queryset = klass - if hasattr(klass, '_default_manager'): + if hasattr(klass, "_default_manager"): queryset = klass._default_manager.all() - if not hasattr(queryset, 'get'): + if not hasattr(queryset, "get"): klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__ msg = ( "First argument to get_last_object_or_None() must be a Model, Manager, " @@ -2118,8 +2118,8 @@ def get_last_object_or_none(klass, *args, **kwargs): ) raise ValueError(msg) try: - results = queryset.filter(*args, **kwargs).order_by('id') - logger.debug('last_object_or_none: %s', results.query) + results = queryset.filter(*args, **kwargs).order_by("id") + logger.debug("last_object_or_none: %s", results.query) return results.last() except queryset.model.DoesNotExist: return None @@ -2130,7 +2130,7 @@ def add_success_message_to_response(message): messages.add_message(get_current_request(), messages.SUCCESS, message, - extra_tags='alert-success') + extra_tags="alert-success") def add_error_message_to_response(message): @@ -2138,7 +2138,7 @@ def add_error_message_to_response(message): messages.add_message(get_current_request(), messages.ERROR, message, - extra_tags='alert-danger') + extra_tags="alert-danger") def add_field_errors_to_response(form): @@ -2147,7 +2147,7 @@ def add_field_errors_to_response(form): add_error_message_to_response(error) -def 
mass_model_updater(model_type, models, function, fields, page_size=1000, order='asc', log_prefix=''): +def mass_model_updater(model_type, models, function, fields, page_size=1000, order="asc", log_prefix=""): """ Using the default for model in queryset can be slow for large querysets. Even when using paging as LIMIT and OFFSET are slow on database. In some cases we can optimize this process very well if we can process the models ordered by id. @@ -2159,30 +2159,30 @@ def mass_model_updater(model_type, models, function, fields, page_size=1000, ord # force ordering by id to make our paging work last_id = None models = models.order_by() - if order == 'asc': - logger.debug('ordering ascending') - models = models.order_by('id') + if order == "asc": + logger.debug("ordering ascending") + models = models.order_by("id") last_id = 0 - elif order == 'desc': - logger.debug('ordering descending') - models = models.order_by('-id') + elif order == "desc": + logger.debug("ordering descending") + models = models.order_by("-id") # get maximum, which is the first due to descending order last_id = models.first().id + 1 else: - msg = 'order must be ''asc'' or ''desc''' + msg = "order must be ""asc"" or ""desc""" raise ValueError(msg) # use filter to make count fast on mysql total_count = models.filter(id__gt=0).count() - logger.debug('%s found %d models for mass update:', log_prefix, total_count) + logger.debug("%s found %d models for mass update:", log_prefix, total_count) i = 0 batch = [] total_pages = (total_count // page_size) + 2 # logger.info('pages to process: %d', total_pages) - logger.debug('%s%s out of %s models processed ...', log_prefix, i, total_count) + logger.debug("%s%s out of %s models processed ...", log_prefix, i, total_count) for p in range(1, total_pages): # logger.info('page: %d', p) - if order == 'asc': + if order == "asc": page = models.filter(id__gt=last_id)[:page_size] else: page = models.filter(id__lt=last_id)[:page_size] @@ -2203,23 +2203,23 @@ def mass_model_updater(model_type, models, function, fields, page_size=1000, ord if fields: model_type.objects.bulk_update(batch, fields) batch = [] - logger.debug('%s%s out of %s models processed ...', log_prefix, i, total_count) + logger.debug("%s%s out of %s models processed ...", log_prefix, i, total_count) - logger.info('%s%s out of %s models processed ...', log_prefix, i, total_count) + logger.info("%s%s out of %s models processed ...", log_prefix, i, total_count) if fields: model_type.objects.bulk_update(batch, fields) batch = [] - logger.info('%s%s out of %s models processed ...', log_prefix, i, total_count) + logger.info("%s%s out of %s models processed ...", log_prefix, i, total_count) def to_str_typed(obj): """ for code that handles multiple types of objects, print not only __str__ but prefix the type of the object""" - return f'{type(obj)}: {obj}' + return f"{type(obj)}: {obj}" def get_product(obj): - logger.debug('getting product for %s:%s', type(obj), obj) + logger.debug("getting product for %s:%s", type(obj), obj) if not obj: return None @@ -2238,7 +2238,7 @@ def get_product(obj): def prod_name(obj): if not obj: - return 'Unknown' + return "Unknown" return get_product(obj).name @@ -2246,7 +2246,7 @@ def prod_name(obj): # Returns image locations by default (i.e. 
uploaded_files/09577eb1-6ccb-430b-bc82-0742d4c97a09.png) # if return_objects=True, return the FileUPload object instead of just the file location def get_file_images(obj, return_objects=False): - logger.debug('getting images for %s:%s', type(obj), obj) + logger.debug("getting images for %s:%s", type(obj), obj) files = None if not obj: return files @@ -2256,7 +2256,7 @@ def get_file_images(obj, return_objects=False): for file in files: file_name = file.file.name file_type = mimetypes.guess_type(file_name)[0] - if file_type and 'image' in file_type: + if file_type and "image" in file_type: if return_objects: images.append(file) else: @@ -2266,9 +2266,9 @@ def get_file_images(obj, return_objects=False): def get_enabled_notifications_list(): # Alerts need to enabled by default - enabled = ['alert'] + enabled = ["alert"] for choice in NOTIFICATION_CHOICES: - if get_system_setting(f'enable_{choice[0]}_notifications'): + if get_system_setting(f"enable_{choice[0]}_notifications"): enabled.append(choice[0]) return enabled @@ -2281,21 +2281,21 @@ def is_finding_groups_enabled(): class async_delete: def __init__(self, *args, **kwargs): self.mapping = { - 'Product_Type': [ - (Endpoint, 'product__prod_type'), - (Finding, 'test__engagement__product__prod_type'), - (Test, 'engagement__product__prod_type'), - (Engagement, 'product__prod_type'), - (Product, 'prod_type')], - 'Product': [ - (Endpoint, 'product'), - (Finding, 'test__engagement__product'), - (Test, 'engagement__product'), - (Engagement, 'product')], - 'Engagement': [ - (Finding, 'test__engagement'), - (Test, 'engagement')], - 'Test': [(Finding, 'test')], + "Product_Type": [ + (Endpoint, "product__prod_type"), + (Finding, "test__engagement__product__prod_type"), + (Test, "engagement__product__prod_type"), + (Engagement, "product__prod_type"), + (Product, "prod_type")], + "Product": [ + (Endpoint, "product"), + (Finding, "test__engagement__product"), + (Test, "engagement__product"), + (Engagement, "product")], + "Engagement": [ + (Finding, "test__engagement"), + (Test, "engagement")], + "Test": [(Finding, "test")], } @dojo_async_task @@ -2305,49 +2305,49 @@ def delete_chunk(self, objects, **kwargs): try: object.delete() except AssertionError: - logger.debug('ASYNC_DELETE: object has already been deleted elsewhere. Skipping') + logger.debug("ASYNC_DELETE: object has already been deleted elsewhere. Skipping") # The id must be None # The object has already been deleted elsewhere @dojo_async_task @app.task def delete(self, object, **kwargs): - logger.debug('ASYNC_DELETE: Deleting ' + self.get_object_name(object) + ': ' + str(object)) + logger.debug("ASYNC_DELETE: Deleting " + self.get_object_name(object) + ": " + str(object)) model_list = self.mapping.get(self.get_object_name(object), None) if model_list: # The object to be deleted was found in the object list self.crawl(object, model_list) else: # The object is not supported in async delete, delete normally - logger.debug('ASYNC_DELETE: ' + self.get_object_name(object) + ' async delete not supported. Deleteing normally: ' + str(object)) + logger.debug("ASYNC_DELETE: " + self.get_object_name(object) + " async delete not supported. 
Deleteing normally: " + str(object)) object.delete() @dojo_async_task @app.task def crawl(self, object, model_list, **kwargs): - logger.debug('ASYNC_DELETE: Crawling ' + self.get_object_name(object) + ': ' + str(object)) + logger.debug("ASYNC_DELETE: Crawling " + self.get_object_name(object) + ": " + str(object)) for model_info in model_list: model = model_info[0] model_query = model_info[1] filter_dict = {model_query: object} objects_to_delete = model.objects.filter(**filter_dict) - logger.debug('ASYNC_DELETE: Deleting ' + str(len(objects_to_delete)) + ' ' + self.get_object_name(model) + 's in chunks') + logger.debug("ASYNC_DELETE: Deleting " + str(len(objects_to_delete)) + " " + self.get_object_name(model) + "s in chunks") chunks = self.chunk_list(model, objects_to_delete) for chunk in chunks: - print('deleting', len(chunk), self.get_object_name(model)) + print("deleting", len(chunk), self.get_object_name(model)) self.delete_chunk(chunk) self.delete_chunk([object]) - logger.debug('ASYNC_DELETE: Successfully deleted ' + self.get_object_name(object) + ': ' + str(object)) + logger.debug("ASYNC_DELETE: Successfully deleted " + self.get_object_name(object) + ": " + str(object)) def chunk_list(self, model, list): chunk_size = get_setting("ASYNC_OBEJECT_DELETE_CHUNK_SIZE") # Break the list of objects into "chunk_size" lists chunk_list = [list[i:i + chunk_size] for i in range(0, len(list), chunk_size)] - logger.debug('ASYNC_DELETE: Split ' + self.get_object_name(model) + ' into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_size)) + logger.debug("ASYNC_DELETE: Split " + self.get_object_name(model) + " into " + str(len(chunk_list)) + " chunks of " + str(chunk_size)) return chunk_list def get_object_name(self, object): - if object.__class__.__name__ == 'ModelBase': + if object.__class__.__name__ == "ModelBase": return object.__name__ return object.__class__.__name__ @@ -2357,94 +2357,94 @@ def log_user_login(sender, request, user, **kwargs): # to cover more complex cases: # http://stackoverflow.com/questions/4581789/how-do-i-get-user-ip-address-in-django - logger.info('login user: {user} via ip: {ip}'.format( + logger.info("login user: {user} via ip: {ip}".format( user=user.username, - ip=request.META.get('REMOTE_ADDR'), + ip=request.META.get("REMOTE_ADDR"), )) @receiver(user_logged_out) def log_user_logout(sender, request, user, **kwargs): - logger.info('logout user: {user} via ip: {ip}'.format( + logger.info("logout user: {user} via ip: {ip}".format( user=user.username, - ip=request.META.get('REMOTE_ADDR'), + ip=request.META.get("REMOTE_ADDR"), )) @receiver(user_login_failed) def log_user_login_failed(sender, credentials, request, **kwargs): - if 'username' in credentials: - logger.warning('login failed for: {credentials} via ip: {ip}'.format( - credentials=credentials['username'], - ip=request.META['REMOTE_ADDR'], + if "username" in credentials: + logger.warning("login failed for: {credentials} via ip: {ip}".format( + credentials=credentials["username"], + ip=request.META["REMOTE_ADDR"], )) else: - logger.error('login failed because of missing username via ip: {ip}'.format( - ip=request.META['REMOTE_ADDR'], + logger.error("login failed because of missing username via ip: {ip}".format( + ip=request.META["REMOTE_ADDR"], )) def get_password_requirements_string(): - s = 'Password must contain {minimum_length} to {maximum_length} characters'.format( - minimum_length=int(get_system_setting('minimum_password_length')), - maximum_length=int(get_system_setting('maximum_password_length'))) - - if 
bool(get_system_setting('lowercase_character_required')): - s += ', one lowercase letter (a-z)' - if bool(get_system_setting('uppercase_character_required')): - s += ', one uppercase letter (A-Z)' - if bool(get_system_setting('number_character_required')): - s += ', one number (0-9)' - if bool(get_system_setting('special_character_required')): + s = "Password must contain {minimum_length} to {maximum_length} characters".format( + minimum_length=int(get_system_setting("minimum_password_length")), + maximum_length=int(get_system_setting("maximum_password_length"))) + + if bool(get_system_setting("lowercase_character_required")): + s += ", one lowercase letter (a-z)" + if bool(get_system_setting("uppercase_character_required")): + s += ", one uppercase letter (A-Z)" + if bool(get_system_setting("number_character_required")): + s += ", one number (0-9)" + if bool(get_system_setting("special_character_required")): s += ', one special character (()[]{}|\\`~!@#$%^&*_-+=;:\'",<>./?)' - if s.count(', ') == 1: - password_requirements_string = s.rsplit(', ', 1)[0] + ' and ' + s.rsplit(', ', 1)[1] - elif s.count(', ') > 1: - password_requirements_string = s.rsplit(', ', 1)[0] + ', and ' + s.rsplit(', ', 1)[1] + if s.count(", ") == 1: + password_requirements_string = s.rsplit(", ", 1)[0] + " and " + s.rsplit(", ", 1)[1] + elif s.count(", ") > 1: + password_requirements_string = s.rsplit(", ", 1)[0] + ", and " + s.rsplit(", ", 1)[1] else: password_requirements_string = s - return password_requirements_string + '.' + return password_requirements_string + "." def get_zero_severity_level(): - return {'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0} + return {"Critical": 0, "High": 0, "Medium": 0, "Low": 0, "Info": 0} def sum_by_severity_level(metrics): values = get_zero_severity_level() for m in metrics: - if values.get(m.get('severity')) is not None: - values[m.get('severity')] += 1 + if values.get(m.get("severity")) is not None: + values[m.get("severity")] += 1 return values def calculate_finding_age(f): - start_date = f.get('date', None) + start_date = f.get("date", None) if start_date and isinstance(start_date, str): start_date = parse(start_date).date() if settings.SLA_BUSINESS_DAYS: - if f.get('mitigated'): - mitigated_date = f.get('mitigated') + if f.get("mitigated"): + mitigated_date = f.get("mitigated") if isinstance(mitigated_date, datetime): - mitigated_date = f.get('mitigated').date() - days = get_work_days(f.get('date'), mitigated_date) + mitigated_date = f.get("mitigated").date() + days = get_work_days(f.get("date"), mitigated_date) else: - days = get_work_days(f.get('date'), timezone.now().date()) + days = get_work_days(f.get("date"), timezone.now().date()) else: if isinstance(start_date, datetime): start_date = start_date.date() - if f.get('mitigated'): - mitigated_date = f.get('mitigated') + if f.get("mitigated"): + mitigated_date = f.get("mitigated") if isinstance(mitigated_date, datetime): - mitigated_date = f.get('mitigated').date() + mitigated_date = f.get("mitigated").date() diff = mitigated_date - start_date else: diff = timezone.now().date() - start_date @@ -2468,28 +2468,28 @@ def get_open_findings_burndown(product): # count all findings older than 90 days that are still active OR will be mitigated/risk-accepted in the next 90 days for f in list(findings.filter(date__lt=start_date)): if f.active: - if f.severity == 'Critical': + if f.severity == "Critical": critical_count += 1 - if f.severity == 'High': + if f.severity == "High": high_count += 1 - if f.severity == 
'Medium': + if f.severity == "Medium": medium_count += 1 - if f.severity == 'Low': + if f.severity == "Low": low_count += 1 - if f.severity == 'Info': + if f.severity == "Info": info_count += 1 elif f.is_mitigated: f_mitigated_date = f.mitigated.timestamp() if f_mitigated_date >= start_date.timestamp(): - if f.severity == 'Critical': + if f.severity == "Critical": critical_count += 1 - if f.severity == 'High': + if f.severity == "High": high_count += 1 - if f.severity == 'Medium': + if f.severity == "Medium": medium_count += 1 - if f.severity == 'Low': + if f.severity == "Low": low_count += 1 - if f.severity == 'Info': + if f.severity == "Info": info_count += 1 elif f.risk_accepted: # simple risk acceptance does not have a risk acceptance object, so we fall back to creation date. @@ -2497,24 +2497,24 @@ def get_open_findings_burndown(product): if f.risk_acceptance: f_risk_accepted_date = f.risk_acceptance.created.timestamp() if f_risk_accepted_date >= start_date.timestamp(): - if f.severity == 'Critical': + if f.severity == "Critical": critical_count += 1 - if f.severity == 'High': + if f.severity == "High": high_count += 1 - if f.severity == 'Medium': + if f.severity == "Medium": medium_count += 1 - if f.severity == 'Low': + if f.severity == "Low": low_count += 1 - if f.severity == 'Info': + if f.severity == "Info": info_count += 1 - running_min, running_max = float('inf'), float('-inf') + running_min, running_max = float("inf"), float("-inf") past_90_days = { - 'Critical': [], - 'High': [], - 'Medium': [], - 'Low': [], - 'Info': [], + "Critical": [], + "High": [], + "Medium": [], + "Low": [], + "Info": [], } # count the number of open findings for the 90-day window @@ -2528,30 +2528,30 @@ def get_open_findings_burndown(product): # If a finding was opened on this day we add it to the counter of that day f_open_date = datetime.combine(f.date, datetime.min.time()).timestamp() if f_open_date >= d_start and f_open_date < d_end: - if f.severity == 'Critical': + if f.severity == "Critical": critical_count += 1 - if f.severity == 'High': + if f.severity == "High": high_count += 1 - if f.severity == 'Medium': + if f.severity == "Medium": medium_count += 1 - if f.severity == 'Low': + if f.severity == "Low": low_count += 1 - if f.severity == 'Info': + if f.severity == "Info": info_count += 1 # If a finding was mitigated on this day we subtract it if f.is_mitigated: f_mitigated_date = f.mitigated.timestamp() if f_mitigated_date >= d_start and f_mitigated_date < d_end: - if f.severity == 'Critical': + if f.severity == "Critical": critical_count -= 1 - if f.severity == 'High': + if f.severity == "High": high_count -= 1 - if f.severity == 'Medium': + if f.severity == "Medium": medium_count -= 1 - if f.severity == 'Low': + if f.severity == "Low": low_count -= 1 - if f.severity == 'Info': + if f.severity == "Info": info_count -= 1 # If a finding was risk accepted on this day we subtract it @@ -2560,15 +2560,15 @@ def get_open_findings_burndown(product): if f.risk_acceptance: f_risk_accepted_date = f.risk_acceptance.created.timestamp() if f_risk_accepted_date >= d_start and f_risk_accepted_date < d_end: - if f.severity == 'Critical': + if f.severity == "Critical": critical_count -= 1 - if f.severity == 'High': + if f.severity == "High": high_count -= 1 - if f.severity == 'Medium': + if f.severity == "Medium": medium_count -= 1 - if f.severity == 'Low': + if f.severity == "Low": low_count -= 1 - if f.severity == 'Info': + if f.severity == "Info": info_count -= 1 f_day = [critical_count, high_count, 
medium_count, low_count, info_count] @@ -2577,13 +2577,13 @@ def get_open_findings_burndown(product): if max(f_day) > running_max: running_max = max(f_day) - past_90_days['Critical'].append([d_start * 1000, critical_count]) - past_90_days['High'].append([d_start * 1000, high_count]) - past_90_days['Medium'].append([d_start * 1000, medium_count]) - past_90_days['Low'].append([d_start * 1000, low_count]) - past_90_days['Info'].append([d_start * 1000, info_count]) + past_90_days["Critical"].append([d_start * 1000, critical_count]) + past_90_days["High"].append([d_start * 1000, high_count]) + past_90_days["Medium"].append([d_start * 1000, medium_count]) + past_90_days["Low"].append([d_start * 1000, low_count]) + past_90_days["Info"].append([d_start * 1000, info_count]) - past_90_days['y_max'] = running_max - past_90_days['y_min'] = running_min + past_90_days["y_max"] = running_max + past_90_days["y_min"] = running_min return past_90_days diff --git a/dojo/views.py b/dojo/views.py index cd22e6ac2d..c31c0fa3b9 100644 --- a/dojo/views.py +++ b/dojo/views.py @@ -84,7 +84,7 @@ def action_history(request, cid, oid): if not authorized: raise PermissionDenied elif ct.model == "user": - user_has_configuration_permission_or_403(request.user, 'auth.view_user') + user_has_configuration_permission_or_403(request.user, "auth.view_user") else: if not request.user.is_superuser: raise PermissionDenied @@ -99,7 +99,7 @@ def action_history(request, cid, oid): product_tab.setEngagement(object_value.engagement) history = LogEntry.objects.filter(content_type=ct, - object_pk=obj.id).order_by('-timestamp') + object_pk=obj.id).order_by("-timestamp") log_entry_filter = LogEntryFilter(request.GET, queryset=history) paged_history = get_page_items(request, log_entry_filter.qs, 25) @@ -107,12 +107,12 @@ def action_history(request, cid, oid): messages.add_message( request, messages.WARNING, - 'Audit logging is currently disabled in System Settings.', - extra_tags='alert-danger') + "Audit logging is currently disabled in System Settings.", + extra_tags="alert-danger") - return render(request, 'dojo/action_history.html', + return render(request, "dojo/action_history.html", {"history": paged_history, - 'product_tab': product_tab, + "product_tab": product_tab, "filtered": history, "log_entry_filter": log_entry_filter, "obj": obj, @@ -123,25 +123,25 @@ def action_history(request, cid, oid): def manage_files(request, oid, obj_type): - if obj_type == 'Engagement': + if obj_type == "Engagement": obj = get_object_or_404(Engagement, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Engagement_Edit) - obj_vars = ('view_engagement', 'engagement_set') - elif obj_type == 'Test': + obj_vars = ("view_engagement", "engagement_set") + elif obj_type == "Test": obj = get_object_or_404(Test, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Test_Edit) - obj_vars = ('view_test', 'test_set') - elif obj_type == 'Finding': + obj_vars = ("view_test", "test_set") + elif obj_type == "Finding": obj = get_object_or_404(Finding, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Finding_Edit) - obj_vars = ('view_finding', 'finding_set') + obj_vars = ("view_finding", "finding_set") else: raise Http404 files_formset = ManageFileFormSet(queryset=obj.files.all()) error = False - if request.method == 'POST': + if request.method == "POST": files_formset = ManageFileFormSet( request.POST, request.FILES, queryset=obj.files.all()) if files_formset.is_valid(): @@ -168,24 +168,24 @@ def manage_files(request, oid, 
obj_type): messages.add_message( request, messages.SUCCESS, - 'Files updated successfully.', - extra_tags='alert-success') + "Files updated successfully.", + extra_tags="alert-success") else: error = True messages.add_message( request, messages.ERROR, - 'Please check form data and try again.', - extra_tags='alert-danger') + "Please check form data and try again.", + extra_tags="alert-danger") if not error: return HttpResponseRedirect(reverse(obj_vars[0], args=(oid, ))) return render( - request, 'dojo/manage_files.html', { - 'files_formset': files_formset, - 'obj': obj, - 'obj_type': obj_type, + request, "dojo/manage_files.html", { + "files_formset": files_formset, + "obj": obj, + "obj_type": obj_type, }) @@ -208,19 +208,19 @@ def protected_serve(request, path, document_root=None, show_indexes=False): def access_file(request, fid, oid, obj_type, url=False): - if obj_type == 'Engagement': + if obj_type == "Engagement": obj = get_object_or_404(Engagement, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Engagement_View) - elif obj_type == 'Test': + elif obj_type == "Test": obj = get_object_or_404(Test, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Test_View) - elif obj_type == 'Finding': + elif obj_type == "Finding": obj = get_object_or_404(Finding, pk=oid) user_has_permission_or_403(request.user, obj, Permissions.Finding_View) else: raise Http404 # If reaching this far, user must have permission to get file file = get_object_or_404(FileUpload, pk=fid) - redirect_url = f'{settings.MEDIA_ROOT}/{file.file.url.lstrip(settings.MEDIA_URL)}' + redirect_url = f"{settings.MEDIA_ROOT}/{file.file.url.lstrip(settings.MEDIA_URL)}" print(redirect_url) return FileResponse(open(redirect_url, "rb")) diff --git a/dojo/widgets.py b/dojo/widgets.py index 83d3267c31..91210094dd 100644 --- a/dojo/widgets.py +++ b/dojo/widgets.py @@ -5,12 +5,12 @@ # agrega los imports necesarios class TableCheckboxWidget(forms.widgets.Widget): - template_name = 'dojo/add_findings_as_accepted.html' + template_name = "dojo/add_findings_as_accepted.html" def __init__(self, *args, **kwargs): - self.findings = kwargs.pop('findings', []) - self.request = kwargs.pop('request', None) - self.page_number = kwargs.pop('page_number', 1) + self.findings = kwargs.pop("findings", []) + self.request = kwargs.pop("request", None) + self.page_number = kwargs.pop("page_number", 1) super().__init__(*args, **kwargs) def value_from_datadict(self, data, files, name): @@ -22,11 +22,11 @@ def render(self, name, value, attrs=None, renderer=None): paginator = Paginator(self.findings, 25) # 10 items per page page = paginator.get_page(page_number) context = { - 'name': name, - 'findings': page.object_list, - 'paginator': paginator, - 'page_number': page_number, - 'page': page, - 'page_param': 'apage', + "name": name, + "findings": page.object_list, + "paginator": paginator, + "page_number": page_number, + "page": page, + "page_param": "apage", } return render_to_string(self.template_name, context) diff --git a/dojo/wsgi.py b/dojo/wsgi.py index 0e8b2c7f8c..2b1076e372 100644 --- a/dojo/wsgi.py +++ b/dojo/wsgi.py @@ -31,7 +31,7 @@ # Shouldn't apply to docker-compose dev mode (1 process, 1 thread), but may be needed when enabling debugging in other contexts def is_debugger_listening(port): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - return s.connect_ex(('127.0.0.1', port)) + return s.connect_ex(("127.0.0.1", port)) debugpy_port = os.environ.get("DD_DEBUG_PORT") if os.environ.get("DD_DEBUG_PORT") else 3000 diff 
--git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock index 9bd08b08b0..c50b042bf8 100644 --- a/helm/defectdojo/Chart.lock +++ b/helm/defectdojo/Chart.lock @@ -4,15 +4,15 @@ dependencies: version: 9.19.1 - name: postgresql repository: https://charts.bitnami.com/bitnami - version: 15.5.11 + version: 15.5.15 - name: postgresql-ha repository: https://charts.bitnami.com/bitnami version: 9.4.11 - name: rabbitmq repository: https://charts.bitnami.com/bitnami - version: 14.4.4 + version: 14.4.6 - name: redis repository: https://charts.bitnami.com/bitnami - version: 19.6.0 -digest: sha256:d00f56b5b3cf6525a4e06c82789ec7dd68526959ce38ea50e5251151535dcd8b -generated: "2024-07-01T16:26:01.747085461Z" + version: 19.6.1 +digest: sha256:933d2f3df74ce23fe4f3a73c3e581c6e0d847f3af8cd56130cdd740a06d4323f +generated: "2024-07-09T17:25:08.104375211Z" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 58e421ed49..3fddc40373 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "2.37.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.138-dev +version: 1.6.140-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 1faba1520b..f0d9bc130b 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -131,7 +131,6 @@ celery: worker: annotations: {} affinity: {} - logLevel: INFO nodeSelector: {} replicas: 1 resources: @@ -368,8 +367,6 @@ mysql: postgresql: enabled: true - image: - tag: 11.22.0-debian-11-r4 auth: username: defectdojo password: "" diff --git a/requirements.txt b/requirements.txt index 29d9c30e23..8ea5b9374b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,9 @@ # requirements.txt for DefectDojo using Python 3.x -asteval==1.0.0 +asteval==1.0.1 bleach==6.1.0 bleach[css] celery==5.4.0 -coverage==7.5.4 +coverage==7.6.0 defusedxml==0.7.1 django_celery_results==2.5.1 django-auditlog==2.3.0 @@ -24,10 +24,10 @@ django-slack==5.19.0 git+https://github.com/DefectDojo/django-tagging@develop#egg=django-tagging django-watson==1.6.3 django-prometheus==2.3.1 -Django==4.2.13 -djangorestframework==3.14.0 +Django==4.2.14 +djangorestframework==3.15.2 html2text==2024.2.26 -humanize==4.9.0 +humanize==4.10.0 jira==3.8.0 PyGithub==1.58.2 lxml==5.2.2 @@ -35,7 +35,7 @@ Markdown==3.6 mysqlclient==2.1.1 openpyxl==3.1.5 Pillow==10.4.0 # required by django-imagekit -psycopg[binary]==3.2.1 +psycopg[c]==3.2.1 cryptography==42.0.8 python-dateutil==2.9.0.post0 pytz==2024.1 @@ -49,16 +49,15 @@ whitenoise==5.2.0 titlecase==2.4.1 social-auth-app-django==5.4.1 social-auth-core==4.5.4 -Python-jose==3.3.0 gitpython==3.1.43 debugpy==1.8.2 python-gitlab==4.7.0 cpe==1.2.1 -packageurl-python==0.15.2 +packageurl-python==0.15.3 django-crum==0.7.9 JSON-log-formatter==1.0 -django-split-settings==1.3.1 -django-debug-toolbar==4.4.4 +django-split-settings==1.3.2 +django-debug-toolbar==4.4.6 django-debug-toolbar-request-history==0.1.4 vcrpy==6.0.1 vcrpy-unittest==0.1.7 @@ -75,7 +74,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.34.139 # Required for Celery Broker AWS (SQS) support +boto3==1.34.143 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.1.7 fontawesomefree==6.5.1 diff --git a/ruff.toml b/ruff.toml index 30a62e2c0c..c64763ce51 100644 --- a/ruff.toml +++ 
b/ruff.toml @@ -39,7 +39,6 @@ select = [ "UP", "YTT", "ASYNC", - "TRIO", "S2", "S5", "S7", "COM", "C4", @@ -49,10 +48,11 @@ select = [ "EXE", "ICN", "LOG", + "G1", "G2", "INP", "SLOT", "PIE", - "Q001", "Q002", "Q003", "Q004", + "Q", "RSE", "TID", "PD", diff --git a/tests/Import_scanner_test.py b/tests/Import_scanner_test.py index 2a9f170119..6cd3ef28cd 100644 --- a/tests/Import_scanner_test.py +++ b/tests/Import_scanner_test.py @@ -14,38 +14,38 @@ class ScannerTest(BaseTestCase): def setUp(self): super().setUp(self) - self.repo_path = dir_path + '/scans' + self.repo_path = dir_path + "/scans" if os.path.isdir(self.repo_path): shutil.rmtree(self.repo_path) os.mkdir(self.repo_path) - git.Repo.clone_from('https://github.com/DefectDojo/sample-scan-files', self.repo_path) - self.remove_items = ['__init__.py', '__init__.pyc', 'factory.py', 'factory.pyc', - 'factory.py', 'LICENSE', 'README.md', '.gitignore', '.git', '__pycache__'] - tool_path = dir_path[:-5] + 'dojo/tools' + git.Repo.clone_from("https://github.com/DefectDojo/sample-scan-files", self.repo_path) + self.remove_items = ["__init__.py", "__init__.pyc", "factory.py", "factory.pyc", + "factory.py", "LICENSE", "README.md", ".gitignore", ".git", "__pycache__"] + tool_path = dir_path[:-5] + "dojo/tools" tools = sorted(os.listdir(tool_path)) tests = sorted(os.listdir(self.repo_path)) self.tools = [i for i in tools if i not in self.remove_items] self.tests = [i for i in tests if i not in self.remove_items] def test_check_test_file(self): - missing_tests = ['MISSING TEST FOLDER'] + missing_tests = ["MISSING TEST FOLDER"] for tool in self.tools: if tool not in self.tests: missing_tests += [tool] - missing_tests += ['\nNO TEST FILES'] + missing_tests += ["\nNO TEST FILES"] for test in self.tests: - cases = sorted(os.listdir(self.repo_path + '/' + test)) + cases = sorted(os.listdir(self.repo_path + "/" + test)) cases = [i for i in cases if i not in self.remove_items] if len(cases) == 0 and tool not in missing_tests: missing_tests += [test] if len(missing_tests) > 0: - print('The following scanners are missing test cases or incorrectly named') - print('Names must match those listed in /dojo/tools') - print('Test cases can be added/modified here:') - print('https://github.com/DefectDojo/sample-scan-files\n') + print("The following scanners are missing test cases or incorrectly named") + print("Names must match those listed in /dojo/tools") + print("Test cases can be added/modified here:") + print("https://github.com/DefectDojo/sample-scan-files\n") for test in missing_tests: print(test) print() @@ -53,46 +53,46 @@ def test_check_test_file(self): def test_check_for_doc(self): driver = self.driver - driver.get('https://documentation.defectdojo.com/integrations/import/') - integration_index = integration_text.index('Integrations') + len('Integrations') + 1 - usage_index = integration_text.index('Usage Examples') - len('Models') - 2 + driver.get("https://documentation.defectdojo.com/integrations/import/") + integration_index = integration_text.index("Integrations") + len("Integrations") + 1 + usage_index = integration_text.index("Usage Examples") - len("Models") - 2 integration_text = integration_text[integration_index:usage_index].lower() - integration_text = integration_text.replace('_', ' ').replace('-', ' ').replace('.', '').split('\n') + integration_text = integration_text.replace("_", " ").replace("-", " ").replace(".", "").split("\n") acronyms = [] for words in integration_text: acronyms += ["".join(word[0] for word in words.split())] 
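For context on the acronym matching used in test_check_for_doc above (and reused in the form and template checks below): each documentation entry is collapsed to the first letter of every word, so a tool whose directory name is an initialism can still match a spelled-out docs entry. A minimal sketch of that logic with hypothetical sample data (the real lists are scraped from the docs page and read from /dojo/tools):

```python
import re

# Hypothetical sample data; the real lists come from the documentation
# page scrape and the /dojo/tools directory listing.
integration_text = ["anchore engine", "zed attack proxy"]
acronyms = ["".join(word[0] for word in words.split()) for words in integration_text]
# acronyms -> ["ae", "zap"]

tool = "zap"  # a scanner whose docs entry is spelled out in full
reg = re.compile(".*" + tool.replace("_", " ") + ".*")

# A tool counts as documented if it matches the docs text directly
# or matches one of the derived acronyms.
documented = bool(list(filter(reg.search, integration_text))
                  or list(filter(reg.search, acronyms)))
assert documented
```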
missing_docs = [] for tool in self.tools: - reg = re.compile('.*' + tool.replace('_', ' ') + '.*') + reg = re.compile(".*" + tool.replace("_", " ") + ".*") if len(list(filter(reg.search, integration_text))) < 1: if len(list(filter(reg.search, acronyms))) < 1: missing_docs += [tool] if len(missing_docs) > 0: - print('The following scanners are missing documentation') - print('Names must match those listed in /dojo/tools') - print('Documentation can be added here:') - print('https://github.com/DefectDojo/django-DefectDojo/tree/dev/docs\n') + print("The following scanners are missing documentation") + print("Names must match those listed in /dojo/tools") + print("Documentation can be added here:") + print("https://github.com/DefectDojo/django-DefectDojo/tree/dev/docs\n") for tool in missing_docs: print(tool) print() assert len(missing_docs) == 0 def test_check_for_forms(self): - forms_path = dir_path[:-5] + 'dojo/forms.py' - file = open(forms_path, 'r+') + forms_path = dir_path[:-5] + "dojo/forms.py" + file = open(forms_path, "r+") forms = file.readlines() file.close() forms = [form.strip().lower() for form in forms] forms = forms[forms.index('scan_type_choices = (("", "please select a scan type"),') + 1: - forms.index('sorted_scan_type_choices = sorted(scan_type_choices, key=lambda x: x[1])') - 1] - forms = [form.replace('(', '').replace(')', '').replace('-', ' ').replace('"', '').replace('.', '') for form in forms] - forms = [form[:form.index(',')] for form in forms] - remove_patterns = [' scanner', ' scan'] + forms.index("sorted_scan_type_choices = sorted(scan_type_choices, key=lambda x: x[1])") - 1] + forms = [form.replace("(", "").replace(")", "").replace("-", " ").replace('"', "").replace(".", "") for form in forms] + forms = [form[:form.index(",")] for form in forms] + remove_patterns = [" scanner", " scan"] for pattern in remove_patterns: - forms = [re.sub(pattern, '', fix) for fix in sorted(forms)] + forms = [re.sub(pattern, "", fix) for fix in sorted(forms)] acronyms = [] for words in forms: @@ -100,7 +100,7 @@ def test_check_for_forms(self): missing_forms = [] for tool in self.tools: - reg = re.compile(tool.replace('_', ' ')) + reg = re.compile(tool.replace("_", " ")) matches = list(filter(reg.search, forms)) + list(filter(reg.search, acronyms)) matches = [m.strip() for m in matches] if len(matches) != 1: @@ -108,10 +108,10 @@ def test_check_for_forms(self): missing_forms += [tool] if len(missing_forms) > 0: - print('The following scanners are missing forms') - print('Names must match those listed in /dojo/tools') - print('forms can be added here:') - print('https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/forms.py\n') + print("The following scanners are missing forms") + print("Names must match those listed in /dojo/tools") + print("forms can be added here:") + print("https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/forms.py\n") for tool in missing_forms: print(tool) print() @@ -119,20 +119,20 @@ def test_check_for_forms(self): @unittest.skip("Deprecated since Dynamic Parser infrastructure") def test_check_for_options(self): - template_path = dir_path[:-5] + 'dojo/templates/dojo/import_scan_results.html' - file = open(template_path, 'r+') + template_path = dir_path[:-5] + "dojo/templates/dojo/import_scan_results.html" + file = open(template_path, "r+") templates = file.readlines() file.close() templates = [temp.strip().lower() for temp in templates] - templates = templates[templates.index('
<select ...>') + 1: - templates.index('</select>')] - remove_patterns = ['<option ...>', '', '</option>', ' scanner', ' scan'] + templates = templates[templates.index("<select ...>") + 1: + templates.index("</select>")] + remove_patterns = ["<option ...>", "", "</option>", " scanner", " scan"] for pattern in remove_patterns: - templates = [re.sub(pattern, '', temp) for temp in templates] + templates = [re.sub(pattern, "", temp) for temp in templates] - templates = [temp[:temp.index(' - ')] for temp in sorted(templates) if ' - ' in temp] - templates = [temp.replace('-', ' ').replace('.', '').replace('(', '').replace(')', '') for temp in templates] + templates = [temp[:temp.index(" - ")] for temp in sorted(templates) if " - " in temp] + templates = [temp.replace("-", " ").replace(".", "").replace("(", "").replace(")", "") for temp in templates] acronyms = [] for words in templates: @@ -140,7 +140,7 @@ def test_check_for_options(self): missing_templates = [] for tool in self.tools: - temp_tool = tool.replace('_', ' ') + temp_tool = tool.replace("_", " ") reg = re.compile(temp_tool) matches = list(filter(reg.search, templates)) + list(filter(reg.search, acronyms)) matches = [m.strip() for m in matches] @@ -149,10 +149,10 @@ def test_check_for_options(self): missing_templates += [tool] if len(missing_templates) > 0: - print('The following scanners are missing templates') - print('Names must match those listed in /dojo/tools') - print('templates can be added here:') - print('https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/templates/dojo/import_scan_results.html\n') + print("The following scanners are missing templates") + print("Names must match those listed in /dojo/tools") + print("templates can be added here:") + print("https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/templates/dojo/import_scan_results.html\n") for tool in missing_templates: print(tool) print() @@ -163,15 +163,15 @@ def test_engagement_import_scan_result(self): self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() driver.find_element(By.LINK_TEXT, "Add New Engagement").click() - driver.find_element(By.ID, "id_name").send_keys('Scan type mapping') - driver.find_element(By.NAME, '_Import Scan Results').click() - options_text = ''.join(driver.find_element(By.NAME, 'scan_type').text).split('\n') + driver.find_element(By.ID, "id_name").send_keys("Scan type mapping") + driver.find_element(By.NAME, "_Import Scan Results").click() + options_text = "".join(driver.find_element(By.NAME, "scan_type").text).split("\n") options_text = [scan.strip() for scan in options_text] mod_options = options_text - mod_options = [re.sub(' Scanner', '', scan) for scan in mod_options] - mod_options = [re.sub(' Scan', '', scan) for scan in mod_options] - mod_options = [scan.lower().replace('-', ' ').replace('.', '') for scan in mod_options] + mod_options = [re.sub(" Scanner", "", scan) for scan in mod_options] + mod_options = [re.sub(" Scan", "", scan) for scan in mod_options] + mod_options = [scan.lower().replace("-", " ").replace(".", "") for scan in mod_options] acronyms = [] for scans in mod_options: @@ -181,8 +181,8 @@ def test_engagement_import_scan_result(self): scan_map = {} for test in self.tests: - temp_test = test.replace('_', ' ').replace('-', ' ') - reg = re.compile('.*' + temp_test + '.*') + temp_test = test.replace("_", " ").replace("-", " ") + reg = re.compile(".*" + temp_test + ".*") found_matches = {} for i in range(len(potential_matches)): matches = list(filter(reg.search, [potential_matches[i]])) @@ -204,41 +204,41 @@ def test_engagement_import_scan_result(self): failed_tests = [] for test in self.tests: - cases = sorted(os.listdir(self.repo_path + '/' + test)) + cases = sorted(os.listdir(self.repo_path + "/" 
+ test)) cases = [i for i in cases if i not in self.remove_items] if len(cases) == 0: - failed_tests += [test.upper() + ': No test cases'] + failed_tests += [test.upper() + ": No test cases"] for case in cases: self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() driver.find_element(By.LINK_TEXT, "Add New Engagement").click() - driver.find_element(By.ID, "id_name").send_keys(test + ' - ' + case) - driver.find_element(By.NAME, '_Import Scan Results').click() + driver.find_element(By.ID, "id_name").send_keys(test + " - " + case) + driver.find_element(By.NAME, "_Import Scan Results").click() try: - driver.find_element(By.ID, 'id_active').get_attribute('checked') - driver.find_element(By.ID, 'id_verified').get_attribute('checked') + driver.find_element(By.ID, "id_active").get_attribute("checked") + driver.find_element(By.ID, "id_verified").get_attribute("checked") scan_type = scan_map[test] Select(driver.find_element(By.ID, "id_scan_type")).select_by_visible_text(scan_type) - test_location = self.repo_path + '/' + test + '/' + case - driver.find_element(By.ID, 'id_file').send_keys(test_location) + test_location = self.repo_path + "/" + test + "/" + case + driver.find_element(By.ID, "id_file").send_keys(test_location) driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - EngagementTXT = ''.join(driver.find_element(By.TAG_NAME, "BODY").text).split('\n') - reg = re.compile('processed, a total of') + EngagementTXT = "".join(driver.find_element(By.TAG_NAME, "BODY").text).split("\n") + reg = re.compile("processed, a total of") matches = list(filter(reg.search, EngagementTXT)) if len(matches) != 1: - failed_tests += [test.upper() + ' - ' + case + ': Not imported'] + failed_tests += [test.upper() + " - " + case + ": Not imported"] except Exception as e: - if e == 'Message: timeout': - failed_tests += [test.upper() + ' - ' + case + ': Not imported due to timeout'] + if e == "Message: timeout": + failed_tests += [test.upper() + " - " + case + ": Not imported due to timeout"] else: - failed_tests += [test.upper() + ': Cannot auto select scan type'] + failed_tests += [test.upper() + ": Cannot auto select scan type"] break if len(failed_tests) > 0: - print('The following scan imports produced errors') - print('Names of tests must match those listed in /dojo/tools') - print('Tests can be added/modified here:') - print('https://github.com/DefectDojo/sample-scan-files\n') + print("The following scan imports produced errors") + print("Names of tests must match those listed in /dojo/tools") + print("Tests can be added/modified here:") + print("https://github.com/DefectDojo/sample-scan-files\n") for test in failed_tests: print(test) print() @@ -251,15 +251,15 @@ def tearDown(self): def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(ScannerTest('test_check_test_file')) - suite.addTest(ScannerTest('test_check_for_doc')) - suite.addTest(ScannerTest('test_check_for_forms')) - suite.addTest(ScannerTest('test_check_for_options')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ScannerTest('test_engagement_import_scan_result')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(ScannerTest("test_check_test_file")) + suite.addTest(ScannerTest("test_check_for_doc")) + 
suite.addTest(ScannerTest("test_check_for_forms")) + suite.addTest(ScannerTest("test_check_for_options")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ScannerTest("test_engagement_import_scan_result")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/announcement_banner_test.py b/tests/announcement_banner_test.py index 5206be7ffd..8d1fc4dd09 100644 --- a/tests/announcement_banner_test.py +++ b/tests/announcement_banner_test.py @@ -16,122 +16,122 @@ def __init__(self, method_name, type): def test_setup(self): driver = self.driver - driver.get(self.base_url + 'configure_announcement') - if self.is_element_by_css_selector_present('input.btn.btn-danger'): - driver.find_element(By.CSS_SELECTOR, 'input.btn.btn-danger').click() + driver.get(self.base_url + "configure_announcement") + if self.is_element_by_css_selector_present("input.btn.btn-danger"): + driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() def enable_announcement(self, message, dismissable, style): driver = self.driver - driver.get(self.base_url + 'configure_announcement') - driver.find_element(By.ID, 'id_message').send_keys(message) + driver.get(self.base_url + "configure_announcement") + driver.find_element(By.ID, "id_message").send_keys(message) - Select(driver.find_element(By.ID, 'id_style')).select_by_visible_text(style) + Select(driver.find_element(By.ID, "id_style")).select_by_visible_text(style) - dismissable_control = driver.find_element(By.ID, 'id_dismissable') + dismissable_control = driver.find_element(By.ID, "id_dismissable") if xor(bool(dismissable_control.is_selected()), bool(dismissable)): dismissable_control.click() - driver.find_element(By.CSS_SELECTOR, 'input.btn.btn-primary').click() + driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() def disable_announcement(self): driver = self.driver - driver.get(self.base_url + 'configure_announcement') - driver.find_element(By.CSS_SELECTOR, 'input.btn.btn-danger').click() + driver.get(self.base_url + "configure_announcement") + driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() def test_create_announcement(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) - text = 'Big important announcement, definitely pay attention!' + text = "Big important announcement, definitely pay attention!" 
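The xor() guard in enable_announcement above drives the dismissable checkbox to a requested state rather than blindly clicking it: a click is issued only when the control's current state differs from the desired one, so repeated calls are idempotent. A minimal, Selenium-free sketch of the same idea (the Checkbox class here is a stand-in for the real web element):

```python
from operator import xor

class Checkbox:
    """Stand-in for the Selenium checkbox element used by enable_announcement."""
    def __init__(self, selected=False):
        self.selected = selected

    def is_selected(self):
        return self.selected

    def click(self):
        self.selected = not self.selected

def set_checkbox(control, desired):
    # Click only when the current state differs from the desired one.
    if xor(bool(control.is_selected()), bool(desired)):
        control.click()

box = Checkbox(selected=True)
set_checkbox(box, True)    # already selected: no click happens
set_checkbox(box, False)   # states differ: one click deselects it
assert box.is_selected() is False
```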
self.enable_announcement(text, False, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) driver.get(self.base_url) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) def test_create_dismissable_announcement(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) - text = 'Big important announcement, definitely pay don\'t dismiss this one.' + text = "Big important announcement, definitely pay don't dismiss this one." self.enable_announcement(text, True, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) driver.get(self.base_url) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) close_button = driver.find_element(By.XPATH, "//div[contains(@class, 'announcement-banner')]/a/span[contains(text(), '×')]") close_button.click() dismiss_announcement_button = driver.find_element(By.XPATH, "//button[contains(@class, 'btn-danger') and contains(text(), 'Dismiss Announcement')]") dismiss_announcement_button.click() - self.assertFalse(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertFalse(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) def test_dismissing_announcement_does_not_dismiss_for_others(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) - text = 'Everyone sees this, right?' + text = "Everyone sees this, right?" 
self.enable_announcement(text, True, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) close_button = driver.find_element(By.XPATH, "//div[contains(@class, 'announcement-banner')]/a/span[contains(text(), '×')]") close_button.click() dismiss_announcement_button = driver.find_element(By.XPATH, "//button[contains(@class, 'btn-danger') and contains(text(), 'Dismiss Announcement')]") dismiss_announcement_button.click() - self.assertFalse(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertFalse(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.logout() self.login_standard_page() - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.logout() self.login_page() - self.assertFalse(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertFalse(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) def test_announcement_ui_disabled_when_set(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) - text = 'The most important announcement of the year.' + text = "The most important announcement of the year." 
self.enable_announcement(text, False, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) - driver.get(self.base_url + 'configure_announcement') + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) + driver.get(self.base_url + "configure_announcement") driver.find_element(By.XPATH, "//input[contains(@id, 'id_message') and @disabled]") driver.find_element(By.XPATH, "//select[contains(@id, 'id_style') and @disabled]") driver.find_element(By.XPATH, "//input[contains(@id, 'id_dismissable') and @disabled]") self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) def test_announcement_empty_after_removal(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) - text = 'Surely no-one would delete this announcement quickly' + text = "Surely no-one would delete this announcement quickly" self.enable_announcement(text, False, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) - self.assertTrue(self.is_element_by_css_selector_present(f'.announcement-banner.alert-{self.type.lower()}', text=text)) + self.assertTrue(self.is_element_by_css_selector_present(f".announcement-banner.alert-{self.type.lower()}", text=text)) self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) - driver.get(self.base_url + 'configure_announcement') + driver.get(self.base_url + "configure_announcement") driver.find_element(By.XPATH, "//input[contains(@id, 'id_message') and contains(@value,'')]") driver.find_element(By.XPATH, "//select[contains(@id, 'id_style')]/option[@selected and contains(text(), 'Info')]") driver.find_element(By.XPATH, "//input[contains(@id, 'id_dismissable') and not(@checked)]") @@ -139,38 +139,38 @@ def test_announcement_empty_after_removal(self): def test_html_announcement(self): driver = self.driver driver.get(self.base_url) - self.assertFalse(self.is_element_by_css_selector_present('.announcement-banner')) + self.assertFalse(self.is_element_by_css_selector_present(".announcement-banner")) text = 'Links in announcements? <a href="https://github.com/DefectDojo/django-DefectDojo" style="color: #224477;" target="_blank">you bet!</a>'
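Because the announcement message just above embeds raw HTML, the assertion that follows checks the rendered anchor's attributes via XPath rather than matching banner text. A self-contained sketch of the same check using lxml (an assumption for illustration; the real test evaluates the XPath through Selenium):

```python
from lxml import html

banner = html.fromstring(
    '<div class="announcement-banner">Links in announcements? '
    '<a href="https://github.com/DefectDojo/django-DefectDojo" '
    'style="color: #224477;" target="_blank">you bet!</a></div>')

# Same predicate shape as the Selenium XPath in the test.
matches = banner.xpath(
    "//div[contains(@class, 'announcement-banner')]"
    "/a[@href='https://github.com/DefectDojo/django-DefectDojo'"
    " and @style='color: #224477;' and @target='_blank']")
assert len(matches) == 1
assert matches[0].text == "you bet!"
```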
self.enable_announcement(text, False, self.type) - self.assertTrue(self.is_success_message_present('Announcement updated successfully.')) + self.assertTrue(self.is_success_message_present("Announcement updated successfully.")) driver.find_element(By.XPATH, "//div[contains(@class, 'announcement-banner')]/a[@href='https://github.com/DefectDojo/django-DefectDojo' and @style='color: #224477;' and @target='_blank']") self.disable_announcement() - self.assertTrue(self.is_success_message_present('Announcement removed for everyone.')) + self.assertTrue(self.is_success_message_present("Announcement removed for everyone.")) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(AnnouncementBannerTest('test_setup', 'Info')) - suite.addTest(AnnouncementBannerTest('test_create_announcement', 'Info')) - suite.addTest(AnnouncementBannerTest('test_create_announcement', 'Success')) - suite.addTest(AnnouncementBannerTest('test_create_announcement', 'Warning')) - suite.addTest(AnnouncementBannerTest('test_create_announcement', 'Danger')) - suite.addTest(AnnouncementBannerTest('test_create_dismissable_announcement', 'Info')) - suite.addTest(AnnouncementBannerTest('test_create_dismissable_announcement', 'Success')) - suite.addTest(AnnouncementBannerTest('test_create_dismissable_announcement', 'Warning')) - suite.addTest(UserTest('test_create_user')) - suite.addTest(AnnouncementBannerTest('test_dismissing_announcement_does_not_dismiss_for_others', 'Info')) - suite.addTest(AnnouncementBannerTest('test_announcement_ui_disabled_when_set', 'Info')) - suite.addTest(AnnouncementBannerTest('test_announcement_empty_after_removal', 'Info')) - suite.addTest(AnnouncementBannerTest('test_html_announcement', 'Info')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(AnnouncementBannerTest("test_setup", "Info")) + suite.addTest(AnnouncementBannerTest("test_create_announcement", "Info")) + suite.addTest(AnnouncementBannerTest("test_create_announcement", "Success")) + suite.addTest(AnnouncementBannerTest("test_create_announcement", "Warning")) + suite.addTest(AnnouncementBannerTest("test_create_announcement", "Danger")) + suite.addTest(AnnouncementBannerTest("test_create_dismissable_announcement", "Info")) + suite.addTest(AnnouncementBannerTest("test_create_dismissable_announcement", "Success")) + suite.addTest(AnnouncementBannerTest("test_create_dismissable_announcement", "Warning")) + suite.addTest(UserTest("test_create_user")) + suite.addTest(AnnouncementBannerTest("test_dismissing_announcement_does_not_dismiss_for_others", "Info")) + suite.addTest(AnnouncementBannerTest("test_announcement_ui_disabled_when_set", "Info")) + suite.addTest(AnnouncementBannerTest("test_announcement_empty_after_removal", "Info")) + suite.addTest(AnnouncementBannerTest("test_html_announcement", "Info")) return suite -if __name__ == '__main__': +if __name__ == "__main__": runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2) ret = not runner.run(suite()).wasSuccessful() BaseTestCase.tearDownDriver() diff --git a/tests/check_various_pages.py b/tests/check_various_pages.py index 35d9079e93..68a5f22066 100644 --- a/tests/check_various_pages.py +++ b/tests/check_various_pages.py @@ -19,9 +19,9 @@ def test_calendar_status(self): def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(VariousPagesTest('test_user_status')) - suite.addTest(VariousPagesTest('test_calendar_status')) + suite.addTest(BaseTestCase("test_login")) + 
suite.addTest(VariousPagesTest("test_user_status")) + suite.addTest(VariousPagesTest("test_calendar_status")) return suite diff --git a/tests/close_old_findings_dedupe_test.py b/tests/close_old_findings_dedupe_test.py index cb7db1b836..14ee93642a 100644 --- a/tests/close_old_findings_dedupe_test.py +++ b/tests/close_old_findings_dedupe_test.py @@ -36,9 +36,9 @@ def check_nb_duplicates(self, expected_number_of_duplicates): # iterate over the rows of the findings table and concatenates all columns into td.text trs = driver.find_elements(By.XPATH, '//*[@id="open_findings"]/tbody/tr') for row in trs: - concatRow = ' '.join([td.text for td in row.find_elements(By.XPATH, ".//td")]) + concatRow = " ".join([td.text for td in row.find_elements(By.XPATH, ".//td")]) # print(concatRow) - if '(DUPE)' and 'Duplicate' in concatRow: + if "(DUPE)" and "Duplicate" in concatRow: dupe_count += 1 if (dupe_count != expected_number_of_duplicates): @@ -47,8 +47,8 @@ def check_nb_duplicates(self, expected_number_of_duplicates): break if (dupe_count != expected_number_of_duplicates): - findings_table = driver.find_element(By.ID, 'open_findings') - print(findings_table.get_attribute('innerHTML')) + findings_table = driver.find_element(By.ID, "open_findings") + print(findings_table.get_attribute("innerHTML")) self.assertEqual(dupe_count, expected_number_of_duplicates) @@ -56,14 +56,14 @@ def check_nb_duplicates(self, expected_number_of_duplicates): def test_enable_deduplication(self): logger.debug("enabling deduplication...") driver = self.driver - driver.get(self.base_url + 'system_settings') - if not driver.find_element(By.ID, 'id_enable_deduplication').is_selected(): + driver.get(self.base_url + "system_settings") + if not driver.find_element(By.ID, "id_enable_deduplication").is_selected(): driver.find_element(By.XPATH, '//*[@id="id_enable_deduplication"]').click() # save settings driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # check if it's enabled after reload - driver.get(self.base_url + 'system_settings') - self.assertTrue(driver.find_element(By.ID, 'id_enable_deduplication').is_selected()) + driver.get(self.base_url + "system_settings") + self.assertTrue(driver.find_element(By.ID, "id_enable_deduplication").is_selected()) @on_exception_html_source_logger def test_delete_findings(self): @@ -73,27 +73,27 @@ def test_delete_findings(self): if self.element_exists_by_id("no_findings"): text = driver.find_element(By.ID, "no_findings").text - if 'No findings found.' in text: + if "No findings found." in text: return driver.find_element(By.ID, "select_all").click() driver.find_element(By.CSS_SELECTOR, "i.fa-solid.fa-trash").click() try: WebDriverWait(driver, 1).until(EC.alert_is_present(), - 'Timed out waiting for finding delete ' - + 'confirmation popup to appear.') + "Timed out waiting for finding delete " + + "confirmation popup to appear.") driver.switch_to.alert.accept() except TimeoutException: - self.fail('Confirmation dialogue not shown, cannot delete previous findings') + self.fail("Confirmation dialogue not shown, cannot delete previous findings") logger.debug("page source when checking for no_findings element") logger.debug(self.driver.page_source) text = driver.find_element(By.ID, "no_findings").text self.assertIsNotNone(text) - self.assertTrue('No findings found.' in text) + self.assertTrue("No findings found." 
in text) # check that user was redirect back to url where it came from based on return_url - self.assertTrue(driver.current_url.endswith('page=1')) + self.assertTrue(driver.current_url.endswith("page=1")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on engagement @@ -112,7 +112,7 @@ def test_add_same_engagement_engagement(self): driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click() driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on engagement @@ -141,7 +141,7 @@ def test_import_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) # Second upload. Immuniweb again. # Same report. @@ -158,7 +158,7 @@ def test_import_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) @on_exception_html_source_logger def test_close_same_engagement_tests(self): @@ -180,7 +180,7 @@ def test_close_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_and_close_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='1 findings and closed 2 findings')) + self.assertTrue(self.is_success_message_present(text="1 findings and closed 2 findings")) @on_exception_html_source_logger def test_check_endpoint_status(self): @@ -203,7 +203,7 @@ def test_add_same_product_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Product Test 1") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() @@ -211,7 +211,7 @@ def test_add_same_product_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Product Test 2") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() @@ -219,7 +219,7 @@ def test_add_same_product_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Product Test 3") driver.find_elements(By.CLASS_NAME, 
"btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on product @@ -248,7 +248,7 @@ def test_import_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) # Second upload. Immuniweb again. # Same report. @@ -265,7 +265,7 @@ def test_import_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) @on_exception_html_source_logger def test_close_same_product_tests(self): @@ -287,7 +287,7 @@ def test_close_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_and_close_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='1 findings and closed 2 findings')) + self.assertTrue(self.is_success_message_present(text="1 findings and closed 2 findings")) @on_exception_html_source_logger def test_check_same_product_status(self): @@ -295,38 +295,38 @@ def test_check_same_product_status(self): def add_close_old_tests_to_suite(suite, jira=False, github=False, block_execution=False): - suite.addTest(BaseTestCase('test_login')) + suite.addTest(BaseTestCase("test_login")) set_suite_settings(suite, jira=jira, github=github, block_execution=block_execution) if jira: - suite.addTest(BaseTestCase('enable_jira')) + suite.addTest(BaseTestCase("enable_jira")) else: - suite.addTest(BaseTestCase('disable_jira')) + suite.addTest(BaseTestCase("disable_jira")) if github: - suite.addTest(BaseTestCase('enable_github')) + suite.addTest(BaseTestCase("enable_github")) else: - suite.addTest(BaseTestCase('disable_github')) + suite.addTest(BaseTestCase("disable_github")) if block_execution: - suite.addTest(BaseTestCase('enable_block_execution')) + suite.addTest(BaseTestCase("enable_block_execution")) else: - suite.addTest(BaseTestCase('disable_block_execution')) + suite.addTest(BaseTestCase("disable_block_execution")) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(CloseOldDedupeTest('test_enable_deduplication')) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(CloseOldDedupeTest("test_enable_deduplication")) # Test same scanners - same engagement - dynamic - dedupe - suite.addTest(CloseOldDedupeTest('test_delete_findings')) - suite.addTest(CloseOldDedupeTest('test_add_same_engagement_engagement')) - suite.addTest(CloseOldDedupeTest('test_import_same_engagement_tests')) - suite.addTest(CloseOldDedupeTest('test_close_same_engagement_tests')) - suite.addTest(CloseOldDedupeTest('test_check_endpoint_status')) + suite.addTest(CloseOldDedupeTest("test_delete_findings")) + 
suite.addTest(CloseOldDedupeTest("test_add_same_engagement_engagement")) + suite.addTest(CloseOldDedupeTest("test_import_same_engagement_tests")) + suite.addTest(CloseOldDedupeTest("test_close_same_engagement_tests")) + suite.addTest(CloseOldDedupeTest("test_check_endpoint_status")) # Test same scanners - same product - dynamic - dedupe - suite.addTest(CloseOldDedupeTest('test_delete_findings')) - suite.addTest(CloseOldDedupeTest('test_add_same_product_engagement')) - suite.addTest(CloseOldDedupeTest('test_import_same_product_tests')) - suite.addTest(CloseOldDedupeTest('test_close_same_product_tests')) - suite.addTest(CloseOldDedupeTest('test_check_same_product_status')) + suite.addTest(CloseOldDedupeTest("test_delete_findings")) + suite.addTest(CloseOldDedupeTest("test_add_same_product_engagement")) + suite.addTest(CloseOldDedupeTest("test_import_same_product_tests")) + suite.addTest(CloseOldDedupeTest("test_close_same_product_tests")) + suite.addTest(CloseOldDedupeTest("test_check_same_product_status")) # Clean up - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/close_old_findings_test.py b/tests/close_old_findings_test.py index f8ccc7fad7..ba47ce732d 100644 --- a/tests/close_old_findings_test.py +++ b/tests/close_old_findings_test.py @@ -32,27 +32,27 @@ def test_delete_findings(self): if self.element_exists_by_id("no_findings"): text = driver.find_element(By.ID, "no_findings").text - if 'No findings found.' in text: + if "No findings found." in text: return driver.find_element(By.ID, "select_all").click() driver.find_element(By.CSS_SELECTOR, "i.fa-solid.fa-trash").click() try: WebDriverWait(driver, 1).until(EC.alert_is_present(), - 'Timed out waiting for finding delete ' - + 'confirmation popup to appear.') + "Timed out waiting for finding delete " + + "confirmation popup to appear.") driver.switch_to.alert.accept() except TimeoutException: - self.fail('Confirmation dialogue not shown, cannot delete previous findings') + self.fail("Confirmation dialogue not shown, cannot delete previous findings") logger.debug("page source when checking for no_findings element") logger.debug(self.driver.page_source) text = driver.find_element(By.ID, "no_findings").text self.assertIsNotNone(text) - self.assertTrue('No findings found.' in text) + self.assertTrue("No findings found." 
in text) # check that user was redirect back to url where it came from based on return_url - self.assertTrue(driver.current_url.endswith('page=1')) + self.assertTrue(driver.current_url.endswith("page=1")) # -------------------------------------------------------------------------------------------------------- # Same scanner import - Close Old Findings on engagement @@ -69,7 +69,7 @@ def test_add_same_engagement_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Engagement No Dedupe") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on engagement @@ -98,7 +98,7 @@ def test_import_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/close_old_scans/closeold_nodedupe_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) # Second upload. Immuniweb again. # Same report. @@ -115,7 +115,7 @@ def test_import_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/close_old_scans/closeold_nodedupe_2.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 3 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 3 findings")) @on_exception_html_source_logger def test_close_same_engagement_tests(self): @@ -137,7 +137,7 @@ def test_close_same_engagement_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_and_close_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='1 findings and closed 3 findings')) + self.assertTrue(self.is_success_message_present(text="1 findings and closed 3 findings")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on product @@ -156,7 +156,7 @@ def test_add_same_product_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Product No Dedupe Test 1") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() @@ -164,7 +164,7 @@ def test_add_same_product_engagement(self): driver.find_element(By.ID, "id_name").send_keys("Close Same Product No Dedupe Test 2") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) self.goto_product_overview(driver) driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click() @@ -172,7 +172,7 @@ def test_add_same_product_engagement(self): 
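The finding-deletion steps above rely on an explicit wait for the browser's native confirm dialog; calling switch_to.alert immediately can race the dialog and raise. That wait-and-accept pattern, factored into a small helper (a sketch; driver is assumed to be a live Selenium WebDriver):

```python
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def accept_confirmation(driver, timeout=1):
    """Wait for a native JS confirm() dialog and accept it.

    Returns True if the dialog appeared and was accepted, False on timeout.
    """
    try:
        WebDriverWait(driver, timeout).until(
            EC.alert_is_present(),
            "Timed out waiting for confirmation popup to appear.")
        driver.switch_to.alert.accept()
        return True
    except TimeoutException:
        return False
```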
driver.find_element(By.ID, "id_name").send_keys("Close Same Product No Dedupe Test 3") driver.find_elements(By.CLASS_NAME, "btn-primary")[3].click() - self.assertTrue(self.is_success_message_present(text='Engagement added successfully.')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully.")) # -------------------------------------------------------------------------------------------------------- # Same scanner deduplication - Deduplication on product @@ -201,7 +201,7 @@ def test_import_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/close_old_scans/closeold_nodedupe_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 0 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 0 findings")) # Second upload. Immuniweb again. # Same report. @@ -218,7 +218,7 @@ def test_import_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/close_old_scans/closeold_nodedupe_2.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='3 findings and closed 3 findings')) + self.assertTrue(self.is_success_message_present(text="3 findings and closed 3 findings")) @on_exception_html_source_logger def test_close_same_product_tests(self): @@ -240,39 +240,39 @@ def test_close_same_product_tests(self): driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_and_close_1.xml") driver.find_elements(By.CLASS_NAME, "btn-primary")[1].click() - self.assertTrue(self.is_success_message_present(text='1 findings and closed 3 findings')) + self.assertTrue(self.is_success_message_present(text="1 findings and closed 3 findings")) def add_close_old_tests_to_suite(suite, jira=False, github=False, block_execution=False): - suite.addTest(BaseTestCase('test_login')) + suite.addTest(BaseTestCase("test_login")) set_suite_settings(suite, jira=jira, github=github, block_execution=block_execution) if jira: - suite.addTest(BaseTestCase('enable_jira')) + suite.addTest(BaseTestCase("enable_jira")) else: - suite.addTest(BaseTestCase('disable_jira')) + suite.addTest(BaseTestCase("disable_jira")) if github: - suite.addTest(BaseTestCase('enable_github')) + suite.addTest(BaseTestCase("enable_github")) else: - suite.addTest(BaseTestCase('disable_github')) + suite.addTest(BaseTestCase("disable_github")) if block_execution: - suite.addTest(BaseTestCase('enable_block_execution')) + suite.addTest(BaseTestCase("enable_block_execution")) else: - suite.addTest(BaseTestCase('disable_block_execution')) + suite.addTest(BaseTestCase("disable_block_execution")) - suite.addTest(ProductTest('test_create_product')) + suite.addTest(ProductTest("test_create_product")) # Test same scanners - same engagement - dynamic - dedupe - suite.addTest(CloseOldTest('test_delete_findings')) - suite.addTest(CloseOldTest('test_add_same_engagement_engagement')) - suite.addTest(CloseOldTest('test_import_same_engagement_tests')) - suite.addTest(CloseOldTest('test_close_same_engagement_tests')) + suite.addTest(CloseOldTest("test_delete_findings")) + suite.addTest(CloseOldTest("test_add_same_engagement_engagement")) + suite.addTest(CloseOldTest("test_import_same_engagement_tests")) + suite.addTest(CloseOldTest("test_close_same_engagement_tests")) # Test same scanners - same product - dynamic - dedupe - 
suite.addTest(CloseOldTest('test_delete_findings')) - suite.addTest(CloseOldTest('test_add_same_product_engagement')) - suite.addTest(CloseOldTest('test_import_same_product_tests')) - suite.addTest(CloseOldTest('test_close_same_product_tests')) + suite.addTest(CloseOldTest("test_delete_findings")) + suite.addTest(CloseOldTest("test_add_same_product_engagement")) + suite.addTest(CloseOldTest("test_import_same_product_tests")) + suite.addTest(CloseOldTest("test_close_same_product_tests")) # Clean up - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/dedupe_test.py b/tests/dedupe_test.py index 73214cc06d..a6bfc86e80 100644 --- a/tests/dedupe_test.py +++ b/tests/dedupe_test.py @@ -34,9 +34,9 @@ def check_nb_duplicates(self, expected_number_of_duplicates): # iterate over the rows of the findings table and concatenates all columns into td.text trs = driver.find_elements(By.XPATH, '//*[@id="open_findings"]/tbody/tr') for row in trs: - concatRow = ' '.join([td.text for td in row.find_elements(By.XPATH, ".//td")]) + concatRow = " ".join([td.text for td in row.find_elements(By.XPATH, ".//td")]) # print(concatRow) - if '(DUPE)' and 'Duplicate' in concatRow: + if "(DUPE)" and "Duplicate" in concatRow: dupe_count += 1 if (dupe_count != expected_number_of_duplicates): @@ -45,8 +45,8 @@ def check_nb_duplicates(self, expected_number_of_duplicates): break if (dupe_count != expected_number_of_duplicates): - findings_table = driver.find_element(By.ID, 'open_findings') - print(findings_table.get_attribute('innerHTML')) + findings_table = driver.find_element(By.ID, "open_findings") + print(findings_table.get_attribute("innerHTML")) self.assertEqual(dupe_count, expected_number_of_duplicates) @@ -54,14 +54,14 @@ def check_nb_duplicates(self, expected_number_of_duplicates): def test_enable_deduplication(self): logger.debug("enabling deduplication...") driver = self.driver - driver.get(self.base_url + 'system_settings') - if not driver.find_element(By.ID, 'id_enable_deduplication').is_selected(): + driver.get(self.base_url + "system_settings") + if not driver.find_element(By.ID, "id_enable_deduplication").is_selected(): driver.find_element(By.XPATH, '//*[@id="id_enable_deduplication"]').click() # save settings driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # check if it's enabled after reload - driver.get(self.base_url + 'system_settings') - self.assertTrue(driver.find_element(By.ID, 'id_enable_deduplication').is_selected()) + driver.get(self.base_url + "system_settings") + self.assertTrue(driver.find_element(By.ID, "id_enable_deduplication").is_selected()) @on_exception_html_source_logger def test_delete_findings(self): @@ -71,27 +71,27 @@ def test_delete_findings(self): if self.element_exists_by_id("no_findings"): text = driver.find_element(By.ID, "no_findings").text - if 'No findings found.' in text: + if "No findings found." 
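Note: these add_*_tests_to_suite helpers are plain unittest composition — the caller hands in a TestSuite plus feature flags and gets the populated suite back. A minimal sketch of driving one of them (the import path below is hypothetical; the repository's real runner does its own wiring):

    import unittest

    # Hypothetical module name; substitute the test module that defines the builder.
    from close_old_findings_dedupe_test import add_close_old_tests_to_suite

    # Build the suite with the same flags the builder accepts, then run it.
    suite = add_close_old_tests_to_suite(unittest.TestSuite(), jira=False, github=False, block_execution=True)
    unittest.TextTestRunner(verbosity=2).run(suite)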
diff --git a/tests/dedupe_test.py b/tests/dedupe_test.py
index 73214cc06d..a6bfc86e80 100644
--- a/tests/dedupe_test.py
+++ b/tests/dedupe_test.py
@@ -34,9 +34,9 @@ def check_nb_duplicates(self, expected_number_of_duplicates):
             # iterate over the rows of the findings table and concatenates all columns into td.text
             trs = driver.find_elements(By.XPATH, '//*[@id="open_findings"]/tbody/tr')
             for row in trs:
-                concatRow = ' '.join([td.text for td in row.find_elements(By.XPATH, ".//td")])
+                concatRow = " ".join([td.text for td in row.find_elements(By.XPATH, ".//td")])
                 # print(concatRow)
-                if '(DUPE)' and 'Duplicate' in concatRow:
+                if "(DUPE)" and "Duplicate" in concatRow:
                     dupe_count += 1

             if (dupe_count != expected_number_of_duplicates):
@@ -45,8 +45,8 @@ def check_nb_duplicates(self, expected_number_of_duplicates):
                 break

         if (dupe_count != expected_number_of_duplicates):
-            findings_table = driver.find_element(By.ID, 'open_findings')
-            print(findings_table.get_attribute('innerHTML'))
+            findings_table = driver.find_element(By.ID, "open_findings")
+            print(findings_table.get_attribute("innerHTML"))

         self.assertEqual(dupe_count, expected_number_of_duplicates)

@@ -54,14 +54,14 @@ def check_nb_duplicates(self, expected_number_of_duplicates):
     def test_enable_deduplication(self):
         logger.debug("enabling deduplication...")
         driver = self.driver
-        driver.get(self.base_url + 'system_settings')
-        if not driver.find_element(By.ID, 'id_enable_deduplication').is_selected():
+        driver.get(self.base_url + "system_settings")
+        if not driver.find_element(By.ID, "id_enable_deduplication").is_selected():
             driver.find_element(By.XPATH, '//*[@id="id_enable_deduplication"]').click()
             # save settings
             driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
             # check if it's enabled after reload
-            driver.get(self.base_url + 'system_settings')
-            self.assertTrue(driver.find_element(By.ID, 'id_enable_deduplication').is_selected())
+            driver.get(self.base_url + "system_settings")
+            self.assertTrue(driver.find_element(By.ID, "id_enable_deduplication").is_selected())

     @on_exception_html_source_logger
     def test_delete_findings(self):
@@ -71,27 +71,27 @@ def test_delete_findings(self):

         if self.element_exists_by_id("no_findings"):
             text = driver.find_element(By.ID, "no_findings").text
-            if 'No findings found.' in text:
+            if "No findings found." in text:
                 return

         driver.find_element(By.ID, "select_all").click()
         driver.find_element(By.CSS_SELECTOR, "i.fa-solid.fa-trash").click()
         try:
             WebDriverWait(driver, 1).until(EC.alert_is_present(),
-                                           'Timed out waiting for finding delete '
-                                           + 'confirmation popup to appear.')
+                                           "Timed out waiting for finding delete "
+                                           + "confirmation popup to appear.")
             driver.switch_to.alert.accept()
         except TimeoutException:
-            self.fail('Confirmation dialogue not shown, cannot delete previous findings')
+            self.fail("Confirmation dialogue not shown, cannot delete previous findings")

         logger.debug("page source when checking for no_findings element")
         logger.debug(self.driver.page_source)
         text = driver.find_element(By.ID, "no_findings").text

         self.assertIsNotNone(text)
-        self.assertTrue('No findings found.' in text)
+        self.assertTrue("No findings found." in text)
         # check that user was redirect back to url where it came from based on return_url
-        self.assertTrue(driver.current_url.endswith('page=1'))
+        self.assertTrue(driver.current_url.endswith("page=1"))

     # --------------------------------------------------------------------------------------------------------
     # Same scanner deduplication - Deduplication on engagement
@@ -109,7 +109,7 @@ def test_add_path_test_suite(self):
         driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Add the tests
         # Test 1
         driver.find_element(By.ID, "id_title").send_keys("Path Test 1")
@@ -117,14 +117,14 @@ def test_add_path_test_suite(self):
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.NAME, "_Add Another Test").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))
         # Test 2
         driver.find_element(By.ID, "id_title").send_keys("Path Test 2")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Bandit Scan")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

     @on_exception_html_source_logger
     def test_import_path_tests(self):
@@ -141,11 +141,11 @@ def test_import_path_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 1").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_path_1.json")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_path_1.json")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

         # 'Bandit Scan processed a total of 1 findings created 1 findings did not touch 1 findings.'
-        self.assertTrue(self.is_success_message_present(text='a total of 1 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 1 findings"))

         # Second test
         # the second report have 2 findings (same vuln_id same file but different line number)
@@ -155,11 +155,11 @@ def test_import_path_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_path_2.json")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_path_2.json")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

         # 'Bandit Scan processed a total of 2 findings created 2 findings did not touch 1 findings.'
-        self.assertTrue(self.is_success_message_present(text='a total of 2 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 2 findings"))

     @on_exception_html_source_logger
     def test_check_path_status(self):
@@ -185,7 +185,7 @@ def test_add_endpoint_test_suite(self):
         driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Add the tests
         # Test 1
         driver.find_element(By.ID, "id_title").send_keys("Endpoint Test 1")
@@ -193,14 +193,14 @@ def test_add_endpoint_test_suite(self):
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.NAME, "_Add Another Test").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))
         # Test 2
         driver.find_element(By.ID, "id_title").send_keys("Endpoint Test 2")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Immuniweb Scan")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

     @on_exception_html_source_logger
     def test_import_endpoint_tests(self):
@@ -213,10 +213,10 @@ def test_import_endpoint_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Endpoint Test 1").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

         # Second test : Immuniweb Scan (dynamic)
         self.goto_active_engagements_overview(driver)
@@ -224,10 +224,10 @@ def test_import_endpoint_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Endpoint Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_2.xml")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_2.xml")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

     @on_exception_html_source_logger
     def test_check_endpoint_status(self):
@@ -249,7 +249,7 @@ def test_add_same_eng_test_suite(self):
         driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Add the tests
         # Test 1
         driver.find_element(By.ID, "id_title").send_keys("Same Eng Test 1")
@@ -257,14 +257,14 @@ def test_add_same_eng_test_suite(self):
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.NAME, "_Add Another Test").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))
         # Test 2
         driver.find_element(By.ID, "id_title").send_keys("Same Eng Test 2")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Generic Findings Import")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

     @on_exception_html_source_logger
     def test_import_same_eng_tests(self):
@@ -277,10 +277,10 @@ def test_import_same_eng_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Same Eng Test 1").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

         # Second test : Generic Findings Import with Url (dynamic)
         self.goto_active_engagements_overview(driver)
@@ -288,10 +288,10 @@ def test_import_same_eng_tests(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Same Eng Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

     @on_exception_html_source_logger
     def test_check_same_eng_status(self):
@@ -319,7 +319,7 @@ def test_add_path_test_suite_checkmarx_scan(self):
         driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Add the tests
         # Test 1
         driver.find_element(By.ID, "id_title").send_keys("Path Test 1")
@@ -327,14 +327,14 @@ def test_add_path_test_suite_checkmarx_scan(self):
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.NAME, "_Add Another Test").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))
         # Test 2
         driver.find_element(By.ID, "id_title").send_keys("Path Test 2")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Checkmarx Scan")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

     def test_import_path_tests_checkmarx_scan(self):
         # First test
@@ -346,10 +346,10 @@ def test_import_path_tests_checkmarx_scan(self):
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
         # os.path.realpath makes the path canonical
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 2 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 2 findings"))

         # Second test
         self.goto_active_engagements_overview(driver)
@@ -357,10 +357,10 @@ def test_import_path_tests_checkmarx_scan(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings_line_changed.xml"))
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings_line_changed.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 2 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 2 findings"))

     def test_check_path_status_checkmarx_scan(self):
         # After aggregation, it's only two findings. Both are duplicates even though the line number has changed
@@ -384,14 +384,14 @@ def test_add_cross_test_suite(self):
         # driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Test
         driver.find_element(By.ID, "id_title").send_keys("Generic Test")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Generic Findings Import")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

         # Create immuniweb engagement
         self.goto_product_overview(driver)
@@ -401,14 +401,14 @@ def test_add_cross_test_suite(self):
         # driver.find_element(By.XPATH, '//*[@id="id_deduplication_on_engagement"]').click()
         driver.find_element(By.NAME, "_Add Tests").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))
         # Test
         driver.find_element(By.ID, "id_title").send_keys("Immuniweb Test")
         Select(driver.find_element(By.ID, "id_test_type")).select_by_visible_text("Immuniweb Scan")
         Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Test added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Test added successfully"))

     def test_import_cross_test(self):
         logger.debug("Importing findings...")
@@ -420,10 +420,10 @@ def test_import_cross_test(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Immuniweb Test").click()
         driver.find_element(By.CSS_SELECTOR, "i.fa-solid.fa-ellipsis-vertical").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan Results").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_endpoint_1.xml")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

         # Second test : generic scan with url (dynamic)
         self.goto_active_engagements_overview(driver)
@@ -431,10 +431,10 @@ def test_import_cross_test(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Generic Test").click()
         driver.find_element(By.CSS_SELECTOR, "i.fa-solid.fa-ellipsis-vertical").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan Results").click()
-        driver.find_element(By.ID, 'id_file').send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
+        driver.find_element(By.ID, "id_file").send_keys(self.relative_path + "/dedupe_scans/dedupe_cross_1.csv")
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='a total of 3 findings'))
+        self.assertTrue(self.is_success_message_present(text="a total of 3 findings"))

     def test_check_cross_status(self):
         self.check_nb_duplicates(1)
@@ -453,10 +453,10 @@ def test_import_no_service(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 1").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='Checkmarx Scan processed a total of 2 findings created 2 findings.'))
+        self.assertTrue(self.is_success_message_present(text="Checkmarx Scan processed a total of 2 findings created 2 findings."))

         # Import the same findings a second time - they should all be duplicates
         self.goto_active_engagements_overview(driver)
@@ -464,10 +464,10 @@ def test_import_no_service(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='Checkmarx Scan processed a total of 2 findings created 2 findings.'))
+        self.assertTrue(self.is_success_message_present(text="Checkmarx Scan processed a total of 2 findings created 2 findings."))

     def test_check_no_service(self):
         # Since we imported the same report twice, we should have 2 duplicates
@@ -484,11 +484,11 @@ def test_import_service(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 1").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_service').send_keys("service_1")
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
+        driver.find_element(By.ID, "id_service").send_keys("service_1")
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='Checkmarx Scan processed a total of 2 findings created 2 findings.'))
+        self.assertTrue(self.is_success_message_present(text="Checkmarx Scan processed a total of 2 findings created 2 findings."))

         # Import the same findings a second time with a different service - they should all be new findings
         self.goto_active_engagements_overview(driver)
@@ -496,11 +496,11 @@ def test_import_service(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Path Test 2").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Re-Upload Scan").click()
-        driver.find_element(By.ID, 'id_service').send_keys("service_2")
-        driver.find_element(By.ID, 'id_file').send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
+        driver.find_element(By.ID, "id_service").send_keys("service_2")
+        driver.find_element(By.ID, "id_file").send_keys(os.path.realpath(self.relative_path + "/dedupe_scans/multiple_findings.xml"))
         driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click()

-        self.assertTrue(self.is_success_message_present(text='Checkmarx Scan processed a total of 2 findings created 2 findings.'))
+        self.assertTrue(self.is_success_message_present(text="Checkmarx Scan processed a total of 2 findings created 2 findings."))

     def test_check_service(self):
         # Since we imported the same report twice but with different service names, we should have no duplicates
@@ -508,58 +508,58 @@ def test_check_service(self):


 def add_dedupe_tests_to_suite(suite, jira=False, github=False, block_execution=False):
-    suite.addTest(BaseTestCase('test_login'))
+    suite.addTest(BaseTestCase("test_login"))
     set_suite_settings(suite, jira=jira, github=github, block_execution=block_execution)
     if jira:
-        suite.addTest(BaseTestCase('enable_jira'))
+        suite.addTest(BaseTestCase("enable_jira"))
     else:
-        suite.addTest(BaseTestCase('disable_jira'))
+        suite.addTest(BaseTestCase("disable_jira"))
     if github:
-        suite.addTest(BaseTestCase('enable_github'))
+        suite.addTest(BaseTestCase("enable_github"))
     else:
-        suite.addTest(BaseTestCase('disable_github'))
+        suite.addTest(BaseTestCase("disable_github"))
     if block_execution:
-        suite.addTest(BaseTestCase('enable_block_execution'))
+        suite.addTest(BaseTestCase("enable_block_execution"))
     else:
-        suite.addTest(BaseTestCase('disable_block_execution'))
+        suite.addTest(BaseTestCase("disable_block_execution"))

-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(DedupeTest('test_enable_deduplication'))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(DedupeTest("test_enable_deduplication"))
     # Test same scanners - same engagement - static - dedupe
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_add_path_test_suite'))
-    suite.addTest(DedupeTest('test_import_path_tests'))
-    suite.addTest(DedupeTest('test_check_path_status'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_add_path_test_suite"))
+    suite.addTest(DedupeTest("test_import_path_tests"))
+    suite.addTest(DedupeTest("test_check_path_status"))
     # Test same scanners - same engagement - dynamic - dedupe
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_add_endpoint_test_suite'))
-    suite.addTest(DedupeTest('test_import_endpoint_tests'))
-    suite.addTest(DedupeTest('test_check_endpoint_status'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_add_endpoint_test_suite"))
+    suite.addTest(DedupeTest("test_import_endpoint_tests"))
+    suite.addTest(DedupeTest("test_check_endpoint_status"))
     # Test different scanners - same engagement - dynamic - dedupe
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_add_same_eng_test_suite'))
-    suite.addTest(DedupeTest('test_import_same_eng_tests'))
-    suite.addTest(DedupeTest('test_check_same_eng_status'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_add_same_eng_test_suite"))
+    suite.addTest(DedupeTest("test_import_same_eng_tests"))
+    suite.addTest(DedupeTest("test_check_same_eng_status"))
     # Test same scanners - same engagement - static - dedupe with custom hash_code
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_add_path_test_suite_checkmarx_scan'))
-    suite.addTest(DedupeTest('test_import_path_tests_checkmarx_scan'))
-    suite.addTest(DedupeTest('test_check_path_status_checkmarx_scan'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_add_path_test_suite_checkmarx_scan"))
+    suite.addTest(DedupeTest("test_import_path_tests_checkmarx_scan"))
+    suite.addTest(DedupeTest("test_check_path_status_checkmarx_scan"))
     # Test different scanners - different engagement - dynamic - dedupe
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_add_cross_test_suite'))
-    suite.addTest(DedupeTest('test_import_cross_test'))
-    suite.addTest(DedupeTest('test_check_cross_status'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_add_cross_test_suite"))
+    suite.addTest(DedupeTest("test_import_cross_test"))
+    suite.addTest(DedupeTest("test_check_cross_status"))
     # Test deduplication with and without service in findings
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_import_no_service'))
-    suite.addTest(DedupeTest('test_check_no_service'))
-    suite.addTest(DedupeTest('test_delete_findings'))
-    suite.addTest(DedupeTest('test_import_service'))
-    suite.addTest(DedupeTest('test_check_service'))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_import_no_service"))
+    suite.addTest(DedupeTest("test_check_no_service"))
+    suite.addTest(DedupeTest("test_delete_findings"))
+    suite.addTest(DedupeTest("test_import_service"))
+    suite.addTest(DedupeTest("test_check_service"))
     # Clean up
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
diff --git a/tests/endpoint_test.py b/tests/endpoint_test.py
index 7306e309ea..87f0771c5a 100644
--- a/tests/endpoint_test.py
+++ b/tests/endpoint_test.py
@@ -30,7 +30,7 @@ def test_create_endpoint(self):

         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Endpoint added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Endpoint added successfully"))

     def test_edit_endpoint(self):
         # Login to the site. Password will have to be modified
@@ -56,7 +56,7 @@ def test_edit_endpoint(self):

         # Query the site to determine if the product has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Endpoint updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Endpoint updated successfully"))

     def test_delete_endpoint(self):
         # Login to the site. Password will have to be modified
@@ -75,20 +75,20 @@ def test_delete_endpoint(self):

         # Query the site to determine if the product has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Endpoint and relationships removed.'))
+        self.assertTrue(self.is_success_message_present(text="Endpoint and relationships removed."))


 def suite():
     suite = unittest.TestSuite()
     # Add each test the the suite to be run
     # success and failure is output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(EndpointTest('test_create_endpoint'))
-    suite.addTest(EndpointTest('test_edit_endpoint'))
-    suite.addTest(EndpointTest('test_delete_endpoint'))
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(EndpointTest("test_create_endpoint"))
+    suite.addTest(EndpointTest("test_edit_endpoint"))
+    suite.addTest(EndpointTest("test_delete_endpoint"))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
diff --git a/tests/engagement_test.py b/tests/engagement_test.py
index 6dde9962be..11ffbe12b2 100644
--- a/tests/engagement_test.py
+++ b/tests/engagement_test.py
@@ -32,11 +32,11 @@ def test_add_new_engagement(self):
         driver.find_element(By.ID, "id_name").send_keys("test engagement")
         driver.find_element(By.ID, "id_name").send_keys("\tthis is engagement test.")
         driver.find_element(By.ID, "id_test_strategy").clear()
-        driver.find_element(By.ID, 'id_test_strategy').send_keys("http://localhost:5000")
+        driver.find_element(By.ID, "id_test_strategy").send_keys("http://localhost:5000")
         Select(driver.find_element(By.ID, "id_status")).select_by_visible_text("In Progress")
         driver.find_element(By.CSS_SELECTOR, "input[value='Done']").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))

     def test_edit_created_new_engagement(self):
         driver = self.driver
@@ -51,7 +51,7 @@ def test_edit_created_new_engagement(self):
         Select(driver.find_element(By.ID, "id_status")).select_by_visible_text("In Progress")
         driver.find_element(By.CSS_SELECTOR, "input[value='Done']").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement updated successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement updated successfully."))

     def test_close_new_engagement(self):
         driver = self.driver
@@ -62,53 +62,53 @@ def test_close_new_engagement(self):
         driver.find_element(By.ID, "dropdownMenu1").click()
         driver.find_element(By.LINK_TEXT, "Close Engagement").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement closed successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement closed successfully."))

     def test_delete_new_closed_engagement(self):
         driver = self.driver
         self.goto_product_overview(driver)
         driver.find_element(By.CSS_SELECTOR, ".dropdown-toggle.pull-left").click()
-        driver.find_element(By.LINK_TEXT, 'View Engagements').click()
+        driver.find_element(By.LINK_TEXT, "View Engagements").click()
         self.wait_for_datatable_if_content("no_active_engagements", "open_wrapper")
         driver.find_element(By.LINK_TEXT, "edited test engagement").click()
         driver.find_element(By.ID, "dropdownMenu1").click()
-        driver.find_element(By.LINK_TEXT, 'Delete Engagement').click()
-        driver.find_element(By.NAME, 'delete_name').click()
+        driver.find_element(By.LINK_TEXT, "Delete Engagement").click()
+        driver.find_element(By.NAME, "delete_name").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement and relationships removed.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement and relationships removed."))

     def test_new_ci_cd_engagement(self):
         driver = self.driver
         self.goto_product_overview(driver)
         # wait for product_wrapper div as datatables javascript modifies the DOM on page load.
-        driver.find_element(By.ID, 'products_wrapper')
-        driver.find_element(By.LINK_TEXT, 'QA Test').click()
+        driver.find_element(By.ID, "products_wrapper")
+        driver.find_element(By.LINK_TEXT, "QA Test").click()
         driver.find_element(By.XPATH, "//a[@class='dropdown-toggle active']//span[@class='hidden-xs']").click()
-        driver.find_element(By.LINK_TEXT, 'Add New CI/CD Engagement').click()
+        driver.find_element(By.LINK_TEXT, "Add New CI/CD Engagement").click()
         driver.find_element(By.ID, "id_name").send_keys("test new ci/cd engagement")
         driver.find_element(By.ID, "id_name").send_keys("\ttest new ci/cd engagement")
-        driver.find_element(By.ID, 'id_deduplication_on_engagement').get_attribute('checked')
+        driver.find_element(By.ID, "id_deduplication_on_engagement").get_attribute("checked")
         driver.find_element(By.CSS_SELECTOR, "input[value='Done']").click()

-        self.assertTrue(self.is_success_message_present(text='Engagement added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Engagement added successfully."))


 def suite():
     suite = unittest.TestSuite()
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(EngagementTest('test_add_new_engagement'))
-    suite.addTest(EngagementTest('test_edit_created_new_engagement'))
-    suite.addTest(EngagementTest('test_list_active_engagements_found'))
-    suite.addTest(EngagementTest('test_close_new_engagement'))
-    suite.addTest(EngagementTest('test_list_active_engagements_empty'))
-    suite.addTest(EngagementTest('test_list_all_engagements_by_product'))
-    suite.addTest(EngagementTest('test_delete_new_closed_engagement'))
-    suite.addTest(EngagementTest('test_new_ci_cd_engagement'))
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(EngagementTest("test_add_new_engagement"))
+    suite.addTest(EngagementTest("test_edit_created_new_engagement"))
+    suite.addTest(EngagementTest("test_list_active_engagements_found"))
+    suite.addTest(EngagementTest("test_close_new_engagement"))
+    suite.addTest(EngagementTest("test_list_active_engagements_empty"))
+    suite.addTest(EngagementTest("test_list_all_engagements_by_product"))
+    suite.addTest(EngagementTest("test_delete_new_closed_engagement"))
+    suite.addTest(EngagementTest("test_new_ci_cd_engagement"))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
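Note: one small smell the quote change carries over unchanged in test_new_ci_cd_engagement — the get_attribute("checked") result is discarded, so that line verifies nothing. If the intent is to confirm the CI/CD engagement form defaults deduplication-on-engagement to checked, an assertion sketch (the default being checked is an assumption here; Selenium returns "true" for a checked box and None otherwise):

    # Assumes the CI/CD engagement form pre-checks this box; adjust if the default differs.
    checked = driver.find_element(By.ID, "id_deduplication_on_engagement").get_attribute("checked")
    self.assertEqual(checked, "true")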
"id_username").send_keys(os.environ['DD_ADMIN_USER']) + driver.find_element(By.ID, "id_username").send_keys(os.environ["DD_ADMIN_USER"]) driver.find_element(By.ID, "id_password").clear() - driver.find_element(By.ID, "id_password").send_keys(os.environ['DD_ADMIN_PASSWORD']) + driver.find_element(By.ID, "id_password").send_keys(os.environ["DD_ADMIN_PASSWORD"]) driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click() return driver @@ -27,7 +27,7 @@ def test_create_environment(self): driver.find_element(By.ID, "id_name").send_keys("environment test") driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Environment added successfully.')) + self.assertTrue(self.is_success_message_present(text="Environment added successfully.")) def test_edit_environment(self): driver = self.driver @@ -37,7 +37,7 @@ def test_edit_environment(self): driver.find_element(By.ID, "id_name").send_keys("Edited environment test") driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Environment updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Environment updated successfully.")) def test_delete_environment(self): driver = self.driver @@ -45,16 +45,16 @@ def test_delete_environment(self): driver.find_element(By.LINK_TEXT, "Edited environment test").click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() - self.assertTrue(self.is_success_message_present(text='Environment deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Environment deleted successfully.")) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(EnvironmentTest('test_create_environment')) - suite.addTest(EnvironmentTest('test_edit_environment')) - suite.addTest(EnvironmentTest('test_delete_environment')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(EnvironmentTest("test_create_environment")) + suite.addTest(EnvironmentTest("test_edit_environment")) + suite.addTest(EnvironmentTest("test_delete_environment")) return suite diff --git a/tests/false_positive_history_test.py b/tests/false_positive_history_test.py index d330ffb194..05cadb244d 100644 --- a/tests/false_positive_history_test.py +++ b/tests/false_positive_history_test.py @@ -15,7 +15,7 @@ def create_finding(self, product_name, engagement_name, test_name, finding_name) # Navigate to the Product page to select the product we created earlier self.goto_product_overview(driver) # wait for product_wrapper div as datatables javascript modifies the DOM on page load. 
diff --git a/tests/false_positive_history_test.py b/tests/false_positive_history_test.py
index d330ffb194..05cadb244d 100644
--- a/tests/false_positive_history_test.py
+++ b/tests/false_positive_history_test.py
@@ -15,7 +15,7 @@ def create_finding(self, product_name, engagement_name, test_name, finding_name):
         # Navigate to the Product page to select the product we created earlier
         self.goto_product_overview(driver)
         # wait for product_wrapper div as datatables javascript modifies the DOM on page load.
-        driver.find_element(By.ID, 'products_wrapper')
+        driver.find_element(By.ID, "products_wrapper")
         # Select and click on the particular product to create finding for
         driver.find_element(By.LINK_TEXT, product_name).click()
         # Click on the 'Engagement' Dropdown button
@@ -55,14 +55,14 @@ def create_finding(self, product_name, engagement_name, test_name, finding_name):
     def assert_is_active(self, finding_url):
         driver = self.driver
         driver.get(finding_url)
-        self.assertTrue(self.is_element_by_css_selector_present(selector='#notes', text='Active'))
-        self.assertFalse(self.is_element_by_css_selector_present(selector='#notes', text='False Positive'))
+        self.assertTrue(self.is_element_by_css_selector_present(selector="#notes", text="Active"))
+        self.assertFalse(self.is_element_by_css_selector_present(selector="#notes", text="False Positive"))

     def assert_is_false_positive(self, finding_url):
         driver = self.driver
         driver.get(finding_url)
-        self.assertFalse(self.is_element_by_css_selector_present(selector='#notes', text='Active'))
-        self.assertTrue(self.is_element_by_css_selector_present(selector='#notes', text='False Positive'))
+        self.assertFalse(self.is_element_by_css_selector_present(selector="#notes", text="Active"))
+        self.assertTrue(self.is_element_by_css_selector_present(selector="#notes", text="False Positive"))

     def edit_toggle_false_positive(self, finding_url):
         driver = self.driver
@@ -99,16 +99,16 @@ def bulk_edit(self, finding_url, status_id):
     def test_retroactive_edit_finding(self):
         # Create two equal findings on different engagements
         finding_1 = self.create_finding(
-            product_name='QA Test',
-            engagement_name='FP History Eng 1',
-            test_name='FP History Test',
-            finding_name='Fake Vulnerability for Edit Test',
+            product_name="QA Test",
+            engagement_name="FP History Eng 1",
+            test_name="FP History Test",
+            finding_name="Fake Vulnerability for Edit Test",
         )
         finding_2 = self.create_finding(
-            product_name='QA Test',
-            engagement_name='FP History Eng 2',
-            test_name='FP History Test',
-            finding_name='Fake Vulnerability for Edit Test',
+            product_name="QA Test",
+            engagement_name="FP History Eng 2",
+            test_name="FP History Test",
+            finding_name="Fake Vulnerability for Edit Test",
         )
         # Assert that both findings are active
         self.assert_is_active(finding_1)
         self.assert_is_active(finding_2)
@@ -127,27 +127,27 @@ def test_retroactive_bulk_edit_finding(self):
         # Create two equal findings on different engagements
         finding_1 = self.create_finding(
-            product_name='QA Test',
-            engagement_name='FP History Eng 1',
-            test_name='FP History Test',
-            finding_name='Fake Vulnerability for Bulk Edit Test',
+            product_name="QA Test",
+            engagement_name="FP History Eng 1",
+            test_name="FP History Test",
+            finding_name="Fake Vulnerability for Bulk Edit Test",
         )
         finding_2 = self.create_finding(
-            product_name='QA Test',
-            engagement_name='FP History Eng 2',
-            test_name='FP History Test',
-            finding_name='Fake Vulnerability for Bulk Edit Test',
+            product_name="QA Test",
+            engagement_name="FP History Eng 2",
+            test_name="FP History Test",
+            finding_name="Fake Vulnerability for Bulk Edit Test",
         )
         # Assert that both findings are active
         self.assert_is_active(finding_1)
         self.assert_is_active(finding_2)
         # Bulk edit first finding to be a false positive
-        self.bulk_edit(finding_1, status_id='id_bulk_false_p')
+        self.bulk_edit(finding_1, status_id="id_bulk_false_p")
         # Assert that both findings are false positives
         self.assert_is_false_positive(finding_1)
         self.assert_is_false_positive(finding_2)
         # Reactivate second finding
-        self.bulk_edit(finding_2, status_id='id_bulk_active')
+        self.bulk_edit(finding_2, status_id="id_bulk_active")
         # Assert that both findings are active again
         self.assert_is_active(finding_1)
         self.assert_is_active(finding_2)
@@ -155,18 +155,18 @@ def test_retroactive_bulk_edit_finding(self):


 def suite():
     suite = unittest.TestSuite()
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('enable_block_execution'))
-    suite.addTest(BaseTestCase('disable_deduplication'))
-    suite.addTest(BaseTestCase('enable_false_positive_history'))
-    suite.addTest(BaseTestCase('enable_retroactive_false_positive_history'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("enable_block_execution"))
+    suite.addTest(BaseTestCase("disable_deduplication"))
+    suite.addTest(BaseTestCase("enable_false_positive_history"))
+    suite.addTest(BaseTestCase("enable_retroactive_false_positive_history"))
     # Add each test the the suite to be run
     # success and failure is output by the test
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(FalsePositiveHistoryTest('test_retroactive_edit_finding'))
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(FalsePositiveHistoryTest('test_retroactive_bulk_edit_finding'))
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(FalsePositiveHistoryTest("test_retroactive_edit_finding"))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(FalsePositiveHistoryTest("test_retroactive_bulk_edit_finding"))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
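Note: assert_is_active and assert_is_false_positive lean entirely on the BaseTestCase helper is_element_by_css_selector_present, whose implementation is not part of this diff. One plausible shape, for readers following along (the text=None presence-only behaviour is an assumption about the real helper):

    def is_element_by_css_selector_present(self, selector, text=None):
        # True if any element matching the selector (optionally containing text) exists.
        elements = self.driver.find_elements(By.CSS_SELECTOR, selector)
        if text is None:
            return len(elements) > 0
        return any(text in element.text for element in elements)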
diff --git a/tests/file_test.py b/tests/file_test.py
index dce9d43894..5e3294adf4 100644
--- a/tests/file_test.py
+++ b/tests/file_test.py
@@ -35,15 +35,15 @@ def test_add_file_finding_level(self):
         driver.find_element(By.LINK_TEXT, "Manage Files").click()
         # select first file input field: form-0-image
         # Set full image path for image file 'strange.png
-        image_path = os.path.join(dir_path, 'finding_image.png')
-        driver.find_element(By.ID, "id_form-0-title").send_keys('Finding Title')
+        image_path = os.path.join(dir_path, "finding_image.png")
+        driver.find_element(By.ID, "id_form-0-title").send_keys("Finding Title")
         driver.find_element(By.ID, "id_form-0-file").send_keys(image_path)
         # Save uploaded image
         with WaitForPageLoad(driver, timeout=50):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))

     def test_delete_file_finding_level(self):
         # login to site, password set to fetch from environ
@@ -62,7 +62,7 @@ def test_delete_file_finding_level(self):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))

     def test_add_file_test_level(self):
         # View existing test from ProductTest()
@@ -76,15 +76,15 @@ def test_add_file_test_level(self):
         driver.find_element(By.NAME, "Manage Files").click()
         # select first file input field: form-0-image
         # Set full image path for image file 'strange.png
-        image_path = os.path.join(dir_path, 'finding_image.png')
-        driver.find_element(By.ID, "id_form-0-title").send_keys('Test Title')
+        image_path = os.path.join(dir_path, "finding_image.png")
+        driver.find_element(By.ID, "id_form-0-title").send_keys("Test Title")
         driver.find_element(By.ID, "id_form-0-file").send_keys(image_path)
         # Save uploaded image
         with WaitForPageLoad(driver, timeout=50):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))

     def test_delete_file_test_level(self):
         # View existing test from ProductTest()
@@ -102,7 +102,7 @@ def test_delete_file_test_level(self):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))

     def test_add_file_engagement_level(self):
         # View existing test from ProductTest()
@@ -116,15 +116,15 @@ def test_add_file_engagement_level(self):
         driver.find_element(By.NAME, "Manage Files").click()
         # select first file input field: form-0-image
         # Set full image path for image file 'strange.png
-        image_path = os.path.join(dir_path, 'finding_image.png')
-        driver.find_element(By.ID, "id_form-0-title").send_keys('Engagement Title')
+        image_path = os.path.join(dir_path, "finding_image.png")
+        driver.find_element(By.ID, "id_form-0-title").send_keys("Engagement Title")
         driver.find_element(By.ID, "id_form-0-file").send_keys(image_path)
         # Save uploaded image
         with WaitForPageLoad(driver, timeout=50):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))

     def test_delete_file_engagement_level(self):
         # View existing test from ProductTest()
@@ -142,22 +142,22 @@ def test_delete_file_engagement_level(self):
             driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         # Query the site to determine if the finding has been added
         # Assert ot the query to dtermine status of failure
-        self.assertTrue(self.is_success_message_present(text='Files updated successfully'))
+        self.assertTrue(self.is_success_message_present(text="Files updated successfully"))


 def add_file_tests_to_suite(suite):
     # Add each test the the suite to be run
     # success and failure is output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(ProductTest('test_add_product_finding'))
-    suite.addTest(FileUploadTest('test_add_file_finding_level'))
-    suite.addTest(FileUploadTest('test_delete_file_finding_level'))
-    suite.addTest(FileUploadTest('test_add_file_test_level'))
-    suite.addTest(FileUploadTest('test_delete_file_test_level'))
-    suite.addTest(FileUploadTest('test_add_file_engagement_level'))
-    suite.addTest(FileUploadTest('test_delete_file_engagement_level'))
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(ProductTest("test_add_product_finding"))
+    suite.addTest(FileUploadTest("test_add_file_finding_level"))
+    suite.addTest(FileUploadTest("test_delete_file_finding_level"))
+    suite.addTest(FileUploadTest("test_add_file_test_level"))
+    suite.addTest(FileUploadTest("test_delete_file_test_level"))
+    suite.addTest(FileUploadTest("test_add_file_engagement_level"))
+    suite.addTest(FileUploadTest("test_delete_file_engagement_level"))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
diff --git a/tests/finding_test.py b/tests/finding_test.py
index 7643e013ff..ac2794dbc8 100644
--- a/tests/finding_test.py
+++ b/tests/finding_test.py
@@ -18,16 +18,16 @@ class FindingTest(BaseTestCase):

     def test_list_findings_all(self):
-        return self.test_list_findings('finding')
+        return self.test_list_findings("finding")

     def test_list_findings_closed(self):
-        return self.test_list_findings('finding/closed')
+        return self.test_list_findings("finding/closed")

     def test_list_findings_accepted(self):
-        return self.test_list_findings('finding/accepted')
+        return self.test_list_findings("finding/accepted")

     def test_list_findings_open(self):
-        return self.test_list_findings('finding/open')
+        return self.test_list_findings("finding/open")

     def test_list_findings(self, suffix):
         # bulk edit dropdown menu
@@ -71,7 +71,7 @@ def check_file(self, file_name):
             if Path(file_name).is_file():
                 file_found = True
                 break
-        self.assertTrue(file_found, f'Cannot find {file_name}')
+        self.assertTrue(file_found, f"Cannot find {file_name}")
         os.remove(file_name)

     def test_csv_export(self):
@@ -83,7 +83,7 @@ def test_csv_export(self):

         time.sleep(5)

-        self.check_file(f'{self.export_path}/findings.csv')
+        self.check_file(f"{self.export_path}/findings.csv")

     def test_excel_export(self):
         driver = self.driver
@@ -94,7 +94,7 @@ def test_excel_export(self):

         time.sleep(5)

-        self.check_file(f'{self.export_path}/findings.xlsx')
+        self.check_file(f"{self.export_path}/findings.xlsx")

     @on_exception_html_source_logger
     def test_edit_finding(self):
- driver.find_element(By.ID, "id_form-0-title").send_keys('Image Title') + driver.find_element(By.ID, "id_form-0-title").send_keys("Image Title") # Save uploaded image with WaitForPageLoad(driver, timeout=50): driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click() # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Files updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Files updated successfully.")) @on_exception_html_source_logger def test_add_note_to_finding(self): @@ -174,7 +174,7 @@ def test_add_note_to_finding(self): driver.find_element(By.XPATH, "//input[@value='Add Note']").click() # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Note saved.')) + self.assertTrue(self.is_success_message_present(text="Note saved.")) def test_mark_finding_for_review(self): # login to site, password set to fetch from environ @@ -191,14 +191,14 @@ def test_mark_finding_for_review(self): # Let's make the first user in the list a reviewer # set select element style from 'none' to 'inline' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_reviewers'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_reviewers"))) except TimeoutException: - self.fail('Timed out waiting for reviewer dropdown to initialize ') + self.fail("Timed out waiting for reviewer dropdown to initialize ") driver.execute_script("document.getElementsByName('reviewers')[0].style.display = 'inline'") # select the first option tag element = driver.find_element(By.XPATH, "//select[@name='reviewers']") - reviewer_option = element.find_elements(By.TAG_NAME, 'option')[0] + reviewer_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(reviewer_option.get_attribute("value")) # Add Review notes driver.find_element(By.ID, "id_entry").clear() @@ -208,7 +208,7 @@ def test_mark_finding_for_review(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding marked for review and reviewers notified.')) + self.assertTrue(self.is_success_message_present(text="Finding marked for review and reviewers notified.")) @on_exception_html_source_logger def test_clear_review_from_finding(self): @@ -221,8 +221,8 @@ def test_clear_review_from_finding(self): # Click on `Clear Review` link text driver.find_element(By.LINK_TEXT, "Clear Review").click() # Mark Active and Verified checkboxes - driver.find_element(By.ID, 'id_active').click() - driver.find_element(By.ID, 'id_verified').click() + driver.find_element(By.ID, "id_active").click() + driver.find_element(By.ID, "id_verified").click() # Add Review notes driver.find_element(By.ID, "id_entry").clear() driver.find_element(By.ID, "id_entry").send_keys("This has been reviewed and confirmed. 
A fix needed here.") @@ -231,7 +231,7 @@ def test_clear_review_from_finding(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding review has been updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Finding review has been updated successfully.")) def test_delete_image(self): # login to site, password set to fetch from environ @@ -251,7 +251,7 @@ def test_delete_image(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Files updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Files updated successfully.")) def test_close_finding(self): driver = self.driver @@ -271,7 +271,7 @@ def test_close_finding(self): driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding closed.')) + self.assertTrue(self.is_success_message_present(text="Finding closed.")) # Check to see if the endpoint was mitigated # Select and click on the particular finding to edit driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() @@ -294,7 +294,7 @@ def test_open_finding(self): # Click on `Open Finding` driver.find_element(By.LINK_TEXT, "Open Finding").click() # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding Reopened.')) + self.assertTrue(self.is_success_message_present(text="Finding Reopened.")) # Check to see if the endpoint was set to active again # Select and click on the particular finding to edit driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() @@ -319,7 +319,7 @@ def test_simple_accept_finding(self): driver.find_element(By.LINK_TEXT, "Accept Risk").click() # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding risk accepted.')) + self.assertTrue(self.is_success_message_present(text="Finding risk accepted.")) # Check to see if the endpoint was mitigated # Select and click on the particular finding to edit driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() @@ -343,7 +343,7 @@ def test_unaccept_finding(self): driver.find_element(By.LINK_TEXT, "Unaccept Risk").click() # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding risk unaccepted.')) + self.assertTrue(self.is_success_message_present(text="Finding risk unaccepted.")) # Check to see if the endpoint was mitigated # Select and click on the particular finding to edit driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() @@ -366,7 +366,7 @@ def test_make_finding_a_template(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding template added successfully. You may edit it here.')) + self.assertTrue(self.is_success_message_present(text="Finding template added successfully. 
You may edit it here.")) def test_apply_template_to_a_finding(self): driver = self.driver @@ -394,12 +394,12 @@ def test_apply_template_to_a_finding(self): self.assertNoConsoleErrors() # Click the 'finished' button to submit # print("\nClicking on finished \n") - driver.find_element(By.NAME, '_Finished').click() + driver.find_element(By.NAME, "_Finished").click() self.assertNoConsoleErrors() # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_text_present_on_page(text='App Vulnerable to XSS')) + self.assertTrue(self.is_text_present_on_page(text="App Vulnerable to XSS")) @on_exception_html_source_logger def test_create_finding_from_template(self): @@ -436,8 +436,8 @@ def test_create_finding_from_template(self): # Query the site to determine if the finding has been added # Assert to the query to determine status of failure - self.assertTrue(self.is_success_message_present(text='Finding from template added successfully.')) - self.assertTrue(self.is_text_present_on_page(text='App Vulnerable to XSS From Template')) + self.assertTrue(self.is_success_message_present(text="Finding from template added successfully.")) + self.assertTrue(self.is_text_present_on_page(text="App Vulnerable to XSS From Template")) @on_exception_html_source_logger def test_delete_finding_template(self): @@ -453,7 +453,7 @@ def test_delete_finding_template(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding Template deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Finding Template deleted successfully.")) def test_import_scan_result(self): driver = self.driver @@ -466,11 +466,11 @@ def test_import_scan_result(self): # Click on `Import Scan Results` link text driver.find_element(By.LINK_TEXT, "Import Scan Results").click() # Select `ZAP Scan` as Scan Type - Select(driver.find_element(By.ID, "id_scan_type")).select_by_visible_text('ZAP Scan') + Select(driver.find_element(By.ID, "id_scan_type")).select_by_visible_text("ZAP Scan") # Select `Default` as the Environment - Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text('Development') + Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development") # upload scan file - file_path = os.path.join(dir_path, 'zap_sample.xml') + file_path = os.path.join(dir_path, "zap_sample.xml") driver.find_element(By.NAME, "file").send_keys(file_path) # Click Submit button with WaitForPageLoad(driver, timeout=50): @@ -479,7 +479,7 @@ def test_import_scan_result(self): # print("\n\nDebug Print Log: findingTxt fetched: {}\n".format(productTxt)) # print("Checking for '.*ZAP Scan processed a total of 4 findings.*'") # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='ZAP Scan processed a total of 4 findings')) + self.assertTrue(self.is_success_message_present(text="ZAP Scan processed a total of 4 findings")) @on_exception_html_source_logger def test_delete_finding(self): @@ -503,7 +503,7 @@ def test_delete_finding(self): # Assert ot the query to dtermine status of failure # self.assertTrue(self.is_success_message_present(text='Finding deleted successfully')) # there's no alert when deleting this way - self.assertTrue(self.is_text_present_on_page(text='Finding deleted successfully')) + self.assertTrue(self.is_text_present_on_page(text="Finding 
deleted successfully")) # check that user was redirect back to url where it came from based on return_url def test_list_components(self): @@ -513,43 +513,43 @@ def test_list_components(self): def add_finding_tests_to_suite(suite, jira=False, github=False, block_execution=False): - suite.addTest(BaseTestCase('test_login')) + suite.addTest(BaseTestCase("test_login")) set_suite_settings(suite, jira=jira, github=github, block_execution=block_execution) # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('delete_finding_template_if_exists')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ProductTest('test_add_product_finding')) - suite.addTest(UserTest('test_create_user_with_writer_global_role')) - suite.addTest(FindingTest('test_list_findings_all')) - suite.addTest(FindingTest('test_list_findings_open')) - suite.addTest(FindingTest('test_quick_report')) - suite.addTest(FindingTest('test_csv_export')) - suite.addTest(FindingTest('test_excel_export')) - suite.addTest(FindingTest('test_list_components')) - suite.addTest(FindingTest('test_edit_finding')) - suite.addTest(FindingTest('test_add_note_to_finding')) - suite.addTest(FindingTest('test_add_image')) - suite.addTest(FindingTest('test_delete_image')) - suite.addTest(FindingTest('test_mark_finding_for_review')) - suite.addTest(FindingTest('test_clear_review_from_finding')) - suite.addTest(FindingTest('test_close_finding')) - suite.addTest(FindingTest('test_list_findings_closed')) - suite.addTest(FindingTest('test_open_finding')) - suite.addTest(ProductTest('test_enable_simple_risk_acceptance')) - suite.addTest(FindingTest('test_simple_accept_finding')) - suite.addTest(FindingTest('test_list_findings_accepted')) - suite.addTest(FindingTest('test_list_findings_all')) - suite.addTest(FindingTest('test_unaccept_finding')) - suite.addTest(FindingTest('test_make_finding_a_template')) - suite.addTest(FindingTest('test_apply_template_to_a_finding')) - suite.addTest(FindingTest('test_create_finding_from_template')) - suite.addTest(FindingTest('test_import_scan_result')) - suite.addTest(FindingTest('test_delete_finding')) - suite.addTest(FindingTest('test_delete_finding_template')) - suite.addTest(ProductTest('test_delete_product')) - suite.addTest(UserTest('test_user_with_writer_role_delete')) + suite.addTest(BaseTestCase("delete_finding_template_if_exists")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ProductTest("test_add_product_finding")) + suite.addTest(UserTest("test_create_user_with_writer_global_role")) + suite.addTest(FindingTest("test_list_findings_all")) + suite.addTest(FindingTest("test_list_findings_open")) + suite.addTest(FindingTest("test_quick_report")) + suite.addTest(FindingTest("test_csv_export")) + suite.addTest(FindingTest("test_excel_export")) + suite.addTest(FindingTest("test_list_components")) + suite.addTest(FindingTest("test_edit_finding")) + suite.addTest(FindingTest("test_add_note_to_finding")) + suite.addTest(FindingTest("test_add_image")) + suite.addTest(FindingTest("test_delete_image")) + suite.addTest(FindingTest("test_mark_finding_for_review")) + suite.addTest(FindingTest("test_clear_review_from_finding")) + suite.addTest(FindingTest("test_close_finding")) + suite.addTest(FindingTest("test_list_findings_closed")) + suite.addTest(FindingTest("test_open_finding")) + suite.addTest(ProductTest("test_enable_simple_risk_acceptance")) + suite.addTest(FindingTest("test_simple_accept_finding")) + 
suite.addTest(FindingTest("test_list_findings_accepted")) + suite.addTest(FindingTest("test_list_findings_all")) + suite.addTest(FindingTest("test_unaccept_finding")) + suite.addTest(FindingTest("test_make_finding_a_template")) + suite.addTest(FindingTest("test_apply_template_to_a_finding")) + suite.addTest(FindingTest("test_create_finding_from_template")) + suite.addTest(FindingTest("test_import_scan_result")) + suite.addTest(FindingTest("test_delete_finding")) + suite.addTest(FindingTest("test_delete_finding_template")) + suite.addTest(ProductTest("test_delete_product")) + suite.addTest(UserTest("test_user_with_writer_role_delete")) return suite diff --git a/tests/group_test.py b/tests/group_test.py index 5694293fbf..bde85f08f9 100644 --- a/tests/group_test.py +++ b/tests/group_test.py @@ -27,7 +27,7 @@ def test_create_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert status is success - self.assertTrue(self.is_success_message_present(text='Group was added successfully.')) + self.assertTrue(self.is_success_message_present(text="Group was added successfully.")) def test_group_edit_name_and_global_role(self): # Login to the site. Password will have to be modified @@ -56,7 +56,7 @@ def test_group_edit_name_and_global_role(self): driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert status is success - self.assertTrue(self.is_success_message_present(text='Group saved successfully.')) + self.assertTrue(self.is_success_message_present(text="Group saved successfully.")) def test_add_group_member(self): # Login to the site. Password will have to be modified @@ -71,19 +71,19 @@ def test_add_group_member(self): driver.find_element(By.ID, "addGroupMember").click() # Select the user 'propersahm' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_users'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_users"))) except TimeoutException: - self.fail('Timed out waiting for products dropdown to initialize ') + self.fail("Timed out waiting for products dropdown to initialize ") driver.execute_script("document.getElementsByName('users')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='users']") - user_option = element.find_elements(By.TAG_NAME, 'option')[0] + user_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(user_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Group members added successfully.')) + self.assertTrue(self.is_success_message_present(text="Group members added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_user")[1].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[1].text, "Reader") @@ -104,7 +104,7 @@ def test_edit_group_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Group member updated 
successfully')) + self.assertTrue(self.is_success_message_present(text="Group member updated successfully")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_user")[1].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[1].text, "Maintainer") @@ -123,7 +123,7 @@ def test_delete_group_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Group member deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Group member deleted successfully.")) def test_group_delete(self): # Login to the site. Password will have to be modified @@ -147,7 +147,7 @@ def test_group_delete(self): driver.find_element(By.CSS_SELECTOR, "button.btn.btn-danger").click() # Assert status is success - self.assertTrue(self.is_success_message_present(text='Group and relationships successfully removed.')) + self.assertTrue(self.is_success_message_present(text="Group and relationships successfully removed.")) def test_group_edit_configuration(self): @@ -155,7 +155,7 @@ def test_group_edit_configuration(self): driver = self.driver self.login_standard_page() with self.assertRaises(NoSuchElementException): - driver.find_element(By.ID, 'id_group_menu') + driver.find_element(By.ID, "id_group_menu") # Login as superuser and activate view user configuration for group with standard user self.login_page() @@ -169,30 +169,30 @@ def test_group_edit_configuration(self): # Login as standard user and check the user menu does exist now self.login_standard_page() - driver.find_element(By.ID, 'id_group_menu') + driver.find_element(By.ID, "id_group_menu") # Navigate to User Management page driver.get(self.base_url + "group") # Select and click on the particular group to view driver.find_element(By.LINK_TEXT, "Another Name").click() # Check user cannot edit configuration permissions - self.assertFalse(self.driver.find_element(By.ID, 'id_add_development_environment').is_enabled()) + self.assertFalse(self.driver.find_element(By.ID, "id_add_development_environment").is_enabled()) def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - suite.addTest(UserTest('test_create_user')) - suite.addTest(GroupTest('test_create_group')) - suite.addTest(GroupTest('test_group_edit_name_and_global_role')) - suite.addTest(GroupTest('test_add_group_member')) - suite.addTest(GroupTest('test_group_edit_configuration')) - suite.addTest(BaseTestCase('test_login')) - suite.addTest(GroupTest('test_edit_group_member')) - suite.addTest(GroupTest('test_delete_group_member')) - suite.addTest(GroupTest('test_group_delete')) - suite.addTest(UserTest('test_user_delete')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(UserTest("test_create_user")) + suite.addTest(GroupTest("test_create_group")) + suite.addTest(GroupTest("test_group_edit_name_and_global_role")) + suite.addTest(GroupTest("test_add_group_member")) + suite.addTest(GroupTest("test_group_edit_configuration")) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(GroupTest("test_edit_group_member")) + suite.addTest(GroupTest("test_delete_group_member")) + suite.addTest(GroupTest("test_group_delete")) + suite.addTest(UserTest("test_user_delete")) return suite diff 
--git a/tests/ibm_appscan_test.py b/tests/ibm_appscan_test.py index cfb500d112..5a5f41fc0a 100644 --- a/tests/ibm_appscan_test.py +++ b/tests/ibm_appscan_test.py @@ -19,7 +19,7 @@ def test_import_ibm_app_scan_result(self): # Navigate to the Endpoint page self.goto_product_overview(driver) # wait for product_wrapper div as datatables javascript modifies the DOM on page load. - driver.find_element(By.ID, 'products_wrapper') + driver.find_element(By.ID, "products_wrapper") driver.find_element(By.LINK_TEXT, "QA Test").click() # "Click" the Finding Drop down driver.find_element(By.PARTIAL_LINK_TEXT, "Findings").click() @@ -28,7 +28,7 @@ def test_import_ibm_app_scan_result(self): # Select scan type Select(driver.find_element(By.ID, "id_scan_type")).select_by_visible_text("IBM AppScan DAST") # Select `Default` as the Environment - Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text('Development') + Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development") # Upload Scan result file scanner_file = os.path.join(dir_path, "ibm_appscan_xml_file.xml") driver.find_element(By.NAME, "file").send_keys(scanner_file) @@ -37,18 +37,18 @@ def test_import_ibm_app_scan_result(self): # Query the site to determine if the finding has been added # Assert the query to determine status or failure - self.assertTrue(self.is_success_message_present(text='IBM AppScan DAST processed a total of 27 findings')) + self.assertTrue(self.is_success_message_present(text="IBM AppScan DAST processed a total of 27 findings")) def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(IBMAppScanTest('test_import_ibm_app_scan_result')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(IBMAppScanTest("test_import_ibm_app_scan_result")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/note_type_test.py b/tests/note_type_test.py index c1801e17b4..970fea4a44 100644 --- a/tests/note_type_test.py +++ b/tests/note_type_test.py @@ -19,7 +19,7 @@ def test_create_note_type(self): driver.find_element(By.ID, "id_is_single").click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Note Type added successfully.')) + self.assertTrue(self.is_success_message_present(text="Note Type added successfully.")) def test_edit_note_type(self): driver = self.driver @@ -29,7 +29,7 @@ def test_edit_note_type(self): driver.find_element(By.ID, "id_name").send_keys("Edited test note type") driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Note type updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Note type updated successfully.")) def test_disable_note_type(self): driver = self.driver @@ -37,7 +37,7 @@ def test_disable_note_type(self): driver.find_element(By.LINK_TEXT, "Disable Note Type").click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() - self.assertTrue(self.is_success_message_present(text='Note type Disabled successfully.')) + 
self.assertTrue(self.is_success_message_present(text="Note type Disabled successfully.")) def test_enable_note_type(self): driver = self.driver @@ -45,17 +45,17 @@ def test_enable_note_type(self): driver.find_element(By.LINK_TEXT, "Enable Note Type").click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-success").click() - self.assertTrue(self.is_success_message_present(text='Note type Enabled successfully.')) + self.assertTrue(self.is_success_message_present(text="Note type Enabled successfully.")) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(NoteTypeTest('test_create_note_type')) - suite.addTest(NoteTypeTest('test_edit_note_type')) - suite.addTest(NoteTypeTest('test_disable_note_type')) - suite.addTest(NoteTypeTest('test_enable_note_type')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(NoteTypeTest("test_create_note_type")) + suite.addTest(NoteTypeTest("test_edit_note_type")) + suite.addTest(NoteTypeTest("test_disable_note_type")) + suite.addTest(NoteTypeTest("test_enable_note_type")) return suite diff --git a/tests/notes_test.py b/tests/notes_test.py index 77546d3349..a95be518d9 100644 --- a/tests/notes_test.py +++ b/tests/notes_test.py @@ -30,10 +30,10 @@ def create_public_note(self, driver, level): time.sleep(1) if not driver.find_element(By.ID, "add_note").is_displayed(): self.uncollapse_all(driver) - text = driver.find_element(By.TAG_NAME, 'body').text + text = driver.find_element(By.TAG_NAME, "body").text pass_test = "Test public note" in text if not pass_test: - print('Public note created at the', level, 'level') + print("Public note created at the", level, "level") self.assertTrue(pass_test) def create_private_note(self, driver, level): @@ -46,47 +46,47 @@ def create_private_note(self, driver, level): time.sleep(1) if not driver.find_element(By.ID, "add_note").is_displayed(): self.uncollapse_all(driver) - text = driver.find_element(By.TAG_NAME, 'body').text + text = driver.find_element(By.TAG_NAME, "body").text note_present = "Test public note" in text private_status = "(will not appear in report)" in text pass_test = note_present and private_status if not pass_test: - print('Private note note created at the', level, 'level') + print("Private note note created at the", level, "level") self.assertTrue(pass_test) def test_finding_note(self): driver = self.driver self.goto_all_findings_list(driver) driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() - self.create_public_note(driver, 'Finding') - self.create_private_note(driver, 'Finding') + self.create_public_note(driver, "Finding") + self.create_private_note(driver, "Finding") def test_test_note(self): driver = self.driver self.goto_all_engagements_overview(driver) driver.find_element(By.PARTIAL_LINK_TEXT, "Ad Hoc Engagement").click() driver.find_element(By.PARTIAL_LINK_TEXT, "Pen Test").click() - self.create_public_note(driver, 'Test') - self.create_private_note(driver, 'Test') + self.create_public_note(driver, "Test") + self.create_private_note(driver, "Test") def test_engagement_note(self): driver = self.driver self.goto_all_engagements_overview(driver) driver.find_element(By.PARTIAL_LINK_TEXT, "Ad Hoc Engagement").click() - self.create_public_note(driver, 'Engagement') - self.create_private_note(driver, 'Engagement') + self.create_public_note(driver, "Engagement") + self.create_private_note(driver, "Engagement") def suite(): 
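For context: the suite() helpers being requoted throughout this patch exist because these UI tests share state -- a product is created early and torn down at the end -- so they must run in exactly the order addTest() is called, not in unittest's discovery order. A minimal sketch of driving such an ordered suite (the runner wiring below is illustrative, not taken from this patch):

    import unittest

    if __name__ == "__main__":
        # failfast stops at the first broken step, since later tests
        # depend on the state earlier ones set up
        runner = unittest.TextTestRunner(verbosity=2, failfast=True)
        result = runner.run(suite())  # relies on addTest() order: login first, cleanup last
        raise SystemExit(not result.wasSuccessful())
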
suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ProductTest('test_add_product_finding')) - suite.addTest(NoteTest('test_finding_note')) - suite.addTest(NoteTest('test_test_note')) - suite.addTest(NoteTest('test_engagement_note')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ProductTest("test_add_product_finding")) + suite.addTest(NoteTest("test_finding_note")) + suite.addTest(NoteTest("test_test_note")) + suite.addTest(NoteTest("test_engagement_note")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/notifications_test.py b/tests/notifications_test.py index 2a5c832ab2..8d2d5fbbef 100644 --- a/tests/notifications_test.py +++ b/tests/notifications_test.py @@ -58,7 +58,7 @@ def test_enable_personal_notification(self): driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert True except NoSuchElementException: - if self.type == 'msteams': + if self.type == "msteams": # msteam should be not in personal notifications assert True else: @@ -116,7 +116,7 @@ def test_enable_template_notification(self): driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert True except NoSuchElementException: - if self.type == 'msteams': + if self.type == "msteams": # msteam should be not in personal notifications assert True else: @@ -129,14 +129,14 @@ def test_user_mail_notifications_change(self): wait = WebDriverWait(driver, 5) actions = ActionChains(driver) - configuration_menu = driver.find_element(By.ID, 'menu_configuration') + configuration_menu = driver.find_element(By.ID, "menu_configuration") actions.move_to_element(configuration_menu).perform() wait.until(EC.visibility_of_element_located((By.LINK_TEXT, "Notifications"))).click() originally_selected = { - 'product_added': driver.find_element(By.XPATH, + "product_added": driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").is_selected(), - 'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected(), + "scan_added": driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected(), } driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").click() @@ -144,10 +144,10 @@ def test_user_mail_notifications_change(self): driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Settings saved')) - self.assertNotEqual(originally_selected['product_added'], + self.assertTrue(self.is_success_message_present(text="Settings saved")) + self.assertNotEqual(originally_selected["product_added"], driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").is_selected()) - self.assertNotEqual(originally_selected['scan_added'], + self.assertNotEqual(originally_selected["scan_added"], driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()) @@ -155,28 +155,28 @@ def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - 
suite.addTest(NotificationTest('test_disable_personal_notification', 'mail')) - suite.addTest(NotificationTest('test_disable_personal_notification', 'slack')) - suite.addTest(NotificationTest('test_disable_personal_notification', 'msteams')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(NotificationTest("test_disable_personal_notification", "mail")) + suite.addTest(NotificationTest("test_disable_personal_notification", "slack")) + suite.addTest(NotificationTest("test_disable_personal_notification", "msteams")) # now test when enabled - suite.addTest(NotificationTest('test_enable_personal_notification', 'mail')) - suite.addTest(NotificationTest('test_enable_personal_notification', 'slack')) - suite.addTest(NotificationTest('test_enable_personal_notification', 'msteams')) + suite.addTest(NotificationTest("test_enable_personal_notification", "mail")) + suite.addTest(NotificationTest("test_enable_personal_notification", "slack")) + suite.addTest(NotificationTest("test_enable_personal_notification", "msteams")) # Now switch to system notifications - suite.addTest(NotificationTest('test_disable_system_notification', 'mail')) - suite.addTest(NotificationTest('test_disable_system_notification', 'slack')) - suite.addTest(NotificationTest('test_disable_system_notification', 'msteams')) + suite.addTest(NotificationTest("test_disable_system_notification", "mail")) + suite.addTest(NotificationTest("test_disable_system_notification", "slack")) + suite.addTest(NotificationTest("test_disable_system_notification", "msteams")) # now test when enabled - suite.addTest(NotificationTest('test_enable_system_notification', 'mail')) - suite.addTest(NotificationTest('test_enable_system_notification', 'slack')) - suite.addTest(NotificationTest('test_enable_system_notification', 'msteams')) + suite.addTest(NotificationTest("test_enable_system_notification", "mail")) + suite.addTest(NotificationTest("test_enable_system_notification", "slack")) + suite.addTest(NotificationTest("test_enable_system_notification", "msteams")) # not really for the user we created, but still related to user settings - suite.addTest(NotificationTest('test_user_mail_notifications_change', 'mail')) + suite.addTest(NotificationTest("test_user_mail_notifications_change", "mail")) # now do short test for the template - suite.addTest(NotificationTest('test_enable_template_notification', 'mail')) - suite.addTest(NotificationTest('test_enable_template_notification', 'slack')) - suite.addTest(NotificationTest('test_enable_template_notification', 'msteams')) + suite.addTest(NotificationTest("test_enable_template_notification", "mail")) + suite.addTest(NotificationTest("test_enable_template_notification", "slack")) + suite.addTest(NotificationTest("test_enable_template_notification", "msteams")) return suite diff --git a/tests/product_group_test.py b/tests/product_group_test.py index f7f4559c8c..ab0ccc7c77 100644 --- a/tests/product_group_test.py +++ b/tests/product_group_test.py @@ -19,19 +19,19 @@ def test_group_add_product_group(self): driver.find_element(By.ID, "addProductGroup").click() # Select the product 'Research and Development' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_products'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_products"))) except TimeoutException: - self.fail('Timed out waiting for products dropdown to initialize ') + self.fail("Timed out waiting for products dropdown to initialize ") 
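    # Context (not part of the patch): the execute_script() call that follows
    # unhides the native <select>, because the multiselect widget hides it on
    # page load and Selenium generally refuses to interact with invisible
    # elements; flipping display back to 'inline' lets Select() drive it
    # directly. The same wait-then-unhide idiom recurs in the other hunks
    # touching id_users, id_groups and id_product_types.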
driver.execute_script("document.getElementsByName('products')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='products']") - product_option = element.find_elements(By.TAG_NAME, 'option')[0] + product_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(product_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product groups added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product groups added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_product")[0].text, "QA Test") self.assertEqual(driver.find_elements(By.NAME, "member_product_role")[0].text, "Reader") @@ -46,7 +46,7 @@ def test_group_edit_product_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product group updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product group updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_product")[0].text, "QA Test") self.assertEqual(driver.find_elements(By.NAME, "member_product_role")[0].text, "Owner") @@ -59,7 +59,7 @@ def test_group_delete_product_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product group deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product group deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "member_product")) @@ -76,19 +76,19 @@ def test_product_add_product_group(self): driver.find_element(By.ID, "addProductGroup").click() # Select the group 'Group Name' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_groups'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_groups"))) except TimeoutException: - self.fail('Timed out waiting for groups dropdown to initialize ') + self.fail("Timed out waiting for groups dropdown to initialize ") driver.execute_script("document.getElementsByName('groups')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='groups']") - group_option = element.find_elements(By.TAG_NAME, 'option')[0] + group_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(group_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product groups added successfully.')) + 
self.assertTrue(self.is_success_message_present(text="Product groups added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "group_name")[0].text, "Group Name") self.assertEqual(driver.find_elements(By.NAME, "group_role")[0].text, "Reader") @@ -110,7 +110,7 @@ def test_product_edit_product_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product group updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product group updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "group_name")[0].text, "Group Name") self.assertEqual(driver.find_elements(By.NAME, "group_role")[0].text, "Maintainer") @@ -130,7 +130,7 @@ def test_product_delete_product_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product group deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product group deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "group_name")) @@ -160,18 +160,18 @@ def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - suite.addTest(GroupTest('test_create_group')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ProductGroupTest('test_group_add_product_group')) - suite.addTest(ProductGroupTest('test_group_edit_product_group')) - suite.addTest(ProductGroupTest('test_group_delete_product_group')) - suite.addTest(ProductGroupTest('test_product_add_product_group')) - suite.addTest(ProductGroupTest('test_product_edit_product_group')) - suite.addTest(ProductGroupTest('test_product_delete_product_group')) - suite.addTest(GroupTest('test_group_edit_name_and_global_role')) - suite.addTest(GroupTest('test_group_delete')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(GroupTest("test_create_group")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ProductGroupTest("test_group_add_product_group")) + suite.addTest(ProductGroupTest("test_group_edit_product_group")) + suite.addTest(ProductGroupTest("test_group_delete_product_group")) + suite.addTest(ProductGroupTest("test_product_add_product_group")) + suite.addTest(ProductGroupTest("test_product_edit_product_group")) + suite.addTest(ProductGroupTest("test_product_delete_product_group")) + suite.addTest(GroupTest("test_group_edit_name_and_global_role")) + suite.addTest(GroupTest("test_group_delete")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/product_member_test.py b/tests/product_member_test.py index 8e45a4a9cc..23d82de286 100644 --- a/tests/product_member_test.py +++ b/tests/product_member_test.py @@ -21,32 +21,32 @@ def test_user_add_product_member(self): # Select and click on the particular user to view driver.find_element(By.LINK_TEXT, "propersahm").click() # Only execute test case when authorization v2 is activated - if 
self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to add users and click the 'Add' button driver.find_element(By.ID, "dropdownMenuAddProductMember").click() driver.find_element(By.ID, "addProductMember").click() # Select the product 'QA Test' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_products'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_products"))) except TimeoutException: - self.fail('Timed out waiting for products dropdown to initialize ') + self.fail("Timed out waiting for products dropdown to initialize ") driver.execute_script("document.getElementsByName('products')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='products']") - product_option = element.find_elements(By.TAG_NAME, 'option')[0] + product_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(product_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product members added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product members added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_product")[0].text, "QA Test") self.assertEqual(driver.find_elements(By.NAME, "member_product_role")[0].text, "Reader") else: - print('--------------------------------') - print('test_user_add_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_user_add_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_user_edit_product_member(self): # Login to the site. 
Password will have to be modified @@ -57,7 +57,7 @@ def test_user_edit_product_member(self): # Select and click on the particular user to view driver.find_element(By.LINK_TEXT, "propersahm").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to manage members and click the 'Edit' button driver.find_elements(By.NAME, "dropdownManageProductMember")[0].click() driver.find_elements(By.NAME, "editProductMember")[0].click() @@ -66,14 +66,14 @@ def test_user_edit_product_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product member updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product member updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_product")[0].text, "QA Test") self.assertEqual(driver.find_elements(By.NAME, "member_product_role")[0].text, "Maintainer") else: - print('--------------------------------') - print('test_edit_add_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_edit_add_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_user_delete_product_member(self): # Login to the site. Password will have to be modified @@ -84,20 +84,20 @@ def test_user_delete_product_member(self): # Select and click on the particular user to view driver.find_element(By.LINK_TEXT, "propersahm").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to manage members and click the 'Delete' button driver.find_elements(By.NAME, "dropdownManageProductMember")[0].click() driver.find_elements(By.NAME, "deleteProductMember")[0].click() # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product member deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product member deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "member_product")) else: - print('--------------------------------') - print('test_user_delete_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_user_delete_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_add_product_member(self): # Login to the site. 
Password will have to be modified @@ -107,32 +107,32 @@ def test_product_add_product_member(self): self.goto_product_overview(driver) # Select and click on the particular product to edit driver.find_element(By.LINK_TEXT, "QA Test").click() - if self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to add users and click the 'Add User' button driver.find_element(By.ID, "dropdownMenuAddProductMember").click() driver.find_element(By.ID, "addProductMember").click() # Select the user 'propersahm' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_users'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_users"))) except TimeoutException: - self.fail('Timed out waiting for users dropdown to initialize ') + self.fail("Timed out waiting for users dropdown to initialize ") driver.execute_script("document.getElementsByName('users')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='users']") - user_option = element.find_elements(By.TAG_NAME, 'option')[0] + user_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(user_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product members added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product members added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_user")[0].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[0].text, "Reader") else: - print('--------------------------------') - print('test_product_add_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_product_add_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_edit_product_member(self): # Login to the site. 
Password will have to be modified @@ -143,7 +143,7 @@ def test_product_edit_product_member(self): # Select and click on the particular product to edit driver.find_element(By.LINK_TEXT, "QA Test").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to manage members and click the 'Edit' button driver.find_elements(By.NAME, "dropdownManageProductMember")[0].click() driver.find_elements(By.NAME, "editProductMember")[0].click() @@ -152,14 +152,14 @@ def test_product_edit_product_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product member updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product member updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_user")[0].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[0].text, "Maintainer") else: - print('--------------------------------') - print('test_product_edit_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_product_edit_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_delete_product_member(self): # Login to the site. Password will have to be modified @@ -170,38 +170,38 @@ def test_product_delete_product_member(self): # Select and click on the particular product to edit driver.find_element(By.LINK_TEXT, "QA Test").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductMember'): + if self.is_element_by_id_present("dropdownMenuAddProductMember"): # Open the menu to manage members and click the 'Delete' button driver.find_elements(By.NAME, "dropdownManageProductMember")[0].click() driver.find_elements(By.NAME, "deleteProductMember")[0].click() # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product member deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product member deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "member_user")) else: - print('--------------------------------') - print('test_product_delete_product_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_product_delete_product_member: Not executed because legacy authorization is active") + print("--------------------------------") def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(UserTest('test_create_user')) - 
suite.addTest(ProductMemberTest('test_user_add_product_member')) - suite.addTest(ProductMemberTest('test_user_edit_product_member')) - suite.addTest(ProductMemberTest('test_user_delete_product_member')) - suite.addTest(ProductMemberTest('test_product_add_product_member')) - suite.addTest(ProductMemberTest('test_product_edit_product_member')) - suite.addTest(ProductMemberTest('test_product_delete_product_member')) - suite.addTest(UserTest('test_user_delete')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(UserTest("test_create_user")) + suite.addTest(ProductMemberTest("test_user_add_product_member")) + suite.addTest(ProductMemberTest("test_user_edit_product_member")) + suite.addTest(ProductMemberTest("test_user_delete_product_member")) + suite.addTest(ProductMemberTest("test_product_add_product_member")) + suite.addTest(ProductMemberTest("test_product_edit_product_member")) + suite.addTest(ProductMemberTest("test_product_delete_product_member")) + suite.addTest(UserTest("test_user_delete")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/product_test.py b/tests/product_test.py index 79ac3ac3c7..bc3a64c0d4 100644 --- a/tests/product_test.py +++ b/tests/product_test.py @@ -15,10 +15,10 @@ def __init__(self, browser, timeout): self.timeout = time.time() + timeout def __enter__(self): - self.old_page = self.browser.find_element(By.TAG_NAME, 'html') + self.old_page = self.browser.find_element(By.TAG_NAME, "html") def page_has_loaded(self): - new_page = self.browser.find_element(By.TAG_NAME, 'html') + new_page = self.browser.find_element(By.TAG_NAME, "html") return new_page.id != self.old_page.id def __exit__(self, *_): @@ -27,7 +27,7 @@ def __exit__(self, *_): return True else: time.sleep(0.2) - msg = f'Timeout waiting for {self.timeout}s' + msg = f"Timeout waiting for {self.timeout}s" raise Exception(msg) @@ -60,8 +60,8 @@ def test_create_product(self): # Assert ot the query to dtermine status of failure # Also confirm success even if Product is returned as already exists for test sake - self.assertTrue(self.is_success_message_present(text='Product added successfully') - or self.is_success_message_present(text='Product with this Name already exists.')) + self.assertTrue(self.is_success_message_present(text="Product added successfully") + or self.is_success_message_present(text="Product with this Name already exists.")) self.assertFalse(self.is_error_message_present()) @on_exception_html_source_logger @@ -103,8 +103,8 @@ def test_edit_product_description(self): # Query the site to determine if the product has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Product updated successfully') - or self.is_success_message_present(text='Product with this Name already exists.')) + self.assertTrue(self.is_success_message_present(text="Product updated successfully") + or self.is_success_message_present(text="Product with this Name already exists.")) self.assertFalse(self.is_error_message_present()) # For product consistency sake, We won't be editting the product title @@ -131,8 +131,8 @@ def test_enable_simple_risk_acceptance(self): # Query the site to determine if the product has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Product updated successfully') - 
or self.is_success_message_present(text='Product with this Name already exists.')) + self.assertTrue(self.is_success_message_present(text="Product updated successfully") + or self.is_success_message_present(text="Product with this Name already exists.")) self.assertFalse(self.is_error_message_present()) @on_exception_html_source_logger @@ -163,7 +163,7 @@ def test_add_product_engagement(self): # engagement target start and target end already have defaults # we can safely skip # Testing Lead: This can be the logged in user - Select(driver.find_element(By.ID, "id_lead")).select_by_visible_text('Admin User (admin)') + Select(driver.find_element(By.ID, "id_lead")).select_by_visible_text("Admin User (admin)") # engagement status Select(driver.find_element(By.ID, "id_status")).select_by_visible_text("In Progress") # "Click" the Done button to Add the engagement @@ -171,7 +171,7 @@ def test_add_product_engagement(self): # Query the site to determine if the product has been added # Assert of the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Engagement added successfully')) + self.assertTrue(self.is_success_message_present(text="Engagement added successfully")) @on_exception_html_source_logger def test_add_technology(self): @@ -194,7 +194,7 @@ def test_add_technology(self): # "Click" the Submit button to Add the technology driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert of the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Technology added successfully')) + self.assertTrue(self.is_success_message_present(text="Technology added successfully")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "technology_name")[0].text, "Technology Test") self.assertEqual(driver.find_elements(By.NAME, "technology_version")[0].text, "v.2.1.0-RELEASE") @@ -221,7 +221,7 @@ def test_edit_technology(self): # "Click" the Submit button to change the technology driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert of the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Technology changed successfully')) + self.assertTrue(self.is_success_message_present(text="Technology changed successfully")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "technology_name")[0].text, "Technology Changed") self.assertEqual(driver.find_elements(By.NAME, "technology_version")[0].text, "v.2.2.0-RELEASE") @@ -243,7 +243,7 @@ def test_delete_technology(self): # "Click" the Submit button to delete the technology driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert of the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Technology deleted successfully')) + self.assertTrue(self.is_success_message_present(text="Technology deleted successfully")) # Query the site to determine if the technology has been deleted self.assertFalse(driver.find_elements(By.NAME, "technology_name")) @@ -293,13 +293,13 @@ def test_add_product_finding(self): # Query the site to determine if the finding has been added # Assert to the query to dtermine status of failure - self.assertTrue(self.is_text_present_on_page(text='App Vulnerable to XSS')) + self.assertTrue(self.is_text_present_on_page(text="App Vulnerable to XSS")) # Select and click on the finding to check if endpoint has been added 
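    # Context (not part of the patch): the assertions that follow check page
    # text rather than a flash message -- the endpoint (product.finding.com)
    # and the vulnerability ids REF-1/REF-2 are rendered on the finding
    # detail page, so is_text_present_on_page() is the appropriate helper
    # here instead of is_success_message_present().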
driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click() - self.assertTrue(self.is_text_present_on_page(text='product.finding.com')) - self.assertTrue(self.is_text_present_on_page(text='REF-1')) - self.assertTrue(self.is_text_present_on_page(text='REF-2')) - self.assertTrue(self.is_text_present_on_page(text='Additional Vulnerability Ids')) + self.assertTrue(self.is_text_present_on_page(text="product.finding.com")) + self.assertTrue(self.is_text_present_on_page(text="REF-1")) + self.assertTrue(self.is_text_present_on_page(text="REF-2")) + self.assertTrue(self.is_text_present_on_page(text="Additional Vulnerability Ids")) @on_exception_html_source_logger def test_add_product_endpoints(self): @@ -323,7 +323,7 @@ def test_add_product_endpoints(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Endpoint added successfully')) + self.assertTrue(self.is_success_message_present(text="Endpoint added successfully")) @on_exception_html_source_logger def test_add_product_custom_field(self): @@ -351,8 +351,8 @@ def test_add_product_custom_field(self): # Assert ot the query to dtermine status of failure # Also confirm success even if variable is returned as already exists for test sake - self.assertTrue(self.is_success_message_present(text='Metadata added successfully') - or self.is_success_message_present(text='A metadata entry with the same name exists already for this object.')) + self.assertTrue(self.is_success_message_present(text="Metadata added successfully") + or self.is_success_message_present(text="A metadata entry with the same name exists already for this object.")) @on_exception_html_source_logger def test_edit_product_custom_field(self): @@ -376,8 +376,8 @@ def test_edit_product_custom_field(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine success or failure - self.assertTrue(self.is_success_message_present(text='Metadata edited successfully') - or self.is_success_message_present(text='A metadata entry with the same name exists already for this object.')) + self.assertTrue(self.is_success_message_present(text="Metadata edited successfully") + or self.is_success_message_present(text="A metadata entry with the same name exists already for this object.")) @on_exception_html_source_logger def test_add_product_tracking_files(self): @@ -404,7 +404,7 @@ def test_add_product_tracking_files(self): # Query the site to determine if the finding has been added # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Added Tracked File to a Product')) + self.assertTrue(self.is_success_message_present(text="Added Tracked File to a Product")) @on_exception_html_source_logger def test_edit_product_tracking_files(self): @@ -430,7 +430,7 @@ def test_edit_product_tracking_files(self): # Query the site to determine if the Tracking file has been updated # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Tool Product Configuration Successfully Updated')) + self.assertTrue(self.is_success_message_present(text="Tool Product Configuration Successfully Updated")) def test_product_metrics(self): # Test To Edit Product Tracking Files @@ -442,7 +442,7 @@ def test_product_metrics(self): driver.find_element(By.LINK_TEXT, "QA Test").click() # "Click" the dropdown option # driver.find_element(By.XPATH, "//span[contains(., 
'Metrics')]").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Metrics').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Metrics").click() @on_exception_html_source_logger def test_delete_product(self, name="QA Test"): @@ -453,7 +453,7 @@ def test_delete_product(self, name="QA Test"): driver.find_element(By.LINK_TEXT, name).click() # Click the drop down menu # driver.execute_script("window.scrollTo(0, 0)") - driver.find_element(By.ID, 'dropdownMenu1').click() + driver.find_element(By.ID, "dropdownMenu1").click() # "Click" the Delete option driver.find_element(By.LINK_TEXT, "Delete").click() # "Click" the delete button to complete the transaction @@ -461,7 +461,7 @@ def test_delete_product(self, name="QA Test"): # Query the site to determine if the product has been added # Assert ot the query to determine status of failure - self.assertTrue(self.is_success_message_present(text='Product and relationships removed.')) + self.assertTrue(self.is_success_message_present(text="Product and relationships removed.")) @on_exception_html_source_logger def test_product_notifications_change(self): @@ -477,14 +477,14 @@ def test_product_notifications_change(self): driver.find_element(By.XPATH, "//input[@name='engagement_added' and @value='mail']").click() # clicking == ajax call to submit, but I think selenium gets this - self.assertTrue(self.is_success_message_present(text='Notification settings updated')) + self.assertTrue(self.is_success_message_present(text="Notification settings updated")) self.assertTrue(driver.find_element(By.XPATH, "//input[@name='engagement_added' and @value='mail']").is_selected()) self.assertFalse(driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()) self.assertFalse(driver.find_element(By.XPATH, "//input[@name='test_added' and @value='mail']").is_selected()) driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").click() - self.assertTrue(self.is_success_message_present(text='Notification settings updated')) + self.assertTrue(self.is_success_message_present(text="Notification settings updated")) self.assertTrue(driver.find_element(By.XPATH, "//input[@name='engagement_added' and @value='mail']").is_selected()) self.assertTrue(driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()) self.assertFalse(driver.find_element(By.XPATH, "//input[@name='test_added' and @value='mail']").is_selected()) @@ -540,35 +540,35 @@ def test_metrics_dashboard(self): def add_product_tests_to_suite(suite, jira=False, github=False, block_execution=False): # Add each test and the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) + suite.addTest(BaseTestCase("test_login")) set_suite_settings(suite, jira=jira, github=github, block_execution=block_execution) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ProductTest('test_edit_product_description')) - suite.addTest(ProductTest('test_add_technology')) - suite.addTest(ProductTest('test_edit_technology')) - suite.addTest(ProductTest('test_delete_technology')) - suite.addTest(ProductTest('test_add_product_engagement')) - suite.addTest(ProductTest('test_add_product_finding')) - suite.addTest(ProductTest('test_add_product_endpoints')) - suite.addTest(ProductTest('test_add_product_custom_field')) - suite.addTest(ProductTest('test_edit_product_custom_field')) - suite.addTest(ProductTest('test_add_product_tracking_files')) - 
suite.addTest(ProductTest('test_edit_product_tracking_files')) - suite.addTest(ProductTest('test_list_products')) - suite.addTest(ProductTest('test_list_components')) - suite.addTest(ProductTest('test_product_notifications_change')) - suite.addTest(ProductTest('test_product_metrics')) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ProductTest("test_edit_product_description")) + suite.addTest(ProductTest("test_add_technology")) + suite.addTest(ProductTest("test_edit_technology")) + suite.addTest(ProductTest("test_delete_technology")) + suite.addTest(ProductTest("test_add_product_engagement")) + suite.addTest(ProductTest("test_add_product_finding")) + suite.addTest(ProductTest("test_add_product_endpoints")) + suite.addTest(ProductTest("test_add_product_custom_field")) + suite.addTest(ProductTest("test_edit_product_custom_field")) + suite.addTest(ProductTest("test_add_product_tracking_files")) + suite.addTest(ProductTest("test_edit_product_tracking_files")) + suite.addTest(ProductTest("test_list_products")) + suite.addTest(ProductTest("test_list_components")) + suite.addTest(ProductTest("test_product_notifications_change")) + suite.addTest(ProductTest("test_product_metrics")) # we add metrics tests here as we now have a product that triggers some logic inside metrics - suite.addTest(ProductTest('test_critical_product_metrics')) - suite.addTest(ProductTest('test_product_type_metrics')) - suite.addTest(ProductTest('test_product_type_counts_metrics')) - suite.addTest(ProductTest('test_simple_metrics')) - suite.addTest(ProductTest('test_engineer_metrics')) - suite.addTest(ProductTest('test_metrics_dashboard')) - - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(ProductTest("test_critical_product_metrics")) + suite.addTest(ProductTest("test_product_type_metrics")) + suite.addTest(ProductTest("test_product_type_counts_metrics")) + suite.addTest(ProductTest("test_simple_metrics")) + suite.addTest(ProductTest("test_engineer_metrics")) + suite.addTest(ProductTest("test_metrics_dashboard")) + + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/product_type_group_test.py b/tests/product_type_group_test.py index 3e8a05cb27..9e5011b6fe 100644 --- a/tests/product_type_group_test.py +++ b/tests/product_type_group_test.py @@ -18,19 +18,19 @@ def test_group_add_product_type_group(self): driver.find_element(By.ID, "addProductTypeGroup").click() # Select the product type 'Research and Development' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_product_types'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_product_types"))) except TimeoutException: - self.fail('Timed out waiting for product types dropdown to initialize ') + self.fail("Timed out waiting for product types dropdown to initialize ") driver.execute_script("document.getElementsByName('product_types')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='product_types']") - product_type_option = element.find_elements(By.TAG_NAME, 'option')[0] + product_type_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(product_type_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - 
self.assertTrue(self.is_success_message_present(text='Product type groups added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type groups added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_product_type")[0].text, "Research and Development") self.assertEqual(driver.find_elements(By.NAME, "member_product_type_role")[0].text, "Reader") @@ -45,7 +45,7 @@ def test_group_edit_product_type_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type group updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type group updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_product_type")[0].text, "Research and Development") self.assertEqual(driver.find_elements(By.NAME, "member_product_type_role")[0].text, "Owner") @@ -58,7 +58,7 @@ def test_group_delete_product_type_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type group deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type group deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "member_product_type")) @@ -75,19 +75,19 @@ def test_product_type_add_product_type_group(self): driver.find_element(By.ID, "addProductTypeGroup").click() # Select the group 'Group Name' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_groups'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_groups"))) except TimeoutException: - self.fail('Timed out waiting for groups dropdown to initialize ') + self.fail("Timed out waiting for groups dropdown to initialize ") driver.execute_script("document.getElementsByName('groups')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='groups']") - group_option = element.find_elements(By.TAG_NAME, 'option')[0] + group_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(group_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type groups added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type groups added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "product_type_group_group")[0].text, "Group Name") self.assertEqual(driver.find_elements(By.NAME, "product_type_group_role")[0].text, "Reader") @@ -109,7 +109,7 @@ def test_product_type_edit_product_type_group(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine 
success status
-        self.assertTrue(self.is_success_message_present(text='Product type group updated successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Product type group updated successfully."))
         # Query the site to determine if the member has been edited
         self.assertEqual(driver.find_elements(By.NAME, "product_type_group_group")[0].text, "Group Name")
         self.assertEqual(driver.find_elements(By.NAME, "product_type_group_role")[0].text, "Maintainer")
@@ -129,7 +129,7 @@ def test_product_type_delete_product_type_group(self):
         # "Click" the submit button to complete the transaction
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click()
         # Assert the message to determine success status
-        self.assertTrue(self.is_success_message_present(text='Product type group deleted successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Product type group deleted successfully."))
         # Query the site to determine if the member has been deleted
         self.assertFalse(driver.find_elements(By.NAME, "product_type_group_group"))
@@ -159,16 +159,16 @@ def suite():
     suite = unittest.TestSuite()
     # Add each test to the suite to be run
     # success and failure are output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(GroupTest('test_create_group'))
-    suite.addTest(ProductTypeGroupTest('test_group_add_product_type_group'))
-    suite.addTest(ProductTypeGroupTest('test_group_edit_product_type_group'))
-    suite.addTest(ProductTypeGroupTest('test_group_delete_product_type_group'))
-    suite.addTest(ProductTypeGroupTest('test_product_type_add_product_type_group'))
-    suite.addTest(ProductTypeGroupTest('test_product_type_edit_product_type_group'))
-    suite.addTest(ProductTypeGroupTest('test_product_type_delete_product_type_group'))
-    suite.addTest(GroupTest('test_group_edit_name_and_global_role'))
-    suite.addTest(GroupTest('test_group_delete'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(GroupTest("test_create_group"))
+    suite.addTest(ProductTypeGroupTest("test_group_add_product_type_group"))
+    suite.addTest(ProductTypeGroupTest("test_group_edit_product_type_group"))
+    suite.addTest(ProductTypeGroupTest("test_group_delete_product_type_group"))
+    suite.addTest(ProductTypeGroupTest("test_product_type_add_product_type_group"))
+    suite.addTest(ProductTypeGroupTest("test_product_type_edit_product_type_group"))
+    suite.addTest(ProductTypeGroupTest("test_product_type_delete_product_type_group"))
+    suite.addTest(GroupTest("test_group_edit_name_and_global_role"))
+    suite.addTest(GroupTest("test_group_delete"))

     return suite
diff --git a/tests/product_type_member_test.py b/tests/product_type_member_test.py
index 03664664d4..6067198ccf 100644
--- a/tests/product_type_member_test.py
+++ b/tests/product_type_member_test.py
@@ -20,32 +20,32 @@ def test_user_add_product_type_member(self):
         # Select and click on the particular user to view
         driver.find_element(By.LINK_TEXT, "propersahm").click()
         # Only execute test case when authorization v2 is activated
-        if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'):
+        if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"):
             # Open the menu to add users and click the 'Add' button
             driver.find_element(By.ID, "dropdownMenuAddProductTypeMember").click()
             driver.find_element(By.ID, "addProductTypeMember").click()
             # Select the product type 'Research and Development'
             try:
-                WebDriverWait(driver,
5).until(EC.presence_of_element_located((By.ID, "id_product_types"))) except TimeoutException: - self.fail('Timed out waiting for product types dropdown to initialize ') + self.fail("Timed out waiting for product types dropdown to initialize ") driver.execute_script("document.getElementsByName('product_types')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='product_types']") - product_type_option = element.find_elements(By.TAG_NAME, 'option')[0] + product_type_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(product_type_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type members added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type members added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_product_type")[0].text, "Research and Development") self.assertEqual(driver.find_elements(By.NAME, "member_product_type_role")[0].text, "Reader") else: - print('--------------------------------') - print('test_user_add_product_type_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_user_add_product_type_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_user_edit_product_type_member(self): # Login to the site. 
Password will have to be modified @@ -56,7 +56,7 @@ def test_user_edit_product_type_member(self): # Select and click on the particular user to view driver.find_element(By.LINK_TEXT, "propersahm").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'): + if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"): # Open the menu to manage members and click the 'Edit' button driver.find_elements(By.NAME, "dropdownManageProductTypeMember")[0].click() driver.find_elements(By.NAME, "editProductTypeMember")[0].click() @@ -65,14 +65,14 @@ def test_user_edit_product_type_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type member updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type member updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_product_type")[0].text, "Research and Development") self.assertEqual(driver.find_elements(By.NAME, "member_product_type_role")[0].text, "Owner") else: - print('--------------------------------') - print('test_user_edit_product_type_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_user_edit_product_type_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_user_delete_product_type_member(self): # Login to the site. Password will have to be modified @@ -83,20 +83,20 @@ def test_user_delete_product_type_member(self): # Select and click on the particular user to view driver.find_element(By.LINK_TEXT, "propersahm").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'): + if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"): # Open the menu to manage members and click the 'Delete' button driver.find_elements(By.NAME, "dropdownManageProductTypeMember")[0].click() driver.find_elements(By.NAME, "deleteProductTypeMember")[0].click() # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type member deleted successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type member deleted successfully.")) # Query the site to determine if the member has been deleted self.assertFalse(driver.find_elements(By.NAME, "member_product_type")) else: - print('--------------------------------') - print('test_user_delete_product_type_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_user_delete_product_type_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_type_add_product_type_member(self): # Login to the site. 
Password will have to be modified @@ -107,32 +107,32 @@ def test_product_type_add_product_type_member(self): driver.find_element(By.ID, "dropdownMenuProductType").click() driver.find_element(By.PARTIAL_LINK_TEXT, "View").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'): + if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"): # Open the menu to add users and click the 'Add' button driver.find_element(By.ID, "dropdownMenuAddProductTypeMember").click() driver.find_element(By.ID, "addProductTypeMember").click() # Select the user 'propersahm' try: - WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'id_users'))) + WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "id_users"))) except TimeoutException: - self.fail('Timed out waiting for users dropdown to initialize ') + self.fail("Timed out waiting for users dropdown to initialize ") driver.execute_script("document.getElementsByName('users')[0].style.display = 'inline'") element = driver.find_element(By.XPATH, "//select[@name='users']") - user_option = element.find_elements(By.TAG_NAME, 'option')[0] + user_option = element.find_elements(By.TAG_NAME, "option")[0] Select(element).select_by_value(user_option.get_attribute("value")) # Select the role 'Reader' Select(driver.find_element(By.ID, "id_role")).select_by_visible_text("Reader") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type members added successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type members added successfully.")) # Query the site to determine if the member has been added self.assertEqual(driver.find_elements(By.NAME, "member_user")[1].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[1].text, "Reader") else: - print('--------------------------------') - print('test_product_type_add_product_type_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_product_type_add_product_type_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_type_edit_product_type_member(self): # Login to the site. 
Password will have to be modified @@ -143,7 +143,7 @@ def test_product_type_edit_product_type_member(self): driver.find_element(By.ID, "dropdownMenuProductType").click() driver.find_element(By.PARTIAL_LINK_TEXT, "View").click() # Only execute test case when authorization v2 is activated - if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'): + if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"): # Open the menu to manage members and click the 'Edit' button # The first member in the list is the admin user which was inserted by a fixture # The second member is the user we are looking for @@ -154,14 +154,14 @@ def test_product_type_edit_product_type_member(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() # Assert the message to determine success status - self.assertTrue(self.is_success_message_present(text='Product type member updated successfully.')) + self.assertTrue(self.is_success_message_present(text="Product type member updated successfully.")) # Query the site to determine if the member has been edited self.assertEqual(driver.find_elements(By.NAME, "member_user")[1].text, "Proper Samuel (propersahm)") self.assertEqual(driver.find_elements(By.NAME, "member_role")[1].text, "Maintainer") else: - print('--------------------------------') - print('test_product_type_edit_product_type_member: Not executed because legacy authorization is active') - print('--------------------------------') + print("--------------------------------") + print("test_product_type_edit_product_type_member: Not executed because legacy authorization is active") + print("--------------------------------") def test_product_type_delete_product_type_member(self): # Login to the site. 
Password will have to be modified
@@ -172,7 +172,7 @@ def test_product_type_delete_product_type_member(self):
         driver.find_element(By.ID, "dropdownMenuProductType").click()
         driver.find_element(By.PARTIAL_LINK_TEXT, "View").click()
         # Only execute test case when authorization v2 is activated
-        if self.is_element_by_id_present('dropdownMenuAddProductTypeMember'):
+        if self.is_element_by_id_present("dropdownMenuAddProductTypeMember"):
             # Open the menu to manage members and click the 'Delete' button
             # The first member in the list is the admin user which was inserted by a fixture
             # The second member is the user we are looking for
@@ -181,29 +181,29 @@ def test_product_type_delete_product_type_member(self):
             # "Click" the submit button to complete the transaction
             driver.find_element(By.CSS_SELECTOR, "input.btn.btn-danger").click()
             # Assert the message to determine success status
-            self.assertTrue(self.is_success_message_present(text='Product type member deleted successfully.'))
+            self.assertTrue(self.is_success_message_present(text="Product type member deleted successfully."))
             # Query the site to determine if the member has been deleted
             self.assertTrue(len(driver.find_elements(By.NAME, "member_user")) == 1)
         else:
-            print('--------------------------------')
-            print('test_product_delete_product_member: Not executed because legacy authorization is active')
-            print('--------------------------------')
+            print("--------------------------------")
+            print("test_product_delete_product_member: Not executed because legacy authorization is active")
+            print("--------------------------------")

 def suite():
     suite = unittest.TestSuite()
     # Add each test to the suite to be run
     # success and failure are output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(UserTest('test_create_user'))
-    suite.addTest(ProductTypeMemberTest('test_user_add_product_type_member'))
-    suite.addTest(ProductTypeMemberTest('test_user_edit_product_type_member'))
-    suite.addTest(ProductTypeMemberTest('test_user_delete_product_type_member'))
-    suite.addTest(ProductTypeMemberTest('test_product_type_add_product_type_member'))
-    suite.addTest(ProductTypeMemberTest('test_product_type_edit_product_type_member'))
-    suite.addTest(ProductTypeMemberTest('test_product_type_delete_product_type_member'))
-    suite.addTest(UserTest('test_user_delete'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(UserTest("test_create_user"))
+    suite.addTest(ProductTypeMemberTest("test_user_add_product_type_member"))
+    suite.addTest(ProductTypeMemberTest("test_user_edit_product_type_member"))
+    suite.addTest(ProductTypeMemberTest("test_user_delete_product_type_member"))
+    suite.addTest(ProductTypeMemberTest("test_product_type_add_product_type_member"))
+    suite.addTest(ProductTypeMemberTest("test_product_type_edit_product_type_member"))
+    suite.addTest(ProductTypeMemberTest("test_product_type_delete_product_type_member"))
+    suite.addTest(UserTest("test_user_delete"))

     return suite
diff --git a/tests/product_type_test.py b/tests/product_type_test.py
index f8854899a2..38100b18de 100644
--- a/tests/product_type_test.py
+++ b/tests/product_type_test.py
@@ -19,7 +19,7 @@ def test_create_product_type(self):
         driver.find_element(By.ID, "id_critical_product").click()
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
-        self.assertTrue(self.is_success_message_present(text='Product type added successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Product type added successfully."))
         self.assertFalse(self.is_error_message_present())

     @on_exception_html_source_logger
@@ -43,7 +43,7 @@ def test_create_product_for_product_type(self):

         # Assert on the query to determine status of failure
         # Also confirm success even if Product is returned as already exists for test sake
-        self.assertTrue(self.is_success_message_present(text='Product added successfully'))
+        self.assertTrue(self.is_success_message_present(text="Product added successfully"))
         self.assertFalse(self.is_error_message_present())

     def test_view_product_type(self):
@@ -54,7 +54,7 @@ def test_view_product_type(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "View").click()

         product_type_text = driver.find_element(By.ID, "id_heading").text
-        self.assertEqual('Product Type Product test type', product_type_text)
+        self.assertEqual("Product Type Product test type", product_type_text)

     def test_edit_product_type(self):
         print("\n\nDebug Print Log: testing 'edit product type' \n")
@@ -66,7 +66,7 @@ def test_edit_product_type(self):
         driver.find_element(By.ID, "id_name").send_keys("Edited product test type")
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()

-        self.assertTrue(self.is_success_message_present(text='Product type updated successfully.'))
+        self.assertTrue(self.is_success_message_present(text="Product type updated successfully."))

     def test_delete_product_type(self):
         print("\n\nDebug Print Log: testing 'delete product type' \n")
@@ -77,18 +77,18 @@ def test_delete_product_type(self):
         driver.find_element(By.PARTIAL_LINK_TEXT, "Delete").click()
         driver.find_element(By.CSS_SELECTOR, "button.btn.btn-danger").click()

-        self.assertTrue(self.is_success_message_present(text='Product Type and relationships removed.'))
+        self.assertTrue(self.is_success_message_present(text="Product Type and relationships removed."))

 def suite():
     suite = unittest.TestSuite()
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(ProductTypeTest('test_create_product_type'))
-    suite.addTest(ProductTypeTest('test_view_product_type'))
-    suite.addTest(ProductTypeTest('test_create_product_for_product_type'))
-    suite.addTest(ProductTypeTest('test_edit_product_type'))
-    suite.addTest(ProductTypeTest('test_delete_product_type'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(ProductTypeTest("test_create_product_type"))
+    suite.addTest(ProductTypeTest("test_view_product_type"))
+    suite.addTest(ProductTypeTest("test_create_product_for_product_type"))
+    suite.addTest(ProductTypeTest("test_edit_product_type"))
+    suite.addTest(ProductTypeTest("test_delete_product_type"))

     return suite
diff --git a/tests/regulations_test.py b/tests/regulations_test.py
index 912272bbae..8bb7b452e7 100644
--- a/tests/regulations_test.py
+++ b/tests/regulations_test.py
@@ -12,9 +12,9 @@ def login_page(self):
         driver = self.driver
         driver.get(self.base_url + "login")
         driver.find_element(By.ID, "id_username").clear()
-        driver.find_element(By.ID, "id_username").send_keys(os.environ['DD_ADMIN_USER'])
+        driver.find_element(By.ID, "id_username").send_keys(os.environ["DD_ADMIN_USER"])
         driver.find_element(By.ID, "id_password").clear()
-        driver.find_element(By.ID, "id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
+        driver.find_element(By.ID, "id_password").send_keys(os.environ["DD_ADMIN_PASSWORD"])
         driver.find_element(By.CSS_SELECTOR,
"button.btn.btn-success").click() return driver @@ -37,7 +37,7 @@ def test_create_regulation(self): driver.find_element(By.ID, "id_reference").send_keys("http://www.psa.eu") driver.find_element(By.CSS_SELECTOR, ".col-sm-offset-2 > .btn").click() - self.assertTrue(self.is_success_message_present(text='Regulation Successfully Created.')) + self.assertTrue(self.is_success_message_present(text="Regulation Successfully Created.")) def test_edit_regulation(self): driver = self.driver @@ -47,7 +47,7 @@ def test_edit_regulation(self): driver.find_element(By.ID, "id_name").clear() driver.find_element(By.ID, "id_name").send_keys("Edited PSA test") driver.find_element(By.ID, "submit").click() - self.assertTrue(self.is_success_message_present(text='Regulation Successfully Updated.')) + self.assertTrue(self.is_success_message_present(text="Regulation Successfully Updated.")) def test_delete_regulation(self): driver = self.driver @@ -56,16 +56,16 @@ def test_delete_regulation(self): driver.find_element(By.LINK_TEXT, "Edited PSA test").click() driver.find_element(By.ID, "delete").click() - self.assertTrue(self.is_success_message_present(text='Regulation Deleted.')) + self.assertTrue(self.is_success_message_present(text="Regulation Deleted.")) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(RegulationTest('test_create_regulation')) - suite.addTest(RegulationTest('test_edit_regulation')) - suite.addTest(RegulationTest('test_delete_regulation')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(RegulationTest("test_create_regulation")) + suite.addTest(RegulationTest("test_edit_regulation")) + suite.addTest(RegulationTest("test_delete_regulation")) return suite diff --git a/tests/report_builder_test.py b/tests/report_builder_test.py index 6025d1c0a4..25127e55c4 100644 --- a/tests/report_builder_test.py +++ b/tests/report_builder_test.py @@ -26,12 +26,12 @@ def enter_values(self, driver): in_use = driver.find_element(By.ID, "sortable2").find_elements(By.TAG_NAME, "li") for widget in in_use: class_names = widget.get_attribute("class") - if 'cover-page' in class_names: + if "cover-page" in class_names: inputs = widget.find_elements(By.TAG_NAME, "input") for field in inputs: - field.send_keys('cover words') - if 'wysiwyg-content' in class_names: - widget.find_element(By.CLASS_NAME, "editor").send_keys('wysiwyg') + field.send_keys("cover words") + if "wysiwyg-content" in class_names: + widget.find_element(By.CLASS_NAME, "editor").send_keys("wysiwyg") def generate_HTML_report(self): driver = self.driver @@ -39,7 +39,7 @@ def generate_HTML_report(self): self.move_blocks(driver) self.enter_values(driver) Select(driver.find_element(By.ID, "id_report_type")).select_by_visible_text("HTML") - driver.find_element(By.ID, "id_report_name").send_keys('Test Report') + driver.find_element(By.ID, "id_report_name").send_keys("Test Report") driver.find_element(By.CLASS_NAME, "run_report").click() self.assertTrue(driver.current_url == self.base_url + "reports/custom") @@ -49,7 +49,7 @@ def generate_AsciiDoc_report(self): self.move_blocks(driver) self.enter_values(driver) Select(driver.find_element(By.ID, "id_report_type")).select_by_visible_text("AsciiDoc") - driver.find_element(By.ID, "id_report_name").send_keys('Test Report') + driver.find_element(By.ID, "id_report_name").send_keys("Test Report") driver.find_element(By.CLASS_NAME, "run_report").click() 
self.assertTrue(driver.current_url == self.base_url + "reports/custom") @@ -57,7 +57,7 @@ def test_product_type_report(self): driver = self.driver driver.get(self.base_url + "product/type") driver.find_element(By.ID, "dropdownMenuProductType").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Report').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Report").click() my_select = Select(driver.find_element(By.ID, "id_include_finding_notes")) my_select.select_by_index(1) @@ -70,14 +70,14 @@ def test_product_type_report(self): my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents")) my_select.select_by_index(1) - driver.find_element(By.NAME, '_generate').click() + driver.find_element(By.NAME, "_generate").click() def test_product_report(self): driver = self.driver self.goto_product_overview(driver) driver.find_element(By.LINK_TEXT, "QA Test").click() driver.find_element(By.ID, "dropdownMenu1").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Product Report').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Product Report").click() my_select = Select(driver.find_element(By.ID, "id_include_finding_notes")) my_select.select_by_index(1) @@ -88,17 +88,17 @@ def test_product_report(self): my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents")) my_select.select_by_index(1) - driver.find_element(By.NAME, '_generate').click() + driver.find_element(By.NAME, "_generate").click() def test_engagement_report(self): driver = self.driver self.goto_product_overview(driver) driver.find_element(By.LINK_TEXT, "QA Test").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Engagements').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Engagements").click() driver.find_element(By.LINK_TEXT, "View Engagements").click() driver.find_element(By.LINK_TEXT, "Ad Hoc Engagement").click() driver.find_element(By.ID, "dropdownMenu1").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Report').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Report").click() my_select = Select(driver.find_element(By.ID, "id_include_finding_notes")) my_select.select_by_index(1) @@ -108,18 +108,18 @@ def test_engagement_report(self): my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents")) my_select.select_by_index(1) - driver.find_element(By.NAME, '_generate').click() + driver.find_element(By.NAME, "_generate").click() def test_test_report(self): driver = self.driver self.goto_product_overview(driver) driver.find_element(By.LINK_TEXT, "QA Test").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Engagements').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Engagements").click() driver.find_element(By.LINK_TEXT, "View Engagements").click() driver.find_element(By.LINK_TEXT, "Ad Hoc Engagement").click() driver.find_element(By.LINK_TEXT, "Pen Test").click() driver.find_element(By.ID, "dropdownMenu1").click() - driver.find_element(By.PARTIAL_LINK_TEXT, 'Report').click() + driver.find_element(By.PARTIAL_LINK_TEXT, "Report").click() my_select = Select(driver.find_element(By.ID, "id_include_finding_notes")) my_select.select_by_index(1) @@ -129,13 +129,13 @@ def test_test_report(self): my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents")) my_select.select_by_index(1) - driver.find_element(By.NAME, '_generate').click() + driver.find_element(By.NAME, "_generate").click() def test_product_endpoint_report(self): driver = self.driver self.goto_product_overview(driver) driver.find_element(By.LINK_TEXT, "QA Test").click() - 
driver.find_element(By.PARTIAL_LINK_TEXT, 'Endpoints').click()
+        driver.find_element(By.PARTIAL_LINK_TEXT, "Endpoints").click()
         driver.find_element(By.LINK_TEXT, "Endpoint Report").click()

         # extra dropdown click
@@ -157,7 +157,7 @@ def test_product_endpoint_report(self):
         my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents"))
         my_select.select_by_index(1)

-        driver.find_element(By.NAME, '_generate').click()
+        driver.find_element(By.NAME, "_generate").click()

     def test_product_list_report(self):
         driver = self.driver
@@ -174,29 +174,29 @@ def test_product_list_report(self):
         my_select = Select(driver.find_element(By.ID, "id_include_table_of_contents"))
         my_select.select_by_index(1)

-        driver.find_element(By.NAME, '_generate').click()
+        driver.find_element(By.NAME, "_generate").click()

 def add_report_tests_to_suite(suite):
     # Add each test to the suite to be run
     # success and failure are output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(ProductTest('test_create_product'))
-    suite.addTest(ProductTest('test_add_product_finding'))
-    suite.addTest(ProductTest('test_add_product_endpoints'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(ProductTest("test_create_product"))
+    suite.addTest(ProductTest("test_add_product_finding"))
+    suite.addTest(ProductTest("test_add_product_endpoints"))

-    suite.addTest(ReportBuilderTest('generate_HTML_report'))
-    suite.addTest(ReportBuilderTest('generate_AsciiDoc_report'))
+    suite.addTest(ReportBuilderTest("generate_HTML_report"))
+    suite.addTest(ReportBuilderTest("generate_AsciiDoc_report"))

     # we add reports here as we now have a product that triggers some logic inside reports
-    suite.addTest(ReportBuilderTest('test_product_type_report'))
-    suite.addTest(ReportBuilderTest('test_product_report'))
-    suite.addTest(ReportBuilderTest('test_engagement_report'))
-    suite.addTest(ReportBuilderTest('test_test_report'))
-    suite.addTest(ReportBuilderTest('test_product_endpoint_report'))
+    suite.addTest(ReportBuilderTest("test_product_type_report"))
+    suite.addTest(ReportBuilderTest("test_product_report"))
+    suite.addTest(ReportBuilderTest("test_engagement_report"))
+    suite.addTest(ReportBuilderTest("test_test_report"))
+    suite.addTest(ReportBuilderTest("test_product_endpoint_report"))

-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(ProductTest("test_delete_product"))

     return suite
diff --git a/tests/search_test.py b/tests/search_test.py
index 3da6465380..a8815121e7 100644
--- a/tests/search_test.py
+++ b/tests/search_test.py
@@ -11,99 +11,99 @@ def test_search(self):
         # very basic search test to see if it doesn't 500
         driver = self.goto_some_page()
         driver.find_element(By.ID, "simple_search").clear()
-        driver.find_element(By.ID, "simple_search").send_keys('finding')
+        driver.find_element(By.ID, "simple_search").send_keys("finding")
         driver.find_element(By.ID, "simple_search_submit").click()

     def test_search_vulnerability_id(self):
         # very basic search test to see if it doesn't 500
         driver = self.goto_some_page()
         driver.find_element(By.ID, "simple_search").clear()
-        driver.find_element(By.ID, "simple_search").send_keys('vulnerability_id:CVE-2020-12345')
+        driver.find_element(By.ID, "simple_search").send_keys("vulnerability_id:CVE-2020-12345")
         driver.find_element(By.ID, "simple_search_submit").click()
         driver.find_element(By.ID, "simple_search").clear()
-        driver.find_element(By.ID,
"simple_search").send_keys('CVE-2020-12345') + driver.find_element(By.ID, "simple_search").send_keys("CVE-2020-12345") driver.find_element(By.ID, "simple_search_submit").click() def test_search_tag(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('tag:magento') + driver.find_element(By.ID, "simple_search").send_keys("tag:magento") driver.find_element(By.ID, "simple_search_submit").click() def test_search_product_tag(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('product-tag:java') + driver.find_element(By.ID, "simple_search").send_keys("product-tag:java") driver.find_element(By.ID, "simple_search_submit").click() def test_search_engagement_tag(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('engagement-tag:php') + driver.find_element(By.ID, "simple_search").send_keys("engagement-tag:php") driver.find_element(By.ID, "simple_search_submit").click() def test_search_test_tag(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('test-tag:go') + driver.find_element(By.ID, "simple_search").send_keys("test-tag:go") driver.find_element(By.ID, "simple_search_submit").click() def test_search_tags(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('tags:php') + driver.find_element(By.ID, "simple_search").send_keys("tags:php") driver.find_element(By.ID, "simple_search_submit").click() def test_search_product_tags(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('product-tags:java') + driver.find_element(By.ID, "simple_search").send_keys("product-tags:java") driver.find_element(By.ID, "simple_search_submit").click() def test_search_engagement_tags(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('engagement-tags:php') + driver.find_element(By.ID, "simple_search").send_keys("engagement-tags:php") driver.find_element(By.ID, "simple_search_submit").click() def test_search_test_tags(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('test-tags:go') + driver.find_element(By.ID, "simple_search").send_keys("test-tags:go") driver.find_element(By.ID, "simple_search_submit").click() def test_search_id(self): # very basic search test to see if it doesn't 500 driver = self.goto_some_page() driver.find_element(By.ID, "simple_search").clear() - driver.find_element(By.ID, "simple_search").send_keys('id:1') + driver.find_element(By.ID, "simple_search").send_keys("id:1") driver.find_element(By.ID, "simple_search_submit").click() def suite(): suite = unittest.TestSuite() 
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(BaseTestCase('disable_block_execution'))
-    suite.addTest(SearchTests('test_search'))
-    suite.addTest(SearchTests('test_search_vulnerability_id'))
-    suite.addTest(SearchTests('test_search_tag'))
-    suite.addTest(SearchTests('test_search_product_tag'))
-    suite.addTest(SearchTests('test_search_engagement_tag'))
-    suite.addTest(SearchTests('test_search_test_tag'))
-    suite.addTest(SearchTests('test_search_tags'))
-    suite.addTest(SearchTests('test_search_product_tags'))
-    suite.addTest(SearchTests('test_search_engagement_tags'))
-    suite.addTest(SearchTests('test_search_test_tags'))
-    suite.addTest(SearchTests('test_search_id'))
+    suite.addTest(BaseTestCase("test_login"))
+    suite.addTest(BaseTestCase("disable_block_execution"))
+    suite.addTest(SearchTests("test_search"))
+    suite.addTest(SearchTests("test_search_vulnerability_id"))
+    suite.addTest(SearchTests("test_search_tag"))
+    suite.addTest(SearchTests("test_search_product_tag"))
+    suite.addTest(SearchTests("test_search_engagement_tag"))
+    suite.addTest(SearchTests("test_search_test_tag"))
+    suite.addTest(SearchTests("test_search_tags"))
+    suite.addTest(SearchTests("test_search_product_tags"))
+    suite.addTest(SearchTests("test_search_engagement_tags"))
+    suite.addTest(SearchTests("test_search_test_tags"))
+    suite.addTest(SearchTests("test_search_id"))

     return suite
diff --git a/tests/sla_configuration_test.py b/tests/sla_configuration_test.py
index a388cd436d..f2a06e8b3e 100644
--- a/tests/sla_configuration_test.py
+++ b/tests/sla_configuration_test.py
@@ -12,9 +12,9 @@ def login_page(self):
         driver = self.driver
         driver.get(self.base_url + "login")
         driver.find_element(By.ID, "id_username").clear()
-        driver.find_element(By.ID, "id_username").send_keys(os.environ['DD_ADMIN_USER'])
+        driver.find_element(By.ID, "id_username").send_keys(os.environ["DD_ADMIN_USER"])
         driver.find_element(By.ID, "id_password").clear()
-        driver.find_element(By.ID, "id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
+        driver.find_element(By.ID, "id_password").send_keys(os.environ["DD_ADMIN_PASSWORD"])
         driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
         return driver
@@ -37,7 +37,7 @@ def test_add_sla_config(self):
         driver.find_element(By.ID, "id_critical").send_keys("4")
-        driver.find_element(By.VALUE, "Submit").click()
+        # By.VALUE is not a Selenium locator strategy; target the submit button via a CSS attribute selector instead
+        driver.find_element(By.CSS_SELECTOR, "input[value='Submit']").click()

-        self.assertTrue(self.is_success_message_present(text='SLA configuration Successfully Created.'))
+        self.assertTrue(self.is_success_message_present(text="SLA configuration Successfully Created."))

     def test_edit_sla_config(self):
         driver = self.driver
@@ -46,31 +46,31 @@ def test_edit_sla_config(self):
         driver.find_element(By.ID, "id_name").clear()
         driver.find_element(By.ID, "id_name").send_keys("Edited Test SLA Configuration test")
         driver.find_element(By.ID, "submit").click()
-        self.assertTrue(self.is_success_message_present(text='SLA configuration Successfully Updated.'))
+        self.assertTrue(self.is_success_message_present(text="SLA configuration Successfully Updated."))

     def test_delete_sla_config(self):
         driver = self.driver
         driver.get(self.base_url + "sla_config")
         driver.find_element(By.LINK_TEXT, "Edited Test SLA Configuration test").click()
         driver.find_element(By.ID, "delete").click()
-        self.assertTrue(self.is_success_message_present(text='SLA configuration Deleted.'))
+        self.assertTrue(self.is_success_message_present(text="SLA configuration Deleted."))

     def test_delete_default_sla(self):
         driver = self.driver
         driver.get(self.base_url + "sla_config")
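         # Deleting the default SLA configuration is expected to be rejected; the assertion below checks for the error message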
driver.find_element(By.LINK_TEXT, "Edited Test SLA Configuration test").click() driver.find_element(By.ID, "delete").click() - self.assertTrue(self.is_error_message_present(text='The Default SLA Configuration cannot be deleted.')) + self.assertTrue(self.is_error_message_present(text="The Default SLA Configuration cannot be deleted.")) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(SLAConfigurationTest('test_add_sla_config')) - suite.addTest(SLAConfigurationTest('test_edit_sla_config')) - suite.addTest(SLAConfigurationTest('test_delete_sla_config')) - suite.addTest(SLAConfigurationTest('test_delete_default_sla')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(SLAConfigurationTest("test_add_sla_config")) + suite.addTest(SLAConfigurationTest("test_edit_sla_config")) + suite.addTest(SLAConfigurationTest("test_delete_sla_config")) + suite.addTest(SLAConfigurationTest("test_delete_default_sla")) return suite diff --git a/tests/test_test.py b/tests/test_test.py index a78e99889a..59ebdefc86 100644 --- a/tests/test_test.py +++ b/tests/test_test.py @@ -53,7 +53,7 @@ def test_create_test(self): # Navigate to the Product page to select the product we created earlier self.goto_product_overview(driver) # wait for product_wrapper div as datatables javascript modifies the DOM on page load. - driver.find_element(By.ID, 'products_wrapper') + driver.find_element(By.ID, "products_wrapper") # Select and click on the particular product to create test for driver.find_element(By.LINK_TEXT, "QA Test").click() # # "Click" the dropdown option @@ -75,7 +75,7 @@ def test_create_test(self): # engagement target start and target end already have defaults # we can safely skip # Testing Lead: This can be the logged in user - Select(driver.find_element(By.ID, "id_lead")).select_by_visible_text('Admin User (admin)') + Select(driver.find_element(By.ID, "id_lead")).select_by_visible_text("Admin User (admin)") # engagement status Select(driver.find_element(By.ID, "id_status")).select_by_visible_text("In Progress") # "Click" the 'Add Test' button to Add Test to engagement @@ -94,7 +94,7 @@ def test_create_test(self): # Query the site to determine if the Test has been added # Assert on the query to determine success or failure - self.assertTrue(self.is_success_message_present(text='Test added successfully')) + self.assertTrue(self.is_success_message_present(text="Test added successfully")) def test_edit_test(self): # Login to the site. @@ -115,7 +115,7 @@ def test_edit_test(self): # Query the site to determine if the Test has been updated # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Test saved.')) + self.assertTrue(self.is_success_message_present(text="Test saved.")) def test_add_note(self): # Login to the site. 
@@ -135,7 +135,7 @@ def test_add_note(self): # Query the site to determine if the Test has been updated # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Note added successfully.')) + self.assertTrue(self.is_success_message_present(text="Note added successfully.")) def test_add_test_finding(self): # Test To Add Finding To Test @@ -181,10 +181,10 @@ def test_add_test_finding(self): # Query the site to determine if the finding has been added # Assert to the query to dtermine status of failure - self.assertTrue(self.is_text_present_on_page(text='App Vulnerable to XSS2')) + self.assertTrue(self.is_text_present_on_page(text="App Vulnerable to XSS2")) # Select and click on the finding to check if endpoint has been added driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS2").click() - self.assertTrue(self.is_text_present_on_page(text='product2.finding.com')) + self.assertTrue(self.is_text_present_on_page(text="product2.finding.com")) def test_add_stub_finding(self): # Login to the site. @@ -216,14 +216,14 @@ def test_add_and_promote_stub_finding(self): # Click on link of finding name to promote to finding driver.find_elements(By.NAME, "stub_finding_name")[0].click() # Check we have the correct stub finding - self.assertEqual(driver.find_element(By.ID, "id_title").get_attribute('value'), 'App Vulnerable to XSS3') + self.assertEqual(driver.find_element(By.ID, "id_title").get_attribute("value"), "App Vulnerable to XSS3") # Edit finding Description driver.find_element(By.ID, "id_cvssv3_score").send_keys(Keys.TAB, Keys.TAB, "This is a promoted stub finding") # "Click" the Done button to Edit the finding driver.find_element(By.ID, "submit").click() # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Finding promoted successfully')) + self.assertTrue(self.is_success_message_present(text="Finding promoted successfully")) @on_exception_html_source_logger def test_add_and_delete_stub_finding(self): @@ -259,9 +259,9 @@ def test_merge_findings(self): driver.find_element(By.ID, "merge_findings").click() - Select(driver.find_element(By.ID, "id_finding_action")).select_by_visible_text('Inactive') + Select(driver.find_element(By.ID, "id_finding_action")).select_by_visible_text("Inactive") - Select(driver.find_element(By.ID, "id_findings_to_merge")).select_by_visible_text('App Vulnerable to XSS3') + Select(driver.find_element(By.ID, "id_findings_to_merge")).select_by_visible_text("App Vulnerable to XSS3") driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() @@ -281,26 +281,26 @@ def test_delete_test(self): # "Click" the delete button to complete the transaction driver.find_element(By.CSS_SELECTOR, "button.btn.btn-danger").click() # Assert ot the query to dtermine status of failure - self.assertTrue(self.is_success_message_present(text='Test and relationships removed.')) + self.assertTrue(self.is_success_message_present(text="Test and relationships removed.")) def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test - suite.addTest(BaseTestCase('test_login')) - suite.addTest(ProductTest('test_create_product')) - suite.addTest(ProductTest('test_add_product_finding')) - suite.addTest(TestUnitTest('test_view_test')) - suite.addTest(TestUnitTest('test_create_test')) - suite.addTest(TestUnitTest('test_edit_test')) - suite.addTest(TestUnitTest('test_add_test_finding')) - 
suite.addTest(TestUnitTest('test_add_and_promote_stub_finding')) - suite.addTest(TestUnitTest('test_merge_findings')) - suite.addTest(TestUnitTest('test_add_and_delete_stub_finding')) - suite.addTest(TestUnitTest('test_add_note')) - suite.addTest(TestUnitTest('test_delete_test')) - suite.addTest(ProductTest('test_delete_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(ProductTest("test_create_product")) + suite.addTest(ProductTest("test_add_product_finding")) + suite.addTest(TestUnitTest("test_view_test")) + suite.addTest(TestUnitTest("test_create_test")) + suite.addTest(TestUnitTest("test_edit_test")) + suite.addTest(TestUnitTest("test_add_test_finding")) + suite.addTest(TestUnitTest("test_add_and_promote_stub_finding")) + suite.addTest(TestUnitTest("test_merge_findings")) + suite.addTest(TestUnitTest("test_add_and_delete_stub_finding")) + suite.addTest(TestUnitTest("test_add_note")) + suite.addTest(TestUnitTest("test_delete_test")) + suite.addTest(ProductTest("test_delete_product")) return suite diff --git a/tests/tool_config.py b/tests/tool_config.py index 13db275955..8259d2ba2d 100644 --- a/tests/tool_config.py +++ b/tests/tool_config.py @@ -31,11 +31,11 @@ def test_setup_tt_via_api_scan_configuration(self): # Follow instuctions to create ToolType driver.find_element(By.ID, "link_tt_edgescan_scan").click() # Check if form is prefieled - self.assertEqual(driver.find_element(By.ID, "id_name").get_attribute('value'), "Edgescan") + self.assertEqual(driver.find_element(By.ID, "id_name").get_attribute("value"), "Edgescan") # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Tool Type Configuration Successfully Created.')) + self.assertTrue(self.is_success_message_present(text="Tool Type Configuration Successfully Created.")) self.assertFalse(self.is_error_message_present()) @on_exception_html_source_logger @@ -63,7 +63,7 @@ def test_setup_tc_via_api_scan_configuration(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='Tool Configuration successfully updated.')) + self.assertTrue(self.is_success_message_present(text="Tool Configuration successfully updated.")) self.assertFalse(self.is_error_message_present()) @on_exception_html_source_logger @@ -87,24 +87,24 @@ def test_setup_api_scan_configuration(self): # "Click" the submit button to complete the transaction driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() - self.assertTrue(self.is_success_message_present(text='API Scan Configuration added successfully.')) + self.assertTrue(self.is_success_message_present(text="API Scan Configuration added successfully.")) self.assertFalse(self.is_error_message_present()) def suite(): suite = unittest.TestSuite() - suite.addTest(BaseTestCase('test_login')) - suite.addTest(BaseTestCase('disable_block_execution')) - suite.addTest(ProductTest('test_create_product')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(BaseTestCase("disable_block_execution")) + suite.addTest(ProductTest("test_create_product")) # Usable if instance doesn't autocreate all TTs # suite.addTest(ToolConfigTest('test_list_api_scan_configuration_tt_and_tc_missing')) # suite.addTest(ToolConfigTest('test_setup_tt_via_api_scan_configuration')) - 
suite.addTest(ToolConfigTest('test_list_api_scan_configuration_tt_ready_tc_missing'))
-    suite.addTest(ToolConfigTest('test_setup_tc_via_api_scan_configuration'))
-    suite.addTest(ToolConfigTest('test_list_api_scan_configuration_tt_and_tc_ready'))
-    suite.addTest(ToolConfigTest('test_setup_api_scan_configuration'))
-    suite.addTest(ProductTest('test_delete_product'))
+    suite.addTest(ToolConfigTest("test_list_api_scan_configuration_tt_ready_tc_missing"))
+    suite.addTest(ToolConfigTest("test_setup_tc_via_api_scan_configuration"))
+    suite.addTest(ToolConfigTest("test_list_api_scan_configuration_tt_and_tc_ready"))
+    suite.addTest(ToolConfigTest("test_setup_api_scan_configuration"))
+    suite.addTest(ProductTest("test_delete_product"))
 
     return suite
diff --git a/tests/user_test.py b/tests/user_test.py
index 14fa6b37d7..43659b3f16 100644
--- a/tests/user_test.py
+++ b/tests/user_test.py
@@ -41,8 +41,8 @@ def test_create_user(self):
 
         # Query the site to determine if the user has been created
         # Assert to the query to determine status of failure
-        self.assertTrue(self.is_success_message_present(text='User added successfully.')
-                        or self.is_help_message_present(text='A user with that username already exists.'))
+        self.assertTrue(self.is_success_message_present(text="User added successfully.")
+                        or self.is_help_message_present(text="A user with that username already exists."))
 
     def test_create_user_with_writer_global_role(self):
         # Login to the site.
@@ -74,8 +74,8 @@ def test_create_user_with_writer_global_role(self):
         driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
         # Query the site to determine if the user has been created
         # Assert to the query to determine status of failure
-        self.assertTrue(self.is_success_message_present(text='User added successfully.')
-                        or self.is_help_message_present(text='A user with that username already exists.'))
+        self.assertTrue(self.is_success_message_present(text="User added successfully.")
+                        or self.is_help_message_present(text="A user with that username already exists."))
 
     def enable_user_profile_writing(self):
         self.login_page()
@@ -122,7 +122,7 @@ def test_user_edit_permissions(self):
 
         # Query the site to determine if the User permission has been changed
         # Assert to the query to determine status of failure
-        self.assertTrue(self.is_success_message_present(text='User saved successfully.'))
+        self.assertTrue(self.is_success_message_present(text="User saved successfully."))
 
     def test_user_delete(self):
         # Login to the site. Password will have to be modified
@@ -150,7 +150,7 @@ def test_user_delete(self):
 
         # Query the site to determine if the User has been deleted
         # Assert to the query to determine status of failure
-        self.assertTrue(self.is_success_message_present(text='User and relationships removed.'))
+        self.assertTrue(self.is_success_message_present(text="User and relationships removed."))
 
     def test_user_with_writer_role_delete(self):
         # Login to the site. Password will have to be modified
@@ -178,7 +178,7 @@ def test_user_with_writer_role_delete(self):
 
         # Query the site to determine if the User has been deleted
         # Assert to the query to determine status of failure
-        self.assertTrue(self.is_success_message_present(text='User and relationships removed.'))
+        self.assertTrue(self.is_success_message_present(text="User and relationships removed."))
 
     def test_standard_user_login(self):
         self.login_standard_page()
@@ -187,19 +187,19 @@ def test_admin_profile_form(self):
         self.enable_user_profile_writing()
         self.login_page()
         self.driver.get(self.base_url + "profile")
-        self.assertTrue(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
+        self.assertTrue(self.driver.find_element(By.ID, "id_first_name").is_enabled())
 
     def test_user_profile_form_disabled(self):
         self.disable_user_profile_writing()
         self.login_standard_page()
         self.driver.get(self.base_url + "profile")
-        self.assertFalse(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
+        self.assertFalse(self.driver.find_element(By.ID, "id_first_name").is_enabled())
 
     def test_user_profile_form_enabled(self):
         self.enable_user_profile_writing()
         self.login_standard_page()
         self.driver.get(self.base_url + "profile")
-        self.assertTrue(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
+        self.assertTrue(self.driver.find_element(By.ID, "id_first_name").is_enabled())
 
     def test_forgot_password(self):
         driver = self.driver
@@ -210,7 +210,7 @@ def test_forgot_password(self):
         driver.find_element(By.ID, "id_email").send_keys("propersam@example.com")
         driver.find_element(By.ID, "reset-password").click()
 
-        self.assertTrue(self.is_text_present_on_page(text='We’ve emailed you instructions for setting your password'))
+        self.assertTrue(self.is_text_present_on_page(text="We’ve emailed you instructions for setting your password"))
 
     def test_user_edit_configuration(self):
 
@@ -218,7 +218,7 @@ def test_user_edit_configuration(self):
         driver = self.driver
         self.login_standard_page()
         with self.assertRaises(NoSuchElementException):
-            driver.find_element(By.ID, 'id_user_menu')
+            driver.find_element(By.ID, "id_user_menu")
 
         # Login as superuser and activate view user configuration for standard user
         self.login_page()
@@ -242,7 +242,7 @@ def test_user_edit_configuration(self):
 
         # Login as standard user and check the user menu does exist now
         self.login_standard_page()
-        driver.find_element(By.ID, 'id_user_menu')
+        driver.find_element(By.ID, "id_user_menu")
 
         # Navigate to User Management page
         driver.get(self.base_url + "user")
         # Select the previously created user to edit
@@ -258,26 +258,26 @@ def test_user_edit_configuration(self):
         driver.find_element(By.ID, "dropdownMenuUser").click()
         driver.find_element(By.ID, "viewUser").click()
         # Check user cannot edit configuration permissions
-        self.assertFalse(self.driver.find_element(By.ID, 'id_add_development_environment').is_enabled())
+        self.assertFalse(self.driver.find_element(By.ID, "id_add_development_environment").is_enabled())
 
 
 def suite():
     suite = unittest.TestSuite()
     # Add each test to the suite to be run
     # success and failure are output by the test
-    suite.addTest(BaseTestCase('test_login'))
-    suite.addTest(UserTest('test_create_user'))
-    suite.addTest(UserTest('test_create_user_with_writer_global_role'))
-    suite.addTest(UserTest('test_admin_profile_form'))
-    suite.addTest(UserTest('test_standard_user_login'))
-    suite.addTest(UserTest('test_user_profile_form_disabled'))
-    suite.addTest(UserTest('test_user_profile_form_enabled'))
-
suite.addTest(UserTest('test_forgot_password')) - suite.addTest(UserTest('test_user_edit_configuration')) - suite.addTest(BaseTestCase('test_login')) - suite.addTest(UserTest('test_user_edit_permissions')) - suite.addTest(UserTest('test_user_delete')) - suite.addTest(UserTest('test_user_with_writer_role_delete')) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(UserTest("test_create_user")) + suite.addTest(UserTest("test_create_user_with_writer_global_role")) + suite.addTest(UserTest("test_admin_profile_form")) + suite.addTest(UserTest("test_standard_user_login")) + suite.addTest(UserTest("test_user_profile_form_disabled")) + suite.addTest(UserTest("test_user_profile_form_enabled")) + suite.addTest(UserTest("test_forgot_password")) + suite.addTest(UserTest("test_user_edit_configuration")) + suite.addTest(BaseTestCase("test_login")) + suite.addTest(UserTest("test_user_edit_permissions")) + suite.addTest(UserTest("test_user_delete")) + suite.addTest(UserTest("test_user_with_writer_role_delete")) return suite diff --git a/tests/zap.py b/tests/zap.py index db0f77bf3d..41f6e10327 100755 --- a/tests/zap.py +++ b/tests/zap.py @@ -24,7 +24,7 @@ class Main: print("Error connecting to ZAP, exiting.") sys.exit(0) - zap = ZAPv2(proxies={'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}) + zap = ZAPv2(proxies={"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}) apikey = "" # user_input_obj = User_Input() #Creating object for class User_Input @@ -59,24 +59,24 @@ class Main: # Wait for passive scanning to complete while (int(zap.pscan.records_to_scan) > 0): - print('Records to passive scan : ' + zap.pscan.records_to_scan) + print("Records to passive scan : " + zap.pscan.records_to_scan) time.sleep(15) - print('Passive scanning complete') + print("Passive scanning complete") - print('Actively Scanning target ' + targetURL) + print("Actively Scanning target " + targetURL) ascan_id = zap.ascan.scan(targetURL, None, None, None, None, None, apikey) # Can provide more options for active scan here instead of using None. 
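# Illustrative note (not part of the diff): the loop below polls zap.ascan.status()
# every 15 seconds until the active scan reports 100% complete. If a bounded wait
# were wanted, a hypothetical helper using the same python-owasp-zap-v2.4 client
# calls could look like this; the name and timeout values are assumptions, not
# project code:
#
#     def wait_for_active_scan(zap, ascan_id, timeout_seconds=1800, interval=15):
#         waited = 0
#         while int(zap.ascan.status(ascan_id)) < 100:
#             if waited >= timeout_seconds:
#                 raise TimeoutError("ZAP active scan did not finish in time")
#             time.sleep(interval)
#             waited += interval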
while (int(zap.ascan.status(ascan_id)) < 100): - print('Scan progress %: ' + zap.ascan.status(ascan_id)) + print("Scan progress %: " + zap.ascan.status(ascan_id)) time.sleep(15) - print('Scan completed') + print("Scan completed") # Report the results sort_by_url = collections.defaultdict(list) for alert in zap.core.alerts(): - sort_by_url[alert['url']].append({ - 'risk': alert['risk'], - 'alert': alert['alert'], + sort_by_url[alert["url"]].append({ + "risk": alert["risk"], + "alert": alert["alert"], }) summary = PrettyTable(["Risk", "Count"]) @@ -90,13 +90,13 @@ class Main: for url in sort_by_url: for details in sort_by_url[url]: - if details['risk'] == "Informational": + if details["risk"] == "Informational": info = info + 1 - if details['risk'] == "Low": + if details["risk"] == "Low": low = low + 1 - if details['risk'] == "Medium": + if details["risk"] == "Medium": medium = medium + 1 - if details['risk'] == "High": + if details["risk"] == "High": high = high + 1 summary.add_row(["Informational", info]) @@ -115,6 +115,6 @@ class Main: results.sortby = "Risk" for details in sort_by_url[url]: - results.add_row([details['risk'], details['alert']]) + results.add_row([details["risk"], details["alert"]]) print(results) diff --git a/unittests/authorization/test_authorization.py b/unittests/authorization/test_authorization.py index 726bfb544a..3987d6feb0 100644 --- a/unittests/authorization/test_authorization.py +++ b/unittests/authorization/test_authorization.py @@ -166,7 +166,7 @@ def setUpTestData(cls): def test_role_has_permission_exception(self): with self.assertRaisesMessage(RoleDoesNotExistError, - 'Role 9999 does not exist'): + "Role 9999 does not exist"): role_has_permission(9999, Permissions.Product_Type_Edit) def test_role_has_permission_true(self): @@ -179,7 +179,7 @@ def test_role_has_permission_false(self): def test_get_roles_for_permission_exception(self): with self.assertRaisesMessage(PermissionDoesNotExistError, - 'Permission 9999 does not exist'): + "Permission 9999 does not exist"): get_roles_for_permission(9999) def test_get_roles_for_permission_success(self): @@ -191,7 +191,7 @@ def test_user_has_permission_or_403_exception(self): with self.assertRaises(PermissionDenied): user_has_permission_or_403(self.user, self.product_type, Permissions.Product_Type_Delete) - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_or_403_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -203,14 +203,14 @@ def test_user_has_permission_or_403_success(self, mock_foo): def test_user_has_permission_exception(self): with self.assertRaisesMessage(dojo.authorization.authorization.NoAuthorizationImplementedError, - 'No authorization implemented for class Product_Type_Member and permission 1007'): + "No authorization implemented for class Product_Type_Member and permission 1007"): user_has_permission(self.user, self.product_type_member, Permissions.Product_Type_Delete) def test_user_has_permission_product_type_no_member(self): result = user_has_permission(self.user, self.product_type, Permissions.Product_Type_View) self.assertFalse(result) - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_product_type_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -230,7 +230,7 @@ def 
test_user_has_permission_superuser(self): self.user.is_superuser = False - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_product_type_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -245,7 +245,7 @@ def test_user_has_permission_product_no_member(self): result = user_has_permission(self.user, self.product, Permissions.Product_View) self.assertFalse(result) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_product_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -256,7 +256,7 @@ def test_user_has_permission_product_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_product_product_type_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -267,7 +267,7 @@ def test_user_has_permission_product_product_type_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_product_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -278,7 +278,7 @@ def test_user_has_permission_product_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_engagement_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -289,7 +289,7 @@ def test_user_has_permission_engagement_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_engagement_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -300,7 +300,7 @@ def test_user_has_permission_engagement_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_test_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -311,7 +311,7 @@ def test_user_has_permission_test_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_test_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -322,7 +322,7 @@ def test_user_has_permission_test_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + 
@patch("dojo.models.Product_Member.objects") def test_user_has_permission_finding_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -333,7 +333,7 @@ def test_user_has_permission_finding_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_finding_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -344,7 +344,7 @@ def test_user_has_permission_finding_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_stub_finding_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -355,7 +355,7 @@ def test_user_has_permission_stub_finding_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_stub_finding_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -366,7 +366,7 @@ def test_user_has_permission_stub_finding_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_endpoint_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -377,7 +377,7 @@ def test_user_has_permission_endpoint_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_endpoint_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -392,7 +392,7 @@ def test_user_has_permission_product_type_member_success_same_user(self): result = user_has_permission(self.user, self.product_type_member_owner, Permissions.Product_Type_Member_Delete) self.assertTrue(result) - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_product_type_member_no_permission(self, mock_foo): other_user = User() other_user.id = 2 @@ -410,7 +410,7 @@ def test_user_has_permission_product_type_member_no_permission(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=other_user) - @patch('dojo.models.Product_Type_Member.objects') + @patch("dojo.models.Product_Type_Member.objects") def test_user_has_permission_product_type_member_success(self, mock_foo): other_user = User() other_user.id = 2 @@ -432,7 +432,7 @@ def test_user_has_permission_product_member_success_same_user(self): result = user_has_permission(self.user, self.product_member_owner, Permissions.Product_Member_Delete) self.assertTrue(result) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_product_member_no_permission(self, mock_foo): 
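# Illustrative note (not part of the diff): these authorization tests patch the
# model's `objects` manager and assign `mock_foo.select_related.return_value = mock_foo`,
# so the mock returns itself from select_related() and the test can assert the
# subsequent queryset call directly on the same object, e.g.
# `mock_foo.filter.assert_called_with(user=self.user)` as seen above and below.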
other_user = User() other_user.id = 2 @@ -450,7 +450,7 @@ def test_user_has_permission_product_member_no_permission(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=other_user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_product_member_success(self, mock_foo): other_user = User() other_user.id = 2 @@ -468,7 +468,7 @@ def test_user_has_permission_product_member_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=other_user) - @patch('dojo.models.Product_Group.objects') + @patch("dojo.models.Product_Group.objects") def test_user_has_group_product_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -479,7 +479,7 @@ def test_user_has_group_product_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(group__users=self.user) - @patch('dojo.models.Product_Group.objects') + @patch("dojo.models.Product_Group.objects") def test_user_has_group_product_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -490,7 +490,7 @@ def test_user_has_group_product_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(group__users=self.user) - @patch('dojo.models.Product_Type_Group.objects') + @patch("dojo.models.Product_Type_Group.objects") def test_user_has_group_product_type_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -501,7 +501,7 @@ def test_user_has_group_product_type_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(group__users=self.user) - @patch('dojo.models.Product_Type_Group.objects') + @patch("dojo.models.Product_Type_Group.objects") def test_user_has_group_product_type_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -528,7 +528,7 @@ def test_user_has_global_role_global_permission_success(self): result = user_has_global_permission(self.user5, Permissions.Product_Type_Add) self.assertTrue(result) - @patch('dojo.models.Dojo_Group.objects') + @patch("dojo.models.Dojo_Group.objects") def test_user_in_group_with_global_role_no_permission(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -538,7 +538,7 @@ def test_user_in_group_with_global_role_no_permission(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(users=self.user3) - @patch('dojo.models.Dojo_Group.objects') + @patch("dojo.models.Dojo_Group.objects") def test_user_in_group_with_global_role_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -548,7 +548,7 @@ def test_user_in_group_with_global_role_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(users=self.user3) - @patch('dojo.models.Dojo_Group_Member.objects') + @patch("dojo.models.Dojo_Group_Member.objects") def test_dojo_group_no_permission(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -558,7 +558,7 @@ def test_dojo_group_no_permission(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user4) - 
@patch('dojo.models.Dojo_Group_Member.objects') + @patch("dojo.models.Dojo_Group_Member.objects") def test_dojo_group_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -568,7 +568,7 @@ def test_dojo_group_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user4) - @patch('dojo.models.Dojo_Group_Member.objects') + @patch("dojo.models.Dojo_Group_Member.objects") def test_dojo_group_member_no_permission(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -578,7 +578,7 @@ def test_dojo_group_member_no_permission(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user4) - @patch('dojo.models.Dojo_Group_Member.objects') + @patch("dojo.models.Dojo_Group_Member.objects") def test_dojo_group_member_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -588,7 +588,7 @@ def test_dojo_group_member_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user4) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_language_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -599,7 +599,7 @@ def test_user_has_permission_language_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_language_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -610,7 +610,7 @@ def test_user_has_permission_language_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_technology_no_permissions(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -621,7 +621,7 @@ def test_user_has_permission_technology_no_permissions(self, mock_foo): self.assertFalse(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('dojo.models.Product_Member.objects') + @patch("dojo.models.Product_Member.objects") def test_user_has_permission_technology_success(self, mock_foo): mock_foo.select_related.return_value = mock_foo mock_foo.select_related.return_value = mock_foo @@ -632,14 +632,14 @@ def test_user_has_permission_technology_success(self, mock_foo): self.assertTrue(result) mock_foo.filter.assert_called_with(user=self.user) - @patch('django.contrib.auth.models.User.has_perm') + @patch("django.contrib.auth.models.User.has_perm") def test_configuration_permission_true(self, mock): mock.return_value = True - self.assertTrue(user_has_configuration_permission(self.user, 'test')) - mock.assert_called_with('test') + self.assertTrue(user_has_configuration_permission(self.user, "test")) + mock.assert_called_with("test") - @patch('django.contrib.auth.models.User.has_perm') + @patch("django.contrib.auth.models.User.has_perm") def test_configuration_permission_false(self, mock): mock.return_value = False - self.assertFalse(user_has_configuration_permission(self.user, 'test')) - 
mock.assert_called_with('test') + self.assertFalse(user_has_configuration_permission(self.user, "test")) + mock.assert_called_with("test") diff --git a/unittests/authorization/test_authorization_decorators.py b/unittests/authorization/test_authorization_decorators.py index 7e5b5d04a3..dc973cac51 100644 --- a/unittests/authorization/test_authorization_decorators.py +++ b/unittests/authorization/test_authorization_decorators.py @@ -14,13 +14,13 @@ class TestAuthorizationDecorators(DojoTestCase): def setUp(self): - self.request = RequestFactory().get('/dummy') + self.request = RequestFactory().get("/dummy") self.user = User() self.request.user = self.user self.product_type = Product_Type() - self.decorated_func = user_is_authorized(Product_Type, Permissions.Product_Type_View, 'id', 'pk', Mock()) + self.decorated_func = user_is_authorized(Product_Type, Permissions.Product_Type_View, "id", "pk", Mock()) - @patch('dojo.authorization.authorization_decorators.get_object_or_404', side_effect=Http404()) + @patch("dojo.authorization.authorization_decorators.get_object_or_404", side_effect=Http404()) def test_object_does_not_exist(self, shortcuts_get_mock): with self.assertRaises(Http404): @@ -28,8 +28,8 @@ def test_object_does_not_exist(self, shortcuts_get_mock): shortcuts_get_mock.assert_called_once() - @patch('dojo.authorization.authorization_decorators.get_object_or_404') - @patch('dojo.authorization.authorization_decorators.user_has_permission_or_403', side_effect=PermissionDenied()) + @patch("dojo.authorization.authorization_decorators.get_object_or_404") + @patch("dojo.authorization.authorization_decorators.user_has_permission_or_403", side_effect=PermissionDenied()) def test_authorization_permission_denied(self, mock_user_has_permission, mock_shortcuts_get): mock_shortcuts_get.return_value = self.product_type @@ -41,7 +41,7 @@ def test_authorization_permission_denied(self, mock_user_has_permission, mock_sh mock_shortcuts_get.assert_called_once() mock_user_has_permission.assert_called_with(self.user, self.product_type, Permissions.Product_Type_View) - @patch('dojo.authorization.authorization_decorators.get_object_or_404') + @patch("dojo.authorization.authorization_decorators.get_object_or_404") def test_authorization_superuser(self, mock_shortcuts_get): mock_shortcuts_get.return_value = self.product_type @@ -51,8 +51,8 @@ def test_authorization_superuser(self, mock_shortcuts_get): mock_shortcuts_get.assert_called_once() - @patch('dojo.authorization.authorization_decorators.get_object_or_404') - @patch('dojo.authorization.authorization_decorators.user_has_permission_or_403') + @patch("dojo.authorization.authorization_decorators.get_object_or_404") + @patch("dojo.authorization.authorization_decorators.user_has_permission_or_403") def test_authorization_user_has_permission(self, mock_user_has_permission, mock_shortcuts_get): mock_shortcuts_get.return_value = self.product_type @@ -67,24 +67,24 @@ def test_authorization_user_has_permission(self, mock_user_has_permission, mock_ class TestConfigurationAuthorizationDecorators(DojoTestCase): def setUp(self): - self.request = RequestFactory().get('/dummy') + self.request = RequestFactory().get("/dummy") self.user = User() self.request.user = self.user - self.decorated_func = user_is_configuration_authorized('test', Mock()) + self.decorated_func = user_is_configuration_authorized("test", Mock()) - @patch('dojo.authorization.authorization_decorators.user_has_configuration_permission') + 
@patch("dojo.authorization.authorization_decorators.user_has_configuration_permission") def test_authorization_user_has_configuration_permission_ok(self, mock): mock.return_value = True self.decorated_func(self.request) - mock.assert_called_with(self.user, 'test') + mock.assert_called_with(self.user, "test") - @patch('dojo.authorization.authorization_decorators.user_has_configuration_permission') + @patch("dojo.authorization.authorization_decorators.user_has_configuration_permission") def test_authorization_user_has_configuration_permission_denied(self, mock): mock.return_value = False with self.assertRaises(PermissionDenied): self.decorated_func(self.request) - mock.assert_called_with(self.user, 'test') + mock.assert_called_with(self.user, "test") diff --git a/unittests/authorization/test_authorization_tags.py b/unittests/authorization/test_authorization_tags.py index 8b49c25a18..2e3de9ba31 100644 --- a/unittests/authorization/test_authorization_tags.py +++ b/unittests/authorization/test_authorization_tags.py @@ -22,28 +22,28 @@ def setUp(self): self.group = Group() self.permission_a = Permission() - self.permission_a.codename = 'a' + self.permission_a.codename = "a" self.permission_b = Permission() - self.permission_b.codename = 'b' + self.permission_b.codename = "b" self.permission_c = Permission() - self.permission_c.codename = 'c' + self.permission_c.codename = "c" - @patch('dojo.templatetags.authorization_tags.user_has_permission') + @patch("dojo.templatetags.authorization_tags.user_has_permission") def test_has_object_permission_no_permission(self, mock_has_permission): mock_has_permission.return_value = False - result = has_object_permission(self.product_type, 'Product_Type_View') + result = has_object_permission(self.product_type, "Product_Type_View") self.assertFalse(result) mock_has_permission.assert_called_with(None, self.product_type, Permissions.Product_Type_View) - @patch('dojo.templatetags.authorization_tags.user_has_permission') - @patch('crum.get_current_user') + @patch("dojo.templatetags.authorization_tags.user_has_permission") + @patch("crum.get_current_user") def test_has_object_permission_has_permission(self, mock_current_user, mock_has_permission): mock_has_permission.return_value = True mock_current_user.return_value = self.user - result = has_object_permission(self.product_type, 'Product_Type_View') + result = has_object_permission(self.product_type, "Product_Type_View") self.assertTrue(result) mock_has_permission.assert_called_with(self.user, self.product_type, Permissions.Product_Type_View) @@ -52,52 +52,52 @@ def test_has_object_permission_has_permission(self, mock_current_user, mock_has_ def test_has_object_permission_wrong_permission(self): with self.assertRaises(KeyError): - has_object_permission(self.product_type, 'Test') + has_object_permission(self.product_type, "Test") - @patch('dojo.templatetags.authorization_tags.configuration_permission') - @patch('crum.get_current_user') + @patch("dojo.templatetags.authorization_tags.configuration_permission") + @patch("crum.get_current_user") def test_has_configuration_permission(self, mock_current_user, mock_configuration_permission): mock_configuration_permission.return_value = True mock_current_user.return_value = self.user - result = has_configuration_permission('test', None) + result = has_configuration_permission("test", None) self.assertTrue(result) - mock_configuration_permission.assert_called_with(self.user, 'test') + mock_configuration_permission.assert_called_with(self.user, "test") 
mock_current_user.assert_called_once() - @patch('django.contrib.auth.models.User.user_permissions') + @patch("django.contrib.auth.models.User.user_permissions") def test_user_has_configuration_permission_without_group_not_found(self, mock): mock.all.return_value = [self.permission_a, self.permission_b, self.permission_c] - result = user_has_configuration_permission_without_group(self.user, 'test') + result = user_has_configuration_permission_without_group(self.user, "test") self.assertFalse(result) mock.all.assert_called_once() - @patch('django.contrib.auth.models.User.user_permissions') + @patch("django.contrib.auth.models.User.user_permissions") def test_user_has_configuration_permission_without_group_found(self, mock): mock.all.return_value = [self.permission_a, self.permission_b, self.permission_c] - result = user_has_configuration_permission_without_group(self.user, 'b') + result = user_has_configuration_permission_without_group(self.user, "b") self.assertTrue(result) mock.all.assert_called_once() - @patch('django.contrib.auth.models.Group.permissions') + @patch("django.contrib.auth.models.Group.permissions") def test_group_has_configuration_permission_not_found(self, mock): mock.all.return_value = [self.permission_a, self.permission_b, self.permission_c] - result = group_has_configuration_permission(self.group, 'test') + result = group_has_configuration_permission(self.group, "test") self.assertFalse(result) mock.all.assert_called_once() - @patch('django.contrib.auth.models.Group.permissions') + @patch("django.contrib.auth.models.Group.permissions") def test_group_has_configuration_permission(self, mock): mock.all.return_value = [self.permission_a, self.permission_b, self.permission_c] - result = group_has_configuration_permission(self.group, 'b') + result = group_has_configuration_permission(self.group, "b") self.assertTrue(result) mock.all.assert_called_once() diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py index 2c8cd2abfe..e4e1b510a9 100644 --- a/unittests/dojo_test_case.py +++ b/unittests/dojo_test_case.py @@ -44,7 +44,7 @@ def get_unit_tests_path(): class DojoTestUtilsMixin: def get_test_admin(self, *args, **kwargs): - return User.objects.get(username='admin') + return User.objects.get(username="admin") def system_settings( self, @@ -62,17 +62,17 @@ def system_settings( ss.enable_product_tag_inheritance = enable_product_tag_inehritance ss.save() - def create_product_type(self, name, *args, description='dummy description', **kwargs): + def create_product_type(self, name, *args, description="dummy description", **kwargs): product_type = Product_Type(name=name, description=description) product_type.save() return product_type - def create_sla_configuration(self, name, *args, description='dummy description', critical=7, high=30, medium=60, low=120, **kwargs): + def create_sla_configuration(self, name, *args, description="dummy description", critical=7, high=30, medium=60, low=120, **kwargs): sla_configuration = SLA_Configuration(name=name, description=description, critical=critical, high=high, medium=medium, low=low) sla_configuration.save() return sla_configuration - def create_product(self, name, *args, description='dummy description', prod_type=None, tags=[], **kwargs): + def create_product(self, name, *args, description="dummy description", prod_type=None, tags=[], **kwargs): if not prod_type: prod_type = Product_Type.objects.first() product = Product(name=name, description=description, prod_type=prod_type, tags=tags) @@ -81,13 +81,13 @@ def 
create_product(self, name, *args, description='dummy description', prod_type def patch_product_api(self, product_id, product_details): payload = copy.deepcopy(product_details) - response = self.client.patch(reverse('product-list') + f'{product_id}/', payload, format='json') + response = self.client.patch(reverse("product-list") + f"{product_id}/", payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data def patch_endpoint_api(self, endpoint_id, endpoint_details): payload = copy.deepcopy(endpoint_details) - response = self.client.patch(reverse('endpoint-list') + f'{endpoint_id}/', payload, format='json') + response = self.client.patch(reverse("endpoint-list") + f"{endpoint_id}/", payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data @@ -105,7 +105,7 @@ def get_test(self, id): return Test.objects.get(id=id) def get_test_api(self, test_id): - response = self.client.patch(reverse('engagement-list') + f'{test_id}/') + response = self.client.patch(reverse("engagement-list") + f"{test_id}/") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data @@ -113,7 +113,7 @@ def get_engagement(self, id): return Engagement.objects.get(id=id) def get_engagement_api(self, engagement_id): - response = self.client.patch(reverse('engagement-list') + f'{engagement_id}/') + response = self.client.patch(reverse("engagement-list") + f"{engagement_id}/") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data @@ -137,7 +137,7 @@ def model_to_dict(self, instance): return data def log_model_instance(self, instance): - logger.debug('model instance: %s', pprint.pprint(self.model_to_dict(instance))) + logger.debug("model instance: %s", pprint.pprint(self.model_to_dict(instance))) def log_model_instances(self, instances): for instance in instances: @@ -166,28 +166,28 @@ def db_dojo_meta_count(self): def get_new_product_with_jira_project_data(self): return { - 'name': 'new product', - 'description': 'new description', - 'prod_type': 1, - 'jira-project-form-project_key': 'IFFFNEW', - 'jira-project-form-jira_instance': 2, - 'jira-project-form-enable_engagement_epic_mapping': 'on', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-push_notes': 'on', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', - 'sla_configuration': 1, + "name": "new product", + "description": "new description", + "prod_type": 1, + "jira-project-form-project_key": "IFFFNEW", + "jira-project-form-jira_instance": 2, + "jira-project-form-enable_engagement_epic_mapping": "on", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-push_notes": "on", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", + "sla_configuration": 1, } def get_new_product_without_jira_project_data(self): return { - 'name': 'new product', - 'description': 'new description', - 'prod_type': 1, - 'sla_configuration': 1, + "name": "new product", + "description": "new description", + "prod_type": 1, + "sla_configuration": 1, # A value is set by default by the model, so we need to add it here as well - 'jira-project-form-epic_issue_type_name': 'Epic', + "jira-project-form-epic_issue_type_name": "Epic", # 'project_key': 'IFFF', # 'jira_instance': 2, # 'enable_engagement_epic_mapping': 'on', @@ -197,45 +197,45 @@ def get_new_product_without_jira_project_data(self): 
def get_product_with_jira_project_data(self, product): return { - 'name': product.name, - 'description': product.description, - 'prod_type': product.prod_type.id, - 'jira-project-form-project_key': 'IFFF', - 'jira-project-form-jira_instance': 2, - 'jira-project-form-enable_engagement_epic_mapping': 'on', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-push_notes': 'on', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', - 'sla_configuration': 1, + "name": product.name, + "description": product.description, + "prod_type": product.prod_type.id, + "jira-project-form-project_key": "IFFF", + "jira-project-form-jira_instance": 2, + "jira-project-form-enable_engagement_epic_mapping": "on", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-push_notes": "on", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", + "sla_configuration": 1, } def get_product_with_jira_project_data2(self, product): return { - 'name': product.name, - 'description': product.description, - 'prod_type': product.prod_type.id, - 'jira-project-form-project_key': 'IFFF2', - 'jira-project-form-jira_instance': 2, - 'jira-project-form-enable_engagement_epic_mapping': 'on', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-push_notes': 'on', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', - 'sla_configuration': 1, + "name": product.name, + "description": product.description, + "prod_type": product.prod_type.id, + "jira-project-form-project_key": "IFFF2", + "jira-project-form-jira_instance": 2, + "jira-project-form-enable_engagement_epic_mapping": "on", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-push_notes": "on", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", + "sla_configuration": 1, } def get_product_with_empty_jira_project_data(self, product): return { - 'name': product.name, - 'description': product.description, - 'prod_type': product.prod_type.id, - 'sla_configuration': 1, + "name": product.name, + "description": product.description, + "prod_type": product.prod_type.id, + "sla_configuration": 1, # A value is set by default by the model, so we need to add it here as well - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-custom_fields': 'null', + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-custom_fields": "null", # 'project_key': 'IFFF', # 'jira_instance': 2, # 'enable_engagement_epic_mapping': 'on', @@ -244,18 +244,18 @@ def get_product_with_empty_jira_project_data(self, product): } def get_expected_redirect_product(self, product): - return '/product/%i' % product.id + return "/product/%i" % product.id def add_product_jira(self, data, expect_redirect_to=None, expect_200=False): - response = self.client.get(reverse('new_product')) + response = self.client.get(reverse("new_product")) # logger.debug('before: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) if not expect_redirect_to and not expect_200: - expect_redirect_to = '/product/%i' + expect_redirect_to = "/product/%i" - response = self.client.post(reverse('new_product'), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("new_product"), urlencode(data), content_type="application/x-www-form-urlencoded") # logger.debug('after: 
JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) @@ -267,12 +267,12 @@ def add_product_jira(self, data, expect_redirect_to=None, expect_200=False): self.assertEqual(response.status_code, 302) # print('url: ' + response.url) try: - product = Product.objects.get(id=response.url.split('/')[-1]) + product = Product.objects.get(id=response.url.split("/")[-1]) except: try: - product = Product.objects.get(id=response.url.split('/')[-2]) + product = Product.objects.get(id=response.url.split("/")[-2]) except: - raise ValueError('error parsing id from redirect uri: ' + response.url) + raise ValueError("error parsing id from redirect uri: " + response.url) self.assertEqual(response.url, (expect_redirect_to % product.id)) else: self.assertEqual(response.status_code, 200) @@ -300,16 +300,16 @@ def add_product_with_jira_project(self, expected_delta_jira_project_db=0, expect return self.add_product_jira_with_data(self.get_new_product_with_jira_project_data(), expected_delta_jira_project_db, expect_redirect_to=expect_redirect_to, expect_200=expect_200) def add_product_without_jira_project(self, expected_delta_jira_project_db=0, expect_redirect_to=None, expect_200=False): - logger.debug('adding product without jira project') + logger.debug("adding product without jira project") return self.add_product_jira_with_data(self.get_new_product_without_jira_project_data(), expected_delta_jira_project_db, expect_redirect_to=expect_redirect_to, expect_200=expect_200) def edit_product_jira(self, product, data, expect_redirect_to=None, expect_200=False): - response = self.client.get(reverse('edit_product', args=(product.id, ))) + response = self.client.get(reverse("edit_product", args=(product.id, ))) # logger.debug('before: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) - response = self.client.post(reverse('edit_product', args=(product.id, )), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("edit_product", args=(product.id, )), urlencode(data), content_type="application/x-www-form-urlencoded") # self.log_model_instance(product) # logger.debug('after: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) @@ -343,7 +343,7 @@ def edit_jira_project_for_product2(self, product, expected_delta_jira_project_db return self.edit_jira_project_for_product_with_data(product, self.get_product_with_jira_project_data2(product), expected_delta_jira_project_db, expect_redirect_to=expect_redirect_to, expect_200=expect_200) def empty_jira_project_for_product(self, product, expected_delta_jira_project_db=0, expect_redirect_to=None, expect_200=False): - logger.debug('empty jira project for product') + logger.debug("empty jira project for product") jira_project_count_before = self.db_jira_project_count() # print('before: ' + str(jira_project_count_before)) @@ -376,7 +376,7 @@ def get_jira_issue_updated_map(self, test_id): findings = Test.objects.get(id=test_id).finding_set.all() updated_map = {} for finding in findings: - logger.debug('finding!!!') + logger.debug("finding!!!") updated = jira_helper.get_jira_updated(finding) updated_map[finding.id] = updated return updated_map @@ -384,13 +384,13 @@ def get_jira_issue_updated_map(self, test_id): def assert_jira_updated_map_unchanged(self, test_id, updated_map): findings = Test.objects.get(id=test_id).finding_set.all() for finding in findings: - logger.debug('finding!') + logger.debug("finding!") self.assertEqual(jira_helper.get_jira_updated(finding), 
updated_map[finding.id]) def assert_jira_updated_map_changed(self, test_id, updated_map): findings = Test.objects.get(id=test_id).finding_set.all() for finding in findings: - logger.debug('finding!') + logger.debug("finding!") self.assertNotEqual(jira_helper.get_jira_updated(finding), updated_map[finding.id]) # Toggle epic mapping on jira product @@ -406,9 +406,9 @@ def get_epic_issues(self, engagement): epic_id = jira_helper.get_jira_issue_key(engagement) response = {} if epic_id: - url = instance.url.strip('/') + '/rest/agile/1.0/epic/' + epic_id + '/issue' + url = instance.url.strip("/") + "/rest/agile/1.0/epic/" + epic_id + "/issue" response = jira._session.get(url).json() - return response.get('issues', []) + return response.get("issues", []) # Determine whether an issue is in an epic def assert_jira_issue_in_epic(self, finding, engagement, issue_in_epic=True): @@ -416,9 +416,9 @@ def assert_jira_issue_in_epic(self, finding, engagement, issue_in_epic=True): jira = jira_helper.get_jira_connection(instance) epic_id = jira_helper.get_jira_issue_key(engagement) issue_id = jira_helper.get_jira_issue_key(finding) - epic_link_field = 'customfield_' + str(get_custom_field(jira, 'Epic Link')) - url = instance.url.strip('/') + '/rest/api/latest/issue/' + issue_id - response = jira._session.get(url).json().get('fields', {}) + epic_link_field = "customfield_" + str(get_custom_field(jira, "Epic Link")) + url = instance.url.strip("/") + "/rest/api/latest/issue/" + issue_id + response = jira._session.get(url).json().get("fields", {}) epic_link = response.get(epic_link_field, None) if epic_id is None and epic_link is None or issue_in_epic: self.assertEqual(epic_id, epic_link) @@ -429,7 +429,7 @@ def assert_jira_updated_change(self, old, new): self.assertNotEqual(old, new) def get_latest_model(self, model): - return model.objects.order_by('id').last() + return model.objects.order_by("id").last() class DojoTestCase(TestCase, DojoTestUtilsMixin): @@ -451,39 +451,45 @@ def login_as_admin(self): testuser = self.get_test_admin() token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) def import_scan(self, payload, expected_http_status_code): - logger.debug('import_scan payload %s', payload) - response = self.client.post(reverse('importscan-list'), payload) + logger.debug("import_scan payload %s", payload) + response = self.client.post(reverse("importscan-list"), payload) self.assertEqual(expected_http_status_code, response.status_code, response.content[:1000]) return json.loads(response.content) def reimport_scan(self, payload, expected_http_status_code): - logger.debug('reimport_scan payload %s', payload) - response = self.client.post(reverse('reimportscan-list'), payload) + logger.debug("reimport_scan payload %s", payload) + response = self.client.post(reverse("reimportscan-list"), payload) self.assertEqual(expected_http_status_code, response.status_code, response.content[:1000]) return json.loads(response.content) def endpoint_meta_import_scan(self, payload, expected_http_status_code): - logger.debug('endpoint_meta_import_scan payload %s', payload) - response = self.client.post(reverse('endpointmetaimport-list'), payload) - print(response.content) + logger.debug("endpoint_meta_import_scan payload %s", payload) + response = self.client.post(reverse("endpointmetaimport-list"), payload) + # print(response.content) self.assertEqual(expected_http_status_code, 
response.status_code, response.content[:1000]) return json.loads(response.content) def get_test_api(self, test_id): - response = self.client.get(reverse('test-list') + f'{test_id}/', format='json') + response = self.client.get(reverse("test-list") + f"{test_id}/", format="json") self.assertEqual(200, response.status_code, response.content[:1000]) # print('test.content: ', response.content) return json.loads(response.content) - def import_scan_with_params(self, filename, scan_type='ZAP Scan', engagement=1, minimum_severity='Low', active=True, verified=False, + def get_results_by_id(self, results: list, object_id: int) -> dict | None: + for item in results: + if item.get("id") == object_id: + return item + return None + + def import_scan_with_params(self, filename, scan_type="ZAP Scan", engagement=1, minimum_severity="Low", active=True, verified=False, push_to_jira=None, endpoint_to_add=None, tags=None, close_old_findings=False, group_by=None, engagement_name=None, product_name=None, product_type_name=None, auto_create_context=None, expected_http_status_code=201, test_title=None, scan_date=None, service=None, forceActive=True, forceVerified=True): - with open(get_unit_tests_path() + '/' + filename) as testfile: + with open(get_unit_tests_path() + "/" + filename) as testfile: payload = { "minimum_severity": minimum_severity, "active": active, @@ -495,47 +501,47 @@ def import_scan_with_params(self, filename, scan_type='ZAP Scan', engagement=1, } if engagement: - payload['engagement'] = engagement + payload["engagement"] = engagement if engagement_name: - payload['engagement_name'] = engagement_name + payload["engagement_name"] = engagement_name if product_name: - payload['product_name'] = product_name + payload["product_name"] = product_name if product_type_name: - payload['product_type_name'] = product_type_name + payload["product_type_name"] = product_type_name if auto_create_context: - payload['auto_create_context'] = auto_create_context + payload["auto_create_context"] = auto_create_context if push_to_jira is not None: - payload['push_to_jira'] = push_to_jira + payload["push_to_jira"] = push_to_jira if endpoint_to_add is not None: - payload['endpoint_to_add'] = endpoint_to_add + payload["endpoint_to_add"] = endpoint_to_add if tags is not None: - payload['tags'] = tags + payload["tags"] = tags if group_by is not None: - payload['group_by'] = group_by + payload["group_by"] = group_by if test_title is not None: - payload['test_title'] = test_title + payload["test_title"] = test_title if scan_date is not None: - payload['scan_date'] = scan_date + payload["scan_date"] = scan_date if service is not None: - payload['service'] = service + payload["service"] = service return self.import_scan(payload, expected_http_status_code) - def reimport_scan_with_params(self, test_id, filename, scan_type='ZAP Scan', engagement=1, minimum_severity='Low', active=True, verified=False, push_to_jira=None, + def reimport_scan_with_params(self, test_id, filename, scan_type="ZAP Scan", engagement=1, minimum_severity="Low", active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, group_by=None, engagement_name=None, scan_date=None, product_name=None, product_type_name=None, auto_create_context=None, expected_http_status_code=201, test_title=None): - with open(get_unit_tests_path() + '/' + filename) as testfile: + with open(get_unit_tests_path() + "/" + filename) as testfile: payload = { "minimum_severity": minimum_severity, "active": active, @@ -547,44 +553,44 @@ def 
reimport_scan_with_params(self, test_id, filename, scan_type='ZAP Scan', eng } if test_id is not None: - payload['test'] = test_id + payload["test"] = test_id if engagement: - payload['engagement'] = engagement + payload["engagement"] = engagement if engagement_name: - payload['engagement_name'] = engagement_name + payload["engagement_name"] = engagement_name if product_name: - payload['product_name'] = product_name + payload["product_name"] = product_name if product_type_name: - payload['product_type_name'] = product_type_name + payload["product_type_name"] = product_type_name if auto_create_context: - payload['auto_create_context'] = auto_create_context + payload["auto_create_context"] = auto_create_context if push_to_jira is not None: - payload['push_to_jira'] = push_to_jira + payload["push_to_jira"] = push_to_jira if tags is not None: - payload['tags'] = tags + payload["tags"] = tags if group_by is not None: - payload['group_by'] = group_by + payload["group_by"] = group_by if test_title is not None: - payload['test_title'] = test_title + payload["test_title"] = test_title if scan_date is not None: - payload['scan_date'] = scan_date + payload["scan_date"] = scan_date return self.reimport_scan(payload, expected_http_status_code=expected_http_status_code) def endpoint_meta_import_scan_with_params(self, filename, product=1, product_name=None, create_endpoints=True, create_tags=True, create_dojo_meta=True, expected_http_status_code=201): - with open(get_unit_tests_path() + '/' + filename) as testfile: + with open(get_unit_tests_path() + "/" + filename) as testfile: payload = { "create_endpoints": create_endpoints, "create_tags": create_tags, @@ -593,99 +599,99 @@ def endpoint_meta_import_scan_with_params(self, filename, product=1, product_nam } if product: - payload['product'] = product + payload["product"] = product if product_name: - payload['product_name'] = product_name + payload["product_name"] = product_name return self.endpoint_meta_import_scan(payload, expected_http_status_code) def get_finding_api(self, finding_id): - response = self.client.get(reverse('finding-list') + f'{finding_id}/', format='json') + response = self.client.get(reverse("finding-list") + f"{finding_id}/", format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data def post_new_finding_api(self, finding_details, push_to_jira=None): payload = copy.deepcopy(finding_details) if push_to_jira is not None: - payload['push_to_jira'] = push_to_jira + payload["push_to_jira"] = push_to_jira # logger.debug('posting new finding push_to_jira: %s', payload.get('push_to_jira', None)) - response = self.client.post(reverse('finding-list'), payload, format='json') + response = self.client.post(reverse("finding-list"), payload, format="json") self.assertEqual(201, response.status_code, response.content[:1000]) return response.data def put_finding_api(self, finding_id, finding_details, push_to_jira=None): payload = copy.deepcopy(finding_details) if push_to_jira is not None: - payload['push_to_jira'] = push_to_jira + payload["push_to_jira"] = push_to_jira - response = self.client.put(reverse('finding-list') + f'{finding_id}/', payload, format='json') + response = self.client.put(reverse("finding-list") + f"{finding_id}/", payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data def delete_finding_api(self, finding_id): - response = self.client.delete(reverse('finding-list') + f'{finding_id}/') + response = 
self.client.delete(reverse("finding-list") + f"{finding_id}/") self.assertEqual(204, response.status_code, response.content[:1000]) return response.data def patch_finding_api(self, finding_id, finding_details, push_to_jira=None): payload = copy.deepcopy(finding_details) if push_to_jira is not None: - payload['push_to_jira'] = push_to_jira + payload["push_to_jira"] = push_to_jira - response = self.client.patch(reverse('finding-list') + f'{finding_id}/', payload, format='json') + response = self.client.patch(reverse("finding-list") + f"{finding_id}/", payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return response.data def assert_finding_count_json(self, count, findings_content_json): - self.assertEqual(findings_content_json['count'], count) + self.assertEqual(findings_content_json["count"], count) def get_test_findings_api(self, test_id, active=None, verified=None, is_mitigated=None, component_name=None, component_version=None): - payload = {'test': test_id} + payload = {"test": test_id} if active is not None: - payload['active'] = active + payload["active"] = active if verified is not None: - payload['verified'] = verified + payload["verified"] = verified if is_mitigated is not None: - payload['is_mitigated'] = is_mitigated + payload["is_mitigated"] = is_mitigated if component_name is not None: - payload['component_name'] = component_name + payload["component_name"] = component_name if component_version is not None: - payload['component_version'] = component_version + payload["component_version"] = component_version - response = self.client.get(reverse('finding-list'), payload, format='json') + response = self.client.get(reverse("finding-list"), payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) # print('findings.content: ', response.content) return json.loads(response.content) def get_product_endpoints_api(self, product_id, host=None): - payload = {'product': product_id} + payload = {"product": product_id} if host is not None: - payload['host'] = host + payload["host"] = host - response = self.client.get(reverse('endpoint-list'), payload, format='json') + response = self.client.get(reverse("endpoint-list"), payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return json.loads(response.content) def get_endpoints_meta_api(self, endpoint_id, name=None): - payload = {'endpoint': endpoint_id} + payload = {"endpoint": endpoint_id} if name is not None: - payload['name'] = name + payload["name"] = name - response = self.client.get(reverse('metadata-list'), payload, format='json') + response = self.client.get(reverse("metadata-list"), payload, format="json") self.assertEqual(200, response.status_code, response.content[:1000]) return json.loads(response.content) def do_finding_tags_api(self, http_method, finding_id, tags=None): data = None if tags: - data = {'tags': tags} + data = {"tags": tags} # print('data:' + str(data)) - response = http_method(reverse('finding-tags', args=(finding_id,)), data, format='json') + response = http_method(reverse("finding-tags", args=(finding_id,)), data, format="json") # print(vars(response)) self.assertEqual(200, response.status_code, response.content[:1000]) return response @@ -696,7 +702,7 @@ def get_finding_tags_api(self, finding_id): return response.data def get_finding_api_filter_tags(self, tags): - response = self.client.get(reverse('finding-list') + f'?tags={tags}', format='json') + response = 
self.client.get(reverse("finding-list") + f"?tags={tags}", format="json") self.assertEqual(200, response.status_code, response.content[:1000]) # print(response.data) return response.data @@ -708,9 +714,9 @@ def post_finding_tags_api(self, finding_id, tags): def do_finding_remove_tags_api(self, http_method, finding_id, tags=None, expected_response_status_code=204): data = None if tags: - data = {'tags': tags} + data = {"tags": tags} - response = http_method(reverse('finding-remove-tags', args=(finding_id,)), data, format='json') + response = http_method(reverse("finding-remove-tags", args=(finding_id,)), data, format="json") # print(response) self.assertEqual(expected_response_status_code, response.status_code, response.content[:1000]) return response.data @@ -726,11 +732,11 @@ def patch_finding_remove_tags_api(self, finding_id, tags, *args, **kwargs): def do_finding_notes_api(self, http_method, finding_id, note=None): data = None if note: - data = {'entry': note} + data = {"entry": note} # print('data:' + str(data)) - response = http_method(reverse('finding-notes', args=(finding_id,)), data, format='json') + response = http_method(reverse("finding-notes", args=(finding_id,)), data, format="json") # print(vars(response)) self.assertEqual(201, response.status_code, response.content[:1000]) return response @@ -740,29 +746,25 @@ def post_finding_notes_api(self, finding_id, note): return response.data def log_finding_summary_json_api(self, findings_content_json=None): - print('summary') - print(findings_content_json) - print(findings_content_json['count']) + logger.debug("summary") + logger.debug(findings_content_json) + logger.debug(findings_content_json["count"] if findings_content_json else None) - if not findings_content_json or findings_content_json['count'] == 0: - logger.debug('no findings') + if not findings_content_json or findings_content_json["count"] == 0: + logger.debug("no findings") else: - for finding in findings_content_json['results']: - print(str(finding['id']) + ': ' + finding['title'][:5] + ':' + finding['severity'] + ': active: ' + str(finding['active']) + ': verified: ' + str(finding['verified']) - + ': is_mitigated: ' + str(finding['is_mitigated']) + ": notes: " + str([n['id'] for n in finding['notes']]) - + ": endpoints: " + str(finding['endpoints'])) - - logger.debug(str(finding['id']) + ': ' + finding['title'][:5] + ':' + finding['severity'] + ': active: ' + str(finding['active']) + ': verified: ' + str(finding['verified']) - + ': is_mitigated: ' + str(finding['is_mitigated']) + ": notes: " + str([n['id'] for n in finding['notes']]) - + ": endpoints: " + str(finding['endpoints'])) + for finding in findings_content_json["results"]: + logger.debug(str(finding["id"]) + ": " + finding["title"][:5] + ":" + finding["severity"] + ": active: " + str(finding["active"]) + ": verified: " + str(finding["verified"]) + + ": is_mitigated: " + str(finding["is_mitigated"]) + ": notes: " + str([n["id"] for n in finding["notes"]]) + + ": endpoints: " + str(finding["endpoints"])) - logger.debug('endpoints') + logger.debug("endpoints") for ep in Endpoint.objects.all(): - logger.debug(str(ep.id) + ': ' + str(ep)) + logger.debug(str(ep.id) + ": " + str(ep)) - logger.debug('endpoint statuses') + logger.debug("endpoint statuses") for eps in Endpoint_Status.objects.all(): - logger.debug(str(eps.id) + ': ' + str(eps.endpoint) + ': ' + str(eps.endpoint.id) + ': ' + str(eps.mitigated)) + logger.debug(str(eps.id) + ": " + str(eps.endpoint) + ": " + str(eps.endpoint.id) + ": " + str(eps.mitigated)) class 
DojoVCRTestCase(DojoTestCase, VCRTestCase): @@ -773,17 +775,17 @@ def __init__(self, *args, **kwargs): # filters headers doesn't seem to work for cookies, so use callbacks to filter cookies from being recorded # https://github.com/kevin1024/vcrpy/issues/569 def before_record_request(self, request): - if 'Cookie' in request.headers: - del request.headers['Cookie'] - if 'cookie' in request.headers: - del request.headers['cookie'] + if "Cookie" in request.headers: + del request.headers["Cookie"] + if "cookie" in request.headers: + del request.headers["cookie"] return request def before_record_response(self, response): - if 'Set-Cookie' in response['headers']: - del response['headers']['Set-Cookie'] - if 'set-cookie' in response['headers']: - del response['headers']['set-cookie'] + if "Set-Cookie" in response["headers"]: + del response["headers"]["Set-Cookie"] + if "set-cookie" in response["headers"]: + del response["headers"]["set-cookie"] return response diff --git a/unittests/test_adminsite.py b/unittests/test_adminsite.py index 3f8bc8ce69..128727fca0 100644 --- a/unittests/test_adminsite.py +++ b/unittests/test_adminsite.py @@ -5,13 +5,13 @@ class AdminSite(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def test_is_model_defined(self): for subclass in django.apps.apps.get_models(): if subclass._meta.proxy: continue - if subclass.__module__ == 'dojo.models': + if subclass.__module__ == "dojo.models": if not ((subclass.__name__[:9] == "Tagulous_") and (subclass.__name__[-5:] == "_tags")): with self.subTest(type="base", subclass=subclass): self.assertIn(subclass, admin.site._registry.keys(), f"{subclass} is not registered in 'admin.site' in models.py") diff --git a/unittests/test_api_sonarqube_updater.py b/unittests/test_api_sonarqube_updater.py index 42f3f65731..52ee6ed8f9 100644 --- a/unittests/test_api_sonarqube_updater.py +++ b/unittests/test_api_sonarqube_updater.py @@ -7,91 +7,91 @@ class TestSonarQubeApiUpdater(DojoTestCase): def setUp(self): - tool_type = Tool_Type.objects.create(name='SonarQube') - Tool_Configuration.objects.create(name='SonarQube', tool_type=tool_type, authentication_type="API") + tool_type = Tool_Type.objects.create(name="SonarQube") + Tool_Configuration.objects.create(name="SonarQube", tool_type=tool_type, authentication_type="API") self.updater = SonarQubeApiUpdater() def test_transitions_for_sonarqube_from_open_1(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('OPEN', 'CONFIRMED'), - ['confirm'], + self.updater.get_sonarqube_required_transitions_for("OPEN", "CONFIRMED"), + ["confirm"], ) def test_transitions_for_sonarqube_from_open_2(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('OPEN', 'RESOLVED / FIXED'), - ['resolve'], + self.updater.get_sonarqube_required_transitions_for("OPEN", "RESOLVED / FIXED"), + ["resolve"], ) def test_transitions_for_sonarqube_from_reopened_1(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('REOPENED', 'RESOLVED / FIXED'), - ['resolve'], + self.updater.get_sonarqube_required_transitions_for("REOPENED", "RESOLVED / FIXED"), + ["resolve"], ) def test_transitions_for_sonarqube_from_reopened_2(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('REOPENED', 'CONFIRMED'), - ['confirm'], + self.updater.get_sonarqube_required_transitions_for("REOPENED", "CONFIRMED"), + ["confirm"], ) def test_transitions_for_sonarqube_from_resolved_1(self): self.assertEqual( - 
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'CONFIRMED'), - ['reopen', 'confirm'], + self.updater.get_sonarqube_required_transitions_for("RESOLVED / FIXED", "CONFIRMED"), + ["reopen", "confirm"], ) def test_transitions_for_sonarqube_from_resolved_2(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / FALSE-POSITIVE'), - ['reopen', 'falsepositive'], + self.updater.get_sonarqube_required_transitions_for("RESOLVED / FIXED", "RESOLVED / FALSE-POSITIVE"), + ["reopen", "falsepositive"], ) def test_transitions_for_sonarqube_from_resolved_3(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / WONTFIX'), - ['reopen', 'wontfix'], + self.updater.get_sonarqube_required_transitions_for("RESOLVED / FIXED", "RESOLVED / WONTFIX"), + ["reopen", "wontfix"], ) def test_transitions_for_sonarqube_fake_target_origin(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('FAKE_STATUS', 'RESOLVED / FIXED'), + self.updater.get_sonarqube_required_transitions_for("FAKE_STATUS", "RESOLVED / FIXED"), None, ) def test_transitions_for_sonarqube_fake_target_status(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'FAKE_STATUS'), + self.updater.get_sonarqube_required_transitions_for("RESOLVED / FIXED", "FAKE_STATUS"), None, ) def test_transitions_for_sonarqube_from_confirmed_1(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'REOPENED'), - ['unconfirm'], + self.updater.get_sonarqube_required_transitions_for("CONFIRMED", "REOPENED"), + ["unconfirm"], ) def test_transitions_for_sonarqube_from_confirmed_2(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'RESOLVED / FIXED'), - ['resolve'], + self.updater.get_sonarqube_required_transitions_for("CONFIRMED", "RESOLVED / FIXED"), + ["resolve"], ) def test_transitions_for_open_reopen_status_1(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('OPEN', 'REOPENED'), + self.updater.get_sonarqube_required_transitions_for("OPEN", "REOPENED"), None, ) def test_transitions_for_open_reopen_status_2(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('REOPENED', 'OPEN'), + self.updater.get_sonarqube_required_transitions_for("REOPENED", "OPEN"), None, ) def test_transitions_for_open_reopen_status_3(self): self.assertEqual( - self.updater.get_sonarqube_required_transitions_for('REOPENED', 'REOPENED'), + self.updater.get_sonarqube_required_transitions_for("REOPENED", "REOPENED"), None, ) diff --git a/unittests/test_apiv2_endpoint.py b/unittests/test_apiv2_endpoint.py index b0900f9fe3..25c414587b 100644 --- a/unittests/test_apiv2_endpoint.py +++ b/unittests/test_apiv2_endpoint.py @@ -7,60 +7,60 @@ class EndpointTest(APITestCase): """ Test the Endpoint APIv2 endpoint. 
""" - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) def test_endpoint_missing_host_product(self): - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "host": "FOO.BAR", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Attribute 'product' is required", r.content.decode("utf-8")) - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "product": 1, - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Host must not be empty", r.content.decode("utf-8")) def test_endpoint_add_existing(self): - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "product": 1, "host": "FOO.BAR", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "product": 1, "host": "FOO.BAR", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) - self.assertIn('It appears as though an endpoint with this data already ' - 'exists for this product.', r.content.decode("utf-8")) + self.assertIn("It appears as though an endpoint with this data already " + "exists for this product.", r.content.decode("utf-8")) - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "product": 1, "host": "foo.bar", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) - self.assertIn('It appears as though an endpoint with this data already ' - 'exists for this product.', r.content.decode("utf-8")) + self.assertIn("It appears as though an endpoint with this data already " + "exists for this product.", r.content.decode("utf-8")) def test_endpoint_change_product(self): - r = self.client.post(reverse('endpoint-list'), { + r = self.client.post(reverse("endpoint-list"), { "product": 1, "host": "product1", - }, format='json') - eid = r.json()['id'] + }, format="json") + eid = r.json()["id"] self.assertEqual(r.status_code, 201, r.content[:1000]) - r = self.client.patch(reverse('endpoint-detail', args=(eid,)), { + r = self.client.patch(reverse("endpoint-detail", args=(eid,)), { "product": 2, - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Change of product is not possible", r.content.decode("utf-8")) @@ -69,11 +69,11 @@ def test_endpoint_remove_host(self): "product": 1, "host": "host1", } - r = self.client.post(reverse('endpoint-list'), payload, format='json') - eid = r.json()['id'] + r = self.client.post(reverse("endpoint-list"), payload, format="json") + eid = r.json()["id"] self.assertEqual(r.status_code, 201, r.content[:1000]) - r = self.client.patch(reverse('endpoint-detail', args=(eid,)), { + r = self.client.patch(reverse("endpoint-detail", args=(eid,)), { "host": None, - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Host must not be empty", r.content.decode("utf-8")) diff --git 
a/unittests/test_apiv2_limit_reqresp.py b/unittests/test_apiv2_limit_reqresp.py index 06e5ad2f4c..2021f2643c 100644 --- a/unittests/test_apiv2_limit_reqresp.py +++ b/unittests/test_apiv2_limit_reqresp.py @@ -9,27 +9,27 @@ class APILimitReqRespPairsTest(APITestCase): Test the MAX_REQRESP_FROM_API setting for /api/v2/findings/{id}/request_response/ """ - fixtures = ['unit_limit_reqresp.json'] + fixtures = ["unit_limit_reqresp.json"] def setUp(self: object): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) def assertReqrespValue(self: object, value: int, expect_notequal: bool = False) -> None: settings.MAX_REQRESP_FROM_API = value - r = self.client.get(reverse('finding-list'), format='json') - results = r.json()['results'] + r = self.client.get(reverse("finding-list"), format="json") + results = r.json()["results"] # get finding with id 8 finding = self.getFinding(8, results) if expect_notequal: - self.assertNotEqual(len(finding['request_response']['req_resp']), value) + self.assertNotEqual(len(finding["request_response"]["req_resp"]), value) else: - self.assertEqual(len(finding['request_response']['req_resp']), value) + self.assertEqual(len(finding["request_response"]["req_resp"]), value) def getFinding(self: object, idn: int, results: list) -> dict: for result in results: - if result['id'] == idn: + if result["id"] == idn: return result return None diff --git a/unittests/test_apiv2_metadata.py b/unittests/test_apiv2_metadata.py index 177b3f0852..21f0defac4 100644 --- a/unittests/test_apiv2_metadata.py +++ b/unittests/test_apiv2_metadata.py @@ -7,111 +7,111 @@ class MetadataTest(APITestCase): """ Test the metadata APIv2 endpoint. 
""" - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) r = self.create( product=1, - name='foo', - value='bar', + name="foo", + value="bar", ) self.assertEqual(r.status_code, 201) - self.mid = r.json()['id'] + self.mid = r.json()["id"] def create(self, **kwargs): - return self.client.post(reverse('metadata-list'), kwargs, format='json') + return self.client.post(reverse("metadata-list"), kwargs, format="json") def test_docs(self): - r = self.client.get(reverse('swagger-ui_oa3')) + r = self.client.get(reverse("swagger-ui_oa3")) self.assertEqual(r.status_code, 200) def test_query_metadata(self): - r = self.client.get(reverse('metadata-detail', args=(self.mid,))) + r = self.client.get(reverse("metadata-detail", args=(self.mid,))) self.assertEqual(r.status_code, 200) def test_query_product_endpoint(self): - r = self.client.get(reverse('product-detail', args=(1,))) - self.assertIn({'name': 'foo', 'value': 'bar'}, r.json()['product_meta']) + r = self.client.get(reverse("product-detail", args=(1,))) + self.assertIn({"name": "foo", "value": "bar"}, r.json()["product_meta"]) def test_delete(self): - r = self.client.delete(reverse('metadata-detail', args=(self.mid,))) + r = self.client.delete(reverse("metadata-detail", args=(self.mid,))) self.assertEqual(r.status_code, 204) - r = self.client.get(reverse('metadata-detail', args=(self.mid,))) + r = self.client.get(reverse("metadata-detail", args=(self.mid,))) self.assertEqual(r.status_code, 404) - r = self.client.get(reverse('product-detail', args=(1,))) - self.assertNotIn({'name': 'foo', 'value': 'bar'}, r.json()['product_meta']) + r = self.client.get(reverse("product-detail", args=(1,))) + self.assertNotIn({"name": "foo", "value": "bar"}, r.json()["product_meta"]) def test_no_product_or_endpoint_as_parameter(self): - r = self.create(name='foo', value='bar') + r = self.create(name="foo", value="bar") self.assertEqual(r.status_code, 400) def test_product_and_endpoint_as_parameters(self): - r = self.create(product=1, endpoint=1, name='foo', value='bar') + r = self.create(product=1, endpoint=1, name="foo", value="bar") self.assertEqual(r.status_code, 400) def test_invalid_product(self): - r = self.create(product=99999, name='quux', value='bar') + r = self.create(product=99999, name="quux", value="bar") self.assertEqual(r.status_code, 404) - r = self.client.get(reverse('metadata-list')) - for x in r.json()['results']: - self.assertFalse(x['name'] == 'quux' and x['value'] == 'bar', x) + r = self.client.get(reverse("metadata-list")) + for x in r.json()["results"]: + self.assertFalse(x["name"] == "quux" and x["value"] == "bar", x) def test_missing_name(self): - r = self.create(product=1, value='bar') + r = self.create(product=1, value="bar") self.assertEqual(r.status_code, 400) def test_none_name(self): - r = self.create(product=1, name=None, value='bar') + r = self.create(product=1, name=None, value="bar") self.assertEqual(r.status_code, 400) def test_empty_name(self): - r = self.create(product=1, name='', value='bar') + r = self.create(product=1, name="", value="bar") self.assertEqual(r.status_code, 400) def test_missing_value(self): - r = self.create(product=1, name='foo') + r = self.create(product=1, name="foo") self.assertEqual(r.status_code, 400) def 
test_none_value(self): - r = self.create(product=1, name='foo', value=None) + r = self.create(product=1, name="foo", value=None) self.assertEqual(r.status_code, 400) def test_empty_value(self): - r = self.create(product=1, name='foo', value='') + r = self.create(product=1, name="foo", value="") self.assertEqual(r.status_code, 400) def test_unique_constraint(self): r = self.create( product=1, - name='foo', - value='bar', + name="foo", + value="bar", ) self.assertEqual(r.status_code, 400) r = self.create( product=1, - name='quux', - value='bar', + name="quux", + value="bar", ) self.assertEqual(r.status_code, 201) r = self.create( product=2, - name='foo', - value='bar', + name="foo", + value="bar", ) self.assertEqual(r.status_code, 201) r = self.create( endpoint=1, - name='foo', - value='bar', + name="foo", + value="bar", ) self.assertEqual(r.status_code, 201) diff --git a/unittests/test_apiv2_methods_and_endpoints.py b/unittests/test_apiv2_methods_and_endpoints.py index 6169f28f75..5c05742188 100644 --- a/unittests/test_apiv2_methods_and_endpoints.py +++ b/unittests/test_apiv2_methods_and_endpoints.py @@ -34,7 +34,7 @@ class ApiEndpointMethods(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): super().setUp() @@ -45,47 +45,47 @@ def setUp(self): def test_is_defined(self): exempt_list = [ - 'import-scan', 'reimport-scan', 'notes', 'system_settings', 'roles', - 'import-languages', 'endpoint_meta_import', 'test_types', - 'configuration_permissions', 'questionnaire_questions', - 'questionnaire_answers', 'questionnaire_answered_questionnaires', - 'questionnaire_engagement_questionnaires', 'questionnaire_general_questionnaires', - 'dojo_group_members', 'product_members', 'product_groups', 'product_type_groups', - 'product_type_members', + "import-scan", "reimport-scan", "notes", "system_settings", "roles", + "import-languages", "endpoint_meta_import", "test_types", + "configuration_permissions", "questionnaire_questions", + "questionnaire_answers", "questionnaire_answered_questionnaires", + "questionnaire_engagement_questionnaires", "questionnaire_general_questionnaires", + "dojo_group_members", "product_members", "product_groups", "product_type_groups", + "product_type_members", ] for reg, _, _ in sorted(self.registry): if reg in exempt_list: continue - for method in ['get', 'post']: + for method in ["get", "post"]: self.assertIsNotNone( - self.schema["paths"][f'{BASE_API_URL}/{reg}/'].get(method), + self.schema["paths"][f"{BASE_API_URL}/{reg}/"].get(method), f"Endpoint: {reg}, Method: {method}", ) - for method in ['get', 'put', 'patch', 'delete']: + for method in ["get", "put", "patch", "delete"]: self.assertIsNotNone( - self.schema["paths"][f'{BASE_API_URL}/{reg}' + '/{id}/'].get(method), + self.schema["paths"][f"{BASE_API_URL}/{reg}" + "/{id}/"].get(method), f"Endpoint: {reg}, Method: {method}", ) self.assertIsNotNone( self.schema["paths"] - .get(f'{BASE_API_URL}/{reg}' + '/{id}/delete_preview/', {}) - .get('get'), + .get(f"{BASE_API_URL}/{reg}" + "/{id}/delete_preview/", {}) + .get("get"), f"Endpoint: {reg}, Method: get - delete_preview", ) class ApiEndpoints(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): super().setUp() self.used_models = [] for serializer in serializers.__dict__.values(): - if hasattr(serializer, 'Meta'): - if hasattr(serializer.Meta, 'model'): + if hasattr(serializer, "Meta"): + if hasattr(serializer.Meta, "model"): self.used_models.append(serializer.Meta.model) 
self.no_api_models = [ # TODO: these models are excluded from check for now but implementation is needed Contact, @@ -116,10 +116,10 @@ def setUp(self): def test_is_defined(self): for subclass in django.apps.apps.get_models(): - if subclass.__module__ == 'dojo.models': + if subclass.__module__ == "dojo.models": if (subclass.__name__[:9] == "Tagulous_") and (subclass.__name__[-5:] == "_tags"): continue - if subclass.__name__ in ['Alerts']: + if subclass.__name__ in ["Alerts"]: continue with self.subTest(subclass=subclass): if subclass in self.used_models: diff --git a/unittests/test_apiv2_notifications.py b/unittests/test_apiv2_notifications.py index f45d7433b9..ad559678e4 100644 --- a/unittests/test_apiv2_notifications.py +++ b/unittests/test_apiv2_notifications.py @@ -1,50 +1,58 @@ from django.urls import reverse from rest_framework.authtoken.models import Token -from rest_framework.test import APIClient, APITestCase +from rest_framework.test import APIClient +from unittests.dojo_test_case import DojoAPITestCase -class NotificationsTest(APITestCase): + +class NotificationsTest(DojoAPITestCase): """ Test the notifications APIv2 endpoint. """ - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) r = self.create( template=True, - scan_added=['alert', 'slack'], + scan_added=["alert", "slack"], ) + self.creation_id = r.json()["id"] self.assertEqual(r.status_code, 201) + def tearDown(self): + self.client.delete(reverse("notifications-list") + f"{self.creation_id}/") + def create(self, **kwargs): - return self.client.post(reverse('notifications-list'), kwargs, format='json') + return self.client.post(reverse("notifications-list"), kwargs, format="json") def create_test_user(self): - password = 'testTEST1234!@#$' - r = self.client.post(reverse('user-list'), { + password = "testTEST1234!@#$" + r = self.client.post(reverse("user-list"), { "username": "api-user-notification", "password": password, - }, format='json') + }, format="json") return r.json()["id"] def test_notification_get(self): - r = self.client.get(reverse('notifications-list'), format='json') + r = self.client.get(reverse("notifications-list"), format="json") self.assertEqual(r.status_code, 200) - self.assertEqual(r.json()['results'][0]['template'], False) + item = self.get_results_by_id(r.json()["results"], 1) + self.assertEqual(item["template"], False) def test_notification_template(self): - q = {'template': True} - r = self.client.get(reverse('notifications-list'), q, format='json') + q = {"template": True} + r = self.client.get(reverse("notifications-list"), q, format="json") self.assertEqual(r.status_code, 200) - self.assertEqual(r.json()['results'][0]['template'], True) + item = self.get_results_by_id(r.json()["results"], self.creation_id) + self.assertEqual(item["template"], True) def test_notification_template_multiple(self): - q = {'template': True, 'scan_added': ['alert', 'slack']} - r = self.client.post(reverse('notifications-list'), q, format='json') + q = {"template": True, "scan_added": ["alert", "slack"]} + r = self.client.post(reverse("notifications-list"), q, format="json") self.assertEqual("Notification template already exists", r.json()["non_field_errors"][0]) def test_user_notifications(self): @@ -52,8 +60,9 @@ def 
test_user_notifications(self): creates user and checks if template is assigned """ user = {"user": self.create_test_user()} - r = self.client.get(reverse('notifications-list'), user, format='json') + r = self.client.get(reverse("notifications-list"), user, format="json") self.assertEqual(r.status_code, 200) - self.assertEqual(r.json()['results'][0]['template'], False) - self.assertIn('alert', r.json()['results'][0]['scan_added']) - self.assertIn('slack', r.json()['results'][0]['scan_added']) + item = r.json()["results"][-1] + self.assertEqual(item["template"], False) + self.assertIn("alert", item["scan_added"]) + self.assertIn("slack", item["scan_added"]) diff --git a/unittests/test_apiv2_scan_import_options.py b/unittests/test_apiv2_scan_import_options.py index 78200e48fd..b7f802b3e5 100644 --- a/unittests/test_apiv2_scan_import_options.py +++ b/unittests/test_apiv2_scan_import_options.py @@ -11,43 +11,43 @@ class ScanImportOptionsTest(APITestCase): Test the options `skip_duplicates` and `close_old_findings` for the scan import APIv2 endpoint with ZAP """ - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] EMPTY_ZAP_SCAN = """ """ - def __del__(self): - self.payload['file'].close() + def tearDown(self): + self.payload["file"].close() def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) self._first_import_test = self.import_zap_scan() test = self.import_zap_scan() - test.test_type = Test_Type.objects.create(name='some other test tool') + test.test_type = Test_Type.objects.create(name="some other test tool") test.save() def import_zap_scan(self, upload_empty_scan=False): - with open('tests/zap_sample.xml') as file: + with open("tests/zap_sample.xml") as file: if upload_empty_scan: - file = SimpleUploadedFile('zap_sample.xml', self.EMPTY_ZAP_SCAN.encode('utf-8')) + file = SimpleUploadedFile("zap_sample.xml", self.EMPTY_ZAP_SCAN.encode("utf-8")) self.payload = { - 'engagement': 1, - 'scan_type': 'ZAP Scan', - 'file': file, + "engagement": 1, + "scan_type": "ZAP Scan", + "file": file, } - test_ids = list(Test.objects.values_list('id', flat=True)) - r = self.client.post(reverse('importscan-list'), self.payload) + test_ids = list(Test.objects.values_list("id", flat=True)) + r = self.client.post(reverse("importscan-list"), self.payload) self.assertEqual(201, r.status_code) return Test.objects.exclude(id__in=test_ids).get() def get_all_finding_ids(self, **kwargs): return set(Finding.objects.filter(test__engagement_id=1, **kwargs) - .order_by('id').values_list('id', flat=True)) + .order_by("id").values_list("id", flat=True)) def test_empty_scan(self): """ diff --git a/unittests/test_apiv2_user.py b/unittests/test_apiv2_user.py index e93fb39fa1..88f91bfb5f 100644 --- a/unittests/test_apiv2_user.py +++ b/unittests/test_apiv2_user.py @@ -7,82 +7,82 @@ class UserTest(APITestCase): """ Test the User APIv2 endpoint. 
""" - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) def test_user_list(self): - r = self.client.get(reverse('user-list')) + r = self.client.get(reverse("user-list")) self.assertEqual(r.status_code, 200, r.content[:1000]) - user_list = r.json()['results'] + user_list = r.json()["results"] self.assertGreaterEqual(len(user_list), 1, r.content[:1000]) for user in user_list: - for item in ['username', 'first_name', 'last_name', 'email']: + for item in ["username", "first_name", "last_name", "email"]: self.assertIn(item, user, r.content[:1000]) - for item in ['password']: + for item in ["password"]: self.assertNotIn(item, user, r.content[:1000]) def test_user_add(self): # simple user without password - r = self.client.post(reverse('user-list'), { + r = self.client.post(reverse("user-list"), { "username": "api-user-1", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) # user with good password - password = 'testTEST1234!@#$' - r = self.client.post(reverse('user-list'), { + password = "testTEST1234!@#$" + r = self.client.post(reverse("user-list"), { "username": "api-user-2", "password": password, - }, format='json') + }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) # test password by fetching API key - r = self.client.post(reverse('api-token-auth'), { + r = self.client.post(reverse("api-token-auth"), { "username": "api-user-2", "password": password, - }, format='json') + }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) # user with weak password - r = self.client.post(reverse('user-list'), { + r = self.client.post(reverse("user-list"), { "username": "api-user-3", "password": "weakPassword", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) - self.assertIn('Password must contain at least 1 digit, 0-9.', r.content.decode("utf-8")) + self.assertIn("Password must contain at least 1 digit, 0-9.", r.content.decode("utf-8")) def test_user_change_password(self): # some user - r = self.client.post(reverse('user-list'), { + r = self.client.post(reverse("user-list"), { "username": "api-user-4", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 201, r.content[:1000]) - user_id = r.json()['id'] + user_id = r.json()["id"] - r = self.client.put("{}{}/".format(reverse('user-list'), user_id), { + r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", "first_name": "first", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) - r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), { + r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "last_name": "last", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 200, r.content[:1000]) - r = self.client.put("{}{}/".format(reverse('user-list'), user_id), { + r = self.client.put("{}{}/".format(reverse("user-list"), user_id), { "username": "api-user-4", "password": "testTEST1234!@#$", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8")) - r = 
self.client.patch("{}{}/".format(reverse('user-list'), user_id), { + r = self.client.patch("{}{}/".format(reverse("user-list"), user_id), { "password": "testTEST1234!@#$", - }, format='json') + }, format="json") self.assertEqual(r.status_code, 400, r.content[:1000]) self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8")) diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py index 5e58bdde62..1acd108d25 100644 --- a/unittests/test_apply_finding_template.py +++ b/unittests/test_apply_finding_template.py @@ -21,8 +21,8 @@ def create(): settings.save() p = Product() - p.Name = 'Test Product' - p.Description = 'Product for Testing Apply Template functionality' + p.Name = "Test Product" + p.Description = "Product for Testing Apply Template functionality" p.prod_type = Product_Type.objects.get(id=1) p.save() @@ -33,7 +33,7 @@ def create(): e.save() tt = Test_Type() - tt.name = 'Temporary Test' + tt.name = "Temporary Test" tt.save() t = Test() @@ -46,9 +46,9 @@ def create(): user = FindingTemplateTestUtil.create_user(True) f = Finding() - f.title = 'Finding for Testing Apply Template functionality' - f.severity = 'High' - f.description = 'Finding for Testing Apply Template Functionality' + f.title = "Finding for Testing Apply Template functionality" + f.severity = "High" + f.description = "Finding for Testing Apply Template Functionality" f.test = t f.reporter = user f.last_reviewed = timezone.now() @@ -60,12 +60,12 @@ class FindingTemplateMother: @staticmethod def create(): tmp = Finding_Template() - tmp.title = 'Finding Template for Testing Apply Template functionality' + tmp.title = "Finding Template for Testing Apply Template functionality" tmp.cwe = 0 - tmp.severity = 'Low' - tmp.description = 'Finding Template for Testing Apply Template functionality' - tmp.mitigation = 'Finding Template Mitigation' - tmp.impact = 'Finding Template Impact' + tmp.severity = "Low" + tmp.description = "Finding Template for Testing Apply Template functionality" + tmp.mitigation = "Finding Template Mitigation" + tmp.impact = "Finding Template Impact" tmp.save() @@ -79,7 +79,7 @@ def create_user(is_staff): user_count = User.objects.count() user = User() user.is_staff = is_staff - user.username = 'TestUser' + str(user_count) + user.username = "TestUser" + str(user_count) user.save() return user @@ -99,16 +99,16 @@ def create_post_request(user, path, data): post_request.user = user post_request.session = {} messages = FallbackStorage(post_request) - setattr(post_request, '_messages', messages) + setattr(post_request, "_messages", messages) return post_request @skip("outdated so doesn't work with current fixture") class TestApplyFindingTemplate(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] - apply_template_url = 'finding/2/2/apply_template_to_finding' + apply_template_url = "finding/2/2/apply_template_to_finding" def setUp(self): FindingMother.create() @@ -128,41 +128,41 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None): def test_apply_template_to_finding_with_data_does_not_display_error_success(self): result = self.make_request(True, 1, 1, - {'title': 'Finding for Testing Apply Template functionality', - 'cwe': '89', - 'severity': 'High', - 'description': 'Finding for Testing Apply Template Functionality', - 'mitigation': 'template mitigation', - 'impact': 'template impact'}) - self.assertNotContains(result, 'There appears to be errors on the form', 302) + {"title": "Finding 
for Testing Apply Template functionality", + "cwe": "89", + "severity": "High", + "description": "Finding for Testing Apply Template Functionality", + "mitigation": "template mitigation", + "impact": "template impact"}) + self.assertNotContains(result, "There appears to be errors on the form", 302) def test_apply_template_to_finding_with_data_returns_to_view_success(self): result = self.make_request(True, 1, 1, - {'title': 'Finding for Testing Apply Template functionality', - 'cwe': '89', - 'severity': 'High', - 'description': 'Finding for Testing Apply Template Functionality', - 'mitigation': 'template mitigation', - 'impact': 'template impact'}) + {"title": "Finding for Testing Apply Template functionality", + "cwe": "89", + "severity": "High", + "description": "Finding for Testing Apply Template Functionality", + "mitigation": "template mitigation", + "impact": "template impact"}) self.assertIsNotNone(result) self.assertEqual(302, result.status_code) - self.assertEqual('/finding/1', result.url) + self.assertEqual("/finding/1", result.url) def test_apply_template_to_finding_with_data_saves_success(self): - test_title = 'Finding for Testing Apply Template functionality' + test_title = "Finding for Testing Apply Template functionality" test_cwe = 89 - test_severity = 'High' - test_description = 'Finding for Testing Apply Template Functionality' - test_mitigation = 'template mitigation' - test_impact = 'template impact' + test_severity = "High" + test_description = "Finding for Testing Apply Template Functionality" + test_mitigation = "template mitigation" + test_impact = "template impact" self.make_request(True, 1, 1, - {'title': test_title, - 'cwe': test_cwe, - 'severity': test_severity, - 'description': test_description, - 'mitigation': test_mitigation, - 'impact': test_impact}) + {"title": test_title, + "cwe": test_cwe, + "severity": test_severity, + "description": test_description, + "mitigation": test_mitigation, + "impact": test_impact}) f = Finding.objects.get(id=1) self.assertEqual(test_title, f.title) @@ -174,15 +174,15 @@ def test_apply_template_to_finding_with_data_saves_success(self): def test_unauthorized_apply_template_to_finding_fails(self): result = self.make_request(False, 1, 1, - {'title': 'Finding for Testing Apply Template functionality', - 'cwe': '89', - 'severity': 'High', - 'description': 'Finding for Testing Apply Template Functionality', - 'mitigation': 'template mitigation', - 'impact': 'template impact'}, + {"title": "Finding for Testing Apply Template functionality", + "cwe": "89", + "severity": "High", + "description": "Finding for Testing Apply Template Functionality", + "mitigation": "template mitigation", + "impact": "template impact"}, ) self.assertEqual(302, result.status_code) - self.assertIn('login', result.url) + self.assertIn("login", result.url) def test_apply_template_to_finding_with_illegal_finding_fails(self): with self.assertRaises(Exception): @@ -196,33 +196,33 @@ def test_apply_template_to_finding_with_no_data_returns_view_success(self): result = self.make_request(True, 1, 1, None) self.assertIsNotNone(result) self.assertEqual(302, result.status_code) - self.assertEqual('/finding/1', result.url) + self.assertEqual("/finding/1", result.url) def test_apply_template_to_finding_without_required_field_displays_field_title_success(self): result = self.make_request(True, 1, 1, - {'title': '', - 'cwe': '89', - 'severity': 'High', - 'description': 'Finding for Testing Apply Template Functionality', - 'mitigation': 'template mitigation', - 'impact': 
'template impact'}) - self.assertContains(result, 'The title is required.') + {"title": "", + "cwe": "89", + "severity": "High", + "description": "Finding for Testing Apply Template Functionality", + "mitigation": "template mitigation", + "impact": "template impact"}) + self.assertContains(result, "The title is required.") def test_apply_template_to_finding_without_required_field_displays_error_success(self): result = self.make_request(True, 1, 1, - {'title': '', - 'cwe': '89', - 'severity': 'High', - 'description': 'Finding for Testing Apply Template Functionality', - 'mitigation': 'template mitigation', - 'impact': 'template impact'}) - self.assertContains(result, 'There appears to be errors on the form') + {"title": "", + "cwe": "89", + "severity": "High", + "description": "Finding for Testing Apply Template Functionality", + "mitigation": "template mitigation", + "impact": "template impact"}) + self.assertContains(result, "There appears to be errors on the form") @skip("outdated so doesn't work with current fixture") class TestFindTemplateToApply(DojoTestCase): - fixtures = ['dojo_testdata.json'] - choose_template_url = 'finding/2/find_template_to_apply' + fixtures = ["dojo_testdata.json"] + choose_template_url = "finding/2/find_template_to_apply" def setUp(self): FindingMother.create() @@ -243,7 +243,7 @@ def make_request(self, user_is_staff, finding_id, data=None): def test_unauthorized_find_template_to_apply_fails(self): result = self.make_request(False, 1) self.assertEqual(302, result.status_code) - self.assertIn('login', result.url) + self.assertIn("login", result.url) def test_authorized_find_template_to_apply_success(self): result = self.make_request(True, 1) @@ -251,17 +251,17 @@ def test_authorized_find_template_to_apply_success(self): def test_find_template_to_apply_displays_templates_success(self): result = self.make_request(True, 1) - self.assertContains(result, 'Finding Template for Testing Apply Template functionality') + self.assertContains(result, "Finding Template for Testing Apply Template functionality") def test_find_template_to_apply_displays_breadcrumb(self): result = self.make_request(True, 1) - self.assertContains(result, 'Apply Template to Finding') + self.assertContains(result, "Apply Template to Finding") @skip("outdated so doesn't work with current fixture") class TestChooseFindingTemplateOptions(DojoTestCase): - fixtures = ['dojo_testdata.json'] - finding_template_options_url = 'finding/2/2/choose_finding_template_options' + fixtures = ["dojo_testdata.json"] + finding_template_options_url = "finding/2/2/choose_finding_template_options" def setUp(self): FindingMother.create() @@ -282,7 +282,7 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None): def test_unauthorized_choose_finding_template_options_fails(self): result = self.make_request(False, 1, 1) self.assertEqual(302, result.status_code) - self.assertIn('login', result.url) + self.assertIn("login", result.url) def test_authorized_choose_finding_template_options_success(self): result = self.make_request(True, 1, 1) @@ -300,4 +300,4 @@ def test_choose_finding_template_options_with_invalid_template_fails(self): def test_choose_finding_template_options_with_valid_finding_and_template_renders_apply_finding_template_view(self): result = self.make_request(True, 1, 1) - self.assertContains(result, '

    Apply template to a Finding

    ') + self.assertContains(result, "

    Apply template to a Finding

    ") diff --git a/unittests/test_bulk_risk_acceptance_api.py b/unittests/test_bulk_risk_acceptance_api.py index ba0bf56a57..bdc87451d0 100644 --- a/unittests/test_bulk_risk_acceptance_api.py +++ b/unittests/test_bulk_risk_acceptance_api.py @@ -23,12 +23,12 @@ class TestBulkRiskAcceptanceApi(APITestCase): @classmethod def setUpTestData(cls): - cls.user = User.objects.create(username='molly', first_name='Molly', last_name='Mocket', is_staff=True) + cls.user = User.objects.create(username="molly", first_name="Molly", last_name="Mocket", is_staff=True) cls.token = Token.objects.create(user=cls.user) - cls.product_type = Product_Type.objects.create(name='Web App') - cls.product = Product.objects.create(prod_type=cls.product_type, name='Flopper', description='Test product') + cls.product_type = Product_Type.objects.create(name="Web App") + cls.product = Product.objects.create(prod_type=cls.product_type, name="Flopper", description="Test product") Product_Type_Member.objects.create(product_type=cls.product_type, user=cls.user, role=Role.objects.get(id=Roles.Owner)) - cls.product_2 = Product.objects.create(prod_type=cls.product_type, name='Flopper2', description='Test product2') + cls.product_2 = Product.objects.create(prod_type=cls.product_type, name="Flopper2", description="Test product2") cls.engagement = Engagement.objects.create(product=cls.product, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) cls.engagement_2a = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), @@ -36,7 +36,7 @@ def setUpTestData(cls): cls.engagement_2b = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) - cls.test_type = Test_Type.objects.create(name='Risk Acceptance Mock Scan', static_tool=True) + cls.test_type = Test_Type.objects.create(name="Risk Acceptance Mock Scan", static_tool=True) cls.test_a = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) cls.test_b = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type, @@ -50,41 +50,41 @@ def setUpTestData(cls): target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) def create_finding(test: Test, reporter: User, cve: str) -> Finding: - return Finding(test=test, title=f'Finding {cve}', cve=cve, severity='High', verified=True, - description='Hello world!', mitigation='Delete system32', impact='Everything', - reporter=reporter, numerical_severity='S1', static_finding=True, dynamic_finding=False) + return Finding(test=test, title=f"Finding {cve}", cve=cve, severity="High", verified=True, + description="Hello world!", mitigation="Delete system32", impact="Everything", + reporter=reporter, numerical_severity="S1", static_finding=True, dynamic_finding=False) Finding.objects.bulk_create( - create_finding(cls.test_a, cls.user, f'CVE-1999-{i}') for i in range(50, 150, 3)) + create_finding(cls.test_a, cls.user, f"CVE-1999-{i}") for i in range(50, 150, 3)) for finding in Finding.objects.filter(test=cls.test_a): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) 
Finding.objects.bulk_create( - create_finding(cls.test_b, cls.user, f'CVE-1999-{i}') for i in range(51, 150, 3)) + create_finding(cls.test_b, cls.user, f"CVE-1999-{i}") for i in range(51, 150, 3)) for finding in Finding.objects.filter(test=cls.test_b): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - create_finding(cls.test_c, cls.user, f'CVE-1999-{i}') for i in range(52, 150, 3)) + create_finding(cls.test_c, cls.user, f"CVE-1999-{i}") for i in range(52, 150, 3)) for finding in Finding.objects.filter(test=cls.test_c): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - create_finding(cls.test_d, cls.user, f'CVE-2000-{i}') for i in range(50, 150, 3)) + create_finding(cls.test_d, cls.user, f"CVE-2000-{i}") for i in range(50, 150, 3)) for finding in Finding.objects.filter(test=cls.test_d): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - create_finding(cls.test_e, cls.user, f'CVE-1999-{i}') for i in range(50, 150, 3)) + create_finding(cls.test_e, cls.user, f"CVE-1999-{i}") for i in range(50, 150, 3)) for finding in Finding.objects.filter(test=cls.test_e): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) def setUp(self) -> None: self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key) def test_test_accept_risks(self): - accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', - 'accepted_by': 'King of the Internet'} for i in range(100, 150)] - result = self.client.post(reverse('test-accept-risks', kwargs={'pk': self.test_a.id}), data=accepted_risks, - format='json') + accepted_risks = [{"vulnerability_id": f"CVE-1999-{i}", "justification": "Demonstration purposes", + "accepted_by": "King of the Internet"} for i in range(100, 150)] + result = self.client.post(reverse("test-accept-risks", kwargs={"pk": self.test_a.id}), data=accepted_risks, + format="json") self.assertEqual(len(result.json()), 17) self.assertEqual(self.test_a.unaccepted_open_findings.count(), 17) self.assertEqual(self.test_b.unaccepted_open_findings.count(), 33) @@ -94,10 +94,10 @@ def test_test_accept_risks(self): self.assertEqual(self.engagement_2a.risk_acceptance.count(), 0) def test_engagement_accept_risks(self): - accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', - 'accepted_by': 'King of the Internet'} for i in range(100, 150)] - result = self.client.post(reverse('engagement-accept-risks', kwargs={'pk': self.engagement.id}), - data=accepted_risks, format='json') + accepted_risks = [{"vulnerability_id": f"CVE-1999-{i}", "justification": "Demonstration purposes", + "accepted_by": "King of the Internet"} for i in range(100, 150)] + result = self.client.post(reverse("engagement-accept-risks", kwargs={"pk": self.engagement.id}), + data=accepted_risks, format="json") self.assertEqual(len(result.json()), 50) self.assertEqual(self.engagement.unaccepted_open_findings.count(), 50) @@ -105,9 +105,9 @@ def test_engagement_accept_risks(self): self.assertEqual(self.engagement_2a.unaccepted_open_findings.count(), 34) def test_finding_accept_risks(self): - accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', - 'accepted_by': 'King of the Internet'} for i in 
range(60, 140)] - result = self.client.post(reverse('finding-accept-risks'), data=accepted_risks, format='json') + accepted_risks = [{"vulnerability_id": f"CVE-1999-{i}", "justification": "Demonstration purposes", + "accepted_by": "King of the Internet"} for i in range(60, 140)] + result = self.client.post(reverse("finding-accept-risks"), data=accepted_risks, format="json") self.assertEqual(len(result.json()), 106) self.assertEqual(Finding.unaccepted_open_findings().count(), 62) diff --git a/unittests/test_cleanup_alerts.py b/unittests/test_cleanup_alerts.py index d2f4af56d3..3c087cdee7 100644 --- a/unittests/test_cleanup_alerts.py +++ b/unittests/test_cleanup_alerts.py @@ -11,10 +11,10 @@ class TestCleanupAlerts(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") Alerts.objects.filter(user_id=testuser).delete() Alerts.objects.create(title="A", user_id=testuser) Alerts.objects.create(title="B", user_id=testuser) @@ -22,7 +22,7 @@ def setUp(self): def test_delete_alerts_disabled(self): settings.MAX_ALERTS_PER_USER = -1 - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") alerts_before = Alerts.objects.filter(user_id=testuser).count() cleanup_alerts() alerts_after = Alerts.objects.filter(user_id=testuser).count() @@ -30,14 +30,14 @@ def test_delete_alerts_disabled(self): def test_delete_all_alerts(self): settings.MAX_ALERTS_PER_USER = 0 - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") cleanup_alerts() alerts_after = Alerts.objects.filter(user_id=testuser).count() self.assertEqual(alerts_after, 0) def test_delete_more_than_two_alerts(self): settings.MAX_ALERTS_PER_USER = 2 - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") cleanup_alerts() alerts_after = Alerts.objects.filter(user_id=testuser).count() self.assertEqual(alerts_after, 2) diff --git a/unittests/test_copy_model.py b/unittests/test_copy_model.py index 04f39a293b..94c0b3ac4e 100644 --- a/unittests/test_copy_model.py +++ b/unittests/test_copy_model.py @@ -8,10 +8,10 @@ class TestCopyFindingModel(DojoTestCase): def test_duplicate_finding_same_test(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") finding = Finding.objects.create(test=test, reporter=user) # Do the counting current_finding_count = Finding.objects.filter(test=test).count() @@ -25,11 +25,11 @@ def test_duplicate_finding_same_test(self): def test_duplicate_finding_different_test(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test1 = self.create_test(engagement=engagement, scan_type='NPM Audit 
Scan', title='test1') - test2 = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test2') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test1 = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test1") + test2 = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test2") finding = Finding.objects.create(test=test1, reporter=user) # Do the counting engagement_finding_count = Finding.objects.filter(test__engagement=engagement).count() @@ -45,12 +45,12 @@ def test_duplicate_finding_different_test(self): def test_duplicate_finding_with_tags(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") finding = Finding.objects.create(test=test, reporter=user) - finding.unsaved_tags = ['test_tag'] + finding.unsaved_tags = ["test_tag"] finding.save() # Do the counting current_finding_count = Finding.objects.filter(test=test).count() @@ -64,12 +64,12 @@ def test_duplicate_finding_with_tags(self): def test_duplicate_finding_with_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") finding = Finding.objects.create(test=test, reporter=user) - finding.unsaved_notes = ['test_note'] + finding.unsaved_notes = ["test_note"] finding.save() # Do the counting current_finding_count = Finding.objects.filter(test=test).count() @@ -83,13 +83,13 @@ def test_duplicate_finding_with_notes(self): def test_duplicate_finding_with_tags_and_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") finding = Finding.objects.create(test=test, reporter=user) - finding.unsaved_tags = ['test_tag'] - finding.unsaved_notes = ['test_note'] + finding.unsaved_tags = 
["test_tag"] + finding.unsaved_notes = ["test_note"] finding.save() # Do the counting current_finding_count = Finding.objects.filter(test=test).count() @@ -105,11 +105,11 @@ def test_duplicate_finding_with_tags_and_notes(self): def test_duplicate_finding_with_endpoints(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_finding', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') - endpoint = Endpoint.from_uri('0.0.0.0') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_finding", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") + endpoint = Endpoint.from_uri("0.0.0.0") endpoint.save() finding = Finding.objects.create(test=test, reporter=user) endpoint_status = Endpoint_Status.objects.create(finding=finding, endpoint=endpoint) @@ -137,10 +137,10 @@ class TestCopyTestModel(DojoTestCase): def test_duplicate_test_same_enagagement(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) # Do the counting current_test_count = Test.objects.filter(engagement=engagement).count() @@ -158,11 +158,11 @@ def test_duplicate_test_same_enagagement(self): def test_duplicate_tests_different_engagements(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement1 = self.create_engagement('eng1', product) - engagement2 = self.create_engagement('eng2', product) - test = self.create_test(engagement=engagement1, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement1 = self.create_engagement("eng1", product) + engagement2 = self.create_engagement("eng2", product) + test = self.create_test(engagement=engagement1, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) # Do the counting product_finding_count = Finding.objects.filter(test__engagement__product=product).count() @@ -180,12 +180,12 @@ def test_duplicate_tests_different_engagements(self): def test_duplicate_test_with_tags(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + 
product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - test.unsaved_tags = ['test_tag'] + test.unsaved_tags = ["test_tag"] test.save() # Do the counting current_test_count = Test.objects.filter(engagement=engagement).count() @@ -199,12 +199,12 @@ def test_duplicate_test_with_tags(self): def test_duplicate_test_with_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - test.unsaved_notes = ['test_note'] + test.unsaved_notes = ["test_note"] test.save() # Do the counting current_test_count = Test.objects.filter(engagement=engagement).count() @@ -218,13 +218,13 @@ def test_duplicate_test_with_notes(self): def test_duplicate_test_with_tags_and_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - test.unsaved_tags = ['test_tag'] - test.unsaved_notes = ['test_note'] + test.unsaved_tags = ["test_tag"] + test.unsaved_notes = ["test_note"] test.save() # Do the counting current_test_count = Test.objects.filter(engagement=engagement).count() @@ -243,10 +243,10 @@ class TestCopyEngagementModel(DojoTestCase): def test_duplicate_engagement(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) # Do the counting current_product_count = Product.objects.filter(prod_type=product_type).count() @@ -264,12 +264,12 @@ def test_duplicate_engagement(self): def test_duplicate_engagement_with_tags(self): # Set the scene user, _ = 
User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - engagement.unsaved_tags = ['test_tag'] + engagement.unsaved_tags = ["test_tag"] engagement.save() # Do the counting current_engagement_count = Engagement.objects.filter(product=product).count() @@ -283,12 +283,12 @@ def test_duplicate_engagement_with_tags(self): def test_duplicate_engagement_with_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - engagement.unsaved_notes = ['test_note'] + engagement.unsaved_notes = ["test_note"] engagement.save() # Do the counting current_engagement_count = Engagement.objects.filter(product=product).count() @@ -302,13 +302,13 @@ def test_duplicate_engagement_with_notes(self): def test_duplicate_engagement_with_tags_and_notes(self): # Set the scene user, _ = User.objects.get_or_create(username="admin") - product_type = self.create_product_type('prod_type') - product = self.create_product('test_deuplicate_test', prod_type=product_type) - engagement = self.create_engagement('eng', product) - test = self.create_test(engagement=engagement, scan_type='NPM Audit Scan', title='test') + product_type = self.create_product_type("prod_type") + product = self.create_product("test_deuplicate_test", prod_type=product_type) + engagement = self.create_engagement("eng", product) + test = self.create_test(engagement=engagement, scan_type="NPM Audit Scan", title="test") _ = Finding.objects.create(test=test, reporter=user) - engagement.unsaved_tags = ['test_tag'] - engagement.unsaved_notes = ['test_note'] + engagement.unsaved_tags = ["test_tag"] + engagement.unsaved_notes = ["test_note"] engagement.save() # Do the counting current_engagement_count = Engagement.objects.filter(product=product).count() diff --git a/unittests/test_dashboard.py b/unittests/test_dashboard.py index a5f73a14e8..81d9000e40 100644 --- a/unittests/test_dashboard.py +++ b/unittests/test_dashboard.py @@ -15,7 +15,7 @@ def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[str, str]]): - with patch('django.db.models.fields.timezone.now') as mock_now: + with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when engagement = Engagement.objects.create(product_id=product_id, target_start=when.date(), target_end=when.date()) test = Test.objects.create(engagement=engagement, 
test_type_id=120, target_start=when, target_end=when) @@ -26,7 +26,7 @@ def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[st def create_with_duplicates(when: datetime, product_id: int, titles_and_severities: List[Tuple[str, str]]): - with patch('django.db.models.fields.timezone.now') as mock_now: + with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when engagement = Engagement.objects.create(product_id=product_id, target_start=when.date(), target_end=when.date()) test = Test.objects.create(engagement=engagement, test_type_id=120, target_start=when, target_end=when) @@ -41,13 +41,13 @@ def create_with_duplicates(when: datetime, product_id: int, titles_and_severitie def mitigate(when: datetime, product_id: int, title: str): - with patch('django.db.models.fields.timezone.now') as mock_now: + with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when Finding.objects.filter(test__engagement__product_id=product_id, title=title).update(is_mitigated=True, mitigated=when) def accept(when: datetime, product_id: int, title: str): - with patch('django.db.models.fields.timezone.now') as mock_now: + with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when findings = Finding.objects.filter(test__engagement__product_id=product_id, title=title) ra = Risk_Acceptance.objects.create(name="My Risk Acceptance", owner_id=1) @@ -56,13 +56,13 @@ def accept(when: datetime, product_id: int, title: str): def verify(when: datetime, product_id: int, title: str): - with patch('django.db.models.fields.timezone.now') as mock_now: + with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when Finding.objects.filter(test__engagement__product_id=product_id, title=title).update(verified=True) class TestDashboard(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] @classmethod def setUpClass(cls): @@ -80,13 +80,13 @@ def setUpTestData(cls) -> None: def _setup_test_counters_findings(self, product_id: int): when = self.week_ago create(when, product_id, [ - ("My Findind 1.1", 'Medium'), - ("My Findind 1.2", 'Medium'), - ("My Findind 1.3", 'Medium'), - ("My Findind 1.4", 'Medium'), - ("My Findind 1.5", 'Medium'), - ("My Findind 1.6", 'Medium'), - ("My Findind 1.7", 'Medium'), + ("My Findind 1.1", "Medium"), + ("My Findind 1.2", "Medium"), + ("My Findind 1.3", "Medium"), + ("My Findind 1.4", "Medium"), + ("My Findind 1.5", "Medium"), + ("My Findind 1.6", "Medium"), + ("My Findind 1.7", "Medium"), ]) mitigate(when, product_id, "My Findind 1.1") accept (when, product_id, "My Findind 1.2") # noqa: E211 @@ -94,16 +94,16 @@ def _setup_test_counters_findings(self, product_id: int): when = self.now create(when, product_id, [ - ("My Findind 2.1", 'Medium'), - ("My Findind 2.2", 'Medium'), - ("My Findind 2.3", 'Medium'), - ("My Findind 2.4", 'Medium'), + ("My Findind 2.1", "Medium"), + ("My Findind 2.2", "Medium"), + ("My Findind 2.3", "Medium"), + ("My Findind 2.4", "Medium"), ]) create_with_duplicates(when, product_id, [ - ("My Findind 2.1", 'Medium'), - ("My Findind 2.2", 'Medium'), - ("My Findind 2.3", 'Medium'), - ("My Findind 2.4", 'Medium'), + ("My Findind 2.1", "Medium"), + ("My Findind 2.2", "Medium"), + ("My Findind 2.3", "Medium"), + ("My Findind 2.4", "Medium"), ]) mitigate(when, product_id, "My Findind 1.4") accept (when, product_id, "My Findind 1.5") # noqa: E211 @@ -117,10 +117,10 @@ def test_counters_as_staff(self): response = 
self._request("admin") - self.assertEqual(3, response.context['engagement_count']) - self.assertEqual(4, response.context['finding_count']) - self.assertEqual(2, response.context['mitigated_count']) - self.assertEqual(2, response.context['accepted_count']) + self.assertEqual(3, response.context["engagement_count"]) + self.assertEqual(4, response.context["finding_count"]) + self.assertEqual(2, response.context["mitigated_count"]) + self.assertEqual(2, response.context["accepted_count"]) def test_counters_as_user(self): self._setup_test_counters_findings(product_id=2) @@ -128,34 +128,34 @@ def test_counters_as_user(self): response = self._request("user1") - self.assertEqual(3, response.context['engagement_count']) - self.assertEqual(4, response.context['finding_count']) - self.assertEqual(2, response.context['mitigated_count']) - self.assertEqual(2, response.context['accepted_count']) + self.assertEqual(3, response.context["engagement_count"]) + self.assertEqual(4, response.context["finding_count"]) + self.assertEqual(2, response.context["mitigated_count"]) + self.assertEqual(2, response.context["accepted_count"]) def _setup_test_charts_findings(self, product_id: int): when = self.year_ago create(when, product_id, [ - ("My Findind 0.1", 'Medium'), + ("My Findind 0.1", "Medium"), ]) when = self.month_ago create(when, product_id, [ - ("My Findind 1.1", 'Critical'), - ("My Findind 1.2", 'High'), - ("My Findind 1.3", 'Medium'), - ("My Findind 1.4", 'Low'), - ("My Findind 1.5", 'Info'), + ("My Findind 1.1", "Critical"), + ("My Findind 1.2", "High"), + ("My Findind 1.3", "Medium"), + ("My Findind 1.4", "Low"), + ("My Findind 1.5", "Info"), ("My Findind 1.6", ""), ("My Findind 1.7", "Foo"), ]) create_with_duplicates(when, product_id, [ - ("My Findind 1.3", 'Medium'), + ("My Findind 1.3", "Medium"), ]) when = self.now create(when, product_id, [ - ("My Findind 2.1", 'Critical'), + ("My Findind 2.1", "Critical"), ]) def test_charts_as_staff(self): @@ -163,17 +163,17 @@ def test_charts_as_staff(self): response = self._request("admin") - self.assertEqual(2, response.context['critical']) - self.assertEqual(1, response.context['high']) - self.assertEqual(2, response.context['medium']) - self.assertEqual(1, response.context['low']) - self.assertEqual(1, response.context['info']) + self.assertEqual(2, response.context["critical"]) + self.assertEqual(1, response.context["high"]) + self.assertEqual(2, response.context["medium"]) + self.assertEqual(1, response.context["low"]) + self.assertEqual(1, response.context["info"]) expected = [ - {'y': f"{self.month_ago.year}-{self.month_ago.month:02}", 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1, None: 2}, - {'y': f"{self.now.year}-{self.now.month:02}", 'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, None: 0}, # noqa: E241 + {"y": f"{self.month_ago.year}-{self.month_ago.month:02}", "a": 1, "b": 1, "c": 1, "d": 1, "e": 1, None: 2}, + {"y": f"{self.now.year}-{self.now.month:02}", "a": 1, "b": 0, "c": 0, "d": 0, "e": 0, None: 0}, # noqa: E241 ] - self.assertEqual(expected, response.context['by_month']) + self.assertEqual(expected, response.context["by_month"]) def test_charts_as_user(self): self._setup_test_charts_findings(product_id=2) @@ -181,19 +181,19 @@ def test_charts_as_user(self): response = self._request("user1") - self.assertEqual(2, response.context['critical']) - self.assertEqual(1, response.context['high']) - self.assertEqual(2, response.context['medium']) - self.assertEqual(1, response.context['low']) - self.assertEqual(1, response.context['info']) + self.assertEqual(2, 
response.context["critical"]) + self.assertEqual(1, response.context["high"]) + self.assertEqual(2, response.context["medium"]) + self.assertEqual(1, response.context["low"]) + self.assertEqual(1, response.context["info"]) expected = [ - {'y': f"{self.month_ago.year}-{self.month_ago.month:02}", 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1, None: 2}, - {'y': f"{self.now.year}-{self.now.month:02}", 'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, None: 0}, # noqa: E241 + {"y": f"{self.month_ago.year}-{self.month_ago.month:02}", "a": 1, "b": 1, "c": 1, "d": 1, "e": 1, None: 2}, + {"y": f"{self.now.year}-{self.now.month:02}", "a": 1, "b": 0, "c": 0, "d": 0, "e": 0, None: 0}, # noqa: E241 ] - self.assertEqual(expected, response.context['by_month']) + self.assertEqual(expected, response.context["by_month"]) def _request(self, username: str): user = User.objects.get(username=username) self.client.force_login(user) - return self.client.get(reverse('dashboard')) + return self.client.get(reverse("dashboard")) diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py index 46a99090b4..bb5b6abacb 100644 --- a/unittests/test_deduplication_logic.py +++ b/unittests/test_deduplication_logic.py @@ -126,10 +126,10 @@ class TestDuplicationLogic(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def run(self, result=None): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") testuser.usercontactinfo.block_execution = True testuser.save() @@ -140,7 +140,7 @@ def run(self, result=None): super().run(result) def setUp(self): - logger.debug('enabling deduplication') + logger.debug("enabling deduplication") self.enable_dedupe() self.log_summary() @@ -188,7 +188,7 @@ def test_identical_except_title_legacy(self): # 24 is already a duplicate of 22, let's see what happens if we create an identical finding with different title (and reset status) # expect: NOT marked as duplicate as title is part of hash_code calculation finding_new, finding_4 = self.copy_and_reset_finding(id=4) - finding_new.title = 'the best title' + finding_new.title = "the best title" finding_new.save(dedupe_option=True) self.assert_finding(finding_new, not_pk=24, duplicate=False, not_hash_code=finding_4.hash_code) @@ -197,7 +197,7 @@ def test_identical_except_description_legacy(self): # 24 is already a duplicate of 22, let's see what happens if we create an identical finding with different description (and reset status) # expect: not marked as duplicate as legacy sees description as leading for hash_code finding_new, finding_24 = self.copy_and_reset_finding(id=24) - finding_new.description = 'useless finding' + finding_new.description = "useless finding" finding_new.save(dedupe_option=True) self.assert_finding(finding_new, not_pk=24, duplicate=False, not_hash_code=finding_24.hash_code) @@ -215,7 +215,7 @@ def test_identical_except_filepath_legacy(self): # 24 is already a duplicate of 22, let's see what happens if we create an identical finding with different file_path (and reset status) # expect: not marked as duplicate finding_new, finding_24 = self.copy_and_reset_finding(id=24) - finding_new.file_path = '/dev/null' + finding_new.file_path = "/dev/null" Finding.objects.get(id=22) @@ -461,7 +461,7 @@ def test_identical_except_title_hash_code(self): # 4 is already a duplicate of 2, let's see what happens if we create an identical finding with different title (and reset status) # expect: NOT marked as duplicate as title is part of hash_code calculation 
finding_new, finding_4 = self.copy_and_reset_finding(id=4) - finding_new.title = 'the best title' + finding_new.title = "the best title" finding_new.save(dedupe_option=True) self.assert_finding(finding_new, not_pk=4, duplicate=False, not_hash_code=finding_4.hash_code) @@ -472,7 +472,7 @@ def test_identical_except_description_hash_code(self): # expect: marked as duplicate finding_new, finding_4 = self.copy_and_reset_finding(id=4) - finding_new.description = 'useless finding' + finding_new.description = "useless finding" finding_new.save(dedupe_option=True) if (settings.DEDUPE_ALGO_ENDPOINT_FIELDS == []): @@ -510,7 +510,7 @@ def test_identical_except_filepath_hash_code(self): # 4 is already a duplicate of 2, let's see what happens if we create an identical finding with different file_path (and reset status) # expect: marked as duplicate finding_new, finding_4 = self.copy_and_reset_finding(id=4) - finding_new.file_path = '/dev/null' + finding_new.file_path = "/dev/null" finding_new.save(dedupe_option=True) if (settings.DEDUPE_ALGO_ENDPOINT_FIELDS == []): @@ -520,7 +520,7 @@ def test_identical_except_filepath_hash_code(self): self.assert_finding(finding_new, not_pk=4, duplicate=False, duplicate_finding_id=None, hash_code=finding_4.hash_code) finding_new, finding_2 = self.copy_with_endpoints_without_dedupe_and_reset_finding(id=2) - finding_new.file_path = '/dev/null' + finding_new.file_path = "/dev/null" finding_new.save(dedupe_option=True) self.assert_finding(finding_new, not_pk=2, duplicate=True, duplicate_finding_id=finding_4.duplicate_finding.id, hash_code=finding_2.hash_code) @@ -691,7 +691,7 @@ def test_identical_unique_id(self): def test_different_unique_id_unique_id(self): # create identical copy finding_new, finding_124 = self.copy_and_reset_finding(id=124) - finding_new.unique_id_from_tool = '9999' + finding_new.unique_id_from_tool = "9999" finding_new.save() # expect not duplicate, but same hash_code @@ -708,10 +708,10 @@ def test_identical_ordering_unique_id(self): def test_title_description_line_filepath_different_unique_id(self): # create identical copy, change some fields finding_new, finding_124 = self.copy_and_reset_finding(id=124) - finding_new.title = 'another title' - finding_new.unsaved_vulnerability_ids = ['CVE-2020-12345'] - finding_new.cwe = '456' - finding_new.description = 'useless finding' + finding_new.title = "another title" + finding_new.unsaved_vulnerability_ids = ["CVE-2020-12345"] + finding_new.cwe = "456" + finding_new.description = "useless finding" finding_new.save() # expect duplicate as we only match on unique id, hash_code also different @@ -720,11 +720,11 @@ def test_title_description_line_filepath_different_unique_id(self): def test_title_description_line_filepath_different_and_id_different_unique_id(self): # create identical copy, change some fields finding_new, finding_124 = self.copy_and_reset_finding(id=124) - finding_new.title = 'another title' - finding_new.unsaved_vulnerability_ids = ['CVE-2020-12345'] - finding_new.cwe = '456' - finding_new.description = 'useless finding' - finding_new.unique_id_from_tool = '9999' + finding_new.title = "another title" + finding_new.unsaved_vulnerability_ids = ["CVE-2020-12345"] + finding_new.cwe = "456" + finding_new.description = "useless finding" + finding_new.unique_id_from_tool = "9999" finding_new.save() # expect not duplicate as we match on unique id, hash_code also different because fields changed @@ -740,10 +740,10 @@ def test_dedupe_not_inside_engagement_unique_id(self): finding_22.test.test_type = 
finding_124.test.test_type finding_22.test.save() - finding_22.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" finding_22.save(dedupe_option=False) - finding_new.unique_id_from_tool = '888' + finding_new.unique_id_from_tool = "888" finding_new.save() # expect not duplicate as dedupe_inside_engagement is True @@ -774,10 +774,10 @@ def test_dedupe_inside_engagement_unique_id2(self): finding_22.test.test_type = finding_124.test.test_type finding_22.test.save() - finding_22.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" finding_22.save(dedupe_option=False) - finding_new.unique_id_from_tool = '888' + finding_new.unique_id_from_tool = "888" finding_new.save() # expect duplicate as dedupe_inside_engagement is false @@ -789,8 +789,8 @@ def test_dedupe_same_id_different_test_type_unique_id(self): # first setup some finding from a different test_type, but with the same unique_id_from_tool finding_22 = Finding.objects.get(id=22) - finding_22.unique_id_from_tool = '888' - finding_new.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" + finding_new.unique_id_from_tool = "888" # and we need to look in another engagement this time for finding_22 self.set_dedupe_inside_engagement(False) finding_22.save(dedupe_option=False) @@ -836,7 +836,7 @@ def test_identical_unique_id_or_hash_code_bug(self): def test_different_unique_id_unique_id_or_hash_code(self): # create identical copy finding_new, finding_224 = self.copy_and_reset_finding(id=224) - finding_new.unique_id_from_tool = '9999' + finding_new.unique_id_from_tool = "9999" finding_new.save() # expect duplicate, uid mismatch, but same hash_code @@ -844,8 +844,8 @@ def test_different_unique_id_unique_id_or_hash_code(self): # but if we change title and thus hash_code, it should no longer match finding_new, finding_224 = self.copy_and_reset_finding(id=224) - finding_new.unique_id_from_tool = '9999' - finding_new.title = 'no no no no no no' + finding_new.unique_id_from_tool = "9999" + finding_new.title = "no no no no no no" finding_new.save() # expect duplicate, uid mismatch, but same hash_code @@ -862,10 +862,10 @@ def test_identical_ordering_unique_id_or_hash_code(self): def test_title_description_line_filepath_different_unique_id_or_hash_code(self): # create identical copy, change some fields finding_new, finding_224 = self.copy_and_reset_finding(id=224) - finding_new.title = 'another title' - finding_new.unsaved_vulnerability_ids = ['CVE-2020-12345'] - finding_new.cwe = '456' - finding_new.description = 'useless finding' + finding_new.title = "another title" + finding_new.unsaved_vulnerability_ids = ["CVE-2020-12345"] + finding_new.cwe = "456" + finding_new.description = "useless finding" finding_new.save() # expect duplicate as we only match on unique id, hash_code also different @@ -874,11 +874,11 @@ def test_title_description_line_filepath_different_unique_id_or_hash_code(self): def test_title_description_line_filepath_different_and_id_different_unique_id_or_hash_code(self): # create identical copy, change some fields finding_new, finding_224 = self.copy_and_reset_finding(id=224) - finding_new.title = 'another title' - finding_new.unsaved_vulnerability_ids = ['CVE-2020-12345'] - finding_new.cwe = '456' - finding_new.description = 'useless finding' - finding_new.unique_id_from_tool = '9999' + finding_new.title = "another title" + finding_new.unsaved_vulnerability_ids = ["CVE-2020-12345"] + finding_new.cwe = "456" + finding_new.description = "useless finding" + finding_new.unique_id_from_tool = 
"9999" finding_new.save() # expect not duplicate as we match on unique id, hash_code also different because fields changed @@ -894,10 +894,10 @@ def test_dedupe_not_inside_engagement_same_hash_unique_id_or_hash_code(self): finding_22.test.test_type = finding_224.test.test_type finding_22.test.save() - finding_22.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" finding_22.save(dedupe_option=False) - finding_new.unique_id_from_tool = '888' + finding_new.unique_id_from_tool = "888" finding_new.save() # should become duplicate of finding 22 because of the uid match, but existing BUG makes it duplicate of 224 due to hashcode match @@ -913,11 +913,11 @@ def test_dedupe_not_inside_engagement_same_hash_unique_id_or_hash_code2(self): finding_22.test.test_type = finding_224.test.test_type finding_22.test.save() - finding_22.unique_id_from_tool = '333' + finding_22.unique_id_from_tool = "333" finding_22.save(dedupe_option=False) finding_new.hash_code = finding_22.hash_code # sneaky copy of hash_code to be able to test this case in combination with the bug in previous test case above - finding_new.unique_id_from_tool = '333' + finding_new.unique_id_from_tool = "333" finding_new.save() # expect not duplicate as dedupe_inside_engagement is True and 22 is in another engagement @@ -947,11 +947,11 @@ def test_dedupe_inside_engagement_unique_id_or_hash_code2(self): finding_22.test.scan_type = finding_224.test.scan_type finding_22.test.save() - finding_22.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" finding_22.save(dedupe_option=False) - finding_new.unique_id_from_tool = '888' - finding_new.title = 'hack to work around bug that matches on hash_code first' # arrange different hash_code + finding_new.unique_id_from_tool = "888" + finding_new.title = "hack to work around bug that matches on hash_code first" # arrange different hash_code finding_new.save() # expect duplicate as dedupe_inside_engagement is false @@ -963,12 +963,12 @@ def test_dedupe_same_id_different_test_type_unique_id_or_hash_code(self): # first setup some finding from a different test_type, but with the same unique_id_from_tool finding_22 = Finding.objects.get(id=22) - finding_22.unique_id_from_tool = '888' - finding_new.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" + finding_new.unique_id_from_tool = "888" # and we need to look in another engagement this time for finding_22 self.set_dedupe_inside_engagement(False) finding_22.save(dedupe_option=False) - finding_new.title = 'title to change hash_code' + finding_new.title = "title to change hash_code" finding_new.save() # expect not duplicate as the matching finding is from another test_type, hash_code is also different @@ -979,8 +979,8 @@ def test_dedupe_same_id_different_test_type_unique_id_or_hash_code(self): # first setup some finding from a different test_type, but with the same unique_id_from_tool finding_22 = Finding.objects.get(id=22) - finding_22.unique_id_from_tool = '888' - finding_new.unique_id_from_tool = '888' + finding_22.unique_id_from_tool = "888" + finding_new.unique_id_from_tool = "888" # and we need to look in another engagement this time for finding_22 self.set_dedupe_inside_engagement(False) finding_22.save(dedupe_option=False) @@ -1050,7 +1050,7 @@ def test_hash_code_onetime(self): self.assertTrue(finding_new.hash_code) # True -> not None hash_code_at_creation = finding_new.hash_code - finding_new.title = 'new_title' + finding_new.title = "new_title" finding_new.unsaved_vulnerability_ids = [999] # both title and cve 
affect hash_code for ZAP scans, but not here because hash_code was already calculated @@ -1076,7 +1076,7 @@ def test_duplicate_after_modification(self): # we copy a finding but change some important fields so it's no longer a duplicate # expect: not marked as duplicate with dedupe_option=False finding_new, finding_24 = self.copy_and_reset_finding(id=24) - finding_new.title = 'new_title' + finding_new.title = "new_title" finding_new.unsaved_vulnerability_ids = [999] finding_new.save(dedupe_option=True) self.assert_finding(finding_new, not_pk=24, duplicate=False, not_hash_code=None) @@ -1103,9 +1103,9 @@ def test_title_case(self): # ideally we will switch to case-insensitive hash_code computation. # this could be a relatively small impact change as saving findings (currently) doesn't recompute the hash_code finding_new, _finding_24 = self.copy_and_reset_finding(id=24) - finding_new.title = 'the quick brown fox jumps over the lazy dog' + finding_new.title = "the quick brown fox jumps over the lazy dog" finding_new.save(dedupe_option=True) - self.assertEqual(finding_new.title, 'The Quick Brown Fox Jumps Over the Lazy Dog') + self.assertEqual(finding_new.title, "The Quick Brown Fox Jumps Over the Lazy Dog") def test_hash_code_without_dedupe(self): # if dedupe is disabled, hash_code should still be calculated @@ -1132,7 +1132,7 @@ def log_product(self, product): if isinstance(product, int): product = Product.objects.get(pk=product) - logger.debug('product %i: %s', product.id, product.name) + logger.debug("product %i: %s", product.id, product.name) for eng in product.engagement_set.all(): self.log_engagement(eng) for test in eng.test_set.all(): @@ -1142,13 +1142,13 @@ def log_engagement(self, eng): if isinstance(eng, int): eng = Engagement.objects.get(pk=eng) - logger.debug('\t' + 'engagement %i: %s (dedupe_inside: %s)', eng.id, eng.name, eng.deduplication_on_engagement) + logger.debug("\t" + "engagement %i: %s (dedupe_inside: %s)", eng.id, eng.name, eng.deduplication_on_engagement) def log_test(self, test): if isinstance(test, int): test = Test.objects.get(pk=test) - logger.debug('\t\t' + 'test %i: %s (algo=%s, dynamic=%s)', test.id, test, test.deduplication_algorithm, test.test_type.dynamic_tool) + logger.debug("\t\t" + "test %i: %s (algo=%s, dynamic=%s)", test.id, test, test.deduplication_algorithm, test.test_type.dynamic_tool) self.log_findings(test.finding_set.all()) def log_all_products(self): @@ -1157,25 +1157,25 @@ def log_findings(self, findings): if not findings: - logger.debug('\t\t' + 'no findings') + logger.debug("\t\t" + "no findings") else: - logger.debug('\t\t' + 'findings:') + logger.debug("\t\t" + "findings:") for finding in findings: - logger.debug(f'\t\t\t{str(finding.id):4.4}' + ': "' + f'{finding.title:20.20}' + '": ' + f'{finding.severity:5.5}' + ': act: ' + f'{str(finding.active):5.5}' - + ': ver: ' + f'{str(finding.verified):5.5}' + ': mit: ' + f'{str(finding.is_mitigated):5.5}' - + ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: ' - + (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) - + ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) - + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''), + logger.debug(f"\t\t\t{str(finding.id):4.4}" + ': "' + f"{finding.title:20.20}" + '": ' + f"{finding.severity:5.5}" + ": act: " + f"{str(finding.active):5.5}" + + ": ver: " + 
f"{str(finding.verified):5.5}" + ": mit: " + f"{str(finding.is_mitigated):5.5}" + + ": dup: " + f"{str(finding.duplicate):5.5}" + ": dup_id: " + + (f"{str(finding.duplicate_finding.id):4.4}" if finding.duplicate_finding else "None") + ": hash_code: " + str(finding.hash_code) + + ": eps: " + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) + + ": uid: " + f"{str(finding.unique_id_from_tool):5.5}" + (" fp" if finding.false_p else ""), ) - logger.debug('\t\tendpoints') + logger.debug("\t\tendpoints") for ep in Endpoint.objects.all(): - logger.debug('\t\t\t' + str(ep.id) + ': ' + str(ep)) + logger.debug("\t\t\t" + str(ep.id) + ": " + str(ep)) - logger.debug('\t\t' + 'endpoint statuses') + logger.debug("\t\t" + "endpoint statuses") for eps in Endpoint_Status.objects.all(): - logger.debug('\t\t\t' + str(eps.id) + ': ' + str(eps)) + logger.debug("\t\t\t" + str(eps.id) + ": " + str(eps)) def log_summary(self, product=None, engagement=None, test=None): if product: @@ -1254,7 +1254,7 @@ def assert_finding(self, finding, not_pk=None, duplicate=False, duplicate_findin self.assertFalse(finding.duplicate_finding) # False -> None if duplicate_finding_id: - logger.debug('asserting that finding %i is a duplicate of %i', finding.id if finding.id is not None else 'None', duplicate_finding_id if duplicate_finding_id is not None else 'None') + logger.debug("asserting that finding %i is a duplicate of %i", finding.id if finding.id is not None else "None", duplicate_finding_id if duplicate_finding_id is not None else "None") self.assertTrue(finding.duplicate_finding) # True -> not None self.assertEqual(finding.duplicate_finding.id, duplicate_finding_id) @@ -1263,7 +1263,7 @@ def assert_finding(self, finding, not_pk=None, duplicate=False, duplicate_findin def set_dedupe_inside_engagement(self, deduplication_on_engagement): for eng in Engagement.objects.all(): - logger.debug('setting deduplication_on_engagement to %s for %i', str(deduplication_on_engagement), eng.id) + logger.debug("setting deduplication_on_engagement to %s for %i", str(deduplication_on_engagement), eng.id) eng.deduplication_on_engagement = deduplication_on_engagement eng.save() diff --git a/unittests/test_duplication_loops.py b/unittests/test_duplication_loops.py index e7a4d843bc..6d97524ff4 100644 --- a/unittests/test_duplication_loops.py +++ b/unittests/test_duplication_loops.py @@ -13,10 +13,10 @@ class TestDuplicationLoops(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def run(self, result=None): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") testuser.usercontactinfo.block_execution = True testuser.save() @@ -29,21 +29,21 @@ def run(self, result=None): def setUp(self): self.finding_a = Finding.objects.get(id=2) self.finding_a.pk = None - self.finding_a.title = 'A: ' + self.finding_a.title + self.finding_a.title = "A: " + self.finding_a.title self.finding_a.duplicate = False self.finding_a.duplicate_finding = None self.finding_a.hash_code = None self.finding_a.save() self.finding_b = Finding.objects.get(id=3) self.finding_b.pk = None - self.finding_b.title = 'B: ' + self.finding_b.title + self.finding_b.title = "B: " + self.finding_b.title self.finding_b.duplicate = False self.finding_b.duplicate_finding = None self.finding_b.hash_code = None self.finding_b.save() self.finding_c = Finding.objects.get(id=4) self.finding_c.pk = None - self.finding_c.title = 'C: ' + self.finding_c.title + self.finding_c.title = "C: " + 
self.finding_c.title self.finding_c.duplicate = False self.finding_c.duplicate_finding = None self.finding_c.hash_code = None @@ -121,9 +121,9 @@ def test_set_duplicate_exception_delete_a_duplicate(self): def test_set_duplicate_exception_delete_original_cascade(self): set_duplicate(self.finding_a, self.finding_b) self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id) - logger.debug('going to delete finding B') + logger.debug("going to delete finding B") self.finding_b.delete() - logger.debug('deleted finding B') + logger.debug("deleted finding B") with self.assertRaises(Finding.DoesNotExist): self.finding_a = Finding.objects.get(id=self.finding_a.id) self.assertEqual(self.finding_b.id, None) @@ -134,11 +134,11 @@ def test_set_duplicate_exception_delete_original_duplicates_adapt(self): set_duplicate(self.finding_a, self.finding_b) set_duplicate(self.finding_c, self.finding_b) self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id) - logger.debug('going to delete finding B') + logger.debug("going to delete finding B") b_active = self.finding_b.active b_id = self.finding_b.id self.finding_b.delete() - logger.debug('deleted finding B') + logger.debug("deleted finding B") self.finding_a.refresh_from_db() self.finding_c.refresh_from_db() self.assertEqual(self.finding_a.original_finding.first(), self.finding_c) @@ -159,11 +159,11 @@ def test_set_duplicate_exception_delete_original_duplicates_adapt(self): def test_set_duplicate_exception_delete_original_1_duplicate_adapt(self): set_duplicate(self.finding_a, self.finding_b) self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id) - logger.debug('going to delete finding B') + logger.debug("going to delete finding B") b_active = self.finding_b.active b_id = self.finding_b.id self.finding_b.delete() - logger.debug('deleted finding B') + logger.debug("deleted finding B") self.finding_a.refresh_from_db() self.assertEqual(self.finding_a.original_finding.first(), None) self.assertEqual(self.finding_a.duplicate_finding, None) @@ -380,10 +380,10 @@ def test_list_relations_for_three_reverse(self): def test_delete_all_engagements(self): # make sure there is no exception when deleting all engagements - for engagement in Engagement.objects.all().order_by('id'): + for engagement in Engagement.objects.all().order_by("id"): engagement.delete() def test_delete_all_products(self): # make sure there is no exception when deleting all products - for product in Product.objects.all().order_by('id'): + for product in Product.objects.all().order_by("id"): product.delete() diff --git a/unittests/test_endpoint_meta_import.py b/unittests/test_endpoint_meta_import.py index a639999f16..6efa1d8a7a 100644 --- a/unittests/test_endpoint_meta_import.py +++ b/unittests/test_endpoint_meta_import.py @@ -16,12 +16,12 @@ # test methods to be used both by API Test and UI Test class EndpointMetaImportMixin: def __init__(self, *args, **kwargs): - self.meta_import_full = 'endpoint_meta_import/full_endpoint_meta_import.csv' - self.meta_import_no_hostname = 'endpoint_meta_import/no_hostname_endpoint_meta_import.csv' - self.meta_import_updated_added = 'endpoint_meta_import/updated_added_endpoint_meta_import.csv' - self.meta_import_updated_removed = 'endpoint_meta_import/updated_removed_endpoint_meta_import.csv' - self.meta_import_updated_changed = 'endpoint_meta_import/updated_changed_endpoint_meta_import.csv' - self.updated_tag_host = 'feedback.internal.google.com' + self.meta_import_full = 
"endpoint_meta_import/full_endpoint_meta_import.csv" + self.meta_import_no_hostname = "endpoint_meta_import/no_hostname_endpoint_meta_import.csv" + self.meta_import_updated_added = "endpoint_meta_import/updated_added_endpoint_meta_import.csv" + self.meta_import_updated_removed = "endpoint_meta_import/updated_removed_endpoint_meta_import.csv" + self.meta_import_updated_changed = "endpoint_meta_import/updated_changed_endpoint_meta_import.csv" + self.updated_tag_host = "feedback.internal.google.com" def test_endpoint_meta_import_endpoint_create_tag_create_meta_create(self): endpoint_count_before = self.db_endpoint_count() @@ -83,8 +83,8 @@ def test_endpoint_meta_import_tag_changed_column(self): endpoint_count_before = self.db_endpoint_count() endpoint_tag_count_before = self.db_endpoint_tag_count() # Grab the endpoint that is known to change - endpoint = self.get_product_endpoints_api(1, host=self.updated_tag_host)['results'][0] - human_resource_tag = endpoint['tags'][endpoint['tags'].index('team:human resources')] + endpoint = self.get_product_endpoints_api(1, host=self.updated_tag_host)["results"][0] + human_resource_tag = endpoint["tags"][endpoint["tags"].index("team:human resources")] # Import again with one column missing with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0): self.endpoint_meta_import_scan_with_params( @@ -93,8 +93,8 @@ def test_endpoint_meta_import_tag_changed_column(self): self.assertEqual(endpoint_count_before, self.db_endpoint_count()) self.assertEqual(endpoint_tag_count_before, self.db_endpoint_tag_count()) # Grab the updated endpoint - endpoint = self.get_product_endpoints_api(1, host=self.updated_tag_host)['results'][0] - human_resource_tag_updated = endpoint['tags'][endpoint['tags'].index('team:hr')] + endpoint = self.get_product_endpoints_api(1, host=self.updated_tag_host)["results"][0] + human_resource_tag_updated = endpoint["tags"][endpoint["tags"].index("team:hr")] # Make sure the tags are not the same self.assertNotEqual(human_resource_tag, human_resource_tag_updated) @@ -139,8 +139,8 @@ def test_endpoint_meta_import_meta_changed_column(self): endpoint_count_before = self.db_endpoint_count() meta_count_before = self.db_dojo_meta_count() # Grab the endpoint that is known to change - endpoint_id = self.get_product_endpoints_api(1, host=self.updated_tag_host)['results'][0]['id'] - meta_value = self.get_endpoints_meta_api(endpoint_id, 'team')['results'][0]['value'] + endpoint_id = self.get_product_endpoints_api(1, host=self.updated_tag_host)["results"][0]["id"] + meta_value = self.get_endpoints_meta_api(endpoint_id, "team")["results"][0]["value"] # Import again with one column missing with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0): self.endpoint_meta_import_scan_with_params( @@ -149,14 +149,14 @@ def test_endpoint_meta_import_meta_changed_column(self): self.assertEqual(endpoint_count_before, self.db_endpoint_count()) self.assertEqual(meta_count_before, self.db_dojo_meta_count()) # Grab the updated endpoint - endpoint_id = self.get_product_endpoints_api(1, host=self.updated_tag_host)['results'][0]['id'] - meta_value_updated = self.get_endpoints_meta_api(endpoint_id, 'team')['results'][0]['value'] + endpoint_id = self.get_product_endpoints_api(1, host=self.updated_tag_host)["results"][0]["id"] + meta_value_updated = self.get_endpoints_meta_api(endpoint_id, "team")["results"][0]["value"] # Make sure the meta values are not the same self.assertNotEqual(meta_value, meta_value_updated) class 
EndpointMetaImportTestAPI(DojoAPITestCase, EndpointMetaImportMixin): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): # TODO remove __init__ if it does nothing... @@ -166,15 +166,15 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setUp(self): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) # self.url = reverse(self.viewname + '-list') class EndpointMetaImportTestUI(DojoAPITestCase, EndpointMetaImportMixin): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] client_ui = Client() def __init__(self, *args, **kwargs): @@ -186,10 +186,10 @@ def __init__(self, *args, **kwargs): def setUp(self): # still using the API to verify results - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) # self.url = reverse(self.viewname + '-list') self.client_ui = Client() @@ -200,13 +200,13 @@ def endpoint_meta_import_scan_with_params(self, *args, **kwargs): return self.endpoint_meta_import_scan_with_params_ui(*args, **kwargs) def endpoint_meta_import_ui(self, product, payload): - logger.debug('import_scan payload %s', payload) - response = self.client_ui.post(reverse('import_endpoint_meta', args=(product, )), payload) + logger.debug("import_scan payload %s", payload) + response = self.client_ui.post(reverse("import_endpoint_meta", args=(product, )), payload) self.assertEqual(302, response.status_code, response.content[:1000]) def endpoint_meta_import_scan_with_params_ui(self, filename, product=1, create_endpoints=True, create_tags=True, create_dojo_meta=True, expected_http_status_code=201): - with open(get_unit_tests_path() + '/' + filename) as testfile: + with open(get_unit_tests_path() + "/" + filename) as testfile: payload = { "create_endpoints": create_endpoints, "create_tags": create_tags, diff --git a/unittests/test_endpoint_model.py b/unittests/test_endpoint_model.py index 69694680df..0a219b48f8 100644 --- a/unittests/test_endpoint_model.py +++ b/unittests/test_endpoint_model.py @@ -25,29 +25,29 @@ def test_empty(self): self.assertIsNone(endpoint.product) def test_url_full(self): - endpoint = Endpoint.from_uri('http://alice@foo.bar:8080/path1/path2?key1=value&no_value_key#fragment1') - self.assertEqual(endpoint.protocol, 'http') - self.assertEqual(endpoint.userinfo, 'alice') - self.assertEqual(endpoint.host, 'foo.bar') + endpoint = Endpoint.from_uri("http://alice@foo.bar:8080/path1/path2?key1=value&no_value_key#fragment1") + self.assertEqual(endpoint.protocol, "http") + self.assertEqual(endpoint.userinfo, "alice") + self.assertEqual(endpoint.host, "foo.bar") self.assertEqual(endpoint.port, 8080) - self.assertEqual(endpoint.path, 'path1/path2') # path begins with '/' but Endpoint store "root-less" path - self.assertEqual(endpoint.query, 'key1=value&no_value_key') - self.assertEqual(endpoint.fragment, 'fragment1') + self.assertEqual(endpoint.path, "path1/path2") # path begins with '/' but Endpoint store "root-less" path + self.assertEqual(endpoint.query, "key1=value&no_value_key") + 
self.assertEqual(endpoint.fragment, "fragment1") def test_truncates_large_attributes(self): path = "foo" * 1000 query = "bar" * 1000 fragment = "baz" * 1000 - endpoint = Endpoint.from_uri(f'http://alice@foo.bar:8080/{path}?{query}#{fragment}') + endpoint = Endpoint.from_uri(f"http://alice@foo.bar:8080/{path}?{query}#{fragment}") self.assertEqual(len(endpoint.path), 500) self.assertEqual(len(endpoint.query), 1000) self.assertEqual(len(endpoint.fragment), 500) def test_noscheme(self): - endpoint = Endpoint.from_uri('//' + 'localhost:22') + endpoint = Endpoint.from_uri("//" + "localhost:22") self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.userinfo) - self.assertEqual(endpoint.host, 'localhost') + self.assertEqual(endpoint.host, "localhost") self.assertEqual(endpoint.port, 22) self.assertIsNone(endpoint.path) self.assertIsNone(endpoint.query) @@ -55,103 +55,103 @@ def test_noscheme(self): self.assertIsNone(endpoint.product) def test_paths(self): - endpoint = Endpoint.from_uri('https://foo.bar') + endpoint = Endpoint.from_uri("https://foo.bar") self.assertIsNone(endpoint.path) - endpoint = Endpoint.from_uri('https://foo.bar/') + endpoint = Endpoint.from_uri("https://foo.bar/") self.assertIsNone(endpoint.path) def test_ip(self): - endpoint = Endpoint.from_uri('http://127.0.0.1/') - self.assertEqual(endpoint.host, '127.0.0.1') - endpoint = Endpoint(host='127.0.0.1') - self.assertEqual(endpoint.host, '127.0.0.1') + endpoint = Endpoint.from_uri("http://127.0.0.1/") + self.assertEqual(endpoint.host, "127.0.0.1") + endpoint = Endpoint(host="127.0.0.1") + self.assertEqual(endpoint.host, "127.0.0.1") def test_less_standard_hosts(self): - endpoint = Endpoint.from_uri('http://123_server/') + endpoint = Endpoint.from_uri("http://123_server/") endpoint.clean() - endpoint = Endpoint(host='456_desktop') + endpoint = Endpoint(host="456_desktop") endpoint.clean() - endpoint = Endpoint(host='_invalid._host.com') + endpoint = Endpoint(host="_invalid._host.com") endpoint.clean() def test_invalid(self): - self.assertRaises(ValidationError, Endpoint.from_uri, 'http://127.0.0.1:portNo/') - endpoint = Endpoint.from_uri('http://127.0.0.1:-1/') + self.assertRaises(ValidationError, Endpoint.from_uri, "http://127.0.0.1:portNo/") + endpoint = Endpoint.from_uri("http://127.0.0.1:-1/") self.assertRaises(ValidationError, endpoint.clean) - endpoint = Endpoint.from_uri('http://127.0.0.1:66666/') + endpoint = Endpoint.from_uri("http://127.0.0.1:66666/") self.assertRaises(ValidationError, endpoint.clean) - endpoint = Endpoint(host='127.0.0.1', port=-1) + endpoint = Endpoint(host="127.0.0.1", port=-1) self.assertRaises(ValidationError, endpoint.clean) - endpoint = Endpoint(host='127.0.0.1', port=66666) + endpoint = Endpoint(host="127.0.0.1", port=66666) self.assertRaises(ValidationError, endpoint.clean) def test_ports(self): # known port - endpoint = Endpoint.from_uri('http://foo.bar/') + endpoint = Endpoint.from_uri("http://foo.bar/") self.assertEqual(endpoint.port, 80) # unknown port - endpoint = Endpoint.from_uri('this-scheme-is-unknown://foo.bar/') + endpoint = Endpoint.from_uri("this-scheme-is-unknown://foo.bar/") self.assertIsNone(endpoint.port) def test_spacial_char(self): - endpoint = Endpoint.from_uri('http://foo.bar/beforeSpace%20afterSpace') - self.assertEqual(endpoint.path, 'beforeSpace afterSpace') - self.assertEqual(str(endpoint), 'http://foo.bar/beforeSpace%20afterSpace') - endpoint = Endpoint.from_uri('//' + 'foo.bar/beforeSpace%20afterSpace') - self.assertEqual(endpoint.path, 'beforeSpace 
afterSpace') - self.assertEqual(str(endpoint), 'foo.bar/beforeSpace%20afterSpace') + endpoint = Endpoint.from_uri("http://foo.bar/beforeSpace%20afterSpace") + self.assertEqual(endpoint.path, "beforeSpace afterSpace") + self.assertEqual(str(endpoint), "http://foo.bar/beforeSpace%20afterSpace") + endpoint = Endpoint.from_uri("//" + "foo.bar/beforeSpace%20afterSpace") + self.assertEqual(endpoint.path, "beforeSpace afterSpace") + self.assertEqual(str(endpoint), "foo.bar/beforeSpace%20afterSpace") def test_url_normalize(self): - endpoint1 = Endpoint.from_uri('HTTP://FOO.BAR/') - endpoint2 = Endpoint.from_uri('HtTp://foo.BAR/') - self.assertEqual(endpoint1.protocol, 'HTTP') - self.assertEqual(endpoint1.host, 'foo.bar') - self.assertEqual(str(endpoint1), 'http://foo.bar') + endpoint1 = Endpoint.from_uri("HTTP://FOO.BAR/") + endpoint2 = Endpoint.from_uri("HtTp://foo.BAR/") + self.assertEqual(endpoint1.protocol, "HTTP") + self.assertEqual(endpoint1.host, "foo.bar") + self.assertEqual(str(endpoint1), "http://foo.bar") self.assertEqual(endpoint1, endpoint2) def test_get_or_create(self): _endpoint1, created1 = endpoint_get_or_create( - protocol='http', - host='bar.foo', + protocol="http", + host="bar.foo", ) self.assertTrue(created1) _endpoint2, created2 = endpoint_get_or_create( - protocol='http', - host='bar.foo', + protocol="http", + host="bar.foo", ) self.assertFalse(created2) _endpoint3, created3 = endpoint_get_or_create( - protocol='http', - host='bar.foo', + protocol="http", + host="bar.foo", port=80, ) self.assertFalse(created3) _endpoint4, created4 = endpoint_get_or_create( - protocol='http', - host='bar.foo', + protocol="http", + host="bar.foo", port=8080, ) self.assertTrue(created4) _endpoint5, created5 = endpoint_get_or_create( - protocol='https', - host='bar.foo', + protocol="https", + host="bar.foo", port=443, ) self.assertTrue(created5) _endpoint6, created6 = endpoint_get_or_create( - protocol='https', - host='bar.foo', + protocol="https", + host="bar.foo", ) self.assertFalse(created6) _endpoint7, created7 = endpoint_get_or_create( - protocol='https', - host='bar.foo', + protocol="https", + host="bar.foo", port=8443, ) self.assertTrue(created7) @@ -224,25 +224,25 @@ def test_endpoint_status_broken(self): from django.contrib.auth import get_user_model user = get_user_model().objects.create().pk self.finding = Finding.objects.create(test=self.test, reporter_id=user).pk - self.endpoint = Endpoint.objects.create(protocol='http', host='foo.bar.eps').pk + self.endpoint = Endpoint.objects.create(protocol="http", host="foo.bar.eps").pk self.another_finding = Finding.objects.create(test=self.test, reporter_id=user).pk - self.another_endpoint = Endpoint.objects.create(protocol='http', host='bar.foo.eps').pk + self.another_endpoint = Endpoint.objects.create(protocol="http", host="bar.foo.eps").pk self.endpoint_status = { - 'standard': Endpoint_Status.objects.create( + "standard": Endpoint_Status.objects.create( date=datetime.datetime(2021, 3, 1, tzinfo=timezone.utc), last_modified=datetime.datetime(2021, 4, 1, tzinfo=timezone.utc), mitigated=False, finding_id=self.finding, endpoint_id=self.endpoint, ).pk, - 'removed_endpoint': Endpoint_Status.objects.create( + "removed_endpoint": Endpoint_Status.objects.create( date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc), last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc), mitigated=True, finding_id=self.another_finding, endpoint_id=None, ).pk, - 'removed_finding': Endpoint_Status.objects.create( + "removed_finding": 
Endpoint_Status.objects.create( date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc), last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc), mitigated=True, @@ -252,42 +252,42 @@ def test_endpoint_status_broken(self): } Finding.objects.get(id=self.finding).endpoint_status.add( - Endpoint_Status.objects.get(id=self.endpoint_status['standard']), + Endpoint_Status.objects.get(id=self.endpoint_status["standard"]), ) Finding.objects.get(id=self.another_finding).endpoint_status.add( - Endpoint_Status.objects.get(id=self.endpoint_status['removed_endpoint']), + Endpoint_Status.objects.get(id=self.endpoint_status["removed_endpoint"]), ) Endpoint.objects.get(id=self.endpoint).endpoint_status.add( - Endpoint_Status.objects.get(id=self.endpoint_status['standard']), + Endpoint_Status.objects.get(id=self.endpoint_status["standard"]), ) Endpoint.objects.get(id=self.another_endpoint).endpoint_status.add( - Endpoint_Status.objects.get(id=self.endpoint_status['removed_finding']), + Endpoint_Status.objects.get(id=self.endpoint_status["removed_finding"]), ) remove_broken_endpoint_statuses(apps) - with self.subTest('Stadnard eps for finding'): + with self.subTest("Standard eps for finding"): f = Finding.objects.filter(id=self.finding) self.assertEqual(f.count(), 1) f = f.first() self.assertEqual(f.endpoint_status.count(), 1) - self.assertEqual(f.endpoint_status.first().pk, self.endpoint_status['standard']) + self.assertEqual(f.endpoint_status.first().pk, self.endpoint_status["standard"]) - with self.subTest('Broken eps for finding'): + with self.subTest("Broken eps for finding"): f = Finding.objects.filter(id=self.another_finding) self.assertEqual(f.count(), 1) f = f.first() self.assertEqual(f.endpoint_status.count(), 0) - with self.subTest('Stadnard eps for endpoint'): + with self.subTest("Standard eps for endpoint"): e = Endpoint.objects.filter(id=self.endpoint) self.assertEqual(e.count(), 1) e = e.first() self.assertEqual(e.endpoint_status.count(), 1) - self.assertEqual(e.endpoint_status.first().pk, self.endpoint_status['standard']) + self.assertEqual(e.endpoint_status.first().pk, self.endpoint_status["standard"]) - with self.subTest('Broken eps for endpoint'): + with self.subTest("Broken eps for endpoint"): e = Endpoint.objects.filter(id=self.another_endpoint) self.assertEqual(e.count(), 1) e = e.first() @@ -295,7 +295,7 @@ def test_endpoint_status_broken(self): class TestEndpointStatusModel(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def test_str(self): eps = Endpoint_Status.objects.get(id=1) @@ -321,19 +321,19 @@ def test_status_evaluation(self): ep4 = Endpoint.objects.get(id=7) ep5 = Endpoint.objects.get(id=8) - with self.subTest('Endpoint without statuses'): + with self.subTest("Endpoint without statuses"): self.assertEqual(ep1.findings_count, 0, ep1.findings.all()) self.assertEqual(ep1.active_findings_count, 0, ep1.active_findings) self.assertFalse(ep1.vulnerable, ep1.active_findings_count) self.assertTrue(ep1.mitigated, ep1.active_findings_count) - with self.subTest('Endpoint with vulnerabilities but all of them are mitigated because of different reasons'): + with self.subTest("Endpoint with vulnerabilities but all of them are mitigated because of different reasons"): self.assertEqual(ep2.findings_count, 4, ep2.findings.all()) self.assertEqual(ep2.active_findings_count, 1, ep2.active_findings) self.assertFalse(ep2.vulnerable, ep2.active_findings_count) self.assertTrue(ep2.mitigated, ep2.active_findings_count) - with self.subTest('Host without
vulnerabilities'): + with self.subTest("Host without vulnerabilities"): self.assertEqual(ep1.host_endpoints_count, 2, ep1.host_endpoints) self.assertEqual(ep2.host_endpoints_count, 2, ep2.host_endpoints) self.assertEqual(ep1.host_findings_count, 4, ep1.host_findings) @@ -343,25 +343,25 @@ def test_status_evaluation(self): self.assertEqual(ep1.host_mitigated_endpoints_count, 1, ep1.host_mitigated_endpoints) self.assertEqual(ep2.host_mitigated_endpoints_count, 1, ep2.host_mitigated_endpoints) - with self.subTest('Endpoint with one vulnerabilitiy but EPS is mitigated'): + with self.subTest("Endpoint with one vulnerability but EPS is mitigated"): self.assertEqual(ep3.findings_count, 1, ep3.findings.all()) self.assertEqual(ep3.active_findings_count, 1, ep3.active_findings) self.assertFalse(ep3.vulnerable, ep3.active_findings_count) self.assertTrue(ep3.mitigated, ep3.active_findings_count) - with self.subTest('Endpoint with one vulnerability'): + with self.subTest("Endpoint with one vulnerability"): self.assertEqual(ep4.findings_count, 1, ep4.findings.all()) self.assertEqual(ep4.active_findings_count, 1, ep4.active_findings) self.assertTrue(ep4.vulnerable, ep4.active_findings_count) self.assertFalse(ep4.mitigated, ep4.active_findings_count) - with self.subTest('Endpoint with one vulnerability but finding is mitigated'): + with self.subTest("Endpoint with one vulnerability but finding is mitigated"): self.assertEqual(ep5.findings_count, 1, ep5.findings.all()) self.assertEqual(ep5.active_findings_count, 0, ep5.active_findings) self.assertTrue(ep5.vulnerable, ep5.active_findings_count) self.assertFalse(ep5.mitigated, ep5.active_findings_count) - with self.subTest('Host with vulnerabilities'): + with self.subTest("Host with vulnerabilities"): self.assertEqual(ep3.host_endpoints_count, 3, ep3.host_endpoints) self.assertEqual(ep4.host_endpoints_count, 3, ep4.host_endpoints) self.assertEqual(ep5.host_endpoints_count, 3, ep5.host_endpoints) diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py index ac949bf5f7..c4d939fbc4 100644 --- a/unittests/test_false_positive_history_logic.py +++ b/unittests/test_false_positive_history_logic.py @@ -111,10 +111,10 @@ class TestFalsePositiveHistoryLogic(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def run(self, result=None): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") testuser.usercontactinfo.block_execution = True testuser.save() @@ -125,9 +125,9 @@ def run(self, result=None): super().run(result) def setUp(self): - logger.debug('disabling dedupe') + logger.debug("disabling dedupe") self.disable_dedupe() - logger.debug('enabling false positive history') + logger.debug("enabling false positive history") self.enable_false_positive_history() self.enable_retroactive_false_positive_history() self.log_summary() @@ -582,7 +582,7 @@ def test_fp_history_different_unique_id_same_engagement_different_test(self): find_124.save() # Copy finding 124 and store it at Product 2, Engagement 5, Test 66 find_created_after_mark, find_124 = self.copy_and_reset_finding(id=124) - find_created_after_mark.unique_id_from_tool = 'somefakeid123' + find_created_after_mark.unique_id_from_tool = "somefakeid123" find_created_after_mark.test = Test.objects.get(id=66) find_created_after_mark.save() # Assert that both findings belong to the same engagement but in a different test and are NOT marked as fp @@ -726,7 +726,7 @@ def
test_fp_history_different_unique_id_different_product(self): test_new, _eng_new, _product_new = self.create_new_test_and_engagment_and_product_from_finding(find_124) # Copy finding 124 and store it at Product 2, New Engagement, New Test (to test retroactive replication) find_created_before_mark, find_124 = self.copy_and_reset_finding(id=124) - find_created_before_mark.unique_id_from_tool = 'somefakeid123' + find_created_before_mark.unique_id_from_tool = "somefakeid123" find_created_before_mark.test = test_new find_created_before_mark.save() # Makes sure that the copy is not a false positive @@ -736,7 +736,7 @@ def test_fp_history_different_unique_id_different_product(self): find_124.save() # Copy finding 124 and store it at Product 2, New Engagement, New Test find_created_after_mark, find_124 = self.copy_and_reset_finding(id=124) - find_created_after_mark.unique_id_from_tool = 'somefakeid123' + find_created_after_mark.unique_id_from_tool = "somefakeid123" find_created_after_mark.test = test_new find_created_after_mark.save() # Assert that both findings belong to the same product but in a different engagement and are NOT marked as fp @@ -1649,7 +1649,7 @@ def log_product(self, product): if isinstance(product, int): product = Product.objects.get(pk=product) - logger.debug('product %i: %s', product.id, product.name) + logger.debug("product %i: %s", product.id, product.name) for eng in product.engagement_set.all(): self.log_engagement(eng) for test in eng.test_set.all(): @@ -1659,13 +1659,13 @@ def log_engagement(self, eng): if isinstance(eng, int): eng = Engagement.objects.get(pk=eng) - logger.debug('\t' + 'engagement %i: %s (dedupe_inside: %s)', eng.id, eng.name, eng.deduplication_on_engagement) + logger.debug("\t" + "engagement %i: %s (dedupe_inside: %s)", eng.id, eng.name, eng.deduplication_on_engagement) def log_test(self, test): if isinstance(test, int): test = Test.objects.get(pk=test) - logger.debug('\t\t' + 'test %i: %s (algo=%s, dynamic=%s)', test.id, test, test.deduplication_algorithm, test.test_type.dynamic_tool) + logger.debug("\t\t" + "test %i: %s (algo=%s, dynamic=%s)", test.id, test, test.deduplication_algorithm, test.test_type.dynamic_tool) self.log_findings(test.finding_set.all()) def log_all_products(self): @@ -1674,25 +1674,25 @@ def log_findings(self, findings): if not findings: - logger.debug('\t\t' + 'no findings') + logger.debug("\t\t" + "no findings") else: - logger.debug('\t\t' + 'findings:') + logger.debug("\t\t" + "findings:") for finding in findings: - logger.debug(f'\t\t\t{str(finding.id):4.4}' + ': "' + f'{finding.title:20.20}' + '": ' + f'{finding.severity:5.5}' + ': act: ' + f'{str(finding.active):5.5}' - + ': ver: ' + f'{str(finding.verified):5.5}' + ': mit: ' + f'{str(finding.is_mitigated):5.5}' - + ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: ' - + (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) - + ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) - + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''), + logger.debug(f"\t\t\t{str(finding.id):4.4}" + ': "' + f"{finding.title:20.20}" + '": ' + f"{finding.severity:5.5}" + ": act: " + f"{str(finding.active):5.5}" + + ": ver: " + f"{str(finding.verified):5.5}" + ": mit: " + f"{str(finding.is_mitigated):5.5}" + + ": dup: " + f"{str(finding.duplicate):5.5}" + ": dup_id: " + +
(f"{str(finding.duplicate_finding.id):4.4}" if finding.duplicate_finding else "None") + ": hash_code: " + str(finding.hash_code) + + ": eps: " + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) + + ": uid: " + f"{str(finding.unique_id_from_tool):5.5}" + (" fp" if finding.false_p else ""), ) - logger.debug('\t\tendpoints') + logger.debug("\t\tendpoints") for ep in Endpoint.objects.all(): - logger.debug('\t\t\t' + str(ep.id) + ': ' + str(ep)) + logger.debug("\t\t\t" + str(ep.id) + ": " + str(ep)) - logger.debug('\t\t' + 'endpoint statuses') + logger.debug("\t\t" + "endpoint statuses") for eps in Endpoint_Status.objects.all(): - logger.debug('\t\t\t' + str(eps.id) + ': ' + str(eps)) + logger.debug("\t\t\t" + str(eps.id) + ": " + str(eps)) def log_summary(self, product=None, engagement=None, test=None): if product: @@ -1737,7 +1737,7 @@ def copy_and_reset_product(self, id): org = Product.objects.get(id=id) new = org new.pk = None - new.name = f'{org.name} (Copy {datetime.now()})' + new.name = f"{org.name} (Copy {datetime.now()})" # return unsaved new product and reloaded existing product return new, Product.objects.get(id=id) @@ -1749,12 +1749,12 @@ def change_finding_unique_id(self, finding): return finding def change_finding_title(self, finding): - finding.title = f'{finding.title} (Copy {datetime.now()})' + finding.title = f"{finding.title} (Copy {datetime.now()})" return finding def change_finding_severity(self, finding): # Get list of severities without the current finding severity - severities = [sev for sev in ['Info', 'Low', 'Medium', 'High', 'Critical'] if sev != finding.severity] + severities = [sev for sev in ["Info", "Low", "Medium", "High", "Critical"] if sev != finding.severity] # Return the finding with the highest severity from list finding.severity = severities[-1] return finding @@ -1819,7 +1819,7 @@ def assert_finding(self, finding, false_p, duplicate=None, not_pk=None, def set_dedupe_inside_engagement(self, deduplication_on_engagement): for eng in Engagement.objects.all(): - logger.debug('setting deduplication_on_engagment to %s for %i', str(deduplication_on_engagement), eng.id) + logger.debug("setting deduplication_on_engagment to %s for %i", str(deduplication_on_engagement), eng.id) eng.deduplication_on_engagement = deduplication_on_engagement eng.save() diff --git a/unittests/test_finding_helper.py b/unittests/test_finding_helper.py index 1ef97136b5..9ae8420f7f 100644 --- a/unittests/test_finding_helper.py +++ b/unittests/test_finding_helper.py @@ -20,17 +20,17 @@ class TestUpdateFindingStatusSignal(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - self.user_1 = User.objects.get(id='1') - self.user_2 = User.objects.get(id='2') + self.user_1 = User.objects.get(id="1") + self.user_2 = User.objects.get(id="2") def get_status_fields(self, finding): - logger.debug('%s, %s, %s, %s, %s, %s, %s, %s', finding.active, finding.verified, finding.false_p, finding.out_of_scope, finding.is_mitigated, finding.mitigated, finding.mitigated_by, finding.last_status_update) + logger.debug("%s, %s, %s, %s, %s, %s, %s, %s", finding.active, finding.verified, finding.false_p, finding.out_of_scope, finding.is_mitigated, finding.mitigated, finding.mitigated_by, finding.last_status_update) return finding.active, finding.verified, finding.false_p, finding.out_of_scope, finding.is_mitigated, finding.mitigated, finding.mitigated_by, finding.last_status_update - @mock.patch('dojo.finding.helper.timezone.now') 
+ @mock.patch("dojo.finding.helper.timezone.now") def test_new_finding(self, mock_tz): mock_tz.return_value = frozen_datetime with impersonate(self.user_1): @@ -43,7 +43,7 @@ def test_new_finding(self, mock_tz): (True, False, False, False, False, None, None, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') + @mock.patch("dojo.finding.helper.timezone.now") def test_no_status_change(self, mock_tz): mock_tz.return_value = frozen_datetime with impersonate(self.user_1): @@ -53,7 +53,7 @@ def test_no_status_change(self, mock_tz): status_fields = self.get_status_fields(finding) - finding.title = finding.title + '!!!' + finding.title = finding.title + "!!!" finding.save() self.assertEqual( @@ -61,7 +61,7 @@ def test_no_status_change(self, mock_tz): status_fields, ) - @mock.patch('dojo.finding.helper.timezone.now') + @mock.patch("dojo.finding.helper.timezone.now") def test_mark_fresh_as_mitigated(self, mock_dt): mock_dt.return_value = frozen_datetime with impersonate(self.user_1): @@ -73,8 +73,8 @@ def test_mark_fresh_as_mitigated(self, mock_dt): (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=False) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=False) def test_mark_old_active_as_mitigated(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -91,8 +91,8 @@ def test_mark_old_active_as_mitigated(self, mock_can_edit, mock_tz): (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=True) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=True) def test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -113,8 +113,8 @@ def test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz): (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=True) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=True) def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -135,8 +135,8 @@ def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz): (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=True) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=True) def test_update_old_mitigated_with_missing_data(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -158,18 +158,18 @@ def test_update_old_mitigated_with_missing_data(self, mock_can_edit, mock_tz): (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=True) + 
@mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=True) def test_set_old_mitigated_as_active(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime with impersonate(self.user_1): test = Test.objects.last() finding = Finding(test=test, is_mitigated=True, active=False, mitigated=frozen_datetime, mitigated_by=self.user_2) - logger.debug('save1') + logger.debug("save1") finding.save() finding.active = True - logger.debug('save2') + logger.debug("save2") finding.save() self.assertEqual( @@ -177,8 +177,8 @@ def test_set_old_mitigated_as_active(self, mock_can_edit, mock_tz): (True, False, False, False, False, None, None, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=False) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=False) def test_set_active_as_false_p(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -195,8 +195,8 @@ def test_set_active_as_false_p(self, mock_can_edit, mock_tz): (False, False, True, False, True, frozen_datetime, self.user_1, frozen_datetime), ) - @mock.patch('dojo.finding.helper.timezone.now') - @mock.patch('dojo.finding.helper.can_edit_mitigated_data', return_value=False) + @mock.patch("dojo.finding.helper.timezone.now") + @mock.patch("dojo.finding.helper.can_edit_mitigated_data", return_value=False) def test_set_active_as_out_of_scope(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime @@ -216,12 +216,12 @@ def test_set_active_as_out_of_scope(self, mock_can_edit, mock_tz): class TestSaveVulnerabilityIds(DojoTestCase): - @patch('dojo.finding.helper.Vulnerability_Id.objects.filter') - @patch('django.db.models.query.QuerySet.delete') - @patch('dojo.finding.helper.Vulnerability_Id.save') + @patch("dojo.finding.helper.Vulnerability_Id.objects.filter") + @patch("django.db.models.query.QuerySet.delete") + @patch("dojo.finding.helper.Vulnerability_Id.save") def test_save_vulnerability_ids(self, save_mock, delete_mock, filter_mock): finding = Finding() - new_vulnerability_ids = ['REF-1', 'REF-2', 'REF-2'] + new_vulnerability_ids = ["REF-1", "REF-2", "REF-2"] filter_mock.return_value = Vulnerability_Id.objects.none() save_vulnerability_ids(finding, new_vulnerability_ids) @@ -229,14 +229,14 @@ def test_save_vulnerability_ids(self, save_mock, delete_mock, filter_mock): filter_mock.assert_called_with(finding=finding) delete_mock.assert_called_once() self.assertEqual(save_mock.call_count, 2) - self.assertEqual('REF-1', finding.cve) + self.assertEqual("REF-1", finding.cve) - @patch('dojo.finding.helper.Vulnerability_Id_Template.objects.filter') - @patch('django.db.models.query.QuerySet.delete') - @patch('dojo.finding.helper.Vulnerability_Id_Template.save') + @patch("dojo.finding.helper.Vulnerability_Id_Template.objects.filter") + @patch("django.db.models.query.QuerySet.delete") + @patch("dojo.finding.helper.Vulnerability_Id_Template.save") def test_save_vulnerability_id_templates(self, save_mock, delete_mock, filter_mock): finding_template = Finding_Template() - new_vulnerability_ids = ['REF-1', 'REF-2', 'REF-2'] + new_vulnerability_ids = ["REF-1", "REF-2", "REF-2"] filter_mock.return_value = Vulnerability_Id_Template.objects.none() save_vulnerability_ids_template(finding_template, new_vulnerability_ids) @@ -244,4 +244,4 @@ def test_save_vulnerability_id_templates(self, save_mock, 
delete_mock, filter_mo filter_mock.assert_called_with(finding_template=finding_template) delete_mock.assert_called_once() self.assertEqual(save_mock.call_count, 2) - self.assertEqual('REF-1', finding_template.cve) + self.assertEqual("REF-1", finding_template.cve) diff --git a/unittests/test_finding_model.py b/unittests/test_finding_model.py index 7d93832921..130b658599 100644 --- a/unittests/test_finding_model.py +++ b/unittests/test_finding_model.py @@ -19,8 +19,8 @@ def test_get_sast_source_file_path_with_link_no_source_code_management_uri(self) test.engagement = engagement finding = Finding() finding.test = test - finding.sast_source_file_path = 'SastSourceFilePath' - self.assertEqual('SastSourceFilePath', finding.get_sast_source_file_path_with_link()) + finding.sast_source_file_path = "SastSourceFilePath" + self.assertEqual("SastSourceFilePath", finding.get_sast_source_file_path_with_link()) def test_get_sast_source_file_path_with_link_and_source_code_management_uri(self): test = Test() @@ -28,8 +28,8 @@ def test_get_sast_source_file_path_with_link_and_source_code_management_uri(self test.engagement = engagement finding = Finding() finding.test = test - finding.sast_source_file_path = 'SastSourceFilePath' - engagement.source_code_management_uri = 'URL' + finding.sast_source_file_path = "SastSourceFilePath" + engagement.source_code_management_uri = "URL" self.assertEqual('SastSourceFilePath', finding.get_sast_source_file_path_with_link()) def test_get_file_path_with_link_no_file_path(self): @@ -42,8 +42,8 @@ def test_get_file_path_with_link_no_source_code_management_uri(self): test.engagement = engagement finding = Finding() finding.test = test - finding.file_path = 'FilePath' - self.assertEqual('FilePath', finding.get_file_path_with_link()) + finding.file_path = "FilePath" + self.assertEqual("FilePath", finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri(self): test = Test() @@ -51,8 +51,8 @@ def test_get_file_path_with_link_and_source_code_management_uri(self): test.engagement = engagement finding = Finding() finding.test = test - finding.file_path = 'FilePath' - engagement.source_code_management_uri = 'URL' + finding.file_path = "FilePath" + engagement.source_code_management_uri = "URL" self.assertEqual('FilePath', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_github_no_scm_type_with_details_and_line(self): @@ -65,17 +65,17 @@ def test_get_file_path_with_link_and_source_code_management_uri_github_no_scm_ty engagement.branch_tag = "some-branch" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://github.com/some-test-account/some-test-repo' + engagement.source_code_management_uri = "https://github.com/some-test-account/some-test-repo" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_github_with_scm_type_with_details_and_line(self): # checks that for github in custom field dojo makes correct url to browse on github # create scm-type custom field with value "github" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", 
prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="github") product_metadata.save() @@ -88,10 +88,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_github_with_scm_ engagement.branch_tag = "some-branch" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://github.com/some-test-account/some-test-repo' + engagement.source_code_management_uri = "https://github.com/some-test-account/some-test-repo" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public_project_with_no_details_and_line(self): @@ -99,8 +99,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public # dojo makes correct url to browse on public bitbucket (for project uri) # create scm-type custom field with value "bitbucket" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="bitbucket") product_metadata.save() @@ -111,10 +111,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public test.engagement = engagement finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/some-test-user/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public_project_with_commithash_and_line(self): @@ -122,8 +122,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public # dojo makes correct url to browse on public bitbucket (for project uri) # create scm-type custom field with value "bitbucket" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="bitbucket") product_metadata.save() @@ -135,10 +135,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_public engagement.commit_hash = "some-commit-hash" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/some-test-user/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_project_with_commithash_and_line(self): @@ -146,8 +146,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa 
# dojo makes correct url to browse on standalone/onpremise bitbucket (for project uri) # create scm-type custom field with value "bitbucket-standalone" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="bitbucket-standalone") product_metadata.save() @@ -159,10 +159,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa engagement.commit_hash = "some-commit-hash" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/scm/some-test-project/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/scm/some-test-project/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_project_with_branchtag_and_line(self): @@ -170,8 +170,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa # dojo makes correct url to browse on standalone/onpremise bitbucket (for project uri) # create scm-type custom field with value "bitbucket-standalone" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="bitbucket-standalone") product_metadata.save() @@ -183,10 +183,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa engagement.branch_tag = "some-branch" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/scm/some-test-project/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/scm/some-test-project/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standalone_user_with_branchtag_and_line(self): @@ -194,8 +194,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa # dojo makes correct url to browse on standalone/onpremise bitbucket (for user uri) # create scm-type custom field with value "bitbucket-standalone" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="bitbucket-standalone") product_metadata.save() @@ -207,10 +207,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_bitbucket_standa engagement.branch_tag = "some-branch" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = 
"some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/scm/~some-user/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/scm/~some-user/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) @@ -219,8 +219,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber # dojo makes correct url # create scm-type custom field with value "gitea" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="gitea") product_metadata.save() @@ -231,10 +231,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber test.engagement = engagement finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/some-test-user/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeberg_project_with_commithash_and_line(self): @@ -242,8 +242,8 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber # dojo makes correct url # create scm-type custom field with value "gitea" - product_type = self.create_product_type('test_product_type') - product = self.create_product(name='test_product', prod_type=product_type) + product_type = self.create_product_type("test_product_type") + product = self.create_product(name="test_product", prod_type=product_type) product_metadata = DojoMeta(product=product, name="scm-type", value="gitea") product_metadata.save() @@ -255,10 +255,10 @@ def test_get_file_path_with_link_and_source_code_management_uri_gitea_or_codeber engagement.commit_hash = "some-commit-hash" finding = Finding() finding.test = test - finding.file_path = 'some-folder/some-file.ext' + finding.file_path = "some-folder/some-file.ext" finding.line = 5432 - engagement.source_code_management_uri = 'https://bb.example.com/some-test-user/some-test-repo.git' + engagement.source_code_management_uri = "https://bb.example.com/some-test-user/some-test-repo.git" self.assertEqual('some-folder/some-file.ext', finding.get_file_path_with_link()) def test_get_file_path_with_xss_attack(self): @@ -267,8 +267,8 @@ def test_get_file_path_with_xss_attack(self): test.engagement = engagement finding = Finding() finding.test = test - finding.file_path = '' - engagement.source_code_management_uri = '' + finding.file_path = "" + engagement.source_code_management_uri = "" self.assertEqual('<SCRIPT SRC=http://xss.rocks/xss.js></SCRIPT>', finding.get_file_path_with_link()) def test_get_references_with_links_no_references(self): @@ -277,50 +277,50 @@ def test_get_references_with_links_no_references(self): def test_get_references_with_links_no_links(self): finding = Finding() - finding.references = 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr' - self.assertEqual('Lorem ipsum dolor sit amet, consetetur sadipscing elitr', finding.get_references_with_links()) 
+ finding.references = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr" + self.assertEqual("Lorem ipsum dolor sit amet, consetetur sadipscing elitr", finding.get_references_with_links()) def test_get_references_with_links_simple_url(self): finding = Finding() - finding.references = 'URL: https://www.example.com' + finding.references = "URL: https://www.example.com" self.assertEqual('URL: https://www.example.com', finding.get_references_with_links()) def test_get_references_with_links_url_with_port(self): finding = Finding() - finding.references = 'http://www.example.com:8080' + finding.references = "http://www.example.com:8080" self.assertEqual('http://www.example.com:8080', finding.get_references_with_links()) def test_get_references_with_links_url_with_path(self): finding = Finding() - finding.references = 'URL https://www.example.com/path/part2 behind URL' + finding.references = "URL https://www.example.com/path/part2 behind URL" self.assertEqual('URL https://www.example.com/path/part2 behind URL', finding.get_references_with_links()) def test_get_references_with_links_complicated_url_with_parameter(self): finding = Finding() - finding.references = 'URL:https://www.example.com/path?param1=abc&_param2=xyz' + finding.references = "URL:https://www.example.com/path?param1=abc&_param2=xyz" self.assertEqual('URL:https://www.example.com/path?param1=abc&_param2=xyz', finding.get_references_with_links()) def test_get_references_with_links_two_urls(self): finding = Finding() - finding.references = 'URL1: https://www.example.com URL2: https://info.example.com' + finding.references = "URL1: https://www.example.com URL2: https://info.example.com" self.assertEqual('URL1: https://www.example.com URL2: https://info.example.com', finding.get_references_with_links()) def test_get_references_with_links_linebreak(self): finding = Finding() - finding.references = 'https://www.example.com\nhttps://info.example.com' + finding.references = "https://www.example.com\nhttps://info.example.com" self.assertEqual('https://www.example.com\nhttps://info.example.com', finding.get_references_with_links()) def test_get_references_with_links_markdown(self): finding = Finding() - finding.references = 'URL: [https://www.example.com](https://www.example.com)' - self.assertEqual('URL: [https://www.example.com](https://www.example.com)', finding.get_references_with_links()) + finding.references = "URL: [https://www.example.com](https://www.example.com)" + self.assertEqual("URL: [https://www.example.com](https://www.example.com)", finding.get_references_with_links()) class TestFindingSLAExpiration(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def run(self, result=None): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") testuser.usercontactinfo.block_execution = True testuser.save() @@ -335,19 +335,19 @@ def test_sla_expiration_date(self): tests if the SLA expiration date and SLA days remaining are calculated correctly after a finding's severity is updated """ - user, _ = User.objects.get_or_create(username='admin') - product_type = self.create_product_type('test_product_type') - sla_config = self.create_sla_configuration(name='test_sla_config') - product = self.create_product(name='test_product', prod_type=product_type) + user, _ = User.objects.get_or_create(username="admin") + product_type = self.create_product_type("test_product_type") + sla_config = self.create_sla_configuration(name="test_sla_config") + product = 
self.create_product(name="test_product", prod_type=product_type) product.sla_configuration = sla_config product.save() - engagement = self.create_engagement('test_eng', product) - test = self.create_test(engagement=engagement, scan_type='ZAP Scan', title='test_test') + engagement = self.create_engagement("test_eng", product) + test = self.create_test(engagement=engagement, scan_type="ZAP Scan", title="test_test") finding = Finding.objects.create( test=test, reporter=user, - title='test_finding', - severity='Critical', + title="test_finding", + severity="Critical", date=datetime.now().date()) finding.set_sla_expiration_date() @@ -360,19 +360,19 @@ def test_sla_expiration_date_after_finding_severity_updated(self): tests if the SLA expiration date and SLA days remaining are calculated correctly after a finding's severity is updated """ - user, _ = User.objects.get_or_create(username='admin') - product_type = self.create_product_type('test_product_type') - sla_config = self.create_sla_configuration(name='test_sla_config') - product = self.create_product(name='test_product', prod_type=product_type) + user, _ = User.objects.get_or_create(username="admin") + product_type = self.create_product_type("test_product_type") + sla_config = self.create_sla_configuration(name="test_sla_config") + product = self.create_product(name="test_product", prod_type=product_type) product.sla_configuration = sla_config product.save() - engagement = self.create_engagement('test_eng', product) - test = self.create_test(engagement=engagement, scan_type='ZAP Scan', title='test_test') + engagement = self.create_engagement("test_eng", product) + test = self.create_test(engagement=engagement, scan_type="ZAP Scan", title="test_test") finding = Finding.objects.create( test=test, reporter=user, - title='test_finding', - severity='Critical', + title="test_finding", + severity="Critical", date=datetime.now().date()) finding.set_sla_expiration_date() @@ -380,7 +380,7 @@ def test_sla_expiration_date_after_finding_severity_updated(self): self.assertEqual(finding.sla_expiration_date, datetime.now().date() + timedelta(days=expected_sla_days)) self.assertEqual(finding.sla_days_remaining(), expected_sla_days) - finding.severity = 'Medium' + finding.severity = "Medium" finding.set_sla_expiration_date() expected_sla_days = getattr(product.sla_configuration, finding.severity.lower(), None) @@ -392,25 +392,25 @@ def test_sla_expiration_date_after_product_updated(self): tests if the SLA expiration date and SLA days remaining are calculated correctly after a product changed from one SLA configuration to another """ - user, _ = User.objects.get_or_create(username='admin') - product_type = self.create_product_type('test_product_type') - sla_config_1 = self.create_sla_configuration(name='test_sla_config_1') + user, _ = User.objects.get_or_create(username="admin") + product_type = self.create_product_type("test_product_type") + sla_config_1 = self.create_sla_configuration(name="test_sla_config_1") sla_config_2 = self.create_sla_configuration( - name='test_sla_config_2', + name="test_sla_config_2", critical=1, high=2, medium=3, low=4) - product = self.create_product(name='test_product', prod_type=product_type) + product = self.create_product(name="test_product", prod_type=product_type) product.sla_configuration = sla_config_1 product.save() - engagement = self.create_engagement('test_eng', product) - test = self.create_test(engagement=engagement, scan_type='ZAP Scan', title='test_test') + engagement = self.create_engagement("test_eng", product) 
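# A minimal sketch, not DefectDojo's actual implementation, of the SLA
# arithmetic these tests assert; it assumes the SLA configuration exposes one
# integer attribute per severity (critical/high/medium/low), as the
# create_sla_configuration(..., critical=1, high=2, medium=3, low=4) call
# above suggests.
from datetime import date, timedelta

def expected_expiration(sla_config, severity, start):
    # severity "Critical" looks up sla_config.critical, and so on
    days = getattr(sla_config, severity.lower(), None)
    if days is None:
        return None  # SLA not enforced for this severity
    return start + timedelta(days=days)

# A Critical finding created today under sla_config_2 (critical=1) would then
# expire at date.today() + timedelta(days=1), which is the shape of the
# assertEqual(finding.sla_expiration_date, ...) checks in these tests.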
+ test = self.create_test(engagement=engagement, scan_type="ZAP Scan", title="test_test") finding = Finding.objects.create( test=test, reporter=user, - title='test_finding', - severity='Critical', + title="test_finding", + severity="Critical", date=datetime.now().date()) expected_sla_days = getattr(product.sla_configuration, finding.severity.lower(), None) @@ -431,19 +431,19 @@ def test_sla_expiration_date_after_sla_configuration_updated(self): tests if the SLA expiration date and SLA days remaining are calculated correctly after the SLA configuration on a product was updated to a different number of SLA days """ - user, _ = User.objects.get_or_create(username='admin') - product_type = self.create_product_type('test_product_type') - sla_config = self.create_sla_configuration(name='test_sla_config') - product = self.create_product(name='test_product', prod_type=product_type) + user, _ = User.objects.get_or_create(username="admin") + product_type = self.create_product_type("test_product_type") + sla_config = self.create_sla_configuration(name="test_sla_config") + product = self.create_product(name="test_product", prod_type=product_type) product.sla_configuration = sla_config product.save() - engagement = self.create_engagement('test_eng', product) - test = self.create_test(engagement=engagement, scan_type='ZAP Scan', title='test_test') + engagement = self.create_engagement("test_eng", product) + test = self.create_test(engagement=engagement, scan_type="ZAP Scan", title="test_test") finding = Finding.objects.create( test=test, reporter=user, - title='test_finding', - severity='Critical', + title="test_finding", + severity="Critical", date=datetime.now().date()) expected_sla_days = getattr(product.sla_configuration, finding.severity.lower(), None) @@ -464,19 +464,19 @@ def test_sla_expiration_date_after_sla_not_enforced(self): tests if the SLA expiration date is none after the SLA configuration on a product was updated to not enforce all SLA remediation days """ - user, _ = User.objects.get_or_create(username='admin') - product_type = self.create_product_type('test_product_type') - sla_config = self.create_sla_configuration(name='test_sla_config') - product = self.create_product(name='test_product', prod_type=product_type) + user, _ = User.objects.get_or_create(username="admin") + product_type = self.create_product_type("test_product_type") + sla_config = self.create_sla_configuration(name="test_sla_config") + product = self.create_product(name="test_product", prod_type=product_type) product.sla_configuration = sla_config product.save() - engagement = self.create_engagement('test_eng', product) - test = self.create_test(engagement=engagement, scan_type='ZAP Scan', title='test_test') + engagement = self.create_engagement("test_eng", product) + test = self.create_test(engagement=engagement, scan_type="ZAP Scan", title="test_test") finding = Finding.objects.create( test=test, reporter=user, - title='test_finding', - severity='Critical', + title="test_finding", + severity="Critical", date=datetime.now().date()) expected_sla_days = getattr(product.sla_configuration, finding.severity.lower(), None) diff --git a/unittests/test_flush_auditlog.py b/unittests/test_flush_auditlog.py index 2116d537c9..a75473664b 100644 --- a/unittests/test_flush_auditlog.py +++ b/unittests/test_flush_auditlog.py @@ -14,7 +14,7 @@ class TestFlushAuditlog(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] @override_settings(AUDITLOG_FLUSH_RETENTION_PERIOD=-1) def
test_flush_auditlog_disabled(self): diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 45c8ed63fa..6d923e2e39 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -18,9 +18,9 @@ logger = logging.getLogger(__name__) -ENGAGEMENT_NAME_DEFAULT = 'Engagement 1' -PRODUCT_NAME_DEFAULT = 'Product A' -PRODUCT_TYPE_NAME_DEFAULT = 'Type type' +ENGAGEMENT_NAME_DEFAULT = "Engagement 1" +PRODUCT_NAME_DEFAULT = "Product A" +PRODUCT_TYPE_NAME_DEFAULT = "Type type" # 0_zap_sample.xml: basic file with 4 out of 5 findings reported, zap4 absent @@ -54,59 +54,59 @@ # test methods to be used both by API Test and UI Test class ImportReimportMixin: def __init__(self, *args, **kwargs): - self.scans_path = '/scans/' + self.scans_path = "/scans/" - self.zap_sample0_filename = self.scans_path + 'zap/0_zap_sample.xml' - self.zap_sample1_filename = self.scans_path + 'zap/1_zap_sample_0_and_new_absent.xml' - self.zap_sample2_filename = self.scans_path + 'zap/2_zap_sample_0_and_new_endpoint.xml' - self.zap_sample3_filename = self.scans_path + 'zap/3_zap_sampl_0_and_different_severities.xml' + self.zap_sample0_filename = self.scans_path + "zap/0_zap_sample.xml" + self.zap_sample1_filename = self.scans_path + "zap/1_zap_sample_0_and_new_absent.xml" + self.zap_sample2_filename = self.scans_path + "zap/2_zap_sample_0_and_new_endpoint.xml" + self.zap_sample3_filename = self.scans_path + "zap/3_zap_sampl_0_and_different_severities.xml" - self.anchore_file_name = self.scans_path + 'anchore_engine/one_vuln_many_files.json' - self.scan_type_anchore = 'Anchore Engine Scan' + self.anchore_file_name = self.scans_path + "anchore_engine/one_vuln_many_files.json" + self.scan_type_anchore = "Anchore Engine Scan" - self.acunetix_file_name = self.scans_path + 'acunetix/one_finding.xml' - self.scan_type_acunetix = 'Acunetix Scan' + self.acunetix_file_name = self.scans_path + "acunetix/one_finding.xml" + self.scan_type_acunetix = "Acunetix Scan" - self.gitlab_dep_scan_components_filename = f'{self.scans_path}gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json' - self.scan_type_gtlab_dep_scan = 'GitLab Dependency Scanning Report' + self.gitlab_dep_scan_components_filename = f"{self.scans_path}gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json" + self.scan_type_gtlab_dep_scan = "GitLab Dependency Scanning Report" - self.sonarqube_file_name1 = self.scans_path + 'sonarqube/sonar-6-findings.html' - self.sonarqube_file_name2 = self.scans_path + 'sonarqube/sonar-6-findings-1-unique_id_changed.html' - self.scan_type_sonarqube_detailed = 'SonarQube Scan detailed' + self.sonarqube_file_name1 = self.scans_path + "sonarqube/sonar-6-findings.html" + self.sonarqube_file_name2 = self.scans_path + "sonarqube/sonar-6-findings-1-unique_id_changed.html" + self.scan_type_sonarqube_detailed = "SonarQube Scan detailed" - self.veracode_many_findings = self.scans_path + 'veracode/many_findings.xml' - self.veracode_same_hash_code_different_unique_id = self.scans_path + 'veracode/many_findings_same_hash_code_different_unique_id.xml' - self.veracode_same_unique_id_different_hash_code = self.scans_path + 'veracode/many_findings_same_unique_id_different_hash_code.xml' - self.veracode_different_hash_code_different_unique_id = self.scans_path + 'veracode/many_findings_different_hash_code_different_unique_id.xml' - self.veracode_mitigated_findings = self.scans_path + 'veracode/mitigated_finding.xml' - self.scan_type_veracode = 'Veracode Scan' + self.veracode_many_findings 
= self.scans_path + "veracode/many_findings.xml" + self.veracode_same_hash_code_different_unique_id = self.scans_path + "veracode/many_findings_same_hash_code_different_unique_id.xml" + self.veracode_same_unique_id_different_hash_code = self.scans_path + "veracode/many_findings_same_unique_id_different_hash_code.xml" + self.veracode_different_hash_code_different_unique_id = self.scans_path + "veracode/many_findings_different_hash_code_different_unique_id.xml" + self.veracode_mitigated_findings = self.scans_path + "veracode/mitigated_finding.xml" + self.scan_type_veracode = "Veracode Scan" - self.clair_few_findings = self.scans_path + 'clair/clair_few_vuln.json' - self.clair_empty = self.scans_path + 'clair/clair_empty.json' - self.scan_type_clair = 'Clair Scan' + self.clair_few_findings = self.scans_path + "clair/clair_few_vuln.json" + self.clair_empty = self.scans_path + "clair/clair_empty.json" + self.scan_type_clair = "Clair Scan" self.scan_type_generic = "Generic Findings Import" self.generic_filename_with_file = self.scans_path + "generic/test_with_image.json" self.generic_import_1 = self.scans_path + "generic/test_import_report1.json" self.generic_import_2 = self.scans_path + "generic/test_import_report2.json" - self.aws_prowler_file_name = self.scans_path + 'aws_prowler/many_vuln.json' - self.aws_prowler_file_name_plus_one = self.scans_path + 'aws_prowler/many_vuln_plus_one.json' - self.scan_type_aws_prowler = 'AWS Prowler Scan' + self.aws_prowler_file_name = self.scans_path + "aws_prowler/many_vuln.json" + self.aws_prowler_file_name_plus_one = self.scans_path + "aws_prowler/many_vuln_plus_one.json" + self.scan_type_aws_prowler = "AWS Prowler Scan" - self.nuclei_empty = self.scans_path + 'nuclei/empty.jsonl' + self.nuclei_empty = self.scans_path + "nuclei/empty.jsonl" - self.gitlab_dast_file_name = f'{self.scans_path}gitlab_dast/gitlab_dast_one_vul_v15.json' - self.scan_type_gitlab_dast = 'GitLab DAST Report' + self.gitlab_dast_file_name = f"{self.scans_path}gitlab_dast/gitlab_dast_one_vul_v15.json" + self.scan_type_gitlab_dast = "GitLab DAST Report" - self.anchore_grype_file_name = self.scans_path + 'anchore_grype/check_all_fields.json' - self.anchore_grype_scan_type = 'Anchore Grype' + self.anchore_grype_file_name = self.scans_path + "anchore_grype/check_all_fields.json" + self.anchore_grype_scan_type = "Anchore Grype" # import zap scan, testing: # - import # - active/verifed = True def test_zap_scan_base_active_verified(self): - logger.debug('importing original zap xml report') + logger.debug("importing original zap xml report") endpoint_count_before = self.db_endpoint_count() endpoint_status_count_before_active = self.db_endpoint_status_count(mitigated=False) endpoint_status_count_before_mitigated = self.db_endpoint_status_count(mitigated=True) @@ -122,7 +122,7 @@ def test_zap_scan_base_active_verified(self): # 4 absent # 5 active - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -146,7 +146,7 @@ def test_zap_scan_base_active_verified(self): # - import # - active/verifed = False def test_zap_scan_base_not_active_not_verified(self): - logger.debug('importing original zap xml report') + logger.debug("importing original zap xml report") endpoint_count_before = self.db_endpoint_count() endpoint_status_count_before_active = self.db_endpoint_status_count(mitigated=False) endpoint_status_count_before_mitigated = self.db_endpoint_status_count(mitigated=True) @@ -162,7 +162,7 @@ def 
test_zap_scan_base_not_active_not_verified(self): # 4 absent # 5 inactive - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id, active=False, verified=False) self.log_finding_summary_json_api(findings) @@ -188,109 +188,109 @@ def test_zap_scan_base_not_active_not_verified(self): # - import # - no scan_date and date not set by parser leads to today as date def test_import_default_scan_date_parser_not_sets_date(self): - logger.debug('importing zap xml report with date set by parser') + logger.debug("importing zap xml report with date set by parser") with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): import0 = self.import_scan_with_params(self.zap_sample0_filename, active=False, verified=False) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id, active=False, verified=False) self.log_finding_summary_json_api(findings) # Get the date - date = findings['results'][0]['date'] + date = findings["results"][0]["date"] self.assertEqual(date, str(timezone.localtime(timezone.now()).date())) # import acunetix scan with dates # - import # - no scan_date does not override date set by parser def test_import_default_scan_date_parser_sets_date(self): - logger.debug('importing original acunetix xml report') + logger.debug("importing original acunetix xml report") with assertTestImportModelsCreated(self, imports=1, affected_findings=1, created=1): import0 = self.import_scan_with_params(self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False, verified=False) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id, active=False, verified=False) self.log_finding_summary_json_api(findings) # Get the date - date = findings['results'][0]['date'] - self.assertEqual(date, '2018-09-24') + date = findings["results"][0]["date"] + self.assertEqual(date, "2018-09-24") # import zap scan without dates # - import # - set scan_date overrides date not set by parser def test_import_set_scan_date_parser_not_sets_date(self): - logger.debug('importing original zap xml report') + logger.debug("importing original zap xml report") with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): - import0 = self.import_scan_with_params(self.zap_sample0_filename, active=False, verified=False, scan_date='2006-12-26') + import0 = self.import_scan_with_params(self.zap_sample0_filename, active=False, verified=False, scan_date="2006-12-26") - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id, active=False, verified=False) self.log_finding_summary_json_api(findings) # Get the date - date = findings['results'][0]['date'] - self.assertEqual(date, '2006-12-26') + date = findings["results"][0]["date"] + self.assertEqual(date, "2006-12-26") # import acunetix scan with dates # - import # - set scan_date overrides date set by parser def test_import_set_scan_date_parser_sets_date(self): - logger.debug('importing acunetix xml report with date set by parser') + logger.debug("importing acunetix xml report with date set by parser") with assertTestImportModelsCreated(self, imports=1, affected_findings=1, created=1): - import0 = self.import_scan_with_params(self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False, verified=False, scan_date='2006-12-26') + import0 = self.import_scan_with_params(self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False,
verified=False, scan_date="2006-12-26") - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id, active=False, verified=False) self.log_finding_summary_json_api(findings) # Get the date - date = findings['results'][0]['date'] - self.assertEqual(date, '2006-12-26') + date = findings["results"][0]["date"] + self.assertEqual(date, "2006-12-26") # Test Scan_Date for reimport in UI. UI can only rupload for existing tests, non UI tests are in API class below def test_import_reimport_no_scan_date_parser_no_date(self): import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] # reimport report with 1 extra finding reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename) - test_id = reimport0['test'] + test_id = reimport0["test"] # 1 new finding imported findings = self.get_test_findings_api(test_id) self.assert_finding_count_json(5, findings) # no scan_date, so date should be today - self.assertEqual(findings['results'][4]['date'], str(timezone.localtime(timezone.now()).date())) + self.assertEqual(findings["results"][4]["date"], str(timezone.localtime(timezone.now()).date())) def test_import_reimport_scan_date_parser_no_date(self): import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] # reimport report with 1 extra finding - reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename, scan_date='2020-02-02') + reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename, scan_date="2020-02-02") - test_id = reimport0['test'] + test_id = reimport0["test"] # 1 new finding imported findings = self.get_test_findings_api(test_id) self.assert_finding_count_json(5, findings) # scan_date provided, so date should be equal to that - self.assertEqual(findings['results'][4]['date'], "2020-02-02") + self.assertEqual(findings["results"][4]["date"], "2020-02-02") def test_import_reimport_no_scan_date_parser_date(self): import0 = self.import_scan_with_params(self.aws_prowler_file_name, scan_type=self.scan_type_aws_prowler) - test_id = import0['test'] + test_id = import0["test"] # reimport report with 1 extra finding reimport0 = self.reimport_scan_with_params(test_id, self.aws_prowler_file_name_plus_one, scan_type=self.scan_type_aws_prowler) - test_id = reimport0['test'] + test_id = reimport0["test"] # 1 new finding imported findings = self.get_test_findings_api(test_id) @@ -298,37 +298,37 @@ def test_import_reimport_no_scan_date_parser_date(self): self.log_finding_summary_json_api(findings) # no scan_date, so date should be date from parser # findings order by priority, third finding is the new one - self.assertEqual(findings['results'][2]['date'], "2021-08-23") + self.assertEqual(findings["results"][2]["date"], "2021-08-23") def test_import_reimport_scan_date_parser_date(self): import0 = self.import_scan_with_params(self.aws_prowler_file_name, scan_type=self.scan_type_aws_prowler) - test_id = import0['test'] + test_id = import0["test"] # reimport report with 1 extra finding - reimport0 = self.reimport_scan_with_params(test_id, self.aws_prowler_file_name_plus_one, scan_type=self.scan_type_aws_prowler, scan_date='2020-02-02') + reimport0 = self.reimport_scan_with_params(test_id, self.aws_prowler_file_name_plus_one, scan_type=self.scan_type_aws_prowler, scan_date="2020-02-02") - test_id = reimport0['test'] + test_id = reimport0["test"] # 1 new finding imported findings = 
self.get_test_findings_api(test_id) self.assert_finding_count_json(5, findings) # scan_date provided, so date should be equal to that, overriding the date from the parser self.log_finding_summary_json_api(findings) - self.assertEqual(findings['results'][2]['date'], "2020-02-02") + self.assertEqual(findings["results"][4]["date"], "2020-02-02") # Test re-import with unique_id_from_tool algorithm # import sonar scan with detailed parser, testing: # - import # - active/verified = True def test_sonar_detailed_scan_base_active_verified(self): - logger.debug('importing original sonar report') + logger.debug("importing original sonar report") notes_count_before = self.db_notes_count() with assertTestImportModelsCreated(self, imports=1, affected_findings=6, created=6): import0 = self.import_scan_with_params(self.sonarqube_file_name1, scan_type=self.scan_type_sonarqube_detailed) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -343,13 +343,13 @@ def test_sonar_detailed_scan_base_active_verified(self): # - import # - active/verified = True def test_veracode_scan_base_active_verified(self): - logger.debug('importing original veracode report') + logger.debug("importing original veracode report") notes_count_before = self.db_notes_count() with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): import0 = self.import_scan_with_params(self.veracode_many_findings, scan_type=self.scan_type_veracode) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -364,12 +364,12 @@ def test_veracode_scan_base_active_verified(self): # - active = True, verified = True # - existing findings with verified is true should stay verified def test_import_veracode_reimport_veracode_active_verified_mitigated(self): - logger.debug('reimporting exact same original veracode mitigated xml report again') + logger.debug("reimporting exact same original veracode mitigated xml report again") import_veracode_many_findings = self.import_scan_with_params(self.veracode_mitigated_findings, scan_type=self.scan_type_veracode, verified=True, forceActive=True, forceVerified=True) - test_id = import_veracode_many_findings['test'] + test_id = import_veracode_many_findings["test"] notes_count_before = self.db_notes_count() @@ -377,7 +377,7 @@ def test_import_veracode_reimport_veracode_active_verified_mitigated(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=0, closed=1, reactivated=0, untouched=0): reimport_veracode_mitigated_findings = self.reimport_scan_with_params(test_id, self.veracode_mitigated_findings, scan_type=self.scan_type_veracode) - test_id = reimport_veracode_mitigated_findings['test'] + test_id = reimport_veracode_mitigated_findings["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -401,11 +401,11 @@ def test_import_veracode_reimport_veracode_active_verified_mitigated(self): # - active = True, verified = True # - existing findings with verified is true should stay verified def test_import_0_reimport_0_active_verified(self): - logger.debug('reimporting exact same original zap xml report again') + logger.debug("reimporting exact same original zap xml report again") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] endpoint_count_before = self.db_endpoint_count()
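The scan_date tests earlier in this hunk pin down a three-way precedence for the date stamped on imported findings: an explicit scan_date parameter wins, otherwise a date extracted by the parser is kept, otherwise the import falls back to today. A minimal sketch of that rule, using a hypothetical resolve_finding_date helper rather than DefectDojo's actual importer internals (the tests compare against timezone.localtime(timezone.now()).date(); plain date.today() stands in for that here):

from datetime import date

def resolve_finding_date(scan_date=None, parser_date=None):
    # an explicit scan_date overrides everything
    # (test_import_set_scan_date_* and the reimport scan_date variants)
    if scan_date is not None:
        return scan_date
    # otherwise keep the date the parser extracted from the report
    # (e.g. the Acunetix and AWS Prowler samples above)
    if parser_date is not None:
        return parser_date
    # otherwise fall back to today
    # (test_import_default_scan_date_parser_not_sets_date)
    return date.today()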
endpoint_status_count_before_active = self.db_endpoint_status_count(mitigated=False) @@ -416,7 +416,7 @@ def test_import_0_reimport_0_active_verified(self): with assertTestImportModelsCreated(self, reimports=1, untouched=4): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - test_id = reimport0['test'] + test_id = reimport0["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -439,11 +439,11 @@ def test_import_0_reimport_0_active_verified(self): # - active = True, verified = False # - existing findings with verified is true should stay verified def test_import_0_reimport_0_active_not_verified(self): - logger.debug('reimporting exact same original zap xml report again, verified=False') + logger.debug("reimporting exact same original zap xml report again, verified=False") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] endpoint_count_before = self.db_endpoint_count() endpoint_status_count_before_active = self.db_endpoint_status_count(mitigated=False) @@ -454,7 +454,7 @@ def test_import_0_reimport_0_active_not_verified(self): with assertTestImportModelsCreated(self, reimports=1, untouched=4): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename, verified=False) - test_id = reimport0['test'] + test_id = reimport0["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -483,11 +483,11 @@ def test_import_0_reimport_0_active_not_verified(self): # - active = True, verified = False # - existing findings with verified is true should stay verified def test_import_sonar1_reimport_sonar1_active_not_verified(self): - logger.debug('reimporting exact same original sonar report again, verified=False') + logger.debug("reimporting exact same original sonar report again, verified=False") importsonar1 = self.import_scan_with_params(self.sonarqube_file_name1, scan_type=self.scan_type_sonarqube_detailed) - test_id = importsonar1['test'] + test_id = importsonar1["test"] notes_count_before = self.db_notes_count() @@ -495,7 +495,7 @@ def test_import_sonar1_reimport_sonar1_active_not_verified(self): with assertTestImportModelsCreated(self, reimports=1, untouched=6): reimportsonar1 = self.reimport_scan_with_params(test_id, self.sonarqube_file_name1, scan_type=self.scan_type_sonarqube_detailed, verified=False) - test_id = reimportsonar1['test'] + test_id = reimportsonar1["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -518,11 +518,11 @@ def test_import_sonar1_reimport_sonar1_active_not_verified(self): # - reimport, findings stay the same, stay active # - existing findings with verified is true should stay verified def test_import_veracode_reimport_veracode_active_not_verified(self): - logger.debug('reimporting exact same original veracode report again, verified=False') + logger.debug("reimporting exact same original veracode report again, verified=False") import_veracode_many_findings = self.import_scan_with_params(self.veracode_many_findings, scan_type=self.scan_type_veracode) - test_id = import_veracode_many_findings['test'] + test_id = import_veracode_many_findings["test"] notes_count_before = self.db_notes_count() @@ -530,7 +530,7 @@ def test_import_veracode_reimport_veracode_active_not_verified(self): with assertTestImportModelsCreated(self, reimports=1, untouched=4): reimport_veracode_many_findings = self.reimport_scan_with_params(test_id, 
self.veracode_many_findings, scan_type=self.scan_type_veracode, verified=False) - test_id = reimport_veracode_many_findings['test'] + test_id = reimport_veracode_many_findings["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -553,11 +553,11 @@ def test_import_veracode_reimport_veracode_active_not_verified(self): # - 1 finding is mitigated # - 1 finding is added def test_import_sonar1_reimport_sonar2(self): - logger.debug('reimporting same findings except one with a different unique_id_from_tool') + logger.debug("reimporting same findings except one with a different unique_id_from_tool") importsonar1 = self.import_scan_with_params(self.sonarqube_file_name1, scan_type=self.scan_type_sonarqube_detailed) - test_id = importsonar1['test'] + test_id = importsonar1["test"] notes_count_before = self.db_notes_count() @@ -565,7 +565,7 @@ def test_import_sonar1_reimport_sonar2(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, created=1, closed=1, untouched=5): reimportsonar1 = self.reimport_scan_with_params(test_id, self.sonarqube_file_name2, scan_type=self.scan_type_sonarqube_detailed, verified=False) - test_id = reimportsonar1['test'] + test_id = reimportsonar1["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -593,11 +593,11 @@ def test_import_sonar1_reimport_sonar2(self): # - reimport, all findings stay the same, stay active # - existing findings with verified is true should stay verified def test_import_veracode_reimport_veracode_same_hash_code_different_unique_id(self): - logger.debug('reimporting report with one finding having same hash_code but different unique_id_from_tool, verified=False') + logger.debug("reimporting report with one finding having same hash_code but different unique_id_from_tool, verified=False") import_veracode_many_findings = self.import_scan_with_params(self.veracode_many_findings, scan_type=self.scan_type_veracode) - test_id = import_veracode_many_findings['test'] + test_id = import_veracode_many_findings["test"] notes_count_before = self.db_notes_count() @@ -605,7 +605,7 @@ def test_import_veracode_reimport_veracode_same_hash_code_different_unique_id(se with assertTestImportModelsCreated(self, reimports=1, untouched=4): reimport_veracode_many_findings = self.reimport_scan_with_params(test_id, self.veracode_same_hash_code_different_unique_id, scan_type=self.scan_type_veracode, verified=False) - test_id = reimport_veracode_many_findings['test'] + test_id = reimport_veracode_many_findings["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -627,11 +627,11 @@ def test_import_veracode_reimport_veracode_same_hash_code_different_unique_id(se # - reimport, all findings stay the same, stay active # - existing findings with verified is true should stay verified def test_import_veracode_reimport_veracode_same_unique_id_different_hash_code(self): - logger.debug('reimporting report with one finding having same unique_id_from_tool but different hash_code, verified=False') + logger.debug("reimporting report with one finding having same unique_id_from_tool but different hash_code, verified=False") import_veracode_many_findings = self.import_scan_with_params(self.veracode_many_findings, scan_type=self.scan_type_veracode) - test_id = import_veracode_many_findings['test'] + test_id = import_veracode_many_findings["test"] notes_count_before = self.db_notes_count() @@ -639,7 +639,7 @@ def 
test_import_veracode_reimport_veracode_same_unique_id_different_hash_code(se with assertTestImportModelsCreated(self, reimports=1, untouched=4): reimport_veracode_many_findings = self.reimport_scan_with_params(test_id, self.veracode_same_unique_id_different_hash_code, scan_type=self.scan_type_veracode, verified=False) - test_id = reimport_veracode_many_findings['test'] + test_id = reimport_veracode_many_findings["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -662,11 +662,11 @@ def test_import_veracode_reimport_veracode_same_unique_id_different_hash_code(se # - 1 added finding, 1 mitigated finding # - existing findings with verified is true should stay verified def test_import_veracode_reimport_veracode_different_hash_code_different_unique_id(self): - logger.debug('reimporting report with one finding having different hash_code and different unique_id_from_tool, verified=False') + logger.debug("reimporting report with one finding having different hash_code and different unique_id_from_tool, verified=False") import_veracode_many_findings = self.import_scan_with_params(self.veracode_many_findings, scan_type=self.scan_type_veracode) - test_id = import_veracode_many_findings['test'] + test_id = import_veracode_many_findings["test"] notes_count_before = self.db_notes_count() @@ -674,7 +674,7 @@ def test_import_veracode_reimport_veracode_different_hash_code_different_unique_ with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, created=1, closed=1, untouched=3): reimport_veracode_many_findings = self.reimport_scan_with_params(test_id, self.veracode_different_hash_code_different_unique_id, scan_type=self.scan_type_veracode, verified=False) - test_id = reimport_veracode_many_findings['test'] + test_id = reimport_veracode_many_findings["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -698,11 +698,11 @@ def test_import_veracode_reimport_veracode_different_hash_code_different_unique_ # - verified is false, so zap4 should not be verified. 
# - existing findings with verified is true should stay verified def test_import_0_reimport_1_active_not_verified(self): - logger.debug('reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=False') + logger.debug("reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=False") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -716,7 +716,7 @@ def test_import_0_reimport_1_active_not_verified(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, created=1, closed=1, untouched=3): reimport1 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename, verified=False) - test_id = reimport1['test'] + test_id = reimport1["test"] self.assertEqual(test_id, test_id) self.get_test_api(test_id) @@ -751,11 +751,11 @@ def test_import_0_reimport_1_active_not_verified(self): # - total findings count should be 5 # - zap1 active, zap4 inactive def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self): - logger.debug('reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again') + logger.debug("reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -777,7 +777,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, closed=1, reactivated=1, untouched=3): self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - test_id = reimport1['test'] + test_id = reimport1["test"] self.assertEqual(test_id, test_id) self.get_test_api(test_id) @@ -790,12 +790,12 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self): zap1_ok = False zap4_ok = False - for finding in findings['results']: - if 'Zap1' in finding['title']: - self.assertTrue(finding['active']) + for finding in findings["results"]: + if "Zap1" in finding["title"]: + self.assertTrue(finding["active"]) zap1_ok = True - if 'Zap4' in finding['title']: - self.assertFalse(finding['active']) + if "Zap4" in finding["title"]: + self.assertFalse(finding["active"]) zap4_ok = True self.assertTrue(zap1_ok) @@ -822,11 +822,11 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self): # - extra endpoint should be present in db # - reimport doesn't look at endpoints to match against existing findings def test_import_0_reimport_2_extra_endpoint(self): - logger.debug('reimporting exact same original zap xml report again, with an extra endpoint for zap1') + logger.debug("reimporting exact same original zap xml report again, with an extra endpoint for zap1") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -839,7 +839,7 @@ def test_import_0_reimport_2_extra_endpoint(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=0, untouched=4): reimport2 = self.reimport_scan_with_params(test_id, self.zap_sample2_filename) - test_id = reimport2['test'] + 
test_id = reimport2["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -861,19 +861,19 @@ def test_import_0_reimport_2_extra_endpoint(self): # - extra endpoint should no longer be present in db # - reimport doesn't look at endpoints to match against existing findings def test_import_0_reimport_2_extra_endpoint_reimport_0(self): - logger.debug('reimporting exact same original zap xml report again, with an extra endpoint for zap1') + logger.debug("reimporting exact same original zap xml report again, with an extra endpoint for zap1") # self.log_finding_summary_json_api() import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) with assertTestImportModelsCreated(self, reimports=1, affected_findings=0, untouched=4): reimport2 = self.reimport_scan_with_params(test_id, self.zap_sample2_filename) - test_id = reimport2['test'] + test_id = reimport2["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -887,7 +887,7 @@ def test_import_0_reimport_2_extra_endpoint_reimport_0(self): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - test_id = reimport0['test'] + test_id = reimport0["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id) @@ -912,11 +912,11 @@ def test_import_0_reimport_2_extra_endpoint_reimport_0(self): # - so zap1 + zap2 closed # - 2 new findings zap1' and zap2' def test_import_0_reimport_3_active_verified(self): - logger.debug('reimporting updated zap xml report, with different severities for zap2 and zap5') + logger.debug("reimporting updated zap xml report, with different severities for zap2 and zap5") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -930,7 +930,7 @@ def test_import_0_reimport_3_active_verified(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=4, created=2, closed=2, untouched=2): reimport1 = self.reimport_scan_with_params(test_id, self.zap_sample3_filename) - test_id = reimport1['test'] + test_id = reimport1["test"] self.assertEqual(test_id, test_id) self.get_test_api(test_id) @@ -940,14 +940,14 @@ def test_import_0_reimport_3_active_verified(self): zap2_ok = False zap5_ok = False - for finding in findings['results']: - if 'Zap2' in finding['title']: - self.assertTrue(finding['active'] or finding['severity'] == 'Low') - self.assertTrue(not finding['active'] or finding['severity'] == 'Medium') + for finding in findings["results"]: + if "Zap2" in finding["title"]: + self.assertTrue(finding["active"] or finding["severity"] == "Low") + self.assertTrue(not finding["active"] or finding["severity"] == "Medium") zap2_ok = True - if 'Zap5' in finding['title']: - self.assertTrue(finding['active'] or finding['severity'] == 'Low') - self.assertTrue(not finding['active'] or finding['severity'] == 'Medium') + if "Zap5" in finding["title"]: + self.assertTrue(finding["active"] or finding["severity"] == "Low") + self.assertTrue(not finding["active"] or finding["severity"] == "Medium") zap5_ok = True self.assertTrue(zap2_ok) @@ -974,18 +974,18 @@ def test_import_0_reimport_3_active_verified(self): # import 1 and then reimport 2 without closing old findings # - reimport should not mitigate the
zap1 def test_import_reimport_without_closing_old_findings(self): - logger.debug('reimporting updated zap xml report and keep old findings open') + logger.debug("reimporting updated zap xml report and keep old findings open") import1 = self.import_scan_with_params(self.zap_sample1_filename) - test_id = import1['test'] + test_id = import1["test"] findings = self.get_test_findings_api(test_id) self.assert_finding_count_json(4, findings) with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1, untouched=3): reimport1 = self.reimport_scan_with_params(test_id, self.zap_sample2_filename, close_old_findings=False) - test_id = reimport1['test'] + test_id = reimport1["test"] self.assertEqual(test_id, test_id) findings = self.get_test_findings_api(test_id, verified=False) @@ -996,9 +996,9 @@ def test_import_reimport_without_closing_old_findings(self): mitigated = 0 not_mitigated = 0 - for finding in findings['results']: + for finding in findings["results"]: logger.debug(finding) - if finding['is_mitigated']: + if finding["is_mitigated"]: mitigated += 1 else: not_mitigated += 1 @@ -1018,12 +1018,12 @@ def test_import_reimport_without_closing_old_findings(self): def test_import_0_reimport_0_anchore_file_path(self): import0 = self.import_scan_with_params(self.anchore_file_name, scan_type=self.scan_type_anchore) - test_id = import0['test'] + test_id = import0["test"] active_findings_before = self.get_test_findings_api(test_id, active=True) self.log_finding_summary_json_api(active_findings_before) - active_findings_count_before = active_findings_before['count'] + active_findings_count_before = active_findings_before["count"] notes_count_before = self.db_notes_count() # reimport exact same report @@ -1045,37 +1045,37 @@ def test_import_0_reimport_0_anchore_file_path(self): # reimport Zap0 and only 1 finding must be active # the other 3 findings manually set to active=False must remain False def test_import_reimport_keep_false_positive_and_out_of_scope(self): - logger.debug('importing zap0 with 4 findings, manually setting 3 findings to active=False, reimporting zap0 must return only 1 finding active=True') + logger.debug("importing zap0 with 4 findings, manually setting 3 findings to active=False, reimporting zap0 must return only 1 finding active=True") import0 = self.import_scan_with_params(self.zap_sample0_filename) - test_id = import0['test'] + test_id = import0["test"] test_api_response = self.get_test_api(test_id) - product_api_response = self.get_engagement_api(test_api_response['engagement']) - product_id = product_api_response['product'] + product_api_response = self.get_engagement_api(test_api_response["engagement"]) + product_id = product_api_response["product"] self.patch_product_api(product_id, {"enable_simple_risk_acceptance": True}) active_findings_before = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(4, active_findings_before) - for finding in active_findings_before['results']: - if 'Zap1' in finding['title']: - self.patch_finding_api(finding['id'], {"active": False, + for finding in active_findings_before["results"]: + if "Zap1" in finding["title"]: + self.patch_finding_api(finding["id"], {"active": False, "verified": False, "false_p": True, "out_of_scope": False, "risk_accepted": False, "is_mitigated": True}) - elif 'Zap2' in finding['title']: - self.patch_finding_api(finding['id'], {"active": False, + elif "Zap2" in finding["title"]: + self.patch_finding_api(finding["id"], {"active": False, "verified": False, "false_p": 
False, "out_of_scope": True, "risk_accepted": False, "is_mitigated": True}) - elif 'Zap3' in finding['title']: - self.patch_finding_api(finding['id'], {"active": False, + elif "Zap3" in finding["title"]: + self.patch_finding_api(finding["id"], {"active": False, "verified": False, "false_p": False, "out_of_scope": False, @@ -1085,9 +1085,9 @@ def test_import_reimport_keep_false_positive_and_out_of_scope(self): active_findings_before = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(1, active_findings_before) - for finding in active_findings_before['results']: - if 'Zap5' in finding['title']: - self.delete_finding_api(finding['id']) + for finding in active_findings_before["results"]: + if "Zap5" in finding["title"]: + self.delete_finding_api(finding["id"]) active_findings_before = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(0, active_findings_before) @@ -1095,7 +1095,7 @@ def test_import_reimport_keep_false_positive_and_out_of_scope(self): with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - self.assertEqual(reimport0['test'], test_id) + self.assertEqual(reimport0["test"], test_id) active_findings_after = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(1, active_findings_after) @@ -1103,35 +1103,35 @@ def test_import_reimport_keep_false_positive_and_out_of_scope(self): active_findings_after = self.get_test_findings_api(test_id, active=False) self.assert_finding_count_json(3, active_findings_after) - for finding in active_findings_after['results']: - if 'Zap1' in finding['title']: - self.assertFalse(finding['active']) - self.assertFalse(finding['verified']) - self.assertTrue(finding['false_p']) - self.assertFalse(finding['out_of_scope']) - self.assertFalse(finding['risk_accepted']) - self.assertTrue(finding['is_mitigated']) - elif 'Zap2' in finding['title']: - self.assertFalse(finding['active']) - self.assertFalse(finding['verified']) - self.assertFalse(finding['false_p']) - self.assertTrue(finding['out_of_scope']) - self.assertFalse(finding['risk_accepted']) - self.assertTrue(finding['is_mitigated']) - elif 'Zap3' in finding['title']: - self.assertFalse(finding['active']) - self.assertFalse(finding['verified']) - self.assertFalse(finding['false_p']) - self.assertFalse(finding['out_of_scope']) - self.assertTrue(finding['risk_accepted']) - self.assertTrue(finding['is_mitigated']) - elif 'Zap5' in finding['title']: - self.assertTrue(finding['active']) - self.assertTrue(finding['verified']) - self.assertFalse(finding['false_p']) - self.assertFalse(finding['out_of_scope']) - self.assertFalse(finding['risk_accepted']) - self.assertFalse(finding['is_mitigated']) + for finding in active_findings_after["results"]: + if "Zap1" in finding["title"]: + self.assertFalse(finding["active"]) + self.assertFalse(finding["verified"]) + self.assertTrue(finding["false_p"]) + self.assertFalse(finding["out_of_scope"]) + self.assertFalse(finding["risk_accepted"]) + self.assertTrue(finding["is_mitigated"]) + elif "Zap2" in finding["title"]: + self.assertFalse(finding["active"]) + self.assertFalse(finding["verified"]) + self.assertFalse(finding["false_p"]) + self.assertTrue(finding["out_of_scope"]) + self.assertFalse(finding["risk_accepted"]) + self.assertTrue(finding["is_mitigated"]) + elif "Zap3" in finding["title"]: + self.assertFalse(finding["active"]) + self.assertFalse(finding["verified"]) + 
self.assertFalse(finding["false_p"]) + self.assertFalse(finding["out_of_scope"]) + self.assertTrue(finding["risk_accepted"]) + self.assertTrue(finding["is_mitigated"]) + elif "Zap5" in finding["title"]: + self.assertTrue(finding["active"]) + self.assertTrue(finding["verified"]) + self.assertFalse(finding["false_p"]) + self.assertFalse(finding["out_of_scope"]) + self.assertFalse(finding["risk_accepted"]) + self.assertFalse(finding["is_mitigated"]) # import gitlab_dep_scan_components_filename with 6 findings # findings 1, 2 and 3 have the same component_name (golang.org/x/crypto) and the same CVE (CVE-2020-29652), but different component_version @@ -1149,8 +1149,8 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self): import0 = self.import_scan_with_params(self.gitlab_dep_scan_components_filename, scan_type=self.scan_type_gtlab_dep_scan, - minimum_severity='Info') - test_id = import0['test'] + minimum_severity="Info") + test_id = import0["test"] active_findings_before = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(6, active_findings_before) @@ -1158,37 +1158,37 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self): self.reimport_scan_with_params(test_id, self.gitlab_dep_scan_components_filename, scan_type=self.scan_type_gtlab_dep_scan, - minimum_severity='Info') + minimum_severity="Info") active_findings_after = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(6, active_findings_after) count = 0 - for finding in active_findings_after['results']: - if 'v0.0.0-20190219172222-a4c6cb3142f2' == finding['component_version']: - self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding['title']) - self.assertEqual("CVE-2020-29652", finding['vulnerability_ids'][0]['vulnerability_id']) - self.assertEqual("golang.org/x/crypto", finding['component_name']) + for finding in active_findings_after["results"]: + if "v0.0.0-20190219172222-a4c6cb3142f2" == finding["component_version"]: + self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) + self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) + self.assertEqual("golang.org/x/crypto", finding["component_name"]) count = count + 1 - elif 'v0.0.0-20190308221718-c2843e01d9a2' == finding['component_version']: - self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding['title']) - self.assertEqual("CVE-2020-29652", finding['vulnerability_ids'][0]['vulnerability_id']) - self.assertEqual("golang.org/x/crypto", finding['component_name']) + elif "v0.0.0-20190308221718-c2843e01d9a2" == finding["component_version"]: + self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) + self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) + self.assertEqual("golang.org/x/crypto", finding["component_name"]) count = count + 1 - elif 'v0.0.0-20200302210943-78000ba7a073' == finding['component_version']: - self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding['title']) - self.assertEqual("CVE-2020-29652", finding['vulnerability_ids'][0]['vulnerability_id']) - self.assertEqual("golang.org/x/crypto", finding['component_name']) + elif "v0.0.0-20200302210943-78000ba7a073" == finding["component_version"]: + self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) + self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) + self.assertEqual("golang.org/x/crypto", 
finding["component_name"]) count = count + 1 - elif 'v0.3.0' == finding['component_version']: - self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding['title']) - self.assertEqual("CVE-2020-14040", finding['vulnerability_ids'][0]['vulnerability_id']) - self.assertEqual("golang.org/x/text", finding['component_name']) + elif "v0.3.0" == finding["component_version"]: + self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding["title"]) + self.assertEqual("CVE-2020-14040", finding["vulnerability_ids"][0]["vulnerability_id"]) + self.assertEqual("golang.org/x/text", finding["component_name"]) count = count + 1 - elif 'v0.3.2' == finding['component_version']: - self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding['title']) - self.assertEqual("CVE-2020-14040", finding['vulnerability_ids'][0]['vulnerability_id']) - self.assertEqual("golang.org/x/text", finding['component_name']) + elif "v0.3.2" == finding["component_version"]: + self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding["title"]) + self.assertEqual("CVE-2020-14040", finding["vulnerability_ids"][0]["vulnerability_id"]) + self.assertEqual("golang.org/x/text", finding["component_name"]) count = count + 1 self.assertEqual(5, count) @@ -1197,11 +1197,11 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self): # parameter endpoint_to_add: each imported finding should be related to endpoint with id=1 # close_old_findings functionality: secony (empty) import should close all findings from the first import def test_import_param_close_old_findings_with_additional_endpoint(self): - logger.debug('importing clair report with additional endpoint') + logger.debug("importing clair report with additional endpoint") with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, endpoint_to_add=1) - test_id = import0['test'] + test_id = import0["test"] test = self.get_test(test_id) findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1227,11 +1227,11 @@ def test_import_param_close_old_findings_with_additional_endpoint(self): # close_old_findings functionality: second (empty) import should close all findings from the first import when setting the same service def test_import_param_close_old_findings_with_same_service(self): - logger.debug('importing clair report with same service') + logger.debug("importing clair report with same service") with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): - import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service='service_1') + import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service="service_1") - test_id = import0['test'] + test_id = import0["test"] test = self.get_test(test_id) findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1244,7 +1244,7 @@ def test_import_param_close_old_findings_with_same_service(self): # reimport empty report with assertTestImportModelsCreated(self, imports=1, affected_findings=4, closed=4): - self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, 
service='service_1') + self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, service="service_1") # all findings from import0 should be closed now engagement_findings_count = Finding.objects.filter(test__engagement_id=1, test__test_type=test.test_type, active=True, is_mitigated=False).count() @@ -1252,11 +1252,11 @@ def test_import_param_close_old_findings_with_same_service(self): # close_old_findings functionality: second (empty) import should not close findings from the first import when setting different services def test_import_param_close_old_findings_with_different_services(self): - logger.debug('importing clair report with different services') + logger.debug("importing clair report with different services") with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): - import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service='service_1') + import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service="service_1") - test_id = import0['test'] + test_id = import0["test"] test = self.get_test(test_id) findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1269,7 +1269,7 @@ def test_import_param_close_old_findings_with_different_services(self): # reimport empty report with assertTestImportModelsCreated(self, imports=1, affected_findings=0, closed=0): - self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, service='service_2') + self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, service="service_2") # no findings from import0 should be closed now engagement_findings_count = Finding.objects.filter(test__engagement_id=1, test__test_type=test.test_type, active=True, is_mitigated=False).count() @@ -1278,9 +1278,9 @@ def test_import_param_close_old_findings_with_different_services(self): # close_old_findings functionality: second (empty) import should not close findings from the first import when setting a service in the first import but none in the second import def test_import_param_close_old_findings_with_and_without_service_1(self): with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): - import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service='service_1') + import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service="service_1") - test_id = import0['test'] + test_id = import0["test"] test = self.get_test(test_id) findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1304,7 +1304,7 @@ def test_import_param_close_old_findings_with_and_without_service_2(self): with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4): import0 = self.import_scan_with_params(self.clair_few_findings, scan_type=self.scan_type_clair, close_old_findings=True, service=None) - test_id = import0['test'] + test_id = import0["test"] test = self.get_test(test_id) findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1317,7 +1317,7 @@ def test_import_param_close_old_findings_with_and_without_service_2(self): # reimport empty report with assertTestImportModelsCreated(self, imports=1, 
affected_findings=0, closed=0): - self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, service='service_2') + self.import_scan_with_params(self.clair_empty, scan_type=self.scan_type_clair, close_old_findings=True, service="service_2") # no findings from import0 should be closed now engagement_findings_count = Finding.objects.filter(test__engagement_id=1, test__test_type=test.test_type, active=True, is_mitigated=False).count() @@ -1331,13 +1331,13 @@ def test_import_reimport_generic(self): import0 = self.import_scan_with_params(self.generic_filename_with_file, scan_type="Generic Findings Import") - test_id = import0['test'] + test_id = import0["test"] # reimport exact same report with assertTestImportModelsCreated(self, reimports=1, untouched=1): reimport0 = self.reimport_scan_with_params(test_id, self.generic_filename_with_file, scan_type="Generic Findings Import") - test_id2 = reimport0['test'] + test_id2 = reimport0["test"] self.assertEqual(test_id, test_id2) findings = self.get_test_findings_api(test_id) @@ -1361,11 +1361,11 @@ def test_import_nuclei_emptyc(self): import0 = self.import_scan_with_params(self.nuclei_empty, scan_type="Nuclei Scan") - test_id = import0['test'] + test_id = import0["test"] reimport0 = self.reimport_scan_with_params(test_id, self.nuclei_empty, scan_type="Nuclei Scan") - test_id2 = reimport0['test'] + test_id2 = reimport0["test"] self.assertEqual(test_id, test_id2) def test_import_reimport_endpoint_where_eps_date_is_different(self): @@ -1379,13 +1379,13 @@ def test_import_reimport_endpoint_where_eps_date_is_different(self): active=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) self.assert_finding_count_json(1, findings) - test = self.get_test_api(test_id)['id'] + test = self.get_test_api(test_id)["id"] finding = Finding.objects.filter(test__engagement_id=1, test=test).first() self.assertEqual(finding.status_finding.count(), 1) @@ -1398,7 +1398,7 @@ def test_import_reimport_endpoint_where_eps_date_is_different(self): reimport0 = self.reimport_scan_with_params(test_id, self.gitlab_dast_file_name, scan_type=self.scan_type_gitlab_dast) - test_id = reimport0['test'] + test_id = reimport0["test"] findings = self.get_test_findings_api(test_id) self.log_finding_summary_json_api(findings) @@ -1419,14 +1419,14 @@ def test_import_reimport_vulnerability_ids(self): import0 = self.import_scan_with_params(self.anchore_grype_file_name, scan_type=self.anchore_grype_scan_type) - test_id = import0['test'] + test_id = import0["test"] test = Test.objects.get(id=test_id) findings = Finding.objects.filter(test=test) self.assertEqual(5, len(findings)) - self.assertEqual('GHSA-v6rh-hp5x-86rv', findings[3].cve) + self.assertEqual("GHSA-v6rh-hp5x-86rv", findings[3].cve) self.assertEqual(2, len(findings[3].vulnerability_ids)) - self.assertEqual('GHSA-v6rh-hp5x-86rv', findings[3].vulnerability_ids[0]) - self.assertEqual('CVE-2021-44420', findings[3].vulnerability_ids[1]) + self.assertEqual("GHSA-v6rh-hp5x-86rv", findings[3].vulnerability_ids[0]) + self.assertEqual("CVE-2021-44420", findings[3].vulnerability_ids[1]) test_type = Test_Type.objects.get(name=self.anchore_grype_scan_type) reimport_test = Test( @@ -1441,14 +1441,14 @@ def test_import_reimport_vulnerability_ids(self): self.reimport_scan_with_params(reimport_test.id, self.anchore_grype_file_name, scan_type=self.anchore_grype_scan_type) findings = 
Finding.objects.filter(test=reimport_test) self.assertEqual(5, len(findings)) - self.assertEqual('GHSA-v6rh-hp5x-86rv', findings[3].cve) + self.assertEqual("GHSA-v6rh-hp5x-86rv", findings[3].cve) self.assertEqual(2, len(findings[3].vulnerability_ids)) - self.assertEqual('GHSA-v6rh-hp5x-86rv', findings[3].vulnerability_ids[0]) - self.assertEqual('CVE-2021-44420', findings[3].vulnerability_ids[1]) + self.assertEqual("GHSA-v6rh-hp5x-86rv", findings[3].vulnerability_ids[0]) + self.assertEqual("CVE-2021-44420", findings[3].vulnerability_ids[1]) def test_import_history_reactivated_and_untouched_findings_do_not_mix(self): import0 = self.import_scan_with_params(self.generic_import_1, scan_type=self.scan_type_generic) - test_id = import0['test'] + test_id = import0["test"] # reimport the second report self.reimport_scan_with_params(test_id, self.generic_import_2, scan_type=self.scan_type_generic) # reimport the first report again @@ -1457,7 +1457,7 @@ def test_import_history_reactivated_and_untouched_findings_do_not_mix(self): class ImportReimportTestAPI(DojoAPITestCase, ImportReimportMixin): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): # super(ImportReimportMixin, self).__init__(*args, **kwargs) @@ -1466,10 +1466,10 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setUp(self): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) # self.url = reverse(self.viewname + '-list') # Statistics only available in API Response @@ -1480,181 +1480,181 @@ def setUp(self): # - total findings count should be 5 # - zap1 active, zap4 inactive def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statistics(self): - logger.debug('reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again') + logger.debug("reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again") import0 = self.import_scan_with_params(self.zap_sample0_filename) - self.assertEqual(import0['statistics'], { - 'after': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 3}, - 'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}, + self.assertEqual(import0["statistics"], { + "after": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 3}, + "medium": {"active": 1, 
"verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 4}, }, }) - test_id = import0['test'] + test_id = import0["test"] reimport1 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename) - self.assertEqual(reimport1['statistics'], { - 'after': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 4, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 4, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 5, 'verified': 0}}, - 'before': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 3, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 4, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 4, 'verified': 0}}, - 'delta': { - 'closed': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}, - 'created': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 
'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}, - 'left untouched': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 2, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 2, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 3, 'verified': 0}}, - 'reactivated': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}}, + self.assertEqual(reimport1["statistics"], { + "after": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 4, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 4, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 5, "verified": 0}}, + "before": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, 
"verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 3, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 4, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 4, "verified": 0}}, + "delta": { + "closed": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}}, + "created": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}}, + "left untouched": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 2, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 2, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 3, "verified": 0}}, + "reactivated": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, 
"is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}}, }, }) with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, closed=1, reactivated=1, untouched=3): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - self.assertEqual(reimport0['statistics'], { - 'after': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 4, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 4, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 5, 'verified': 0}}, - 'before': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 4, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 4, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 5, 'verified': 0}}, - 'delta': { - 'closed': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 1, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}, - 'created': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 
'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}}, - 'left untouched': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 2, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 2, 'verified': 0}, - 'medium': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'total': {'active': 3, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 3, 'verified': 0}}, - 'reactivated': { - 'critical': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'high': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'low': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}, - 'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}, - 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}, + self.assertEqual(reimport0["statistics"], { + "after": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 4, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 4, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 5, "verified": 0}}, + "before": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, 
"risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 4, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 4, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 5, "verified": 0}}, + "delta": { + "closed": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 1, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}}, + "created": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}}, + "left untouched": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 2, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 2, "verified": 0}, + "medium": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "total": {"active": 3, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 3, "verified": 0}}, + "reactivated": { + "critical": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "high": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "info": {"active": 0, 
"duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "low": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}, + "medium": {"active": 0, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 0, "verified": 0}, + "total": {"active": 1, "duplicate": 0, "false_p": 0, "is_mitigated": 0, "out_of_scope": 0, "risk_accepted": 0, "total": 1, "verified": 0}}, }, }) # without import history, there are no delta statistics @override_settings(TRACK_IMPORT_HISTORY=False) def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statistics_no_history(self): - logger.debug('reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again') + logger.debug("reimporting updated zap xml report, 1 new finding and 1 no longer present, verified=True and then 0 again") import0 = self.import_scan_with_params(self.zap_sample0_filename) - self.assertEqual(import0['statistics'], { - 'after': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 3}, - 'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}, + self.assertEqual(import0["statistics"], { + "after": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 3}, + "medium": {"active": 1, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 4}, }, }) - test_id = import0['test'] + test_id = import0["test"] reimport1 = self.reimport_scan_with_params(test_id, self.zap_sample1_filename) - self.assertEqual(reimport1['statistics'], { - 'before': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 3}, - 'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 
'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}, + self.assertEqual(reimport1["statistics"], { + "before": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 3}, + "medium": {"active": 1, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 4}, }, - 'after': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 4}, - 'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}, + "after": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 4}, + "medium": {"active": 1, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 5}, }, }) with assertTestImportModelsCreated(self, reimports=0, affected_findings=0, closed=0, reactivated=0, untouched=0): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) - self.assertEqual(reimport0['statistics'], { - 'before': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 4}, - 'medium': 
{'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}, + self.assertEqual(reimport0["statistics"], { + "before": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 4}, + "medium": {"active": 1, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 5}, }, - 'after': { - 'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'low': {'active': 3, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 4}, - 'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1}, - 'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0}, - 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}, + "after": { + "info": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "low": {"active": 3, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 4}, + "medium": {"active": 1, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 1}, + "high": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "critical": {"active": 0, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 0, "risk_accepted": 0, "total": 0}, + "total": {"active": 4, "verified": 0, "duplicate": 0, "false_p": 0, "out_of_scope": 0, "is_mitigated": 1, "risk_accepted": 0, "total": 5}, }, }) # Reimport tests to test Scan_Date logic (usecase not supported on UI) @@ -1663,73 +1663,73 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti # - reimport # - no scan_date overrides date not set by parser def test_reimport_default_scan_date_parser_not_sets_date(self): - logger.debug('importing zap xml report with date set by 
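[Editor's note] The nested `statistics` dictionaries above are dense; the shape is statistics -> before/after/delta -> action -> severity -> status counts. A minimal, illustrative reader for the `delta` section (not part of this patch; `result` stands for the parsed JSON that `reimport_scan_with_params()` returns in these tests):

    # Illustrative sketch only: summarize the "delta" block of a reimport
    # response as {action: number of affected findings}.
    def summarize_delta(result: dict) -> dict:
        delta = result["statistics"]["delta"]
        # Each action bucket holds per-severity dicts; the "total" severity
        # row's "total" field is the overall count for that action.
        return {action: severities["total"]["total"] for action, severities in delta.items()}

    # For the reimport asserted just above this note, this would yield:
    # {"closed": 1, "created": 0, "left untouched": 3, "reactivated": 1}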
@@ -1663,73 +1663,73 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
     # - reimport
     # - no scan_date overrides date not set by parser
     def test_reimport_default_scan_date_parser_not_sets_date(self):
-        logger.debug('importing zap xml report with date set by parser')
+        logger.debug("importing zap xml report with date set by parser")
         with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4):
             import0 = self.reimport_scan_with_params(None, self.zap_sample0_filename, active=False, verified=False, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)

-        test_id = import0['test']
+        test_id = import0["test"]
         findings = self.get_test_findings_api(test_id, active=False, verified=False)
         self.log_finding_summary_json_api(findings)

         # Get the date
-        date = findings['results'][0]['date']
+        date = findings["results"][0]["date"]
         self.assertEqual(date, str(timezone.localtime(timezone.now()).date()))

     # reimport acunetix scan with dates (non existing test, so import is called inside DD)
     # - reimport
     # - default scan_date (today) does not override date set by parser
     def test_reimport_default_scan_date_parser_sets_date(self):
-        logger.debug('importing original acunetix xml report')
+        logger.debug("importing original acunetix xml report")
         with assertTestImportModelsCreated(self, imports=1, affected_findings=1, created=1):
             import0 = self.reimport_scan_with_params(None, self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False, verified=False, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)

-        test_id = import0['test']
+        test_id = import0["test"]
         findings = self.get_test_findings_api(test_id, active=False, verified=False)
         self.log_finding_summary_json_api(findings)

         # Get the date
-        date = findings['results'][0]['date']
-        self.assertEqual(date, '2018-09-24')
+        date = findings["results"][0]["date"]
+        self.assertEqual(date, "2018-09-24")

     # reimport zap scan without dates (non existing test, so import is called inside DD)
     # - reimport
     # - set scan_date overrides date not set by parser
     def test_reimport_set_scan_date_parser_not_sets_date(self):
-        logger.debug('importing original zap xml report')
+        logger.debug("importing original zap xml report")
         with assertTestImportModelsCreated(self, imports=1, affected_findings=4, created=4):
-            import0 = self.reimport_scan_with_params(None, self.zap_sample0_filename, active=False, verified=False, scan_date='2006-12-26',
+            import0 = self.reimport_scan_with_params(None, self.zap_sample0_filename, active=False, verified=False, scan_date="2006-12-26",
                 product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)

-        test_id = import0['test']
+        test_id = import0["test"]
         findings = self.get_test_findings_api(test_id, active=False, verified=False)
         self.log_finding_summary_json_api(findings)

         # Get the date
-        date = findings['results'][0]['date']
-        self.assertEqual(date, '2006-12-26')
+        date = findings["results"][0]["date"]
+        self.assertEqual(date, "2006-12-26")

     # reimport acunetix scan with dates (non existing test, so import is called inside DD)
     # - reimport
     # - set scan_date overrides date set by parser
     def test_reimport_set_scan_date_parser_sets_date(self):
-        logger.debug('importing acunetix xml report with date set by parser')
+        logger.debug("importing acunetix xml report with date set by parser")
         with assertTestImportModelsCreated(self, imports=1, affected_findings=1, created=1):
-            import0 = self.reimport_scan_with_params(None, self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False, verified=False, scan_date='2006-12-26',
+            import0 = self.reimport_scan_with_params(None, self.acunetix_file_name, scan_type=self.scan_type_acunetix, active=False, verified=False, scan_date="2006-12-26",
                 product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)

-        test_id = import0['test']
+        test_id = import0["test"]
         findings = self.get_test_findings_api(test_id, active=False, verified=False)
         self.log_finding_summary_json_api(findings)

         # Get the date
-        date = findings['results'][0]['date']
-        self.assertEqual(date, '2006-12-26')
+        date = findings["results"][0]["date"]
+        self.assertEqual(date, "2006-12-26")


 class ImportReimportTestUI(DojoAPITestCase, ImportReimportMixin):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]
     client_ui = Client()

     def __init__(self, *args, **kwargs):
@@ -1741,10 +1741,10 @@ def __init__(self, *args, **kwargs):
     def setUp(self):
         # still using the API to verify results
-        testuser = User.objects.get(username='admin')
+        testuser = User.objects.get(username="admin")
         token = Token.objects.get(user=testuser)
         self.client = APIClient()
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
         # self.url = reverse(self.viewname + '-list')

         self.client_ui = Client()
@@ -1764,25 +1764,25 @@ def reimport_scan_with_params(self, *args, **kwargs):
         return self.reimport_scan_with_params_ui(*args, **kwargs)

     def import_scan_ui(self, engagement, payload):
-        logger.debug('import_scan payload %s', payload)
+        logger.debug("import_scan payload %s", payload)
         # response = self.client_ui.post(reverse('import_scan_results', args=(engagement, )), urlencode(payload), content_type='application/x-www-form-urlencoded')
-        response = self.client_ui.post(reverse('import_scan_results', args=(engagement, )), payload)
+        response = self.client_ui.post(reverse("import_scan_results", args=(engagement, )), payload)
         # print(vars(response))
         # print('url: ' + response.url)
-        test = Test.objects.get(id=response.url.split('/')[-1])
+        test = Test.objects.get(id=response.url.split("/")[-1])
         # f = open('response.html', 'w+')
         # f.write(str(response.content, 'utf-8'))
         # f.close()
         self.assertEqual(302, response.status_code, response.content[:1000])
-        return {'test': test.id}
+        return {"test": test.id}

     def reimport_scan_ui(self, test, payload):
-        response = self.client_ui.post(reverse('re_import_scan_results', args=(test, )), payload)
+        response = self.client_ui.post(reverse("re_import_scan_results", args=(test, )), payload)
         self.assertEqual(302, response.status_code, response.content[:1000])
-        test = Test.objects.get(id=response.url.split('/')[-1])
-        return {'test': test.id}
+        test = Test.objects.get(id=response.url.split("/")[-1])
+        return {"test": test.id}

-    def import_scan_with_params_ui(self, filename, scan_type='ZAP Scan', engagement=1, minimum_severity='Low', active=True, verified=False,
+    def import_scan_with_params_ui(self, filename, scan_type="ZAP Scan", engagement=1, minimum_severity="Low", active=True, verified=False,
                                    push_to_jira=None, endpoint_to_add=None, tags=None, close_old_findings=False, scan_date=None, service=None,
                                    forceActive=False, forceVerified=False):
@@ -1811,25 +1811,25 @@ def import_scan_with_params_ui(self, filename, scan_type='ZAP Scan', engagement=
         }

         if push_to_jira is not None:
-            payload['push_to_jira'] = push_to_jira
+            payload["push_to_jira"] = push_to_jira

         if endpoint_to_add is not None:
-            payload['endpoints'] = [endpoint_to_add]
+            payload["endpoints"] = [endpoint_to_add]

         if tags is not None:
-            payload['tags'] = tags
+            payload["tags"] = tags

         if scan_date is not None:
-            payload['scan_date'] = scan_date
+            payload["scan_date"] = scan_date

         if service is not None:
-            payload['service'] = service
+            payload["service"] = service

         result = self.import_scan_ui(engagement, payload)
         return result

-    def reimport_scan_with_params_ui(self, test_id, filename, scan_type='ZAP Scan', minimum_severity='Low', active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, scan_date=None):
+    def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan", minimum_severity="Low", active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, scan_date=None):
         # Mimic old functionality for active/verified to avoid breaking tests
         activePayload = "force_to_true"
         if not active:
@@ -1850,13 +1850,13 @@ def reimport_scan_with_params_ui(self, test_id, filename, scan_type='ZAP Scan',
         }

         if push_to_jira is not None:
-            payload['push_to_jira'] = push_to_jira
+            payload["push_to_jira"] = push_to_jira

         if tags is not None:
-            payload['tags'] = tags
+            payload["tags"] = tags

         if scan_date is not None:
-            payload['scan_date'] = scan_date
+            payload["scan_date"] = scan_date

         result = self.reimport_scan_ui(test_id, payload)
         return result
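[Editor's note] The next file gets the same quote normalization plus one behavioral fix further down: `TestImporterUtils.__del__` becomes `tearDown`. `__del__` only runs when the interpreter finalizes the instance, so its timing is unspecified; `tearDown` is invoked by the unittest runner directly after each test method, which makes the `.delete()` cleanup deterministic. A minimal sketch of the pattern (class and attribute names are illustrative, not from this PR):

    import unittest

    class ExampleCleanupTest(unittest.TestCase):
        def setUp(self):
            # Per-test fixture creation.
            self.resource = {"open": True}

        def tearDown(self):
            # Runs after every test method (whenever setUp succeeded), even if
            # the test failed, unlike __del__, whose timing is up to the GC.
            self.resource["open"] = False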
diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py
index 0d26c5b0e8..d4eefd358a 100644
--- a/unittests/test_importers_importer.py
+++ b/unittests/test_importers_importer.py
@@ -17,24 +17,24 @@
 logger = logging.getLogger(__name__)

-NPM_AUDIT_NO_VULN_FILENAME = 'scans/npm_audit/no_vuln.json'
-NPM_AUDIT_SCAN_TYPE = 'NPM Audit Scan'
+NPM_AUDIT_NO_VULN_FILENAME = "scans/npm_audit/no_vuln.json"
+NPM_AUDIT_SCAN_TYPE = "NPM Audit Scan"

-ACUNETIX_AUDIT_ONE_VULN_FILENAME = 'scans/acunetix/one_finding.xml'
-ENDPOINT_META_IMPORTER_FILENAME = 'endpoint_meta_import/no_endpoint_meta_import.csv'
+ACUNETIX_AUDIT_ONE_VULN_FILENAME = "scans/acunetix/one_finding.xml"
+ENDPOINT_META_IMPORTER_FILENAME = "endpoint_meta_import/no_endpoint_meta_import.csv"

-ENGAGEMENT_NAME_DEFAULT = 'Engagement 1'
-ENGAGEMENT_NAME_NEW = 'Engagement New 1'
+ENGAGEMENT_NAME_DEFAULT = "Engagement 1"
+ENGAGEMENT_NAME_NEW = "Engagement New 1"

-PRODUCT_NAME_DEFAULT = 'Product A'
-PRODUCT_NAME_NEW = 'Product New A'
+PRODUCT_NAME_DEFAULT = "Product A"
+PRODUCT_NAME_NEW = "Product New A"

-PRODUCT_TYPE_NAME_DEFAULT = 'Shiny Products'
-PRODUCT_TYPE_NAME_NEW = 'Extra Shiny Products'
+PRODUCT_TYPE_NAME_DEFAULT = "Shiny Products"
+PRODUCT_TYPE_NAME_NEW = "Extra Shiny Products"

-TEST_TITLE_DEFAULT = 'super important scan'
-TEST_TITLE_ALTERNATE = 'meh import scan'
-TEST_TITLE_NEW = 'lol importing via reimport'
+TEST_TITLE_DEFAULT = "super important scan"
+TEST_TITLE_ALTERNATE = "meh import scan"
+TEST_TITLE_NEW = "lol importing via reimport"


 class TestDojoDefaultImporter(DojoTestCase):
@@ -162,14 +162,14 @@ def setUp(self):
         # testuser = User.objects.get(username='admin')
         token, _ = Token.objects.get_or_create(user=testuser)
         self.client = APIClient(raise_request_exception=True)
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

         self.create_default_data()
         # self.url = reverse(self.viewname + '-list')

     def create_default_data(self):
         # creating is much faster compared to using a fixture
-        logger.debug('creating default product + engagement')
-        Development_Environment.objects.get_or_create(name='Development')
+        logger.debug("creating default product + engagement")
+        Development_Environment.objects.get_or_create(name="Development")
         self.product_type = self.create_product_type(PRODUCT_TYPE_NAME_DEFAULT)
         self.product = self.create_product(PRODUCT_NAME_DEFAULT)
         self.engagement = self.create_engagement(ENGAGEMENT_NAME_DEFAULT, product=self.product)
@@ -179,19 +179,19 @@ def create_default_data(self):
     def test_import_by_engagement_id(self):
         with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=0):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=self.engagement.id, test_title=TEST_TITLE_DEFAULT)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, TEST_TITLE_DEFAULT)
-        self.assertEqual(import0['engagement_id'], self.engagement.id)
-        self.assertEqual(import0['product_id'], self.engagement.product.id)
+        self.assertEqual(import0["engagement_id"], self.engagement.id)
+        self.assertEqual(import0["product_id"], self.engagement.product.id)

     def test_import_by_product_name_exists_engagement_name_exists(self):
         with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=0):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(Test.objects.get(id=test_id).engagement, self.engagement_last)
-        self.assertEqual(import0['engagement_id'], self.engagement_last.id)
-        self.assertEqual(import0['product_id'], self.engagement_last.product.id)
+        self.assertEqual(import0["engagement_id"], self.engagement_last.id)
+        self.assertEqual(import0["product_id"], self.engagement_last.product.id)

     def test_import_by_product_name_exists_engagement_name_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
@@ -202,40 +202,40 @@ def test_import_by_product_name_exists_engagement_name_not_exists_auto_create(se
         with assertImportModelsCreated(self, tests=1, engagements=1, products=0, product_types=0, endpoints=0):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(import0['product_id'], self.engagement.product.id)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(import0["product_id"], self.engagement.product.id)

     def test_import_by_product_name_not_exists_engagement_name(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
             self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

-    @patch('dojo.jira_link.helper.get_jira_project')
+    @patch("dojo.jira_link.helper.get_jira_project")
     def test_import_by_product_name_not_exists_engagement_name_auto_create(self, mock):
         with assertImportModelsCreated(self, tests=1, engagements=1, products=1, product_types=0, endpoints=0):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).name, PRODUCT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).prod_type.name, PRODUCT_TYPE_NAME_DEFAULT)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).name, PRODUCT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).prod_type.name, PRODUCT_TYPE_NAME_DEFAULT)
         mock.assert_not_called()

-    @patch('dojo.jira_link.helper.get_jira_project')
+    @patch("dojo.jira_link.helper.get_jira_project")
     def test_import_by_product_type_name_not_exists_product_name_not_exists_engagement_name_auto_create(self, mock):
         with assertImportModelsCreated(self, tests=1, engagements=1, products=1, product_types=1, endpoints=0):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).name, PRODUCT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).prod_type.name, PRODUCT_TYPE_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product_Type, id=import0['product_type_id']).name, PRODUCT_TYPE_NAME_NEW)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).name, PRODUCT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).prod_type.name, PRODUCT_TYPE_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product_Type, id=import0["product_type_id"]).name, PRODUCT_TYPE_NAME_NEW)
         mock.assert_not_called()

@@ -248,49 +248,49 @@ def test_endpoint_meta_import_by_product_name_not_exists(self):
         self.endpoint_meta_import_scan_with_params(ENDPOINT_META_IMPORTER_FILENAME, product=None, product_name=PRODUCT_NAME_NEW, expected_http_status_code=400)

     def test_import_with_invalid_parameters(self):
-        with self.subTest('scan_date in the future'):
+        with self.subTest("scan_date in the future"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
-                engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True, scan_date='2222-01-01',
+                engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True, scan_date="2222-01-01",
                 expected_http_status_code=400)

-        with self.subTest('no parameters'):
+        with self.subTest("no parameters"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+            self.assertEqual(import0, ["product_name parameter missing"])

-        with self.subTest('no product data'):
+        with self.subTest("no product data"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, engagement_name='what the bleep', expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+                engagement=None, engagement_name="what the bleep", expected_http_status_code=400)
+            self.assertEqual(import0, ["product_name parameter missing"])

-        with self.subTest('engagement_name missing'):
+        with self.subTest("engagement_name missing"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_name='67283', expected_http_status_code=400)
-            self.assertEqual(import0, ['engagement_name parameter missing'])
+                engagement=None, product_name="67283", expected_http_status_code=400)
+            self.assertEqual(import0, ["engagement_name parameter missing"])

-        with self.subTest('invalid product type'):
+        with self.subTest("invalid product type"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_type_name='valentijn', product_name='67283', engagement_name='valentijn', expected_http_status_code=400)
+                engagement=None, product_type_name="valentijn", product_name="67283", engagement_name="valentijn", expected_http_status_code=400)
             self.assertEqual(import0, ['Product Type "valentijn" does not exist'])

-        with self.subTest('invalid product'):
+        with self.subTest("invalid product"):
             # random product type to avoid collision with other tests
             another_product_type_name = str(uuid.uuid4())
             Product_Type.objects.create(name=another_product_type_name)
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400)
+                engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name="valentijn", expected_http_status_code=400)
             self.assertEqual(import0, [(
                 "The fetched product has a conflict with the supplied product type name: "
                 f"existing product type name - {PRODUCT_TYPE_NAME_DEFAULT} vs "
                 f"supplied product type name - {another_product_type_name}"
             )])

-        with self.subTest('invalid engagement'):
+        with self.subTest("invalid engagement"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
                 engagement=1254235, expected_http_status_code=400)
             self.assertEqual(import0, ['Engagement "1254235" does not exist'])

-        with self.subTest('invalid engagement, but exists in another product'):
+        with self.subTest("invalid engagement, but exists in another product"):
             # random product to avoid collision with other tests
             another_product_name = str(uuid.uuid4())
             self.product = self.create_product(another_product_name)
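[Editor's note] Taken together, the subTests in this hunk and the next pin down the auto-create contract: `auto_create_context=True` lets an import create the missing engagement, product, and product type, but a product that does not yet exist still needs `product_type_name`, and `product_name`/`engagement_name` are mandatory whenever no numeric `engagement` id is passed. A hedged sketch of a payload satisfying those rules for DefectDojo's /api/v2/import-scan/ endpoint (values are examples, not taken from this PR):

    # Illustrative payload only; field names mirror the parameters that
    # these tests pass to import_scan_with_params().
    payload = {
        "scan_type": "NPM Audit Scan",
        "product_type_name": "Shiny Products",  # needed when the product must be created
        "product_name": "Product New A",        # required when no engagement id is given
        "engagement_name": "Engagement New 1",
        "auto_create_context": True,            # create missing context objects on the fly
    }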
@@ -298,20 +298,20 @@ def test_import_with_invalid_parameters(self):
                 engagement_name=ENGAGEMENT_NAME_DEFAULT, product_name=another_product_name,
                 expected_http_status_code=400)
             self.assertEqual(import0, [f'Engagement "Engagement 1" does not exist in Product "{another_product_name}"'])

-        with self.subTest('invalid engagement not id'):
+        with self.subTest("invalid engagement not id"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement='bla bla', expected_http_status_code=400)
-            self.assertEqual(import0, ['engagement must be an integer'])
+                engagement="bla bla", expected_http_status_code=400)
+            self.assertEqual(import0, ["engagement must be an integer"])

-        with self.subTest('autocreate product but no product type name'):
+        with self.subTest("autocreate product but no product type name"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400)
             self.assertEqual(import0, [f'Product "{PRODUCT_NAME_NEW}" does not exist and no product_type_name provided to create the new product in'])

-        with self.subTest('autocreate engagement but no product_name'):
+        with self.subTest("autocreate engagement but no product_name"):
             import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=None,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+            self.assertEqual(import0, ["product_name parameter missing"])


 class FlexibleReimportTestAPI(DojoAPITestCase):
@@ -327,14 +327,14 @@ def setUp(self):
         # testuser = User.objects.get(username='admin')
         token, _ = Token.objects.get_or_create(user=testuser)
         self.client = APIClient(raise_request_exception=True)
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

         self.create_default_data()
         # self.url = reverse(self.viewname + '-list')

     def create_default_data(self):
         # creating is much faster compared to using a fixture
-        logger.debug('creating default product + engagement')
-        Development_Environment.objects.get_or_create(name='Development')
+        logger.debug("creating default product + engagement")
+        Development_Environment.objects.get_or_create(name="Development")
         self.product_type = self.create_product_type(PRODUCT_TYPE_NAME_DEFAULT)
         self.product = self.create_product(PRODUCT_NAME_DEFAULT)
         self.engagement = self.create_engagement(ENGAGEMENT_NAME_DEFAULT, product=self.product)
@@ -348,53 +348,53 @@ def create_default_data(self):
     def test_reimport_by_test_id(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
             import0 = self.reimport_scan_with_params(self.test.id, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, TEST_TITLE_DEFAULT)
         self.assertEqual(test_id, self.test.id)
-        self.assertEqual(import0['engagement_id'], self.test.engagement.id)
-        self.assertEqual(import0['product_id'], self.test.engagement.product.id)
+        self.assertEqual(import0["engagement_id"], self.test.engagement.id)
+        self.assertEqual(import0["product_id"], self.test.engagement.product.id)

     def test_reimport_by_product_name_exists_engagement_name_exists_no_title(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(test_id, self.test_last_by_scan_type.id)
-        self.assertEqual(import0['engagement_id'], self.test_last_by_scan_type.engagement.id)
-        self.assertEqual(import0['product_id'], self.test_last_by_scan_type.engagement.product.id)
+        self.assertEqual(import0["engagement_id"], self.test_last_by_scan_type.engagement.id)
+        self.assertEqual(import0["product_id"], self.test_last_by_scan_type.engagement.product.id)

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type="Acunetix Scan", product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT, expected_http_status_code=400)

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_exists_auto_create(self):
         with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=1):
-            import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
+            import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type="Acunetix Scan", product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, TEST_TITLE_DEFAULT)
-        self.assertEqual(import0['engagement_id'], self.engagement.id)
+        self.assertEqual(import0["engagement_id"], self.engagement.id)

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
-                engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title='bogus title', expected_http_status_code=400)
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type="Acunetix Scan", product_name=PRODUCT_NAME_DEFAULT,
+                engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title="bogus title", expected_http_status_code=400)

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists_auto_create(self):
         with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=1):
-            import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
-                engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title='bogus title', auto_create_context=True)
-        test_id = import0['test']
-        self.assertEqual(get_object_or_none(Test, id=test_id).scan_type, 'Acunetix Scan')
-        self.assertEqual(get_object_or_none(Test, id=test_id).title, 'bogus title')
-        self.assertEqual(import0['engagement_id'], self.engagement.id)
+            import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type="Acunetix Scan", product_name=PRODUCT_NAME_DEFAULT,
+                engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title="bogus title", auto_create_context=True)
+        test_id = import0["test"]
+        self.assertEqual(get_object_or_none(Test, id=test_id).scan_type, "Acunetix Scan")
+        self.assertEqual(get_object_or_none(Test, id=test_id).title, "bogus title")
+        self.assertEqual(import0["engagement_id"], self.engagement.id)

     def test_reimport_by_product_name_exists_engagement_name_exists_test_title_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(test_id, self.test_last_by_title.id)

     def test_reimport_by_product_name_exists_engagement_name_not_exists(self):
@@ -406,93 +406,93 @@ def test_reimport_by_product_name_exists_engagement_name_not_exists_auto_create(
         with assertImportModelsCreated(self, tests=1, engagements=1, products=0, product_types=0, endpoints=0):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(import0['product_id'], self.engagement.product.id)
-        self.assertEqual(import0['product_type_id'], self.engagement.product.prod_type.id)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(import0["product_id"], self.engagement.product.id)
+        self.assertEqual(import0["product_type_id"], self.engagement.product.prod_type.id)

     def test_reimport_by_product_name_not_exists_engagement_name(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
             self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

-    @patch('dojo.jira_link.helper.get_jira_project')
+    @patch("dojo.jira_link.helper.get_jira_project")
     def test_reimport_by_product_name_not_exists_engagement_name_auto_create(self, mock):
         with assertImportModelsCreated(self, tests=1, engagements=1, products=1, product_types=0, endpoints=0):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_DEFAULT, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).name, PRODUCT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).prod_type.name, PRODUCT_TYPE_NAME_DEFAULT)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).name, PRODUCT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).prod_type.name, PRODUCT_TYPE_NAME_DEFAULT)
         mock.assert_not_called()

-    @patch('dojo.jira_link.helper.get_jira_project')
+    @patch("dojo.jira_link.helper.get_jira_project")
     def test_reimport_by_product_type_not_exists_product_name_not_exists_engagement_name_auto_create(self, mock):
         with assertImportModelsCreated(self, tests=1, engagements=1, products=1, product_types=1, endpoints=0):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assertEqual(get_object_or_none(Test, id=test_id).title, None)
-        self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).name, PRODUCT_NAME_NEW)
-        self.assertEqual(get_object_or_none(Product, id=import0['product_id']).prod_type.name, PRODUCT_TYPE_NAME_NEW)
+        self.assertEqual(get_object_or_none(Engagement, id=import0["engagement_id"]).name, ENGAGEMENT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).name, PRODUCT_NAME_NEW)
+        self.assertEqual(get_object_or_none(Product, id=import0["product_id"]).prod_type.name, PRODUCT_TYPE_NAME_NEW)
         mock.assert_not_called()

     def test_reimport_with_invalid_parameters(self):
-        with self.subTest('scan_date in the future'):
+        with self.subTest("scan_date in the future"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
-                engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True, scan_date='2222-01-01',
+                engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, product_type_name=PRODUCT_TYPE_NAME_NEW, auto_create_context=True, scan_date="2222-01-01",
                 expected_http_status_code=400)

-        with self.subTest('no parameters'):
+        with self.subTest("no parameters"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
                 engagement=None, expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+            self.assertEqual(import0, ["product_name parameter missing"])

-        with self.subTest('no product data'):
+        with self.subTest("no product data"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, engagement_name='what the bleep', expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+                engagement=None, engagement_name="what the bleep", expected_http_status_code=400)
+            self.assertEqual(import0, ["product_name parameter missing"])

-        with self.subTest('non engagement_name'):
+        with self.subTest("non engagement_name"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_name='67283', expected_http_status_code=400)
-            self.assertEqual(import0, ['engagement_name parameter missing'])
+                engagement=None, product_name="67283", expected_http_status_code=400)
+            self.assertEqual(import0, ["engagement_name parameter missing"])

-        with self.subTest('invalid product type'):
+        with self.subTest("invalid product type"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_type_name='valentijn', product_name='67283', engagement_name='valentijn', expected_http_status_code=400)
+                engagement=None, product_type_name="valentijn", product_name="67283", engagement_name="valentijn", expected_http_status_code=400)
             self.assertEqual(import0, ['Product Type "valentijn" does not exist'])

-        with self.subTest('invalid product'):
+        with self.subTest("invalid product"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_name='67283', engagement_name='valentijn', expected_http_status_code=400)
+                engagement=None, product_name="67283", engagement_name="valentijn", expected_http_status_code=400)
             self.assertEqual(import0, ['Product "67283" does not exist'])

-        with self.subTest('valid product, but other product type'):
+        with self.subTest("valid product, but other product type"):
             # random product type to avoid collision with other tests
             another_product_type_name = str(uuid.uuid4())
             Product_Type.objects.create(name=another_product_type_name)
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400)
+                engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name="valentijn", expected_http_status_code=400)
             self.assertEqual(import0, [(
                 "The fetched product has a conflict with the supplied product type name: "
                 f"existing product type name - {PRODUCT_TYPE_NAME_DEFAULT} vs "
                 f"supplied product type name - {another_product_type_name}"
             )])

-        with self.subTest('invalid engagement'):
+        with self.subTest("invalid engagement"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
                 engagement=1254235, expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+            self.assertEqual(import0, ["product_name parameter missing"])

-        with self.subTest('invalid engagement, but exists in another product'):
+        with self.subTest("invalid engagement, but exists in another product"):
             # random product to avoid collision with other tests
             another_product_name = str(uuid.uuid4())
             self.product = self.create_product(another_product_name)
@@ -500,20 +500,20 @@ def test_reimport_with_invalid_parameters(self):
                 engagement_name=ENGAGEMENT_NAME_DEFAULT, product_name=another_product_name,
                 expected_http_status_code=400)
             self.assertEqual(import0, [f'Engagement "Engagement 1" does not exist in Product "{another_product_name}"'])

-        with self.subTest('invalid engagement not id'):
+        with self.subTest("invalid engagement not id"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
-                engagement='bla bla', expected_http_status_code=400)
-            self.assertEqual(import0, ['engagement must be an integer'])
+                engagement="bla bla", expected_http_status_code=400)
+            self.assertEqual(import0, ["engagement must be an integer"])

-        with self.subTest('autocreate product but no product type name'):
+        with self.subTest("autocreate product but no product type name"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400)
             self.assertEqual(import0, [f'Product "{PRODUCT_NAME_NEW}" does not exist and no product_type_name provided to create the new product in'])

-        with self.subTest('autocreate engagement but no product_name'):
+        with self.subTest("autocreate engagement but no product_name"):
             import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE,
                 engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400)
-            self.assertEqual(import0, ['product_name parameter missing'])
+            self.assertEqual(import0, ["product_name parameter missing"])


 class TestImporterUtils(DojoAPITestCase):
@@ -521,11 +521,11 @@ def setUp(self):
         self.testuser, _ = User.objects.get_or_create(username="admin", is_superuser=True)
         token, _ = Token.objects.get_or_create(user=self.testuser)
         self.client = APIClient(raise_request_exception=True)
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
         self.client.force_authenticate(user=self.testuser, token=token)
         self.create_default_data()

-    def __del__(self):
+    def tearDown(self):
         self.test_last_by_scan_type.delete()
         self.test_with_title.delete()
         self.test_last_by_title.delete()
@@ -537,8 +537,8 @@ def __del__(self):

     def create_default_data(self):
         # creating is much faster compared to using a fixture
-        logger.debug('creating default product + engagement')
-        Development_Environment.objects.get_or_create(name='Development')
+        logger.debug("creating default product + engagement")
+        Development_Environment.objects.get_or_create(name="Development")
         self.product_type = self.create_product_type(PRODUCT_TYPE_NAME_DEFAULT)
         self.product = self.create_product(PRODUCT_NAME_DEFAULT)
         self.engagement = self.create_engagement(ENGAGEMENT_NAME_DEFAULT, product=self.product)
@@ -546,17 +546,17 @@ def create_default_data(self):
         self.test_last_by_title = self.create_test(engagement=self.engagement, scan_type=NPM_AUDIT_SCAN_TYPE, title=TEST_TITLE_DEFAULT)
         self.test_with_title = self.create_test(engagement=self.engagement, scan_type=NPM_AUDIT_SCAN_TYPE, title=TEST_TITLE_ALTERNATE)
         self.test_last_by_scan_type = self.create_test(engagement=self.engagement, scan_type=NPM_AUDIT_SCAN_TYPE)
-        environment, _ = Development_Environment.objects.get_or_create(name='Development')
+        environment, _ = Development_Environment.objects.get_or_create(name="Development")
         self.importer_data = {
             "engagement": self.engagement,
             "environment": environment,
             "scan_type": NPM_AUDIT_SCAN_TYPE,
         }

-    @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True)
+    @patch("dojo.importers.base_importer.Vulnerability_Id", autospec=True)
     def test_handle_vulnerability_ids_references_and_cve(self, mock):
         # Why doesn't this test use the test db and query for one?
- vulnerability_ids = ['CVE', 'REF-1', 'REF-2'] + vulnerability_ids = ["CVE", "REF-1", "REF-2"] finding = Finding() finding.unsaved_vulnerability_ids = vulnerability_ids finding.test = self.test @@ -564,16 +564,16 @@ def test_handle_vulnerability_ids_references_and_cve(self, mock): finding.save() DefaultImporter(**self.importer_data).process_vulnerability_ids(finding) - self.assertEqual('CVE', finding.vulnerability_ids[0]) - self.assertEqual('CVE', finding.cve) + self.assertEqual("CVE", finding.vulnerability_ids[0]) + self.assertEqual("CVE", finding.cve) self.assertEqual(vulnerability_ids, finding.unsaved_vulnerability_ids) - self.assertEqual('REF-1', finding.vulnerability_ids[1]) - self.assertEqual('REF-2', finding.vulnerability_ids[2]) + self.assertEqual("REF-1", finding.vulnerability_ids[1]) + self.assertEqual("REF-2", finding.vulnerability_ids[2]) finding.delete() - @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) + @patch("dojo.importers.base_importer.Vulnerability_Id", autospec=True) def test_handle_no_vulnerability_ids_references_and_cve(self, mock): - vulnerability_ids = ['CVE'] + vulnerability_ids = ["CVE"] finding = Finding() finding.test = self.test finding.reporter = self.testuser @@ -582,14 +582,14 @@ def test_handle_no_vulnerability_ids_references_and_cve(self, mock): DefaultImporter(**self.importer_data).process_vulnerability_ids(finding) - self.assertEqual('CVE', finding.vulnerability_ids[0]) - self.assertEqual('CVE', finding.cve) + self.assertEqual("CVE", finding.vulnerability_ids[0]) + self.assertEqual("CVE", finding.cve) self.assertEqual(vulnerability_ids, finding.unsaved_vulnerability_ids) finding.delete() - @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) + @patch("dojo.importers.base_importer.Vulnerability_Id", autospec=True) def test_handle_vulnerability_ids_references_and_no_cve(self, mock): - vulnerability_ids = ['REF-1', 'REF-2'] + vulnerability_ids = ["REF-1", "REF-2"] finding = Finding() finding.test = self.test finding.reporter = self.testuser @@ -597,13 +597,13 @@ def test_handle_vulnerability_ids_references_and_no_cve(self, mock): finding.unsaved_vulnerability_ids = vulnerability_ids DefaultImporter(**self.importer_data).process_vulnerability_ids(finding) - self.assertEqual('REF-1', finding.vulnerability_ids[0]) - self.assertEqual('REF-1', finding.cve) + self.assertEqual("REF-1", finding.vulnerability_ids[0]) + self.assertEqual("REF-1", finding.cve) self.assertEqual(vulnerability_ids, finding.unsaved_vulnerability_ids) - self.assertEqual('REF-2', finding.vulnerability_ids[1]) + self.assertEqual("REF-2", finding.vulnerability_ids[1]) finding.delete() - @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) + @patch("dojo.importers.base_importer.Vulnerability_Id", autospec=True) def test_no_handle_vulnerability_ids_references_and_no_cve(self, mock): finding = Finding() finding.test = self.test diff --git a/unittests/test_jira_config_engagement.py b/unittests/test_jira_config_engagement.py index cf952b6068..e9954b8eb7 100644 --- a/unittests/test_jira_config_engagement.py +++ b/unittests/test_jira_config_engagement.py @@ -1,4 +1,5 @@ # from unittest import skip +import contextlib import logging from unittest.mock import patch @@ -18,52 +19,52 @@ class JIRAConfigEngagementBase: def get_new_engagement_with_jira_project_data(self): return { - 'name': 'new engagement', - 'description': 'new description', - 'lead': 1, - 'product': self.product_id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', 
- 'status': 'Not Started', + "name": "new engagement", + "description": "new description", + "lead": 1, + "product": self.product_id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", # 'jira-project-form-inherit_from_product': 'on', # absence = False in html forms - 'jira-project-form-jira_instance': 2, - 'jira-project-form-project_key': 'IUNSEC', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', + "jira-project-form-jira_instance": 2, + "jira-project-form-project_key": "IUNSEC", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", } def get_new_engagement_with_jira_project_data_and_epic_mapping(self): return { - 'name': 'new engagement', - 'description': 'new description', - 'lead': 1, - 'product': self.product_id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', + "name": "new engagement", + "description": "new description", + "lead": 1, + "product": self.product_id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", # 'jira-project-form-inherit_from_product': 'on', # absence = False in html forms - 'jira-project-form-jira_instance': 2, - 'jira-project-form-project_key': 'IUNSEC', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-enable_engagement_epic_mapping': 'on', - 'jira-epic-form-push_to_jira': 'on', - 'jira-project-form-custom_fields': 'null', + "jira-project-form-jira_instance": 2, + "jira-project-form-project_key": "IUNSEC", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-enable_engagement_epic_mapping": "on", + "jira-epic-form-push_to_jira": "on", + "jira-project-form-custom_fields": "null", } def get_new_engagement_without_jira_project_data(self): return { - 'name': 'new engagement', - 'description': 'new description', - 'lead': 1, - 'product': self.product_id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', - 'jira-project-form-inherit_from_product': 'on', + "name": "new engagement", + "description": "new description", + "lead": 1, + "product": self.product_id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", + "jira-project-form-inherit_from_product": "on", # A value is set by default by the model, so we need to add it here as well - 'jira-project-form-epic_issue_type_name': 'Epic', + "jira-project-form-epic_issue_type_name": "Epic", # 'project_key': 'IFFF', # 'jira_instance': 2, # 'enable_engagement_epic_mapping': 'on', @@ -73,50 +74,50 @@ def get_new_engagement_without_jira_project_data(self): def get_engagement_with_jira_project_data(self, engagement): return { - 'name': engagement.name, - 'description': engagement.description, - 'lead': 1, - 'product': engagement.product.id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', + "name": engagement.name, + "description": engagement.description, + "lead": 1, + "product": engagement.product.id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", # 'jira-project-form-inherit_from_product': 'on', # absence = False in html forms - 'jira-project-form-jira_instance': 2, - 
'jira-project-form-project_key': 'ISEC', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', + "jira-project-form-jira_instance": 2, + "jira-project-form-project_key": "ISEC", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", } def get_engagement_with_jira_project_data2(self, engagement): return { - 'name': engagement.name, - 'description': engagement.description, - 'lead': 1, - 'product': engagement.product.id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', + "name": engagement.name, + "description": engagement.description, + "lead": 1, + "product": engagement.product.id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", # 'jira-project-form-inherit_from_product': 'on', # absence = False in html forms - 'jira-project-form-jira_instance': 2, - 'jira-project-form-project_key': 'ISEC2', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-custom_fields': 'null', + "jira-project-form-jira_instance": 2, + "jira-project-form-project_key": "ISEC2", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-custom_fields": "null", } def get_engagement_with_empty_jira_project_data(self, engagement): return { - 'name': engagement.name, - 'description': engagement.description, - 'lead': 1, - 'product': engagement.product.id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', - 'jira-project-form-inherit_from_product': 'on', + "name": engagement.name, + "description": engagement.description, + "lead": 1, + "product": engagement.product.id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", + "jira-project-form-inherit_from_product": "on", # A value is set by default by the model, so we need to add it here as well - 'jira-project-form-epic_issue_type_name': 'Epic', + "jira-project-form-epic_issue_type_name": "Epic", # 'project_key': 'IFFF', # 'jira_instance': 2, # 'enable_engagement_epic_mapping': 'on', @@ -125,21 +126,21 @@ def get_engagement_with_empty_jira_project_data(self, engagement): } def get_expected_redirect_engagement(self, engagement): - return '/engagement/%i' % engagement.id + return "/engagement/%i" % engagement.id def get_expected_redirect_edit_engagement(self, engagement): - return '/engagement/edit/%i' % engagement.id + return "/engagement/edit/%i" % engagement.id def add_engagement_jira(self, data, expect_redirect_to=None, expect_200=False): - response = self.client.get(reverse('new_eng_for_prod', args=(self.product_id, ))) + response = self.client.get(reverse("new_eng_for_prod", args=(self.product_id, ))) # logger.debug('before: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) if not expect_redirect_to and not expect_200: - expect_redirect_to = '/engagement/%i' + expect_redirect_to = "/engagement/%i" - response = self.client.post(reverse('new_eng_for_prod', args=(self.product_id, )), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("new_eng_for_prod", args=(self.product_id, )), urlencode(data), content_type="application/x-www-form-urlencoded") # logger.debug('after: JIRA_Project last') # 
self.log_model_instance(JIRA_Project.objects.last()) @@ -152,12 +153,12 @@ def add_engagement_jira(self, data, expect_redirect_to=None, expect_200=False): # print('response: ' + response) # print('url: ' + response.url) try: - engagement = Engagement.objects.get(id=response.url.split('/')[-1]) + engagement = Engagement.objects.get(id=response.url.split("/")[-1]) except: try: - engagement = Engagement.objects.get(id=response.url.split('/')[-2]) + engagement = Engagement.objects.get(id=response.url.split("/")[-2]) except: - raise ValueError('error parsing id from redirect uri: ' + response.url) + raise ValueError("error parsing id from redirect uri: " + response.url) self.assertEqual(response.url, (expect_redirect_to % engagement.id)) else: self.assertEqual(response.status_code, 200) @@ -183,12 +184,12 @@ def add_engagement_with_jira_project_and_epic_mapping(self, expected_delta_jira_ return self.add_engagement_jira_with_data(self.get_new_engagement_with_jira_project_data_and_epic_mapping(), expected_delta_jira_project_db, expect_redirect_to=expect_redirect_to, expect_200=expect_200) def edit_engagement_jira(self, engagement, data, expect_redirect_to=None, expect_200=False): - response = self.client.get(reverse('edit_engagement', args=(engagement.id, ))) + response = self.client.get(reverse("edit_engagement", args=(engagement.id, ))) # logger.debug('before: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) - response = self.client.post(reverse('edit_engagement', args=(engagement.id, )), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("edit_engagement", args=(engagement.id, )), urlencode(data), content_type="application/x-www-form-urlencoded") # logger.debug('after: JIRA_Project last') # self.log_model_instance(JIRA_Project.objects.last()) @@ -235,7 +236,7 @@ def empty_jira_project_for_engagement(self, engagement, expected_delta_jira_proj class JIRAConfigEngagementTest(DojoTestCase, JIRAConfigEngagementBase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] product_id = 999 @@ -254,7 +255,7 @@ def setUp(self): product = Product.objects.get(id=self.product_id) self.assertIsNone(jira_helper.get_jira_project(product)) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_jira_project_to_engagement_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method # TODO: add engagement also via API, but let's focus on JIRA here @@ -262,41 +263,45 @@ def test_add_jira_project_to_engagement_without_jira_project(self, jira_mock): self.edit_jira_project_for_engagement(engagement, expected_delta_jira_project_db=1) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_empty_jira_project_to_engagement_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method - engagement = self.add_engagement_without_jira_project(expected_delta_jira_project_db=0) - self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0) - self.assertEqual(jira_mock.call_count, 0) + # Prevent the exception from being raised here so that the test can be run in parallel + with contextlib.suppress(ValueError): + engagement = 
self.add_engagement_without_jira_project(expected_delta_jira_project_db=0) + self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0) + self.assertEqual(jira_mock.call_count, 0) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_jira_project_to_engagement_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1) self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0) self.assertEqual(jira_mock.call_count, 2) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_empty_jira_project_to_engagement_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method - engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1) - # clearing out jira config used to be possible. what todo? - # - delete jira project? would disconnect all existing jira issues in defect dojo from the config? - # - allow jira project with empty jira instance and/or empty project_key? unpredictable behaviour - # - so prevent clearing out these values - # response = self.empty_jira_project_for_engagement(Engagement.objects.get(id=3), -1) - # expecting ValueError as we can't delete existing JIRA Projects - self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0, expect_error=True) - self.assertEqual(jira_mock.call_count, 1) - - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + # Prevent the exception from being raised here so that the test can be run in parallel + with contextlib.suppress(ValueError): + engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1) + # clearing out jira config used to be possible. what to do? + # - delete jira project? would disconnect all existing jira issues in defect dojo from the config? + # - allow jira project with empty jira instance and/or empty project_key? 
unpredictable behaviour + # - so prevent clearing out these values + # response = self.empty_jira_project_for_engagement(Engagement.objects.get(id=3), -1) + # expecting ValueError as we can't delete existing JIRA Projects + self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0, expect_error=True) + self.assertEqual(jira_mock.call_count, 1) + + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_jira_project_to_engagement_without_jira_project_invalid_project(self, jira_mock): jira_mock.return_value = False # cannot set return_value in decorated AND have the mock into the method # errors means it won't redirect to view_engagement, but returns a 200 and redisplays the edit engagement page self.edit_jira_project_for_engagement(Engagement.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_jira_project_to_engagement_with_jira_project_invalid_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1) @@ -305,22 +310,22 @@ def test_edit_jira_project_to_engagement_with_jira_project_invalid_project(self, self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0, expect_200=True) self.assertEqual(jira_mock.call_count, 2) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_engagement_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1) self.assertIsNotNone(engagement) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_engagement_with_jira_project_invalid_jira_project(self, jira_mock): jira_mock.return_value = False # cannot set return_value in decorated AND have the mock into the method - engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=0, expect_redirect_to='/engagement/%i/edit') + engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=0, expect_redirect_to="/engagement/%i/edit") # engagement still added even while jira errors self.assertIsNotNone(engagement) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_engagement_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method engagement = self.add_engagement_without_jira_project(expected_delta_jira_project_db=0) @@ -328,7 +333,7 @@ def test_add_engagement_without_jira_project(self, jira_mock): self.assertEqual(jira_mock.call_count, 0) # with jira disabled the jiraform should not be checked at all - @patch('dojo.forms.JIRAProjectForm.is_valid') + @patch("dojo.forms.JIRAProjectForm.is_valid") def test_add_engagement_with_jira_project_to_engagement_jira_disabled(self, jira_mock): jira_mock.return_value = True # cannot set return_value in 
decorated AND have the mock into the method self.system_settings(enable_jira=False) @@ -337,7 +342,7 @@ def test_add_engagement_with_jira_project_to_engagement_jira_disabled(self, jira self.assertEqual(jira_mock.call_count, 0) # with jira disabled the jiraform should not be checked at all - @patch('dojo.forms.JIRAProjectForm.is_valid') + @patch("dojo.forms.JIRAProjectForm.is_valid") def test_edit_jira_project_to_engagement_with_jira_project_invalid_project_jira_disabled(self, jira_mock): self.system_settings(enable_jira=False) jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method @@ -352,7 +357,7 @@ class JIRAConfigEngagementTest_Inheritance(JIRAConfigEngagementTest): def __init__(self, *args, **kwargs): JIRAConfigEngagementTest.__init__(self, *args, **kwargs) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def setUp(self, jira_mock, *args, **kwargs): jira_mock.return_value = True JIRAConfigEngagementTest.setUp(self, *args, **kwargs) diff --git a/unittests/test_jira_config_engagement_epic.py b/unittests/test_jira_config_engagement_epic.py index ba14b310de..919e7480ae 100644 --- a/unittests/test_jira_config_engagement_epic.py +++ b/unittests/test_jira_config_engagement_epic.py @@ -10,7 +10,7 @@ class JIRAConfigEngagementEpicTest(DojoVCRTestCase, JIRAConfigEngagementBase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] product_id = 999 @@ -24,10 +24,10 @@ def assert_cassette_played(self): def _get_vcr(self, **kwargs): my_vcr = super(DojoVCRTestCase, self)._get_vcr(**kwargs) - my_vcr.record_mode = 'once' - my_vcr.path_transformer = VCR.ensure_suffix('.yaml') - my_vcr.filter_headers = ['Authorization', 'X-Atlassian-Token'] - my_vcr.cassette_library_dir = get_unit_tests_path() + '/vcr/jira/' + my_vcr.record_mode = "once" + my_vcr.path_transformer = VCR.ensure_suffix(".yaml") + my_vcr.filter_headers = ["Authorization", "X-Atlassian-Token"] + my_vcr.cassette_library_dir = get_unit_tests_path() + "/vcr/jira/" # filters headers doesn't seem to work for cookies, so use callbacks to filter cookies from being recorded my_vcr.before_record_request = self.before_record_request my_vcr.before_record_response = self.before_record_response @@ -48,19 +48,19 @@ def setUp(self): def get_new_engagement_with_jira_project_data_and_epic_mapping(self): return { - 'name': 'new engagement', - 'description': 'new description', - 'lead': 1, - 'product': self.product_id, - 'target_start': '2070-11-27', - 'target_end': '2070-12-04', - 'status': 'Not Started', - 'jira-project-form-jira_instance': 2, - 'jira-project-form-project_key': 'NTEST', - 'jira-project-form-epic_issue_type_name': 'Epic', - 'jira-project-form-product_jira_sla_notification': 'on', - 'jira-project-form-enable_engagement_epic_mapping': 'on', - 'jira-epic-form-push_to_jira': 'on', + "name": "new engagement", + "description": "new description", + "lead": 1, + "product": self.product_id, + "target_start": "2070-11-27", + "target_end": "2070-12-04", + "status": "Not Started", + "jira-project-form-jira_instance": 2, + "jira-project-form-project_key": "NTEST", + "jira-project-form-epic_issue_type_name": "Epic", + "jira-project-form-product_jira_sla_notification": "on", + "jira-project-form-enable_engagement_epic_mapping": "on", + "jira-epic-form-push_to_jira": "on", } def add_engagement_with_jira_project_and_epic_mapping(self, expected_delta_jira_project_db=0, expect_redirect_to=None, 
expect_200=False): diff --git a/unittests/test_jira_config_product.py b/unittests/test_jira_config_product.py index 818127c5bf..0673d1c224 100644 --- a/unittests/test_jira_config_product.py +++ b/unittests/test_jira_config_product.py @@ -16,25 +16,25 @@ class JIRAConfigProductTest(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] data_jira_instance = { - 'configuration_name': 'something_jira', - 'url': 'https://127.0.0.1', - 'username': 'defectdojo', - 'password': 'defectdojo-password', - 'default_issue_type': 'Bug', - 'epic_name_id': 1, - 'open_status_key': 1, - 'close_status_key': 1, - 'info_mapping_severity': 'Info', - 'low_mapping_severity': 'Low', - 'medium_mapping_severity': 'Medium', - 'high_mapping_severity': 'High', - 'critical_mapping_severity': 'Critical', + "configuration_name": "something_jira", + "url": "https://127.0.0.1", + "username": "defectdojo", + "password": "defectdojo-password", + "default_issue_type": "Bug", + "epic_name_id": 1, + "open_status_key": 1, + "close_status_key": 1, + "info_mapping_severity": "Info", + "low_mapping_severity": "Low", + "medium_mapping_severity": "Medium", + "high_mapping_severity": "High", + "critical_mapping_severity": "Critical", # finding_text': '', - 'accepted_mapping_resolution': 'Fixed', - 'false_positive_mapping_resolution': 'False Positive', + "accepted_mapping_resolution": "Fixed", + "false_positive_mapping_resolution": "False Positive", # global_jira_sla_notification': '', } @@ -47,18 +47,18 @@ def setUp(self): self.system_settings(enable_jira=True) self.client.force_login(self.get_test_admin()) - @patch('dojo.jira_link.views.jira_helper.get_jira_connection_raw') + @patch("dojo.jira_link.views.jira_helper.get_jira_connection_raw") def add_jira_instance(self, data, jira_mock): - response = self.client.post(reverse('add_jira'), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("add_jira"), urlencode(data), content_type="application/x-www-form-urlencoded") # check that storing a new config triggers a login call to JIRA - call_1 = call(data['url'], data['username'], data['password']) - call_2 = call(data['url'], data['username'], data['password']) + call_1 = call(data["url"], data["username"], data["password"]) + call_2 = call(data["url"], data["username"], data["password"]) # jira_mock.assert_called_once_with(data['url'], data['username'], data['password']) jira_mock.assert_has_calls([call_1, call_2]) # successful, so should redirect to list of JIRA instances - self.assertRedirects(response, '/jira') + self.assertRedirects(response, "/jira") - jira_instance = JIRA_Instance.objects.filter(configuration_name=data['configuration_name'], url=data['url']).last() + jira_instance = JIRA_Instance.objects.filter(configuration_name=data["configuration_name"], url=data["url"]).last() return response, jira_instance def test_add_jira_instance(self): @@ -67,33 +67,33 @@ def test_add_jira_instance(self): def test_add_jira_instance_with_issue_template_dir(self): # make sure we get no error when specifying template data = self.data_jira_instance.copy() - data['issue_template_dir'] = 'issue-trackers/jira_full' + data["issue_template_dir"] = "issue-trackers/jira_full" _response, _jira_instance = self.add_jira_instance(data) # no mock so we can assert the exception raised def test_add_jira_instance_unknown_host(self): data = self.data_jira_instance - data['url'] = 'https://jira.hj23412341hj234123421341234ljl.nl' + data["url"] = 
"https://jira.hj23412341hj234123421341234ljl.nl" # test UI validation error # self.client.force_login('admin', backend='django.contrib.auth.backends.ModelBackend') # Client.raise_request_exception = False # needs Django 3.0 # can't use helper method which has patched connection raw method - response = self.client.post(reverse('add_jira'), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("add_jira"), urlencode(data), content_type="application/x-www-form-urlencoded") self.assertEqual(200, response.status_code) - content = response.content.decode('utf-8') + content = response.content.decode("utf-8") # debian throws 'Name or service not known' error and alpine 'Name does not resolve' - self.assertTrue(('Name or service not known' in content) or ('Name does not resolve' in content)) + self.assertTrue(("Name or service not known" in content) or ("Name does not resolve" in content)) # test raw connection error with self.assertRaises(requests.exceptions.RequestException): - jira_helper.get_jira_connection_raw(data['url'], data['username'], data['password']) + jira_helper.get_jira_connection_raw(data["url"], data["username"], data["password"]) - @patch('dojo.jira_link.views.jira_helper.get_jira_connection_raw') + @patch("dojo.jira_link.views.jira_helper.get_jira_connection_raw") def test_add_jira_instance_invalid_credentials(self, jira_mock): - jira_mock.side_effect = JIRAError(status_code=401, text='Login failed') + jira_mock.side_effect = JIRAError(status_code=401, text="Login failed") data = self.data_jira_instance # test UI validation error @@ -101,14 +101,14 @@ def test_add_jira_instance_invalid_credentials(self, jira_mock): # self.client.force_login('admin', backend='django.contrib.auth.backends.ModelBackend') # Client.raise_request_exception = False # needs Django 3.0 # can't use helper method which has patched connection raw method - response = self.client.post(reverse('add_jira'), urlencode(data), content_type='application/x-www-form-urlencoded') + response = self.client.post(reverse("add_jira"), urlencode(data), content_type="application/x-www-form-urlencoded") self.assertEqual(200, response.status_code) - content = response.content.decode('utf-8') - self.assertIn('Login failed', content) - self.assertIn('Unable to authenticate to JIRA', content) + content = response.content.decode("utf-8") + self.assertIn("Login failed", content) + self.assertIn("Unable to authenticate to JIRA", content) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_jira_project_to_product_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method # TODO: add product also via API, but let's focus on JIRA here @@ -116,21 +116,21 @@ def test_add_jira_project_to_product_without_jira_project(self, jira_mock): self.edit_jira_project_for_product(product, expected_delta_jira_project_db=1) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_empty_jira_project_to_product_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_without_jira_project(expected_delta_jira_project_db=0) self.empty_jira_project_for_product(product, 
expected_delta_jira_project_db=0) self.assertEqual(jira_mock.call_count, 0) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_jira_project_to_product_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_with_jira_project(expected_delta_jira_project_db=1) self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0) self.assertEqual(jira_mock.call_count, 2) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_empty_jira_project_to_product_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_with_jira_project(expected_delta_jira_project_db=1) @@ -143,14 +143,14 @@ def test_edit_empty_jira_project_to_product_with_jira_project(self, jira_mock): self.empty_jira_project_for_product(product, expected_delta_jira_project_db=0, expect_200=True) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_jira_project_to_product_without_jira_project_invalid_project(self, jira_mock): jira_mock.return_value = False # cannot set return_value in decorated AND have the mock into the method # errors means it won't redirect to view_product, but returns a 200 and redisplays the edit product page self.edit_jira_project_for_product(Product.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_edit_jira_project_to_product_with_jira_project_invalid_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_with_jira_project(expected_delta_jira_project_db=1) @@ -159,22 +159,22 @@ def test_edit_jira_project_to_product_with_jira_project_invalid_project(self, ji self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0, expect_200=True) self.assertEqual(jira_mock.call_count, 2) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_product_with_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_with_jira_project(expected_delta_jira_project_db=1) self.assertIsNotNone(product) self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_product_with_jira_project_invalid_jira_project(self, jira_mock): jira_mock.return_value = False # cannot set return_value in decorated AND have the mock into the method - product = self.add_product_with_jira_project(expected_delta_jira_project_db=0, expect_redirect_to='/product/%i/edit') + product = self.add_product_with_jira_project(expected_delta_jira_project_db=0, expect_redirect_to="/product/%i/edit") # product is still saved, even with invalid jira project key self.assertIsNotNone(product) 
self.assertEqual(jira_mock.call_count, 1) - @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid') + @patch("dojo.jira_link.views.jira_helper.is_jira_project_valid") def test_add_product_without_jira_project(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method product = self.add_product_without_jira_project(expected_delta_jira_project_db=0) @@ -182,7 +182,7 @@ def test_add_product_without_jira_project(self, jira_mock): self.assertEqual(jira_mock.call_count, 0) # with jira disabled the jiraform should not be checked at all - @patch('dojo.forms.JIRAProjectForm.is_valid') + @patch("dojo.forms.JIRAProjectForm.is_valid") def test_add_product_with_jira_project_to_product_jira_disabled(self, jira_mock): jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method self.system_settings(enable_jira=False) @@ -191,7 +191,7 @@ def test_add_product_with_jira_project_to_product_jira_disabled(self, jira_mock) self.assertEqual(jira_mock.call_count, 0) # with jira disabled the jiraform should not be checked at all - @patch('dojo.forms.JIRAProjectForm.is_valid') + @patch("dojo.forms.JIRAProjectForm.is_valid") def test_edit_jira_project_to_product_with_jira_project_invalid_project_jira_disabled(self, jira_mock): self.system_settings(enable_jira=False) jira_mock.return_value = True # cannot set return_value in decorated AND have the mock into the method diff --git a/unittests/test_jira_import_and_pushing_api.py b/unittests/test_jira_import_and_pushing_api.py index 2f0c1050bd..ed6d684287 100644 --- a/unittests/test_jira_import_and_pushing_api.py +++ b/unittests/test_jira_import_and_pushing_api.py @@ -35,7 +35,7 @@ class JIRAImportAndPushTestApi(DojoVCRAPITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): # TODO remove __init__ if it does nothing... 
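The `_get_vcr` overrides in these JIRA tests (one in test_jira_config_engagement_epic.py above, another in the hunk below) configure vcrpy to record each cassette once, filter auth headers, and scrub cookies through the before_record callbacks, since `filter_headers` does not catch cookies. A hedged sketch of an equivalent standalone setup, assuming vcrpy's documented `VCR()` options and dict-like header access in the callbacks; the cassette path and target URL are illustrative:

```python
# Standalone vcrpy setup mirroring the _get_vcr overrides in this diff.
import urllib.request

import vcr

def scrub_request(request):
    request.headers.pop("Cookie", None)  # filter_headers misses cookies, so drop them here
    return request

def scrub_response(response):
    response["headers"].pop("Set-Cookie", None)  # recorded responses are plain dicts
    return response

my_vcr = vcr.VCR(
    record_mode="once",  # record on the first run, replay on every run after
    path_transformer=vcr.VCR.ensure_suffix(".yaml"),
    filter_headers=["Authorization", "X-Atlassian-Token"],
    cassette_library_dir="vcr/jira/",
    before_record_request=scrub_request,
    before_record_response=scrub_response,
)

with my_vcr.use_cassette("example"):
    urllib.request.urlopen("https://example.com")  # hits the network only once
```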
@@ -47,10 +47,10 @@ def assert_cassette_played(self): def _get_vcr(self, **kwargs): my_vcr = super()._get_vcr(**kwargs) - my_vcr.record_mode = 'once' - my_vcr.path_transformer = VCR.ensure_suffix('.yaml') - my_vcr.filter_headers = ['Authorization', 'X-Atlassian-Token'] - my_vcr.cassette_library_dir = get_unit_tests_path() + '/vcr/jira/' + my_vcr.record_mode = "once" + my_vcr.path_transformer = VCR.ensure_suffix(".yaml") + my_vcr.filter_headers = ["Authorization", "X-Atlassian-Token"] + my_vcr.cassette_library_dir = get_unit_tests_path() + "/vcr/jira/" # filters headers doesn't seem to work for cookies, so use callbacks to filter cookies from being recorded my_vcr.before_record_request = self.before_record_request my_vcr.before_record_response = self.before_record_response @@ -59,31 +59,31 @@ def _get_vcr(self, **kwargs): def setUp(self): super().setUp() self.system_settings(enable_jira=True) - self.testuser = User.objects.get(username='admin') + self.testuser = User.objects.get(username="admin") self.testuser.usercontactinfo.block_execution = True self.testuser.usercontactinfo.save() token = Token.objects.get(user=self.testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) - self.scans_path = '/scans/' - self.zap_sample5_filename = self.scans_path + 'zap/5_zap_sample_one.xml' - self.npm_groups_sample_filename = self.scans_path + 'npm_audit/many_vuln_with_groups.json' + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) + self.scans_path = "/scans/" + self.zap_sample5_filename = self.scans_path + "zap/5_zap_sample_one.xml" + self.npm_groups_sample_filename = self.scans_path + "npm_audit/many_vuln_with_groups.json" def test_import_no_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) def test_import_with_push_to_jira_is_false(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=False, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) def test_import_with_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) # by asserting full cassette is played we know issues have been updated in JIRA @@ -91,8 +91,8 @@ def test_import_with_push_to_jira(self): def test_import_with_groups_push_to_jira(self): # 7 findings, 5 unique component_name+component_version - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=True, verified=True) + test_id = import0["test"] # all findings should be in a group, so no JIRA issues for individual findings self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) @@ -106,7 +106,7 @@ def test_import_with_push_to_jira_epic_as_issue_type(self): jira_instance.default_issue_type = 
"Epic" jira_instance.save() import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) # by asserting full cassette is played we know issues have been updated in JIRA @@ -115,7 +115,7 @@ def test_import_with_push_to_jira_epic_as_issue_type(self): def test_import_no_push_to_jira_but_push_all(self): self.set_jira_push_all_issues(self.get_engagement(1)) import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) # by asserting full cassette is played we know issues have been updated in JIRA @@ -123,8 +123,8 @@ def test_import_no_push_to_jira_but_push_all(self): def test_import_with_groups_no_push_to_jira_but_push_all(self): self.set_jira_push_all_issues(self.get_engagement(1)) - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) # by asserting full cassette is played we know issues have been updated in JIRA @@ -133,7 +133,7 @@ def test_import_with_groups_no_push_to_jira_but_push_all(self): def test_import_with_push_to_jira_is_false_but_push_all(self): self.set_jira_push_all_issues(self.get_engagement(1)) import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=False, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) # by asserting full cassette is played we know issues have been updated in JIRA @@ -141,8 +141,8 @@ def test_import_with_push_to_jira_is_false_but_push_all(self): def test_import_with_groups_with_push_to_jira_is_false_but_push_all(self): self.set_jira_push_all_issues(self.get_engagement(1)) - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=False, verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=False, verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) # by asserting full cassette is played we know issues have been updated in JIRA @@ -150,7 +150,7 @@ def test_import_with_groups_with_push_to_jira_is_false_but_push_all(self): def test_import_no_push_to_jira_reimport_no_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) @@ -160,7 +160,7 @@ def test_import_no_push_to_jira_reimport_no_push_to_jira(self): def test_import_no_push_to_jira_reimport_push_to_jira_false(self): import0 = 
self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) @@ -170,7 +170,7 @@ def test_import_no_push_to_jira_reimport_push_to_jira_false(self): def test_import_no_push_to_jira_reimport_with_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) @@ -181,12 +181,12 @@ def test_import_no_push_to_jira_reimport_with_push_to_jira(self): self.assert_cassette_played() def test_import_with_groups_no_push_to_jira_reimport_with_push_to_jira(self): - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) - self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True) + self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=True, verified=True) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) # by asserting full cassette is played we know issues have been updated in JIRA @@ -195,7 +195,7 @@ def test_import_with_groups_no_push_to_jira_reimport_with_push_to_jira(self): def test_import_no_push_to_jira_reimport_no_push_to_jira_but_push_all_issues(self): self.set_jira_push_all_issues(self.get_engagement(1)) import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) @@ -207,12 +207,12 @@ def test_import_no_push_to_jira_reimport_no_push_to_jira_but_push_all_issues(sel def test_import_with_groups_no_push_to_jira_reimport_no_push_to_jira_but_push_all_issues(self): self.set_jira_push_all_issues(self.get_engagement(1)) - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) - self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) + self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) # by asserting full cassette is played we know issues 
have been updated in JIRA @@ -221,7 +221,7 @@ def test_import_with_groups_no_push_to_jira_reimport_no_push_to_jira_but_push_al def test_import_no_push_to_jira_reimport_push_to_jira_is_false_but_push_all_issues(self): self.set_jira_push_all_issues(self.get_engagement(1)) import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) self.get_jira_issue_updated_map(test_id) @@ -236,13 +236,13 @@ def test_import_no_push_to_jira_reimport_push_to_jira_is_false_but_push_all_issu def test_import_with_groups_no_push_to_jira_reimport_push_to_jira_is_false_but_push_all_issues(self): self.set_jira_push_all_issues(self.get_engagement(1)) - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) updated_map = self.get_jira_issue_updated_map(test_id) - self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=False, verified=True) + self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=False, verified=True) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) # when sending in identical data to JIRA, JIRA does NOT update the updated timestamp.... 
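The `get_jira_issue_updated_map` comparisons in these reimport tests depend on the behavior noted above: JIRA leaves the `updated` timestamp alone when it receives identical data. A self-contained toy of that invariant (all names hypothetical, not DefectDojo or JIRA client code):

```python
# Toy model of the invariant: re-pushing identical data must not bump an
# issue's "updated" marker, while a real change must.
class FakeJira:
    def __init__(self):
        self.issues = {"NTEST-1": {"summary": "finding", "updated": 1}}

    def push(self, key, fields):
        issue = self.issues[key]
        current = {k: v for k, v in issue.items() if k != "updated"}
        if fields != current:  # identical payloads are treated as no-ops
            issue.update(fields)
            issue["updated"] += 1

jira = FakeJira()
before = jira.issues["NTEST-1"]["updated"]
jira.push("NTEST-1", {"summary": "finding"})  # identical: "updated" unchanged
assert jira.issues["NTEST-1"]["updated"] == before
jira.push("NTEST-1", {"summary": "changed"})  # real change: "updated" bumps
assert jira.issues["NTEST-1"]["updated"] == before + 1
```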
@@ -253,7 +253,7 @@ def test_import_with_groups_no_push_to_jira_reimport_push_to_jira_is_false_but_p def test_import_push_to_jira_reimport_with_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) # Get one of the findings from the test @@ -271,24 +271,24 @@ def test_import_push_to_jira_reimport_with_push_to_jira(self): def test_import_twice_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) import1 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id1 = import1['test'] + test_id1 = import1["test"] # duplicates shouldn't be sent to JIRA self.assert_jira_issue_count_in_test(test_id1, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) def test_import_with_groups_twice_push_to_jira(self): - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=True, verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 3) - import1 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True) - test_id1 = import1['test'] + import1 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", push_to_jira=True, verified=True) + test_id1 = import1["test"] # duplicates shouldn't be sent to JIRA self.assert_jira_issue_count_in_test(test_id1, 0) self.assert_jira_group_issue_count_in_test(test_id1, 0) @@ -296,68 +296,68 @@ def test_import_with_groups_twice_push_to_jira(self): def test_import_twice_push_to_jira_push_all_issues(self): self.set_jira_push_all_issues(self.get_engagement(1)) import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) import1 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id1 = import1['test'] + test_id1 = import1["test"] # duplicates shouldn't be sent to JIRA self.assert_jira_issue_count_in_test(test_id1, 0) self.assert_jira_group_issue_count_in_test(test_id1, 0) def test_create_edit_update_finding(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) findings = self.get_test_findings_api(test_id) - finding_id = findings['results'][0]['id'] + finding_id = findings["results"][0]["id"] # logger.debug('finding_id: %s', finding_id) # use existing finding as template, but change some fields to make it not a 
duplicate finding_details = self.get_finding_api(finding_id) - del finding_details['id'] - del finding_details['push_to_jira'] + del finding_details["id"] + del finding_details["push_to_jira"] - finding_details['title'] = 'jira api test 1' + finding_details["title"] = "jira api test 1" self.post_new_finding_api(finding_details) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) - finding_details['title'] = 'jira api test 2' + finding_details["title"] = "jira api test 2" self.post_new_finding_api(finding_details, push_to_jira=True) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 0) - finding_details['title'] = 'jira api test 3' + finding_details["title"] = "jira api test 3" new_finding_json = self.post_new_finding_api(finding_details) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 0) - self.patch_finding_api(new_finding_json['id'], {"push_to_jira": False}) + self.patch_finding_api(new_finding_json["id"], {"push_to_jira": False}) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 0) - self.patch_finding_api(new_finding_json['id'], {"push_to_jira": True}) + self.patch_finding_api(new_finding_json["id"], {"push_to_jira": True}) self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) - pre_jira_status = self.get_jira_issue_status(new_finding_json['id']) + pre_jira_status = self.get_jira_issue_status(new_finding_json["id"]) - self.patch_finding_api(new_finding_json['id'], {"push_to_jira": True, + self.patch_finding_api(new_finding_json["id"], {"push_to_jira": True, "is_mitigated": True, "active": False}) self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) - post_jira_status = self.get_jira_issue_status(new_finding_json['id']) + post_jira_status = self.get_jira_issue_status(new_finding_json["id"]) self.assertNotEqual(pre_jira_status, post_jira_status) - finding_details['title'] = 'jira api test 4' + finding_details["title"] = "jira api test 4" new_finding_json = self.post_new_finding_api(finding_details) - new_finding_id = new_finding_json['id'] - del new_finding_json['id'] + new_finding_id = new_finding_json["id"] + del new_finding_json["id"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) @@ -374,48 +374,48 @@ def test_create_edit_update_finding(self): self.assert_cassette_played() def test_groups_create_edit_update_finding(self): - import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True) - test_id = import0['test'] + import0 = self.import_scan_with_params(self.npm_groups_sample_filename, scan_type="NPM Audit Scan", group_by="component_name+component_version", verified=True) + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 0) - findings = self.get_test_findings_api(test_id, component_name='negotiator') + findings = self.get_test_findings_api(test_id, component_name="negotiator") - self.assertEqual(len(findings['results']), 2) + self.assertEqual(len(findings["results"]), 2) - finding_details = self.get_finding_api(findings['results'][0]['id']) - finding_group_id = findings['results'][0]['finding_groups'][0]['id'] + finding_details = 
self.get_finding_api(findings["results"][0]["id"]) + finding_group_id = findings["results"][0]["finding_groups"][0]["id"] - del finding_details['id'] - del finding_details['push_to_jira'] + del finding_details["id"] + del finding_details["push_to_jira"] # push a finding should result in pushing the group instead - self.patch_finding_api(findings['results'][0]['id'], {"push_to_jira": True}) + self.patch_finding_api(findings["results"][0]["id"], {"push_to_jira": True}) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 1) # push second finding from the same group should not result in a new jira issue - self.patch_finding_api(findings['results'][1]['id'], {"push_to_jira": True}) + self.patch_finding_api(findings["results"][1]["id"], {"push_to_jira": True}) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 1) - pre_jira_status = self.get_jira_issue_status(findings['results'][0]['id']) + pre_jira_status = self.get_jira_issue_status(findings["results"][0]["id"]) # close both findings - self.patch_finding_api(findings['results'][0]['id'], {"active": False, "is_mitigated": True, "push_to_jira": True}) - self.patch_finding_api(findings['results'][1]['id'], {"active": False, "is_mitigated": True, "push_to_jira": True}) + self.patch_finding_api(findings["results"][0]["id"], {"active": False, "is_mitigated": True, "push_to_jira": True}) + self.patch_finding_api(findings["results"][1]["id"], {"active": False, "is_mitigated": True, "push_to_jira": True}) - post_jira_status = self.get_jira_issue_status(findings['results'][0]['id']) + post_jira_status = self.get_jira_issue_status(findings["results"][0]["id"]) # both findings inactive -> should update status in JIRA self.assertNotEqual(pre_jira_status, post_jira_status) # new finding, not pushed to JIRA # use existing finding as template, but change some fields to make it not a duplicate - self.get_finding_api(findings['results'][0]['id']) + self.get_finding_api(findings["results"][0]["id"]) - finding_details['title'] = 'jira api test 1' + finding_details["title"] = "jira api test 1" self.post_new_finding_api(finding_details) self.assert_jira_issue_count_in_test(test_id, 0) self.assert_jira_group_issue_count_in_test(test_id, 1) @@ -423,7 +423,7 @@ def test_groups_create_edit_update_finding(self): # another new finding, pushed to JIRA # same component_name, but not yet in a group, so finding pushed to JIRA - finding_details['title'] = 'jira api test 2' + finding_details["title"] = "jira api test 2" new_finding_json = self.post_new_finding_api(finding_details, push_to_jira=True) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 1) @@ -431,29 +431,29 @@ def test_groups_create_edit_update_finding(self): # print(finding_details) # no way to set finding group easily via API yet - Finding_Group.objects.get(id=finding_group_id).findings.add(Finding.objects.get(id=new_finding_json['id'])) + Finding_Group.objects.get(id=finding_group_id).findings.add(Finding.objects.get(id=new_finding_json["id"])) - self.patch_finding_api(new_finding_json['id'], {"push_to_jira": True}) + self.patch_finding_api(new_finding_json["id"], {"push_to_jira": True}) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 1) # another new finding, pushed to JIRA, different component_name / different group - finding_details['title'] = 'jira api test 3' - finding_details['component_name'] = 'pg' + 
finding_details["title"] = "jira api test 3" + finding_details["component_name"] = "pg" new_finding_json = self.post_new_finding_api(finding_details) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 1) - findings = self.get_test_findings_api(test_id, component_name='pg') + findings = self.get_test_findings_api(test_id, component_name="pg") - finding_group_id = findings['results'][0]['finding_groups'][0]['id'] + finding_group_id = findings["results"][0]["finding_groups"][0]["id"] # no way to set finding group easily via API yet - Finding_Group.objects.get(id=finding_group_id).findings.add(Finding.objects.get(id=new_finding_json['id'])) + Finding_Group.objects.get(id=finding_group_id).findings.add(Finding.objects.get(id=new_finding_json["id"])) - self.patch_finding_api(new_finding_json['id'], {"push_to_jira": True}) + self.patch_finding_api(new_finding_json["id"], {"push_to_jira": True}) self.assert_jira_issue_count_in_test(test_id, 1) self.assert_jira_group_issue_count_in_test(test_id, 2) @@ -462,15 +462,15 @@ def test_groups_create_edit_update_finding(self): def test_import_with_push_to_jira_add_comment(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True) - test_id = import0['test'] + test_id = import0["test"] self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) findings = self.get_test_findings_api(test_id) - finding_id = findings['results'][0]['id'] + finding_id = findings["results"][0]["id"] - self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA') + self.post_finding_notes_api(finding_id, "testing note. creating it and pushing it to JIRA") self.patch_finding_api(finding_id, {"push_to_jira": True}) # Make sure the number of comments match self.assertEqual(len(self.get_jira_comments(finding_id)), 1) @@ -479,14 +479,14 @@ def test_import_with_push_to_jira_add_comment(self): def test_import_add_comments_then_push_to_jira(self): import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=False, verified=True) - test_id = import0['test'] + test_id = import0["test"] findings = self.get_test_findings_api(test_id) - finding_id = findings['results'][0]['id'] + finding_id = findings["results"][0]["id"] - self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA') - self.post_finding_notes_api(finding_id, 'testing second note. creating it and pushing it to JIRA') + self.post_finding_notes_api(finding_id, "testing note. creating it and pushing it to JIRA") + self.post_finding_notes_api(finding_id, "testing second note. 
creating it and pushing it to JIRA")
         self.patch_finding_api(finding_id, {"push_to_jira": True})

         self.assert_jira_issue_count_in_test(test_id, 1)
@@ -498,15 +498,15 @@ def test_import_add_comments_then_push_to_jira(self):

     def test_import_with_push_to_jira_add_tags(self):
         import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

         findings = self.get_test_findings_api(test_id)

-        finding = Finding.objects.get(id=findings['results'][0]['id'])
+        finding = Finding.objects.get(id=findings["results"][0]["id"])

-        tags = ['tag1', 'tag2']
+        tags = ["tag1", "tag2"]
         self.post_finding_tags_api(finding.id, tags)
         self.patch_finding_api(finding.id, {"push_to_jira": True})
@@ -523,15 +523,15 @@ def test_import_with_push_to_jira_add_tags(self):

     def test_import_with_push_to_jira_update_tags(self):
         import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, verified=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

         findings = self.get_test_findings_api(test_id)

-        finding = Finding.objects.get(id=findings['results'][0]['id'])
+        finding = Finding.objects.get(id=findings["results"][0]["id"])

-        tags = ['tag1', 'tag2']
+        tags = ["tag1", "tag2"]
         self.post_finding_tags_api(finding.id, tags)
         self.patch_finding_api(finding.id, {"push_to_jira": True})
@@ -543,7 +543,7 @@ def test_import_with_push_to_jira_update_tags(self):
         # Assert that the tags match
         self.assertEqual(issue.fields.labels, tags)

-        tags_new = tags + ['tag3', 'tag4']
+        tags_new = tags + ["tag3", "tag4"]
         self.post_finding_tags_api(finding.id, tags_new)
         self.patch_finding_api(finding.id, {"push_to_jira": True})
@@ -573,7 +573,7 @@ def test_engagement_epic_mapping_enabled_create_epic_and_push_findings(self):
         self.toggle_jira_project_epic_mapping(eng, True)
         self.create_engagement_epic(eng)
         import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, engagement=3, verified=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         # Correct number of issues are pushed to jira
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
@@ -590,7 +590,7 @@ def test_engagement_epic_mapping_enabled_no_epic_and_push_findings(self):
         # Set epic_mapping to true
         self.toggle_jira_project_epic_mapping(eng, True)
         import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, engagement=3, verified=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         # Correct number of issues are pushed to jira
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
@@ -608,7 +608,7 @@ def test_engagement_epic_mapping_disabled_create_epic_and_push_findings(self):
         self.toggle_jira_project_epic_mapping(eng, False)
         self.create_engagement_epic(eng)
         import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, engagement=3, verified=True)
-        test_id = import0['test']
+        test_id = import0["test"]
         # Correct number of issues are pushed to jira
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
@@ -625,7 +625,7 @@ def test_engagement_epic_mapping_disabled_no_epic_and_push_findings(self):
         # Set epic_mapping to false
self.toggle_jira_project_epic_mapping(eng, False) import0 = self.import_scan_with_params(self.zap_sample5_filename, push_to_jira=True, engagement=3, verified=True) - test_id = import0['test'] + test_id = import0["test"] # Correct number of issues are pushed to jira self.assert_jira_issue_count_in_test(test_id, 2) self.assert_jira_group_issue_count_in_test(test_id, 0) diff --git a/unittests/test_jira_template.py b/unittests/test_jira_template.py index a0a2fc562a..f62c693a6a 100644 --- a/unittests/test_jira_template.py +++ b/unittests/test_jira_template.py @@ -10,7 +10,7 @@ class JIRATemplatetTest(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): DojoTestCase.__init__(self, *args, **kwargs) @@ -22,17 +22,17 @@ def test_get_jira_issue_template_dir_from_project(self): product = Product.objects.get(id=1) jira_project = jira_helper.get_jira_project(product) # filepathfield contains full path - jira_project.issue_template_dir = 'issue-trackers/jira_full_extra' + jira_project.issue_template_dir = "issue-trackers/jira_full_extra" jira_project.save() - self.assertEqual(jira_helper.get_jira_issue_template(product), 'issue-trackers/jira_full_extra/jira-description.tpl') + self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full_extra/jira-description.tpl") def test_get_jira_issue_template_dir_from_instance(self): product = Product.objects.get(id=1) jira_project = jira_helper.get_jira_project(product) jira_project.issue_template_dir = None jira_project.save() - self.assertEqual(jira_helper.get_jira_issue_template(product), 'issue-trackers/jira_full/jira-description.tpl') + self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full/jira-description.tpl") def test_get_jira_project_and_instance_no_issue_template_dir(self): product = Product.objects.get(id=1) @@ -43,4 +43,4 @@ def test_get_jira_project_and_instance_no_issue_template_dir(self): jira_instance.issue_template_dir = None jira_instance.save() # no template should return default - self.assertEqual(jira_helper.get_jira_issue_template(product), 'issue-trackers/jira_full/jira-description.tpl') + self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full/jira-description.tpl") diff --git a/unittests/test_jira_webhook.py b/unittests/test_jira_webhook.py index d88161e46f..949a3732d3 100644 --- a/unittests/test_jira_webhook.py +++ b/unittests/test_jira_webhook.py @@ -14,7 +14,7 @@ class JIRAWebhookTest(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] jira_issue_comment_template_json = { "timestamp": 1605117321425, @@ -403,54 +403,54 @@ def __init__(self, *args, **kwargs): DojoTestCase.__init__(self, *args, **kwargs) def setUp(self): - self.correct_secret = '12345' - self.incorrect_secret = '1234567890' + self.correct_secret = "12345" + self.incorrect_secret = "1234567890" def test_webhook_get(self): - response = self.client.get(reverse('jira_web_hook')) + response = self.client.get(reverse("jira_web_hook")) self.assertEqual(405, response.status_code, response.content[:1000]) def test_webhook_jira_disabled(self): self.system_settings(enable_jira=False) - response = self.client.post(reverse('jira_web_hook')) + response = self.client.post(reverse("jira_web_hook")) self.assertEqual(200, response.status_code, response.content[:1000]) def test_webhook_disabled(self): self.system_settings(enable_jira=False, enable_jira_web_hook=False) - response = 
self.client.post(reverse('jira_web_hook'))
+        response = self.client.post(reverse("jira_web_hook"))
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_invalid_content_type(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=True)
-        response = self.client.post(reverse('jira_web_hook'))
+        response = self.client.post(reverse("jira_web_hook"))
         # content_type is incorrect, but the webhook still responds with 200
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_secret_disabled_no_secret(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=True)
-        response = self.client.post(reverse('jira_web_hook'))
+        response = self.client.post(reverse("jira_web_hook"))
         # content_type is incorrect, but the webhook still responds with 200
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_secret_disabled_secret(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=True)
-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.incorrect_secret, )))
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.incorrect_secret, )))
         # content_type is incorrect, but the webhook still responds with 200
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_secret_enabled_no_secret(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=False, jira_webhook_secret=self.correct_secret)
-        response = self.client.post(reverse('jira_web_hook'))
+        response = self.client.post(reverse("jira_web_hook"))
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_secret_enabled_incorrect_secret(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=False, jira_webhook_secret=self.correct_secret)
-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.incorrect_secret, )))
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.incorrect_secret, )))
         self.assertEqual(200, response.status_code, response.content[:1000])

     def test_webhook_secret_enabled_correct_secret(self):
         self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=False, jira_webhook_secret=self.correct_secret)
-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )))
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )))
         # content_type is incorrect, but the webhook still responds with 200
         self.assertEqual(200, response.status_code, response.content[:1000])
@@ -463,7 +463,7 @@ def test_webhook_comment_on_finding(self):
         finding = jira_issue.finding
         notes_count_before = finding.notes.count()

-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )),
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )),
                                     self.jira_issue_comment_template_json,
                                     content_type="application/json")
@@ -486,10 +486,10 @@ def test_webhook_comment_on_finding_from_dojo_note(self):
         notes_count_before = finding.notes.count()

         body = json.loads(json.dumps(self.jira_issue_comment_template_json))
-        body['comment']['updateAuthor']['name'] = "defect.dojo"
-        body['comment']['updateAuthor']['displayName'] = "Defect Dojo"
+        body["comment"]["updateAuthor"]["name"] = "defect.dojo"
+        body["comment"]["updateAuthor"]["displayName"] = "Defect Dojo"

-        response = self.client.post(reverse('jira_web_hook_secret', 
args=(self.correct_secret, )), + response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )), body, content_type="application/json") @@ -517,10 +517,10 @@ def test_webhook_comment_on_finding_from_dojo_note_with_email(self): jira_instance.save() body = json.loads(json.dumps(self.jira_issue_comment_template_json_with_email)) - body['comment']['updateAuthor']['emailAddress'] = "defect.dojo@testme.com" - body['comment']['updateAuthor']['displayName'] = "Defect Dojo" + body["comment"]["updateAuthor"]["emailAddress"] = "defect.dojo@testme.com" + body["comment"]["updateAuthor"]["displayName"] = "Defect Dojo" - response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )), + response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )), body, content_type="application/json") @@ -543,13 +543,13 @@ def test_webhook_comment_on_finding_jira_under_path(self): # finding 5 has a JIRA issue in the initial fixture for unit tests with id=2 body = json.loads(json.dumps(self.jira_issue_comment_template_json)) - body['comment']['self'] = "http://www.testjira.com/my_little_happy_path_for_jira/rest/api/2/issue/2/comment/456843" + body["comment"]["self"] = "http://www.testjira.com/my_little_happy_path_for_jira/rest/api/2/issue/2/comment/456843" jira_issue = JIRA_Issue.objects.get(jira_id=2) finding = jira_issue.finding notes_count_before = finding.notes.count() - response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )), + response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )), self.jira_issue_comment_template_json, content_type="application/json") @@ -565,37 +565,37 @@ def test_webhook_comment_on_engagement(self): # 333 = engagement body = json.loads(json.dumps(self.jira_issue_comment_template_json)) - body['comment']['self'] = "http://www.testjira.com/rest/api/2/issue/333/comment/456843" + body["comment"]["self"] = "http://www.testjira.com/rest/api/2/issue/333/comment/456843" - response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )), + response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )), body, content_type="application/json") self.assertEqual(200, response.status_code, response.content[:1000]) - self.assertEqual(b'Comment for engagement ignored', response.content) + self.assertEqual(b"Comment for engagement ignored", response.content) def test_webhook_update_engagement(self): self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=False, jira_webhook_secret=self.correct_secret) # 333 = engagement body = json.loads(self.jira_issue_update_template_string) - body['issue']['id'] = 333 + body["issue"]["id"] = 333 - response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )), + response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )), body, content_type="application/json") self.assertEqual(200, response.status_code, response.content[:1000]) - self.assertEqual(b'Update for engagement ignored', response.content) + self.assertEqual(b"Update for engagement ignored", response.content) def test_webhook_comment_no_finding_no_engagement(self): self.system_settings(enable_jira=True, enable_jira_web_hook=True, disable_jira_webhook_secret=False, jira_webhook_secret=self.correct_secret) # 666 = nothing attached to JIRA_Issue body = json.loads(json.dumps(self.jira_issue_comment_template_json)) - 
body['comment']['self'] = "http://www.testjira.com/rest/api/2/issue/666/comment/456843"
+        body["comment"]["self"] = "http://www.testjira.com/rest/api/2/issue/666/comment/456843"

-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )),
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )),
                                     body,
                                     content_type="application/json")
@@ -606,9 +606,9 @@ def test_webhook_update_no_finding_no_engagement(self):
         # 999 = nothing attached to JIRA_Issue
         body = json.loads(self.jira_issue_update_template_string)
-        body['issue']['id'] = 999
+        body["issue"]["id"] = 999

-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )),
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )),
                                     body,
                                     content_type="application/json")
@@ -619,9 +619,9 @@ def test_webhook_comment_no_jira_issue_at_all(self):
         # 999 = no JIRA_Issue at all
         body = json.loads(json.dumps(self.jira_issue_comment_template_json))
-        body['comment']['self'] = "http://www.testjira.com/rest/api/2/issue/999/comment/456843"
+        body["comment"]["self"] = "http://www.testjira.com/rest/api/2/issue/999/comment/456843"

-        response = self.client.post(reverse('jira_web_hook_secret', args=(self.correct_secret, )),
+        response = self.client.post(reverse("jira_web_hook_secret", args=(self.correct_secret, )),
                                     body,
                                     content_type="application/json")
@@ -632,9 +632,9 @@ def test_webhook_update_no_jira_issue_at_all(self):
         # 666 = nothing attached to JIRA_Issue
         body = json.loads(self.jira_issue_update_template_string)
-        body['issue']['id'] = 666
+        body["issue"]["id"] = 666

-        response = self.client.post(reverse('jira_web_hook_secret',
+        response = self.client.post(reverse("jira_web_hook_secret",
                                             args=(self.correct_secret, )),
                                     body,
                                     content_type="application/json")
diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py
index c8fdc30007..c52c602ea3 100644
--- a/unittests/test_metrics_queries.py
+++ b/unittests/test_metrics_queries.py
@@ -21,16 +21,16 @@ def add(*args, **kwargs):


 class FindingQueriesTest(DojoTestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def setUp(self):
-        user = User.objects.get(username='user1')
-        self.request = RequestFactory().get(reverse('metrics'))
+        user = User.objects.get(username="user1")
+        self.request = RequestFactory().get(reverse("metrics"))
         self.request.user = user
         self.request._messages = MockMessages()

     def test_finding_queries_no_data(self):
-        user3 = User.objects.get(username='user3')
+        user3 = User.objects.get(username="user3")
         self.request.user = user3

         product_types = []
@@ -40,11 +40,11 @@ def test_finding_queries_no_data(self):
         )

         self.assertSequenceEqual(
-            finding_queries['all'].values(),
+            finding_queries["all"].values(),
             [],
         )

-    @patch('django.utils.timezone.now')
+    @patch("django.utils.timezone.now")
     def test_finding_queries(self, mock_timezone):
         mock_datetime = datetime(2020, 12, 9, tzinfo=timezone.utc)
         mock_timezone.return_value = mock_datetime
@@ -60,96 +60,96 @@ def test_finding_queries(self, mock_timezone):
         self.assertSequenceEqual(
             list(finding_queries.keys()),
             [
-                'all',
-                'closed',
-                'accepted',
-                'accepted_count',
-                'top_ten',
-                'monthly_counts',
-                'weekly_counts',
-                'weeks_between',
-                'start_date',
-                'end_date',
-                'form',
+                "all",
+                "closed",
+                "accepted",
+                "accepted_count",
+                "top_ten",
+                "monthly_counts",
+                "weekly_counts",
+                "weeks_between",
+                "start_date",
+                "end_date",
+                
"form", ], ) # Assert that we get expected querysets back. This is to be used to # support refactoring, in attempt of lowering the query count. self.assertSequenceEqual( - finding_queries['all'].values(), + finding_queries["all"].values(), [], # [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}] ) self.assertSequenceEqual( - finding_queries['closed'].values(), + finding_queries["closed"].values(), [], ) self.assertSequenceEqual( - finding_queries['accepted'].values(), + finding_queries["accepted"].values(), [], ) self.assertSequenceEqual( - list(finding_queries['accepted_count'].values()), + list(finding_queries["accepted_count"].values()), [0, 0, 0, 0, 0, 0], ) self.assertSequenceEqual( - finding_queries['top_ten'].values(), + finding_queries["top_ten"].values(), [], ) self.assertEqual( - list(finding_queries['monthly_counts'].values()), + list(finding_queries["monthly_counts"].values()), [ [ - {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}, - {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}, + {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, ], [ - {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, + {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, 
"info": 0, "total": 0}, + {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], [ - {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, + {"epoch": 1604188800000, "grouped_date": date(2020, 11, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1606780800000, "grouped_date": date(2020, 12, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], ], ) self.assertEqual( - finding_queries['weekly_counts'], + finding_queries["weekly_counts"], { - 'opened_per_period': [ - {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}, - {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}, - {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}, + "opened_per_period": [ + {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, + {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, + {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "closed": 0}, ], - 'accepted_per_period': [ - {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, - {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, - {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, + "accepted_per_period": [ + {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, ], - 'active_per_period': [ - {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, - {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, - {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}, + "active_per_period": [ + {"epoch": 1606694400000, "grouped_date": date(2020, 11, 30), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1607299200000, "grouped_date": date(2020, 12, 7), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, + {"epoch": 1607904000000, "grouped_date": date(2020, 12, 14), "total": 0, "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0}, ], }, ) - self.assertEqual(finding_queries['weeks_between'], 2) - 
self.assertIsInstance(finding_queries['start_date'], datetime) - self.assertIsInstance(finding_queries['end_date'], datetime) + self.assertEqual(finding_queries["weeks_between"], 2) + self.assertIsInstance(finding_queries["start_date"], datetime) + self.assertIsInstance(finding_queries["end_date"], datetime) class EndpointQueriesTest(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - user = User.objects.get(username='user1') - self.request = RequestFactory().get(reverse('metrics')) + user = User.objects.get(username="user1") + self.request = RequestFactory().get(reverse("metrics")) self.request.user = user self.request._messages = MockMessages() def test_endpoint_queries_no_data(self): - user3 = User.objects.get(username='user3') + user3 = User.objects.get(username="user3") self.request.user = user3 product_types = [] @@ -159,7 +159,7 @@ def test_endpoint_queries_no_data(self): ) self.assertSequenceEqual( - endpoint_queries['all'].values(), + endpoint_queries["all"].values(), [], ) @@ -175,86 +175,86 @@ def test_endpoint_queries(self): self.assertSequenceEqual( list(endpoint_queries.keys()), [ - 'all', - 'closed', - 'accepted', - 'accepted_count', - 'top_ten', - 'monthly_counts', - 'weekly_counts', - 'weeks_between', - 'start_date', - 'end_date', - 'form', + "all", + "closed", + "accepted", + "accepted_count", + "top_ten", + "monthly_counts", + "weekly_counts", + "weeks_between", + "start_date", + "end_date", + "form", ], ) # Assert that we get expected querysets back. This is to be used to # support refactoring, in attempt of lowering the query count. self.assertSequenceEqual( - endpoint_queries['all'].values(), + endpoint_queries["all"].values(), [ - {'id': 1, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 2, 'finding_id': 2, 'endpoint__product__prod_type__member': False, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, - {'id': 3, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': True, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 5, 'finding_id': 228, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, - {'id': 4, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': True, 'risk_accepted': False, 'endpoint_id': 5, 'finding_id': 229, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, - {'id': 5, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': True, 'endpoint_id': 5, 'finding_id': 230, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 
'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, - {'id': 7, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 7, 'finding_id': 227, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, - {'id': 8, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 8, 'finding_id': 231, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}, + {"id": 1, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": False, "risk_accepted": False, "endpoint_id": 2, "finding_id": 2, "endpoint__product__prod_type__member": False, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, + {"id": 3, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": True, "out_of_scope": False, "risk_accepted": False, "endpoint_id": 5, "finding_id": 228, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, + {"id": 4, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": True, "risk_accepted": False, "endpoint_id": 5, "finding_id": 229, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, + {"id": 5, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": False, "risk_accepted": True, "endpoint_id": 5, "finding_id": 230, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, + {"id": 7, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": False, "risk_accepted": False, "endpoint_id": 7, "finding_id": 227, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, + {"id": 8, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 
"mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": False, "risk_accepted": False, "endpoint_id": 8, "finding_id": 231, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}, ], ) self.assertSequenceEqual( - endpoint_queries['closed'].values(), + endpoint_queries["closed"].values(), [], ) self.assertSequenceEqual( - endpoint_queries['accepted'].values(), - [{'id': 5, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': True, 'endpoint_id': 5, 'finding_id': 230, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}], + endpoint_queries["accepted"].values(), + [{"id": 5, "date": date(2020, 7, 1), "last_modified": datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), "mitigated": False, "mitigated_time": None, "mitigated_by_id": None, "false_positive": False, "out_of_scope": False, "risk_accepted": True, "endpoint_id": 5, "finding_id": 230, "endpoint__product__prod_type__member": True, "endpoint__product__member": True, "endpoint__product__prod_type__authorized_group": False, "endpoint__product__authorized_group": False}], ) self.assertSequenceEqual( - list(endpoint_queries['accepted_count'].values()), + list(endpoint_queries["accepted_count"].values()), [1, 0, 0, 0, 0, 1], ) self.assertSequenceEqual( - endpoint_queries['top_ten'].values(), + endpoint_queries["top_ten"].values(), [], ) self.assertEqual( - list(endpoint_queries['monthly_counts'].values()), + list(endpoint_queries["monthly_counts"].values()), [ [ - {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}, - {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0}, + {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 5, "total": 6, "closed": 0}, ], [ - {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5}, + {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 4, "total": 5}, ], [ - {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1}, + {"epoch": 1590969600000, "grouped_date": date(2020, 6, 1), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1593561600000, "grouped_date": date(2020, 7, 1), "critical": 0, 
"high": 0, "medium": 0, "low": 0, "info": 1, "total": 1}, ], ], ) self.assertEqual( - list(endpoint_queries['weekly_counts'].values()), + list(endpoint_queries["weekly_counts"].values()), [ [ - {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}, - {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0}, - {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}, + {"epoch": 1592784000000, "grouped_date": date(2020, 6, 22), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, + {"epoch": 1593388800000, "grouped_date": date(2020, 6, 29), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 5, "total": 6, "closed": 0}, + {"epoch": 1593993600000, "grouped_date": date(2020, 7, 6), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0, "closed": 0}, ], [ - {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5}, - {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, + {"epoch": 1592784000000, "grouped_date": date(2020, 6, 22), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1593388800000, "grouped_date": date(2020, 6, 29), "critical": 0, "high": 1, "medium": 0, "low": 0, "info": 4, "total": 5}, + {"epoch": 1593993600000, "grouped_date": date(2020, 7, 6), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], [ - {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, - {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1}, - {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}, + {"epoch": 1592784000000, "grouped_date": date(2020, 6, 22), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, + {"epoch": 1593388800000, "grouped_date": date(2020, 6, 29), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 1, "total": 1}, + {"epoch": 1593993600000, "grouped_date": date(2020, 7, 6), "critical": 0, "high": 0, "medium": 0, "low": 0, "info": 0, "total": 0}, ], ], ) - self.assertEqual(endpoint_queries['weeks_between'], 2) - self.assertEqual(endpoint_queries['start_date'], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) - self.assertEqual(endpoint_queries['end_date'], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) + self.assertEqual(endpoint_queries["weeks_between"], 2) + self.assertEqual(endpoint_queries["start_date"], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) + self.assertEqual(endpoint_queries["end_date"], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) diff --git a/unittests/test_migrations.py b/unittests/test_migrations.py index 16b7525c47..804739e5cb 100644 --- a/unittests/test_migrations.py +++ b/unittests/test_migrations.py @@ -7,17 +7,17 @@ @skip("Outdated - this class was testing some version of migration; it is not needed anymore") class 
TestOptiEndpointStatus(MigratorTestCase): - migrate_from = ('dojo', '0171_jira_labels_per_product_and_engagement') - migrate_to = ('dojo', '0172_optimize_usage_of_endpoint_status') + migrate_from = ("dojo", "0171_jira_labels_per_product_and_engagement") + migrate_to = ("dojo", "0172_optimize_usage_of_endpoint_status") def prepare(self): - Product_Type = self.old_state.apps.get_model('dojo', 'Product_Type') - Product = self.old_state.apps.get_model('dojo', 'Product') - Engagement = self.old_state.apps.get_model('dojo', 'Engagement') - Test = self.old_state.apps.get_model('dojo', 'Test') - Finding = self.old_state.apps.get_model('dojo', 'Finding') - Endpoint = self.old_state.apps.get_model('dojo', 'Endpoint') - Endpoint_Status = self.old_state.apps.get_model('dojo', 'Endpoint_Status') + Product_Type = self.old_state.apps.get_model("dojo", "Product_Type") + Product = self.old_state.apps.get_model("dojo", "Product") + Engagement = self.old_state.apps.get_model("dojo", "Engagement") + Test = self.old_state.apps.get_model("dojo", "Test") + Finding = self.old_state.apps.get_model("dojo", "Finding") + Endpoint = self.old_state.apps.get_model("dojo", "Endpoint") + Endpoint_Status = self.old_state.apps.get_model("dojo", "Endpoint_Status") self.prod_type = Product_Type.objects.create() self.product = Product.objects.create(prod_type=self.prod_type) @@ -36,7 +36,7 @@ def prepare(self): user = get_user_model().objects.create().pk self.finding = Finding.objects.create(test_id=self.test.pk, reporter_id=user).pk - self.endpoint = Endpoint.objects.create(host='foo.bar', product_id=self.product.pk).pk + self.endpoint = Endpoint.objects.create(host="foo.bar", product_id=self.product.pk).pk self.endpoint_status = Endpoint_Status.objects.create( finding_id=self.finding, endpoint_id=self.endpoint, @@ -72,42 +72,42 @@ def case_list_with_status_endpoint(self, endpoint): return endpoint.status_endpoint def presudotest_before_migration(self): - Finding = self.old_state.apps.get_model('dojo', 'Finding') - Endpoint = self.old_state.apps.get_model('dojo', 'Endpoint') - Endpoint_Status = self.old_state.apps.get_model('dojo', 'Endpoint_Status') + Finding = self.old_state.apps.get_model("dojo", "Finding") + Endpoint = self.old_state.apps.get_model("dojo", "Endpoint") + Endpoint_Status = self.old_state.apps.get_model("dojo", "Endpoint_Status") - with self.subTest('Old: Add existing EPS to endpoint'): + with self.subTest("Old: Add existing EPS to endpoint"): self.case_add_status_endpoint( Endpoint.objects.get(id=self.endpoint), Endpoint_Status.objects.get(id=self.endpoint_status), ) - with self.subTest('Old: Add existing EPS to finding'): + with self.subTest("Old: Add existing EPS to finding"): self.case_add_status_finding( Finding.objects.get(id=self.finding), Endpoint_Status.objects.get(id=self.endpoint_status), ) - with self.subTest('Old: From finding get endpoints'): + with self.subTest("Old: From finding get endpoints"): ep = self.case_from_finding_get_endpoints( Finding.objects.get(id=self.finding), ).all() self.assertEqual(ep.all().count(), 1, ep) - with self.subTest('Old: Add existing endpoint to finding'): + with self.subTest("Old: Add existing endpoint to finding"): self.case_add_endpoint_finding( Finding.objects.get(id=self.finding), Endpoint.objects.get(id=self.endpoint).pk, ) - with self.subTest('Old: List EPS from finding'): + with self.subTest("Old: List EPS from finding"): eps = self.case_list_with_status_finding( Finding.objects.get(id=self.finding), ) self.assertEqual(eps.all().count(), 1, ep) 
self.assertIsInstance(eps.all().first(), Endpoint_Status) - with self.subTest('Old: List EPS from endpoint'): + with self.subTest("Old: List EPS from endpoint"): with self.assertRaises(AttributeError) as exc: eps = self.case_list_with_status_endpoint( Endpoint.objects.get(id=self.endpoint), @@ -115,11 +115,11 @@ def presudotest_before_migration(self): self.assertEqual(str(exc.exception), "'Endpoint' object has no attribute 'status_endpoint'") def test_after_migration(self): - Finding = self.new_state.apps.get_model('dojo', 'Finding') - Endpoint = self.new_state.apps.get_model('dojo', 'Endpoint') - Endpoint_Status = self.new_state.apps.get_model('dojo', 'Endpoint_Status') + Finding = self.new_state.apps.get_model("dojo", "Finding") + Endpoint = self.new_state.apps.get_model("dojo", "Endpoint") + Endpoint_Status = self.new_state.apps.get_model("dojo", "Endpoint_Status") - with self.subTest('New: Add existing EPS to endpoint'): + with self.subTest("New: Add existing EPS to endpoint"): with self.assertRaises(AttributeError) as exc: self.case_add_status_endpoint( Endpoint.objects.get(id=self.endpoint), @@ -127,7 +127,7 @@ def test_after_migration(self): ) self.assertEqual(str(exc.exception), "'Endpoint' object has no attribute 'endpoint_status'") - with self.subTest('New: Add existing EPS to finding'): + with self.subTest("New: Add existing EPS to finding"): with self.assertRaises(AttributeError) as exc: self.case_add_status_endpoint( Finding.objects.get(id=self.finding), @@ -135,27 +135,27 @@ def test_after_migration(self): ) self.assertEqual(str(exc.exception), "'Finding' object has no attribute 'endpoint_status'") - with self.subTest('New: From finding get endpoints'): + with self.subTest("New: From finding get endpoints"): ep = self.case_from_finding_get_endpoints( Finding.objects.get(id=self.finding), ).all() self.assertEqual(ep.all().count(), 1, ep) - with self.subTest('New: Add existing endpoint to finding'): + with self.subTest("New: Add existing endpoint to finding"): # Yes, this method is still available. 
It could create Endpoint_Status with default values self.case_add_endpoint_finding( Finding.objects.get(id=self.finding), Endpoint.objects.get(id=self.endpoint), ) - with self.subTest('New: List EPS from finding'): + with self.subTest("New: List EPS from finding"): eps = self.case_list_with_status_finding( Finding.objects.get(id=self.finding), ) self.assertEqual(eps.all().count(), 1, ep) self.assertIsInstance(eps.all().first(), Endpoint_Status) - with self.subTest('New: List EPS from endpoint'): + with self.subTest("New: List EPS from endpoint"): eps = self.case_list_with_status_endpoint( Endpoint.objects.get(id=self.endpoint), ) diff --git a/unittests/test_notifications.py b/unittests/test_notifications.py index 941b648753..6ad81ccc6f 100644 --- a/unittests/test_notifications.py +++ b/unittests/test_notifications.py @@ -27,16 +27,16 @@ class TestNotifications(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def test_merge_notifications_list(self): - global_personal_notifications = Notifications(user=User.objects.get(username='admin')) - personal_product_notifications = Notifications(user=User.objects.get(username='admin'), product=Product.objects.all()[0]) + global_personal_notifications = Notifications(user=User.objects.get(username="admin")) + personal_product_notifications = Notifications(user=User.objects.get(username="admin"), product=Product.objects.all()[0]) - global_personal_notifications.product_added = ['alert'] - global_personal_notifications.test_added = '' + global_personal_notifications.product_added = ["alert"] + global_personal_notifications.test_added = "" global_personal_notifications.scan_added = None - global_personal_notifications.other = ['slack', 'mail'] + global_personal_notifications.other = ["slack", "mail"] global_personal_notifications.save() # we have to save it and retrieve it because only then the fields get turned into lists... 
@@ -44,8 +44,8 @@ def test_merge_notifications_list(self): # print(vars(global_personal_notifications)) - personal_product_notifications.product_added = ['mail'] - personal_product_notifications.test_added = ['mail', 'alert'] + personal_product_notifications.product_added = ["mail"] + personal_product_notifications.test_added = ["mail", "alert"] personal_product_notifications.scan_added = None # print(vars(personal_product_notifications)) @@ -59,337 +59,337 @@ def test_merge_notifications_list(self): # print(vars(merged_notifications)) - self.assertEqual('alert' in merged_notifications.product_added, True) - self.assertEqual('mail' in merged_notifications.product_added, True) - self.assertEqual('slack' in merged_notifications.product_added, False) + self.assertEqual("alert" in merged_notifications.product_added, True) + self.assertEqual("mail" in merged_notifications.product_added, True) + self.assertEqual("slack" in merged_notifications.product_added, False) self.assertEqual(len(merged_notifications.product_added), 2) - self.assertEqual('alert' in merged_notifications.test_added, True) - self.assertEqual('mail' in merged_notifications.test_added, True) - self.assertEqual('slack' in merged_notifications.test_added, False) + self.assertEqual("alert" in merged_notifications.test_added, True) + self.assertEqual("mail" in merged_notifications.test_added, True) + self.assertEqual("slack" in merged_notifications.test_added, False) self.assertEqual(len(merged_notifications.test_added), 2) - self.assertEqual('alert' in merged_notifications.scan_added, False) - self.assertEqual('mail' in merged_notifications.scan_added, False) - self.assertEqual('slack' in merged_notifications.scan_added, False) + self.assertEqual("alert" in merged_notifications.scan_added, False) + self.assertEqual("mail" in merged_notifications.scan_added, False) + self.assertEqual("slack" in merged_notifications.scan_added, False) self.assertEqual(len(merged_notifications.scan_added), 0) - self.assertEqual('alert' in merged_notifications.other, True) - self.assertEqual('mail' in merged_notifications.other, True) - self.assertEqual('slack' in merged_notifications.other, True) # default alert from global + self.assertEqual("alert" in merged_notifications.other, True) + self.assertEqual("mail" in merged_notifications.other, True) + self.assertEqual("slack" in merged_notifications.other, True) # default alert from global self.assertEqual(len(merged_notifications.other), 3) - self.assertEqual(merged_notifications.other, {'alert', 'mail', 'slack'}) + self.assertEqual(merged_notifications.other, {"alert", "mail", "slack"}) - @patch('dojo.notifications.helper.send_alert_notification', wraps=send_alert_notification) + @patch("dojo.notifications.helper.send_alert_notification", wraps=send_alert_notification) def test_notifications_system_level_trump(self, mock): - notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username='admin')) + notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username="admin")) notif_system, _ = Notifications.objects.get_or_create(user=None, template=False) last_count = mock.call_count - with self.subTest('user off, system off'): + with self.subTest("user off, system off"): notif_user.user_mentioned = () # no alert notif_user.save() notif_system.user_mentioned = () # no alert notif_system.save() - create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin']) + create_notification(event="user_mentioned", title="user_mentioned", 
recipients=["admin"])
             self.assertEqual(mock.call_count, last_count)

         last_count = mock.call_count
-        with self.subTest('user off, system on'):
+        with self.subTest("user off, system on"):
             notif_user.user_mentioned = ()  # no alert
             notif_user.save()
             notif_system.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)

        # Small note for this test case: the system setting trumps only in the positive direction - the system cannot disable a kind of notification that the user has enabled
         last_count = mock.call_count
-        with self.subTest('user on, system off'):
+        with self.subTest("user on, system off"):
             notif_user.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_user.save()
             notif_system.user_mentioned = ()  # no alert
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)

         last_count = mock.call_count
-        with self.subTest('user on, system on'):
+        with self.subTest("user on, system on"):
             notif_user.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_user.save()
             notif_system.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)
         last_count = mock.call_count

-    @patch('dojo.notifications.helper.send_alert_notification', wraps=send_alert_notification)
+    @patch("dojo.notifications.helper.send_alert_notification", wraps=send_alert_notification)
     def test_non_default_other_notifications(self, mock):
-        notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username='admin'))
+        notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username="admin"))
         notif_system, _ = Notifications.objects.get_or_create(user=None, template=False)

         last_count = mock.call_count
-        with self.subTest('do not notify other'):
+        with self.subTest("do not notify other"):
             notif_user.other = ()  # no alert
             notif_user.save()
-            create_notification(event="dummy_bar_event", recipients=['admin'])
+            create_notification(event="dummy_bar_event", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count)

         last_count = mock.call_count
-        with self.subTest('notify other'):
+        with self.subTest("notify other"):
             notif_user.other = DEFAULT_NOTIFICATION  # alert only
             notif_user.save()
-            create_notification(event="dummy_foo_event", title="title_for_dummy_foo_event", description="description_for_dummy_foo_event", recipients=['admin'])
+            create_notification(event="dummy_foo_event", title="title_for_dummy_foo_event", description="description_for_dummy_foo_event", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)
-            self.assertEqual(mock.call_args_list[0].args[0], 'dummy_foo_event')
-            alert = Alerts.objects.get(title='title_for_dummy_foo_event')
+            self.assertEqual(mock.call_args_list[0].args[0], "dummy_foo_event")
+            alert = Alerts.objects.get(title="title_for_dummy_foo_event")
             self.assertEqual(alert.source, "Dummy Foo Event")

         last_count = mock.call_count
-        with self.subTest('user off, system off'):
+        with self.subTest("user off, system off"):
             notif_user.user_mentioned = ()  # no alert
             notif_user.save()
             notif_system.user_mentioned = ()  # no alert
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 0)

         last_count = mock.call_count
-        with self.subTest('user off, system on'):
+        with self.subTest("user off, system on"):
             notif_user.user_mentioned = ()  # no alert
             notif_user.save()
             notif_system.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)

        # Small note for this test case: the system setting trumps only in the positive direction - the system cannot disable a kind of notification that the user has enabled
         last_count = mock.call_count
-        with self.subTest('user on, system off'):
+        with self.subTest("user on, system off"):
             notif_user.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_user.save()
             notif_system.user_mentioned = ()  # no alert
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)

         last_count = mock.call_count
-        with self.subTest('user on, system on'):
+        with self.subTest("user on, system on"):
             notif_user.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_user.save()
             notif_system.user_mentioned = DEFAULT_NOTIFICATION  # alert only
             notif_system.save()
-            create_notification(event="user_mentioned", title="user_mentioned", recipients=['admin'])
+            create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"])
             self.assertEqual(mock.call_count, last_count + 1)


 class TestNotificationTriggers(DojoTestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def setUp(self):
         self.notification_tester = Dojo_User.objects.get(username="admin")

-    @patch('dojo.notifications.helper.process_notifications')
+    @patch("dojo.notifications.helper.process_notifications")
     def test_product_types(self, mock):
         last_count = mock.call_count
-        with self.subTest('product_type_added'):
+        with self.subTest("product_type_added"):
             with set_actor(self.notification_tester):
-                prod_type = Product_Type.objects.create(name='notif prod type')
+                prod_type = Product_Type.objects.create(name="notif prod type")
             self.assertEqual(mock.call_count, last_count + 4)
-            self.assertEqual(mock.call_args_list[-1].args[0], 'product_type_added')
-            self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/product/type/{prod_type.id}')
+            self.assertEqual(mock.call_args_list[-1].args[0], "product_type_added")
+            self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/product/type/{prod_type.id}")

         last_count = mock.call_count
-        with self.subTest('product_type_deleted'):
+        with self.subTest("product_type_deleted"):
             with set_actor(self.notification_tester):
                 prod_type.delete()
             self.assertEqual(mock.call_count, last_count + 1)
-            self.assertEqual(mock.call_args_list[-1].args[0], 'product_type_deleted')
-            self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The product type "notif prod type" was deleted by admin')
-            
self.assertEqual(mock.call_args_list[-1].kwargs['url'], '/product/type') + self.assertEqual(mock.call_args_list[-1].args[0], "product_type_deleted") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/product/type") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") def test_products(self, mock): last_count = mock.call_count - with self.subTest('product_added'): + with self.subTest("product_added"): with set_actor(self.notification_tester): prod_type = Product_Type.objects.first() - prod, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name') + prod, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name") self.assertEqual(mock.call_count, last_count + 5) - self.assertEqual(mock.call_args_list[-1].args[0], 'product_added') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/product/{prod.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "product_added") + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/product/{prod.id}") last_count = mock.call_count - with self.subTest('product_deleted'): + with self.subTest("product_deleted"): with set_actor(self.notification_tester): prod.delete() self.assertEqual(mock.call_count, last_count + 2) - self.assertEqual(mock.call_args_list[-1].args[0], 'product_deleted') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The product "prod name" was deleted by admin') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], '/product') + self.assertEqual(mock.call_args_list[-1].args[0], "product_deleted") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product "prod name" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/product") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") def test_engagements(self, mock): last_count = mock.call_count - with self.subTest('engagement_added'): + with self.subTest("engagement_added"): with set_actor(self.notification_tester): prod = Product.objects.first() eng = Engagement.objects.create(product=prod, target_start=timezone.now(), target_end=timezone.now()) self.assertEqual(mock.call_count, last_count + 5) - self.assertEqual(mock.call_args_list[-1].args[0], 'engagement_added') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/engagement/{eng.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "engagement_added") + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/engagement/{eng.id}") last_count = mock.call_count - with self.subTest('close_engagement'): + with self.subTest("close_engagement"): with set_actor(self.notification_tester): eng.status = "Completed" eng.save() self.assertEqual(mock.call_count, last_count + 5) - self.assertEqual(mock.call_args_list[-1].args[0], 'engagement_closed') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/engagement/{eng.id}/finding/all') + self.assertEqual(mock.call_args_list[-1].args[0], "engagement_closed") + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/engagement/{eng.id}/finding/all") last_count = mock.call_count - with self.subTest('reopen_engagement'): + with self.subTest("reopen_engagement"): with set_actor(self.notification_tester): eng.status = "In Progress" eng.save() self.assertEqual(mock.call_count, last_count + 5) - 
self.assertEqual(mock.call_args_list[-1].args[0], 'engagement_reopened') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/engagement/{eng.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "engagement_reopened") + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/engagement/{eng.id}") eng.status = "Not Started" eng.save() last_count = mock.call_count - with self.subTest('no reopen_engagement from not started'): + with self.subTest("no reopen_engagement from not started"): with set_actor(self.notification_tester): eng.status = "In Progress" eng.save() self.assertEqual(mock.call_count, last_count) prod_type = Product_Type.objects.first() - prod1, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name 1') - _ = Engagement.objects.create(product=prod1, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username='admin')) - prod2, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name 2') - eng2 = Engagement.objects.create(product=prod2, name="Testing engagement", target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username='admin')) + prod1, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name 1") + _ = Engagement.objects.create(product=prod1, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username="admin")) + prod2, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name 2") + eng2 = Engagement.objects.create(product=prod2, name="Testing engagement", target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username="admin")) - with self.subTest('engagement_deleted by product'): # in case of product removal, we are not notifying about removal + with self.subTest("engagement_deleted by product"): # in case of product removal, we are not notifying about removal with set_actor(self.notification_tester): prod1.delete() for call in mock.call_args_list: - self.assertNotEqual(call.args[0], 'engagement_deleted') + self.assertNotEqual(call.args[0], "engagement_deleted") last_count = mock.call_count - with self.subTest('engagement_deleted itself'): + with self.subTest("engagement_deleted itself"): with set_actor(self.notification_tester): eng2.delete() self.assertEqual(mock.call_count, last_count + 1) - self.assertEqual(mock.call_args_list[-1].args[0], 'engagement_deleted') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The engagement "Testing engagement" was deleted by admin') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/product/{prod2.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "engagement_deleted") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The engagement "Testing engagement" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/product/{prod2.id}") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") def test_endpoints(self, mock): prod_type = Product_Type.objects.first() - prod1, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name 1') - Endpoint.objects.get_or_create(product=prod1, host='host1') - prod2, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name 2') - endpoint2, _ = Endpoint.objects.get_or_create(product=prod2, host='host2') + prod1, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name 1") + Endpoint.objects.get_or_create(product=prod1, host="host1") + prod2, _ = 
Product.objects.get_or_create(prod_type=prod_type, name="prod name 2") + endpoint2, _ = Endpoint.objects.get_or_create(product=prod2, host="host2") - with self.subTest('endpoint_deleted by product'): # in case of product removal, we are not notifying about removal + with self.subTest("endpoint_deleted by product"): # in case of product removal, we are not notifying about removal with set_actor(self.notification_tester): prod1.delete() for call in mock.call_args_list: - self.assertNotEqual(call.args[0], 'endpoint_deleted') + self.assertNotEqual(call.args[0], "endpoint_deleted") last_count = mock.call_count - with self.subTest('endpoint_deleted itself'): + with self.subTest("endpoint_deleted itself"): with set_actor(self.notification_tester): endpoint2.delete() self.assertEqual(mock.call_count, last_count + 2) - self.assertEqual(mock.call_args_list[-1].args[0], 'endpoint_deleted') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The endpoint "host2" was deleted by admin') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], '/endpoint') + self.assertEqual(mock.call_args_list[-1].args[0], "endpoint_deleted") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The endpoint "host2" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/endpoint") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") def test_tests(self, mock): prod_type = Product_Type.objects.first() - prod, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name') - eng1 = Engagement.objects.create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username='admin')) + prod, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name") + eng1 = Engagement.objects.create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username="admin")) Test.objects.create(engagement=eng1, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id) - eng2 = Engagement.objects.create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username='admin')) + eng2 = Engagement.objects.create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username="admin")) test2 = Test.objects.create(engagement=eng2, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id) - with self.subTest('test_deleted by engagement'): # in case of engagement removal, we are not notifying about removal + with self.subTest("test_deleted by engagement"): # in case of engagement removal, we are not notifying about removal with set_actor(self.notification_tester): eng1.delete() for call in mock.call_args_list: - self.assertNotEqual(call.args[0], 'test_deleted') + self.assertNotEqual(call.args[0], "test_deleted") last_count = mock.call_count - with self.subTest('test_deleted itself'): + with self.subTest("test_deleted itself"): with set_actor(self.notification_tester): test2.delete() self.assertEqual(mock.call_count, last_count + 1) - self.assertEqual(mock.call_args_list[-1].args[0], 'test_deleted') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The test "Acunetix Scan" was deleted by admin') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/engagement/{eng2.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "test_deleted") + 
self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The test "Acunetix Scan" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/engagement/{eng2.id}") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") def test_finding_groups(self, mock): prod_type = Product_Type.objects.first() - prod, _ = Product.objects.get_or_create(prod_type=prod_type, name='prod name') - eng, _ = Engagement.objects.get_or_create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username='admin')) + prod, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name") + eng, _ = Engagement.objects.get_or_create(product=prod, target_start=timezone.now(), target_end=timezone.now(), lead=User.objects.get(username="admin")) test1, _ = Test.objects.get_or_create(engagement=eng, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id) - Finding_Group.objects.get_or_create(test=test1, creator=User.objects.get(username='admin')) + Finding_Group.objects.get_or_create(test=test1, creator=User.objects.get(username="admin")) test2, _ = Test.objects.get_or_create(engagement=eng, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id) - fg2, _ = Finding_Group.objects.get_or_create(test=test2, name="fg test", creator=User.objects.get(username='admin')) + fg2, _ = Finding_Group.objects.get_or_create(test=test2, name="fg test", creator=User.objects.get(username="admin")) - with self.subTest('test_deleted by engagement'): # in case of engagement removal, we are not notifying about removal + with self.subTest("test_deleted by engagement"): # in case of engagement removal, we are not notifying about removal with set_actor(self.notification_tester): test1.delete() for call in mock.call_args_list: - self.assertNotEqual(call.args[0], 'finding_group_deleted') + self.assertNotEqual(call.args[0], "finding_group_deleted") last_count = mock.call_count - with self.subTest('test_deleted itself'): + with self.subTest("test_deleted itself"): with set_actor(self.notification_tester): fg2.delete() self.assertEqual(mock.call_count, last_count + 5) - self.assertEqual(mock.call_args_list[-1].args[0], 'finding_group_deleted') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The finding group "fg test" was deleted by admin') - self.assertEqual(mock.call_args_list[-1].kwargs['url'], f'/test/{test2.id}') + self.assertEqual(mock.call_args_list[-1].args[0], "finding_group_deleted") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The finding group "fg test" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/test/{test2.id}") - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") @override_settings(ENABLE_AUDITLOG=True) def test_auditlog_on(self, mock): - prod_type = Product_Type.objects.create(name='notif prod type') + prod_type = Product_Type.objects.create(name="notif prod type") with set_actor(self.notification_tester): prod_type.delete() - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The product type "notif prod type" was deleted by admin') + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type" was deleted by admin') - @patch('dojo.notifications.helper.process_notifications') + 
@patch("dojo.notifications.helper.process_notifications") @override_settings(ENABLE_AUDITLOG=False) def test_auditlog_off(self, mock): - prod_type = Product_Type.objects.create(name='notif prod type') + prod_type = Product_Type.objects.create(name="notif prod type") with set_actor(self.notification_tester): prod_type.delete() - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The product type "notif prod type" was deleted') + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type" was deleted') class TestNotificationTriggersApi(APITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - token = Token.objects.get(user__username='admin') + token = Token.objects.get(user__username="admin") self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) - @patch('dojo.notifications.helper.process_notifications') + @patch("dojo.notifications.helper.process_notifications") @override_settings(ENABLE_AUDITLOG=True) def test_auditlog_on(self, mock): - prod_type = Product_Type.objects.create(name='notif prod type API') - self.client.delete(reverse('product_type-detail', args=(prod_type.pk,)), format='json') - self.assertEqual(mock.call_args_list[-1].kwargs['description'], 'The product type "notif prod type API" was deleted by admin') + prod_type = Product_Type.objects.create(name="notif prod type API") + self.client.delete(reverse("product_type-detail", args=(prod_type.pk,)), format="json") + self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type API" was deleted by admin') diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py index 53af54d17a..98856883e7 100644 --- a/unittests/test_parsers.py +++ b/unittests/test_parsers.py @@ -3,28 +3,28 @@ from .dojo_test_case import DojoTestCase, get_unit_tests_path -basedir = os.path.join(get_unit_tests_path(), '..') +basedir = os.path.join(get_unit_tests_path(), "..") class TestParsers(DojoTestCase): def test_file_existence(self): - for parser_dir in os.scandir(os.path.join(basedir, 'dojo', 'tools')): + for parser_dir in os.scandir(os.path.join(basedir, "dojo", "tools")): - if parser_dir.is_file() or parser_dir.name == '__pycache__': + if parser_dir.is_file() or parser_dir.name == "__pycache__": continue # this is not parser dir but some support file if parser_dir.name.startswith("api_"): doc_name = parser_dir.name[4:] - category = 'api' + category = "api" else: doc_name = parser_dir.name - category = 'file' + category = "file" if doc_name not in [ - 'checkmarx_osa', # it is documented in 'checkmarx' + "checkmarx_osa", # it is documented in 'checkmarx' ]: - with self.subTest(parser=parser_dir.name, category='docs'): - doc_file = os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', category, f"{doc_name}.md") + with self.subTest(parser=parser_dir.name, category="docs"): + doc_file = os.path.join(basedir, "docs", "content", "en", "integrations", "parsers", category, f"{doc_name}.md") self.assertTrue( os.path.isfile(doc_file), f"Documentation file '{doc_file}' is missing or using different name", @@ -49,37 +49,37 @@ def test_file_existence(self): if parser_dir.name not in [ # there is not exception for now ]: - with self.subTest(parser=parser_dir.name, category='parser'): - parser_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_parser.py") + with 
diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py
index 53af54d17a..98856883e7 100644
--- a/unittests/test_parsers.py
+++ b/unittests/test_parsers.py
@@ -3,28 +3,28 @@
 from .dojo_test_case import DojoTestCase, get_unit_tests_path

-basedir = os.path.join(get_unit_tests_path(), '..')
+basedir = os.path.join(get_unit_tests_path(), "..")


 class TestParsers(DojoTestCase):
     def test_file_existence(self):
-        for parser_dir in os.scandir(os.path.join(basedir, 'dojo', 'tools')):
+        for parser_dir in os.scandir(os.path.join(basedir, "dojo", "tools")):

-            if parser_dir.is_file() or parser_dir.name == '__pycache__':
+            if parser_dir.is_file() or parser_dir.name == "__pycache__":
                 continue  # this is not parser dir but some support file

             if parser_dir.name.startswith("api_"):
                 doc_name = parser_dir.name[4:]
-                category = 'api'
+                category = "api"
             else:
                 doc_name = parser_dir.name
-                category = 'file'
+                category = "file"

             if doc_name not in [
-                'checkmarx_osa',  # it is documented in 'checkmarx'
+                "checkmarx_osa",  # it is documented in 'checkmarx'
             ]:
-                with self.subTest(parser=parser_dir.name, category='docs'):
-                    doc_file = os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', category, f"{doc_name}.md")
+                with self.subTest(parser=parser_dir.name, category="docs"):
+                    doc_file = os.path.join(basedir, "docs", "content", "en", "integrations", "parsers", category, f"{doc_name}.md")
                     self.assertTrue(
                         os.path.isfile(doc_file),
                         f"Documentation file '{doc_file}' is missing or using different name",
@@ -49,37 +49,37 @@ def test_file_existence(self):
             if parser_dir.name not in [
                 # there is not exception for now
             ]:
-                with self.subTest(parser=parser_dir.name, category='parser'):
-                    parser_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_parser.py")
+                with self.subTest(parser=parser_dir.name, category="parser"):
+                    parser_test_file = os.path.join(basedir, "unittests", "tools", f"test_{parser_dir.name}_parser.py")
                     self.assertTrue(
                         os.path.isfile(parser_test_file),
                         f"Unittest of parser '{parser_test_file}' is missing or using different name",
                     )

             if parser_dir.name not in [
-                'vcg',  # content of the sample report is string the directly in unittest
+                "vcg",  # content of the sample report is a string directly in the unittest
             ]:
-                with self.subTest(parser=parser_dir.name, category='testfiles'):
-                    scan_dir = os.path.join(basedir, 'unittests', 'scans', parser_dir.name)
+                with self.subTest(parser=parser_dir.name, category="testfiles"):
+                    scan_dir = os.path.join(basedir, "unittests", "scans", parser_dir.name)
                     self.assertTrue(
                         os.path.isdir(scan_dir),
                         f"Test files for unittest of parser '{scan_dir}' are missing or using different name",
                     )

-            if category == 'api':
+            if category == "api":
                 if parser_dir.name not in [
-                    'api_blackduck',  # TODO
-                    'api_vulners',  # TODO
+                    "api_blackduck",  # TODO
+                    "api_vulners",  # TODO
                 ]:
-                    with self.subTest(parser=parser_dir.name, category='importer'):
-                        importer_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_importer.py")
+                    with self.subTest(parser=parser_dir.name, category="importer"):
+                        importer_test_file = os.path.join(basedir, "unittests", "tools", f"test_{parser_dir.name}_importer.py")
                         self.assertTrue(
                             os.path.isfile(importer_test_file),
                             f"Unittest of importer '{importer_test_file}' is missing or using different name",
                         )

-            for file in os.scandir(os.path.join(basedir, 'dojo', 'tools', parser_dir.name)):
-                if file.is_file() and file.name != '__pycache__' and file.name != "__init__.py":
-                    f = os.path.join(basedir, 'dojo', 'tools', parser_dir.name, file.name)
+            for file in os.scandir(os.path.join(basedir, "dojo", "tools", parser_dir.name)):
+                if file.is_file() and file.name != "__pycache__" and file.name != "__init__.py":
+                    f = os.path.join(basedir, "dojo", "tools", parser_dir.name, file.name)
                     read_true = False
                     with open(f) as f:
                         for line in f.readlines():
@@ -88,7 +88,7 @@ def test_file_existence(self):
                                 read_true = False
                                 i = 0
                             elif i > 4:
-                                self.assertTrue(False, "In file " + str(os.path.join('dojo', 'tools', parser_dir.name, file.name)) + " the test is failing because you don't have utf-8 after .read()")
+                                self.assertTrue(False, "In file " + str(os.path.join("dojo", "tools", parser_dir.name, file.name)) + " the test is failing because you don't have utf-8 after .read()")
                                 i = 0
                                 read_true = False
                             else:
@@ -98,12 +98,12 @@ def test_file_existence(self):
                 i = 0

     def test_parser_existence(self):
-        for docs in os.scandir(os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', 'file')):
+        for docs in os.scandir(os.path.join(basedir, "docs", "content", "en", "integrations", "parsers", "file")):
             if docs.name not in [
-                '_index.md', 'codeql.md', 'edgescan.md',
+                "_index.md", "codeql.md", "edgescan.md",
             ]:
-                with self.subTest(parser=docs.name.split('.md')[0], category='parser'):
-                    parser = os.path.join(basedir, 'dojo', 'tools', f"{docs.name.split('.md')[0]}", "parser.py")
+                with self.subTest(parser=docs.name.split(".md")[0], category="parser"):
+                    parser = os.path.join(basedir, "dojo", "tools", f"{docs.name.split('.md')[0]}", "parser.py")
                     self.assertTrue(
                         os.path.isfile(parser),
                         f"Parser '{parser}' is missing or using different name",
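test_file_existence above enforces a naming convention between a parser directory and its documentation, unit test, and sample-scan locations. Restated as a standalone sketch (the helper name is invented for illustration; the paths mirror the repository layout the test checks):

    import os

    def expected_artifacts(parser_dir_name):
        # "api_foo" parsers are documented under parsers/api/foo.md,
        # everything else under parsers/file/<name>.md
        if parser_dir_name.startswith("api_"):
            doc_name, category = parser_dir_name[4:], "api"
        else:
            doc_name, category = parser_dir_name, "file"
        return {
            "doc": os.path.join("docs", "content", "en", "integrations", "parsers", category, f"{doc_name}.md"),
            "parser_test": os.path.join("unittests", "tools", f"test_{parser_dir_name}_parser.py"),
            "scan_samples": os.path.join("unittests", "scans", parser_dir_name),
        }

    # expected_artifacts("api_vulners")["doc"]
    #   -> docs/content/en/integrations/parsers/api/vulners.md
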
diff --git a/unittests/test_remote_user.py b/unittests/test_remote_user.py
index 02dd871169..a1d3706c6a 100644
--- a/unittests/test_remote_user.py
+++ b/unittests/test_remote_user.py
@@ -14,17 +14,17 @@ class TestRemoteUser(DojoTestCase):

     def setUp(self):
         self.user, _ = User.objects.get_or_create(
-            username='test_remote_user',
-            first_name='original_first',
-            last_name='original_last',
-            email='original@mail.com',
+            username="test_remote_user",
+            first_name="original_first",
+            last_name="original_last",
+            email="original@mail.com",
         )
         self.group1, _ = Dojo_Group.objects.get_or_create(name="group1", social_provider=Dojo_Group.REMOTE)
         self.group2, _ = Dojo_Group.objects.get_or_create(name="group2", social_provider=Dojo_Group.REMOTE)

     @override_settings(AUTH_REMOTEUSER_ENABLED=False)
     def test_disabled(self):
-        resp = self.client1.get('/profile')
+        resp = self.client1.get("/profile")
         self.assertEqual(resp.status_code, 302)

@@ -32,7 +32,7 @@ def test_disabled(self):
         AUTH_REMOTEUSER_USERNAME_HEADER="HTTP_REMOTE_USER",
     )
     def test_basic(self):
-        resp = self.client1.get('/profile',
+        resp = self.client1.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                 },
@@ -47,7 +47,7 @@ def test_basic(self):
         AUTH_REMOTEUSER_EMAIL_HEADER="HTTP_REMOTE_EMAIL",
     )
     def test_update_user(self):
-        resp = self.client1.get('/profile',
+        resp = self.client1.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Firstname": "new_first",
@@ -68,7 +68,7 @@ def test_update_user(self):
         AUTH_REMOTEUSER_GROUPS_CLEANUP=True,
     )
     def test_update_groups_cleanup(self):
-        resp = self.client1.get('/profile',
+        resp = self.client1.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Groups": self.group1.name,
@@ -79,7 +79,7 @@ def test_update_groups_cleanup(self):
         self.assertEqual(dgms.count(), 1)
         self.assertEqual(dgms.first().group.name, self.group1.name)

-        resp = self.client2.get('/profile',
+        resp = self.client2.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Groups": self.group2.name,
@@ -97,7 +97,7 @@ def test_update_groups_cleanup(self):
         AUTH_REMOTEUSER_GROUPS_CLEANUP=True,
     )
     def test_update_multiple_groups_cleanup(self):
-        resp = self.client1.get('/profile',
+        resp = self.client1.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Groups": f"{self.group1.name},{self.group2.name}",
@@ -114,7 +114,7 @@ def test_update_multiple_groups_cleanup(self):
         AUTH_REMOTEUSER_GROUPS_CLEANUP=False,
     )
     def test_update_groups_no_cleanup(self):
-        resp = self.client1.get('/profile',
+        resp = self.client1.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Groups": self.group1.name,
@@ -122,7 +122,7 @@ def test_update_groups_no_cleanup(self):
         )
         self.assertEqual(resp.status_code, 200)

-        resp = self.client2.get('/profile',
+        resp = self.client2.get("/profile",
                                 headers={
                                     "Remote-User": self.user.username,
                                     "Remote-Groups": self.group2.name,
@@ -135,11 +135,11 @@ def test_update_groups_no_cleanup(self):
     @override_settings(
         AUTH_REMOTEUSER_ENABLED=True,
         AUTH_REMOTEUSER_USERNAME_HEADER="HTTP_REMOTE_USER",
-        AUTH_REMOTEUSER_TRUSTED_PROXY=IPSet(['192.168.0.0/24', '192.168.2.0/24']),
+        AUTH_REMOTEUSER_TRUSTED_PROXY=IPSet(["192.168.0.0/24", "192.168.2.0/24"]),
     )
     def test_trusted_proxy(self):
-        resp = self.client1.get('/profile',
-                                REMOTE_ADDR='192.168.0.42',
+        resp = self.client1.get("/profile",
+                                REMOTE_ADDR="192.168.0.42",
                                 headers={
                                     "Remote-User": self.user.username,
                                 },
@@ -149,18 +149,18 @@ def test_trusted_proxy(self):
     @override_settings(
         AUTH_REMOTEUSER_ENABLED=True,
         AUTH_REMOTEUSER_USERNAME_HEADER="HTTP_REMOTE_USER",
-        AUTH_REMOTEUSER_TRUSTED_PROXY=IPSet(['192.168.0.0/24', '192.168.2.0/24']),
+        AUTH_REMOTEUSER_TRUSTED_PROXY=IPSet(["192.168.0.0/24", "192.168.2.0/24"]),
     )
     def test_untrusted_proxy(self):
-        with self.assertLogs('dojo.remote_user', level='DEBUG') as cm:
-            resp = self.client1.get('/profile',
-                                    REMOTE_ADDR='192.168.1.42',
+        with self.assertLogs("dojo.remote_user", level="DEBUG") as cm:
+            resp = self.client1.get("/profile",
+                                    REMOTE_ADDR="192.168.1.42",
                                     headers={
                                         "Remote-User": self.user.username,
                                     },
             )
         self.assertEqual(resp.status_code, 302)
-        self.assertIn('Requested came from untrusted proxy', cm.output[0])
+        self.assertIn("Requested came from untrusted proxy", cm.output[0])

     @override_settings(
         AUTH_REMOTEUSER_ENABLED=True,
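test_trusted_proxy and test_untrusted_proxy above exercise the gate that only honors the Remote-User header when the connection comes from a configured proxy range. The core check reduces to an IPSet membership test; a sketch using the same netaddr types the tests use (the function name is made up, the real logic lives in dojo/remote_user.py and may differ in detail):

    from netaddr import IPAddress, IPSet

    TRUSTED_PROXIES = IPSet(["192.168.0.0/24", "192.168.2.0/24"])

    def header_may_be_trusted(remote_addr):
        # Remote-User style headers are only meaningful when set by our own
        # reverse proxy; any other source address must be rejected.
        return IPAddress(remote_addr) in TRUSTED_PROXIES

    assert header_may_be_trusted("192.168.0.42")      # inside a trusted range
    assert not header_may_be_trusted("192.168.1.42")  # 192.168.1.0/24 is not listed
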
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index 242c95d223..160d18896b 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -9,6 +9,7 @@
 from unittest.mock import ANY, MagicMock, call, patch
 from django.contrib.auth.models import Permission
+from django.test import tag as test_tag
 from django.urls import reverse
 from drf_spectacular.drainage import GENERATOR_STATS
 from drf_spectacular.settings import spectacular_settings
@@ -168,7 +169,7 @@ def skipIfNotSubclass(baseclass):
     def decorate(f):
         def wrapper(self, *args, **kwargs):
             if not issubclass(self.viewset, baseclass):
-                self.skipTest(f'This view does not inherit from {baseclass}')
+                self.skipTest(f"This view does not inherit from {baseclass}")
             else:
                 f(self, *args, **kwargs)
         return wrapper
@@ -196,7 +197,7 @@ def _check_or_fail(self, condition, message):
             # print(message)

     def _get_prefix(self):
-        return '#'.join(self._prefix)
+        return "#".join(self._prefix)

     def _push_prefix(self, prefix):
         self._prefix += [prefix]
@@ -205,12 +206,12 @@ def _pop_prefix(self):
         self._prefix = self._prefix if len(self._prefix) == 0 else self._prefix[:-1]

     def _resolve_if_ref(self, schema):
-        if '$ref' not in schema:
+        if "$ref" not in schema:
             return schema

         ref_name = schema["$ref"]
         ref_name = ref_name[ref_name.rfind("/") + 1:]
-        return self._components['schemas'][ref_name]
+        return self._components["schemas"][ref_name]

     def _check_has_required_fields(self, required_fields, obj):
         # if not required_fields:
@@ -219,13 +220,13 @@ def _check_has_required_fields(self, required_fields, obj):
         for required_field in required_fields:
             # passwords are writeOnly, but this is not supported by Swagger / OpenAPIv2
             # TODO check this for OpenAPI3
-            if required_field != 'password':
+            if required_field != "password":
                 # print('checking field: ', required_field)
                 field = f"{self._get_prefix()}#{required_field}"
                 self._check_or_fail(obj is not None and required_field in obj, f"{field} is required but was not returned")

     def _check_type(self, schema, obj):
-        if 'type' not in schema:
+        if "type" not in schema:
             # TODO implement OneOf / AllOff (enums)
             # Engagement
             #         "status": {
@@ -287,7 +288,7 @@ def _with_prefix(self, prefix, callable, *args):
     def check(self, schema, obj):
         def _check(schema, obj):
             # Convert sets to lists to streamline checks
-            if 'type' in schema and schema["type"] is TYPE_ARRAY and isinstance(obj, set):
+            if "type" in schema and schema["type"] is TYPE_ARRAY and isinstance(obj, set):
                 obj = list(obj)

             schema = self._resolve_if_ref(schema)
@@ -311,7 +312,7 @@ def _check(schema, obj):
                 for child_name in obj.keys():
                     # TODO prefetch mixins not picked up by spectcular?
-                    if child_name not in ['prefetch']:
+                    if child_name not in ["prefetch"]:
                         if not properties or child_name not in properties.keys():
                             self._has_failed = True
                             self._register_error(f'unexpected property "{child_name}" found')
@@ -322,7 +323,7 @@ def _check(schema, obj):
                     self._with_prefix(f"additionalProp<{name}>", _check, additional_properties, obj_child)

             # TODO implement support for enum / OneOff / AllOff
-            if 'type' in schema and schema["type"] is TYPE_ARRAY:
+            if "type" in schema and schema["type"] is TYPE_ARRAY:
                 items_schema = schema["items"]
                 for index in range(len(obj)):
                     self._with_prefix(f"item{index}", _check, items_schema, obj[index])
@@ -331,7 +332,7 @@ def _check(schema, obj):
         self._errors = []
         self._prefix = []
         _check(schema, obj)
-        assert not self._has_failed, "\n" + '\n'.join(self._errors) + "\nFailed with " + str(len(self._errors)) + " errors"
+        assert not self._has_failed, "\n" + "\n".join(self._errors) + "\nFailed with " + str(len(self._errors)) + " errors"
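The SchemaChecker walks API responses against the OpenAPI 3 document, so it has to dereference `$ref` pointers such as `#/components/schemas/Engagement` before comparing types. A standalone sketch of that dereferencing step, mirroring `_resolve_if_ref` above but outside the class for clarity:

    def resolve_if_ref(components, schema):
        # "$ref": "#/components/schemas/Engagement" -> the Engagement schema dict
        if "$ref" not in schema:
            return schema
        ref_name = schema["$ref"]
        ref_name = ref_name[ref_name.rfind("/") + 1:]
        return components["schemas"][ref_name]

    components = {"schemas": {"Engagement": {"type": "object"}}}
    assert resolve_if_ref(components, {"$ref": "#/components/schemas/Engagement"}) == {"type": "object"}
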
 class TestType(Enum):
@@ -346,30 +347,30 @@ def __init__(self, *args, **kwargs):
             DojoAPITestCase.__init__(self, *args, **kwargs)

         def setUp(self):
-            testuser = User.objects.get(username='admin')
+            testuser = User.objects.get(username="admin")
             token = Token.objects.get(user=testuser)
             self.client = APIClient()
-            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
-            self.url = reverse(self.viewname + '-list')
+            self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
+            self.url = reverse(self.viewname + "-list")
             self.schema = open_api3_json_schema

         def setUp_not_authorized(self):
             testuser = User.objects.get(id=3)
             token = Token.objects.get(user=testuser)
             self.client = APIClient()
-            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+            self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

         def setUp_global_reader(self):
             testuser = User.objects.get(id=5)
             token = Token.objects.get(user=testuser)
             self.client = APIClient()
-            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+            self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

         def setUp_global_owner(self):
             testuser = User.objects.get(id=6)
             token = Token.objects.get(user=testuser)
             self.client = APIClient()
-            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+            self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

         def check_schema(self, schema, obj):
             schema_checker = SchemaChecker(self.schema["components"])
@@ -377,36 +378,36 @@ def check_schema(self, schema, obj):
             schema_checker.check(self.schema, obj)

         def check_schema_response(self, method, status_code, response, detail=False):
-            detail_path = '{id}/' if detail else ''
+            detail_path = "{id}/" if detail else ""
             endpoints_schema = self.schema["paths"][format_url(f"/{self.endpoint_path}/{detail_path}")]
-            schema = endpoints_schema[method]['responses'][status_code]['content']['application/json']['schema']
+            schema = endpoints_schema[method]["responses"][status_code]["content"]["application/json"]["schema"]
             obj = response.data
             self.check_schema(schema, obj)

     class RetrieveRequestTest(RESTEndpointTest):
         @skipIfNotSubclass(RetrieveModelMixin)
         def test_detail(self):
-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             response = self.client.get(relative_url)
             self.assertEqual(200, response.status_code, response.content[:1000])
             # sensitive data must be set to write_only so those are not returned in the response
             # https://github.com/DefectDojo/django-DefectDojo/security/advisories/GHSA-8q8j-7wc4-vjg5
-            self.assertNotIn('password', response.data)
-            self.assertNotIn('ssh', response.data)
-            self.assertNotIn('api_key', response.data)
+            self.assertNotIn("password", response.data)
+            self.assertNotIn("ssh", response.data)
+            self.assertNotIn("api_key", response.data)

-            self.check_schema_response('get', '200', response, detail=True)
+            self.check_schema_response("get", "200", response, detail=True)

         @skipIfNotSubclass(PrefetchRetrieveMixin)
         def test_detail_prefetch(self):
             # print("=======================================================")
             prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             response = self.client.get(relative_url, data={
-                "prefetch": ','.join(prefetchable_fields),
+                "prefetch": ",".join(prefetchable_fields),
             })

             self.assertEqual(200, response.status_code)
@@ -429,24 +430,24 @@ def test_detail_prefetch(self):
         @skipIfNotSubclass(RetrieveModelMixin)
         def test_detail_object_not_authorized(self):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             self.setUp_not_authorized()

             current_objects = self.endpoint_model.objects.all()
-            relative_url = self.url + f'{current_objects[0].id}/'
+            relative_url = self.url + f"{current_objects[0].id}/"
             response = self.client.get(relative_url)
             self.assertEqual(404, response.status_code, response.content[:1000])

         @skipIfNotSubclass(RetrieveModelMixin)
         def test_detail_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

             current_objects = self.endpoint_model.objects.all()
-            relative_url = self.url + f'{current_objects[0].id}/'
+            relative_url = self.url + f"{current_objects[0].id}/"
             response = self.client.get(relative_url)
             self.assertEqual(403, response.status_code, response.content[:1000])

@@ -457,18 +458,18 @@ def test_list(self):
             # validator = ResponseValidator(spec)

             check_for_tags = False
-            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
+            if hasattr(self.endpoint_model, "tags") and self.payload and self.payload.get("tags", None):
                 # create a new instance first to make sure there's at least 1 instance with tags set by payload to trigger tag handling code
-                logger.debug('creating model with endpoints: %s', self.payload)
+                logger.debug("creating model with endpoints: %s", self.payload)
                 response = self.client.post(self.url, self.payload)
                 self.assertEqual(201, response.status_code, response.content[:1000])

                 # print('response:', response.content[:1000])
-                check_for_id = response.data['id']
+                check_for_id = response.data["id"]
                 # print('id: ', check_for_id)
-                check_for_tags = self.payload.get('tags', None)
+                check_for_tags = self.payload.get("tags", None)

-            response = self.client.get(self.url, format='json')
+            response = self.client.get(self.url, format="json")
             # print('response')
             # print(vars(response))

@@ -477,26 +478,26 @@ def test_list(self):
             # tags must be present in last entry, the one we created
             if check_for_tags:
                 tags_found = False
-                for result in response.data['results']:
-                    if result['id'] == check_for_id:
+                for result in response.data["results"]:
+                    if result["id"] == check_for_id:
                         # logger.debug('result.tags: %s', result.get('tags', ''))
-                        self.assertEqual(len(check_for_tags), len(result.get('tags', None)))
+                        self.assertEqual(len(check_for_tags), len(result.get("tags", None)))
                         for tag in check_for_tags:
                             # logger.debug('looking for tag %s in tag list %s', tag, result['tags'])
-                            self.assertIn(tag, result['tags'])
+                            self.assertIn(tag, result["tags"])
                         tags_found = True
                 self.assertTrue(tags_found)

             self.assertEqual(200, response.status_code, response.content[:1000])

-            self.check_schema_response('get', '200', response)
+            self.check_schema_response("get", "200", response)

         @skipIfNotSubclass(PrefetchListMixin)
         def test_list_prefetch(self):
             prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]

             response = self.client.get(self.url, data={
-                "prefetch": ','.join(prefetchable_fields),
+                "prefetch": ",".join(prefetchable_fields),
             })

             self.assertEqual(200, response.status_code)
@@ -515,7 +516,7 @@ def test_list_prefetch(self):

                     for value in values:
                         if not isinstance(value, int):
-                            value = value['id']
+                            value = value["id"]
                         self.assertIn(value, objs["prefetch"][field])

             # TODO add schema check

@@ -523,22 +524,22 @@ def test_list_prefetch(self):
         @skipIfNotSubclass(ListModelMixin)
         def test_list_object_not_authorized(self):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             self.setUp_not_authorized()

-            response = self.client.get(self.url, format='json')
-            self.assertFalse(response.data['results'])
+            response = self.client.get(self.url, format="json")
+            self.assertFalse(response.data["results"])
             self.assertEqual(200, response.status_code, response.content[:1000])

         @skipIfNotSubclass(ListModelMixin)
         def test_list_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

-            response = self.client.get(self.url, format='json')
+            response = self.client.get(self.url, format="json")
             self.assertEqual(403, response.status_code, response.content[:1000])
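The prefetch tests above rely on the response carrying a top-level `prefetch` dictionary, keyed first by field name and then by the related object's id. Roughly, for a request like `GET /api/v2/products/?prefetch=prod_type`, the shape the assertions walk over looks like this (field names and values here are illustrative only):

    # Hypothetical response body, shown only to illustrate what
    # test_list_prefetch / test_detail_prefetch iterate over.
    response_data = {
        "results": [
            {"id": 1, "name": "prod name", "prod_type": 2},
        ],
        "prefetch": {
            "prod_type": {
                2: {"id": 2, "name": "notif prod type"},
            },
        },
    }
    assert 2 in response_data["prefetch"]["prod_type"]
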
     class CreateRequestTest(RESTEndpointTest):
@@ -546,38 +547,38 @@ class CreateRequestTest(RESTEndpointTest):
         def test_create(self):
             length = self.endpoint_model.objects.count()
             response = self.client.post(self.url, self.payload)
-            logger.debug('test_create_response:')
+            logger.debug("test_create_response:")
             logger.debug(response)
             logger.debug(response.data)
             self.assertEqual(201, response.status_code, response.content[:1000])
             self.assertEqual(self.endpoint_model.objects.count(), length + 1)

-            if hasattr(self.endpoint_model, 'tags') and self.payload and self.payload.get('tags', None):
-                self.assertEqual(len(self.payload.get('tags')), len(response.data.get('tags', None)))
-                for tag in self.payload.get('tags'):
+            if hasattr(self.endpoint_model, "tags") and self.payload and self.payload.get("tags", None):
+                self.assertEqual(len(self.payload.get("tags")), len(response.data.get("tags", None)))
+                for tag in self.payload.get("tags"):
                     # logger.debug('looking for tag %s in tag list %s', tag, response.data['tags'])
-                    self.assertIn(tag, response.data['tags'])
+                    self.assertIn(tag, response.data["tags"])

-            self.check_schema_response('post', '201', response)
+            self.check_schema_response("post", "201", response)

         @skipIfNotSubclass(CreateModelMixin)
-        @patch('dojo.api_v2.permissions.user_has_permission')
+        @patch("dojo.api_v2.permissions.user_has_permission")
         def test_create_object_not_authorized(self, mock):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             mock.return_value = False

             response = self.client.post(self.url, self.payload)
             self.assertEqual(403, response.status_code, response.content[:1000])
-            mock.assert_called_with(User.objects.get(username='admin'),
+            mock.assert_called_with(User.objects.get(username="admin"),
                                     ANY,
                                     self.permission_create)

         @skipIfNotSubclass(CreateModelMixin)
         def test_create_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

@@ -587,16 +588,16 @@ def test_create_configuration_not_authorized(self):
     class UpdateRequestTest(RESTEndpointTest):
         @skipIfNotSubclass(UpdateModelMixin)
         def test_update(self):
-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             response = self.client.patch(relative_url, self.update_fields)
             self.assertEqual(200, response.status_code, response.content[:1000])

-            self.check_schema_response('patch', '200', response, detail=True)
+            self.check_schema_response("patch", "200", response, detail=True)

             for key, value in self.update_fields.items():
                 # some exception as push_to_jira has been implemented strangely in the update methods in the api
-                if key not in ['push_to_jira', 'ssh', 'password', 'api_key']:
+                if key not in ["push_to_jira", "ssh", "password", "api_key"]:
                     # Convert data to sets to avoid problems with lists
                     if isinstance(value, list):
                         value = set(value)
@@ -606,62 +607,62 @@ def test_update(self):
                         response_data = response.data[key]
                     self.assertEqual(value, response_data)

-            self.assertNotIn('push_to_jira', response.data)
-            self.assertNotIn('ssh', response.data)
-            self.assertNotIn('password', response.data)
-            self.assertNotIn('api_key', response.data)
+            self.assertNotIn("push_to_jira", response.data)
+            self.assertNotIn("ssh", response.data)
+            self.assertNotIn("password", response.data)
+            self.assertNotIn("api_key", response.data)

-            if hasattr(self.endpoint_model, 'tags') and self.update_fields and self.update_fields.get('tags', None):
-                self.assertEqual(len(self.update_fields.get('tags')), len(response.data.get('tags', None)))
-                for tag in self.update_fields.get('tags'):
-                    logger.debug('looking for tag %s in tag list %s', tag, response.data['tags'])
-                    self.assertIn(tag, response.data['tags'])
+            if hasattr(self.endpoint_model, "tags") and self.update_fields and self.update_fields.get("tags", None):
+                self.assertEqual(len(self.update_fields.get("tags")), len(response.data.get("tags", None)))
+                for tag in self.update_fields.get("tags"):
+                    logger.debug("looking for tag %s in tag list %s", tag, response.data["tags"])
+                    self.assertIn(tag, response.data["tags"])

             response = self.client.put(
                 relative_url, self.payload)
             self.assertEqual(200, response.status_code, response.content[:1000])

-            self.check_schema_response('put', '200', response, detail=True)
+            self.check_schema_response("put", "200", response, detail=True)

         @skipIfNotSubclass(UpdateModelMixin)
-        @patch('dojo.api_v2.permissions.user_has_permission')
+        @patch("dojo.api_v2.permissions.user_has_permission")
         def test_update_object_not_authorized(self, mock):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             mock.return_value = False

-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])

             if self.endpoint_model == Endpoint_Status:
-                permission_object = Endpoint.objects.get(id=current_objects['results'][0]['endpoint'])
+                permission_object = Endpoint.objects.get(id=current_objects["results"][0]["endpoint"])
             elif self.endpoint_model == JIRA_Issue:
-                permission_object = Finding.objects.get(id=current_objects['results'][0]['finding'])
+                permission_object = Finding.objects.get(id=current_objects["results"][0]["finding"])
             else:
-                permission_object = self.permission_check_class.objects.get(id=current_objects['results'][0]['id'])
+                permission_object = self.permission_check_class.objects.get(id=current_objects["results"][0]["id"])

             response = self.client.patch(relative_url, self.update_fields)
             self.assertEqual(403, response.status_code, response.content[:1000])
-            mock.assert_called_with(User.objects.get(username='admin'),
+            mock.assert_called_with(User.objects.get(username="admin"),
                                     permission_object,
                                     self.permission_update)

             response = self.client.put(relative_url, self.payload)
             self.assertEqual(403, response.status_code, response.content[:1000])
-            mock.assert_called_with(User.objects.get(username='admin'),
+            mock.assert_called_with(User.objects.get(username="admin"),
                                     permission_object,
                                     self.permission_update)

         @skipIfNotSubclass(UpdateModelMixin)
         def test_update_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

             current_objects = self.endpoint_model.objects.all()
-            relative_url = self.url + f'{current_objects[0].id}/'
+            relative_url = self.url + f"{current_objects[0].id}/"
             response = self.client.patch(relative_url, self.update_fields)
             self.assertEqual(403, response.status_code, response.content[:1000])

@@ -675,7 +676,7 @@ def test_delete(self):
             if delete_id := getattr(self, "delete_id", None):
                 relative_url = f"{self.url}{delete_id}/"
             else:
-                current_objects = self.client.get(self.url, format='json').data
+                current_objects = self.client.get(self.url, format="json").data
                 relative_url = f"{self.url}{current_objects['results'][-1]['id']}/"
             response = self.client.delete(relative_url)
             self.assertEqual(204, response.status_code, response.content[:1000])

@@ -685,69 +686,68 @@ def test_delete_preview(self):
             if delete_id := getattr(self, "delete_id", None):
                 relative_url = f"{self.url}{delete_id}/delete_preview/"
             else:
-                current_objects = self.client.get(self.url, format='json').data
+                current_objects = self.client.get(self.url, format="json").data
                 relative_url = f"{self.url}{current_objects['results'][0]['id']}/delete_preview/"
             response = self.client.get(relative_url)
             # print('delete_preview response.data')

-            self.assertEqual(200, response.status_code, response.content[:1000])
-            self.check_schema_response('get', '200', response, detail=True)
+            self.check_schema_response("get", "200", response, detail=True)

-            self.assertNotIn('push_to_jira', response.data)
-            self.assertNotIn('password', response.data)
-            self.assertNotIn('ssh', response.data)
-            self.assertNotIn('api_key', response.data)
+            self.assertNotIn("push_to_jira", response.data)
+            self.assertNotIn("password", response.data)
+            self.assertNotIn("ssh", response.data)
+            self.assertNotIn("api_key", response.data)

-            self.assertIsInstance(response.data['results'], list)
-            self.assertGreater(len(response.data['results']), 0, "Length: {}".format(len(response.data['results'])))
+            self.assertIsInstance(response.data["results"], list)
+            self.assertGreater(len(response.data["results"]), 0, "Length: {}".format(len(response.data["results"])))

-            for obj in response.data['results']:
+            for obj in response.data["results"]:
                 self.assertIsInstance(obj, dict)
                 self.assertEqual(len(obj), 3)
-                self.assertIsInstance(obj['model'], str)
-                if obj['id']:  # It needs to be None or int
-                    self.assertIsInstance(obj['id'], int)
-                self.assertIsInstance(obj['name'], str)
+                self.assertIsInstance(obj["model"], str)
+                if obj["id"]:  # It needs to be None or int
+                    self.assertIsInstance(obj["id"], int)
+                self.assertIsInstance(obj["name"], str)

-            self.assertEqual(self.deleted_objects, len(response.data['results']), response.content)
+            self.assertEqual(self.deleted_objects, len(response.data["results"]), response.content)
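test_delete_preview pins down the shape of the delete-preview payload: a `results` list whose entries each carry exactly a `model`, an `id` (int or None), and a `name`, with the total count matching `self.deleted_objects`. An illustrative instance of a body that would satisfy those assertions (actual models and names depend on the endpoint under test):

    preview = {
        "results": [
            {"model": "endpoint", "id": 6, "name": "host2"},
            {"model": "endpoint_status", "id": None, "name": "host2 on finding 3"},
        ],
    }
    assert all(len(obj) == 3 for obj in preview["results"])
    assert all(isinstance(obj["model"], str) for obj in preview["results"])
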
         @skipIfNotSubclass(DestroyModelMixin)
-        @patch('dojo.api_v2.permissions.user_has_permission')
+        @patch("dojo.api_v2.permissions.user_has_permission")
         def test_delete_object_not_authorized(self, mock):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             mock.return_value = False

-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             self.client.delete(relative_url)

             if self.endpoint_model == Endpoint_Status:
-                permission_object = Endpoint.objects.get(id=current_objects['results'][0]['endpoint'])
+                permission_object = Endpoint.objects.get(id=current_objects["results"][0]["endpoint"])
             elif self.endpoint_model == JIRA_Issue:
-                permission_object = Finding.objects.get(id=current_objects['results'][0]['finding'])
+                permission_object = Finding.objects.get(id=current_objects["results"][0]["finding"])
             else:
-                permission_object = self.permission_check_class.objects.get(id=current_objects['results'][0]['id'])
+                permission_object = self.permission_check_class.objects.get(id=current_objects["results"][0]["id"])

-            mock.assert_called_with(User.objects.get(username='admin'),
+            mock.assert_called_with(User.objects.get(username="admin"),
                                     permission_object,
                                     self.permission_delete)

         @skipIfNotSubclass(DestroyModelMixin)
         def test_delete_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

             if delete_id := getattr(self, "delete_id", None):
-                relative_url = self.url + f'{delete_id}/'
+                relative_url = self.url + f"{delete_id}/"
             else:
                 current_objects = self.endpoint_model.objects.all()
-                relative_url = self.url + f'{current_objects[0].id}/'
+                relative_url = self.url + f"{current_objects[0].id}/"
             response = self.client.delete(relative_url)
             self.assertEqual(403, response.status_code, response.content[:1000])

@@ -762,77 +762,77 @@ class BaseClassTest(
     class MemberEndpointTest(BaseClassTest):
         def test_update(self):
-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             response = self.client.patch(relative_url, self.update_fields)
             self.assertEqual(405, response.status_code, response.content[:1000])

             response = self.client.put(
                 relative_url, self.payload)
             self.assertEqual(200, response.status_code, response.content[:1000])
-            self.check_schema_response('put', '200', response, detail=True)
+            self.check_schema_response("put", "200", response, detail=True)

         @skipIfNotSubclass(UpdateModelMixin)
-        @patch('dojo.api_v2.permissions.user_has_permission')
+        @patch("dojo.api_v2.permissions.user_has_permission")
         def test_update_object_not_authorized(self, mock):
             if not self.test_type == TestType.OBJECT_PERMISSIONS:
-                self.skipTest('Authorization is not object based')
+                self.skipTest("Authorization is not object based")

             mock.return_value = False

-            current_objects = self.client.get(self.url, format='json').data
-            relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+            current_objects = self.client.get(self.url, format="json").data
+            relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
             response = self.client.put(relative_url, self.payload)
             self.assertEqual(403, response.status_code, response.content[:1000])
-            mock.assert_called_with(User.objects.get(username='admin'),
-                                    self.permission_check_class.objects.get(id=current_objects['results'][0]['id']),
+            mock.assert_called_with(User.objects.get(username="admin"),
+                                    self.permission_check_class.objects.get(id=current_objects["results"][0]["id"]),
                                     self.permission_update)

     class AuthenticatedViewTest(BaseClassTest):
         @skipIfNotSubclass(ListModelMixin)
         def test_list_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

-            response = self.client.get(self.url, format='json')
+            response = self.client.get(self.url, format="json")
             self.assertEqual(200, response.status_code, response.content[:1000])

         @skipIfNotSubclass(RetrieveModelMixin)
         def test_detail_configuration_not_authorized(self):
             if not self.test_type == TestType.CONFIGURATION_PERMISSIONS:
-                self.skipTest('Authorization is not configuration based')
+                self.skipTest("Authorization is not configuration based")

             self.setUp_not_authorized()

             current_objects = self.endpoint_model.objects.all()
-            relative_url = self.url + f'{current_objects[0].id}/'
+            relative_url = self.url + f"{current_objects[0].id}/"
             response = self.client.get(relative_url)
             self.assertEqual(200, response.status_code, response.content[:1000])


 class AppAnalysisTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = App_Analysis
-        self.endpoint_path = 'technologies'
-        self.viewname = 'app_analysis'
+        self.endpoint_path = "technologies"
+        self.viewname = "app_analysis"
         self.viewset = AppAnalysisViewSet
         self.payload = {
-            'product': 1,
-            'name': 'Tomcat',
-            'user': 1,
-            'confidence': 100,
-            'version': '8.5.1',
-            'icon': '',
-            'website': '',
-            'website_found': '',
-            'created': '2018-08-16T16:58:23.908Z',
+            "product": 1,
+            "name": "Tomcat",
+            "user": 1,
+            "confidence": 100,
+            "version": "8.5.1",
+            "icon": "",
+            "website": "",
+            "website_found": "",
+            "created": "2018-08-16T16:58:23.908Z",
         }
-        self.update_fields = {'version': '9.0'}
+        self.update_fields = {"version": "9.0"}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Product
         self.permission_create = Permissions.Technology_Add
@@ -843,23 +843,23 @@ def __init__(self, *args, **kwargs):

 class EndpointStatusTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Endpoint_Status
-        self.endpoint_path = 'endpoint_status'
-        self.viewname = 'endpoint_status'
+        self.endpoint_path = "endpoint_status"
+        self.viewname = "endpoint_status"
         self.viewset = EndpointStatusViewSet
         self.payload = {
-            'endpoint': 2,
-            'finding': 3,
-            'mitigated': False,
-            'false_positive': False,
-            'risk_accepted': False,
-            'out_of_scope': False,
+            "endpoint": 2,
+            "finding": 3,
+            "mitigated": False,
+            "false_positive": False,
+            "risk_accepted": False,
+            "out_of_scope": False,
             "date": "2017-01-12",
         }
-        self.update_fields = {'mitigated': True}
+        self.update_fields = {"mitigated": True}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Endpoint
         self.permission_create = Permissions.Endpoint_Edit
@@ -870,118 +870,119 @@ def __init__(self, *args, **kwargs):

     def test_create_unsuccessful(self):
         unsucessful_payload = self.payload.copy()
-        unsucessful_payload['finding'] = 2
+        unsucessful_payload["finding"] = 2
         response = self.client.post(self.url, unsucessful_payload)
-        logger.debug('test_create_response:')
+        logger.debug("test_create_response:")
         logger.debug(response)
         logger.debug(response.data)
         self.assertEqual(400, response.status_code, response.content[:1000])
-        self.assertIn('This endpoint-finding relation already exists', response.content.decode("utf-8"))
+        self.assertIn("This endpoint-finding relation already exists", response.content.decode("utf-8"))

     def test_create_minimal(self):
         # This call should not fail even if there is not date defined
         minimal_payload = {
-            'endpoint': 1,
-            'finding': 3,
+            "endpoint": 1,
+            "finding": 3,
         }
         response = self.client.post(self.url, minimal_payload)
-        logger.debug('test_create_response:')
+        logger.debug("test_create_response:")
         logger.debug(response)
         logger.debug(response.data)
         self.assertEqual(201, response.status_code, response.content[:1000])

     def test_update_patch_unsuccessful(self):
         anoher_finding_payload = self.payload.copy()
-        anoher_finding_payload['finding'] = 3
+        anoher_finding_payload["finding"] = 3
         response = self.client.post(self.url, anoher_finding_payload)

-        current_objects = self.client.get(self.url, format='json').data
+        current_objects = self.client.get(self.url, format="json").data

-        object1 = current_objects['results'][0]
-        object2 = current_objects['results'][1]
+        object1 = current_objects["results"][0]
+        object2 = current_objects["results"][1]

         unsucessful_payload = {
-            'endpoint': object2['endpoint'],
-            'finding': object2['finding'],
+            "endpoint": object2["endpoint"],
+            "finding": object2["finding"],
         }

-        relative_url = self.url + '{}/'.format(object1['id'])
+        relative_url = self.url + "{}/".format(object1["id"])

         response = self.client.patch(relative_url, unsucessful_payload)
         self.assertEqual(400, response.status_code, response.content[:1000])
-        self.assertIn('This endpoint-finding relation already exists', response.content.decode("utf-8"))
+        self.assertIn("This endpoint-finding relation already exists", response.content.decode("utf-8"))

     def test_update_put_unsuccessful(self):
         anoher_finding_payload = self.payload.copy()
-        anoher_finding_payload['finding'] = 3
+        anoher_finding_payload["finding"] = 3
         response = self.client.post(self.url, anoher_finding_payload)

-        current_objects = self.client.get(self.url, format='json').data
+        current_objects = self.client.get(self.url, format="json").data

-        object1 = current_objects['results'][0]
-        object2 = current_objects['results'][1]
+        object1 = current_objects["results"][0]
+        object2 = current_objects["results"][1]

         unsucessful_payload = {
-            'endpoint': object2['endpoint'],
-            'finding': object2['finding'],
+            "endpoint": object2["endpoint"],
+            "finding": object2["finding"],
         }

-        relative_url = self.url + '{}/'.format(object1['id'])
+        relative_url = self.url + "{}/".format(object1["id"])

         response = self.client.put(relative_url, unsucessful_payload)
         self.assertEqual(400, response.status_code, response.content[:1000])
-        self.assertIn('This endpoint-finding relation already exists', response.content.decode("utf-8"))
+        self.assertIn("This endpoint-finding relation already exists", response.content.decode("utf-8"))


 class EndpointTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Endpoint
-        self.endpoint_path = 'endpoints'
-        self.viewname = 'endpoint'
+        self.endpoint_path = "endpoints"
+        self.viewname = "endpoint"
         self.viewset = EndPointViewSet
         self.payload = {
-            'protocol': 'http',
-            'host': '127.0.0.1',
-            'path': '/',
-            'query': 'test=true',
-            'fragment': 'test-1',
-            'product': 1,
+            "protocol": "http",
+            "host": "127.0.0.1",
+            "path": "/",
+            "query": "test=true",
+            "fragment": "test-1",
+            "product": 1,
             "tags": ["mytag", "yourtag"],
         }
-        self.update_fields = {'protocol': 'ftp', 'tags': ['one_new_tag']}
+        self.update_fields = {"protocol": "ftp", "tags": ["one_new_tag"]}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Endpoint
         self.permission_create = Permissions.Endpoint_Add
         self.permission_update = Permissions.Endpoint_Edit
         self.permission_delete = Permissions.Endpoint_Delete
         self.deleted_objects = 2
+        self.delete_id = 6
         BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)


 class EngagementTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Engagement
-        self.endpoint_path = 'engagements'
-        self.viewname = 'engagement'
+        self.endpoint_path = "engagements"
+        self.viewname = "engagement"
         self.viewset = EngagementViewSet
         self.payload = {
-            "engagement_type": 'Interactive',
+            "engagement_type": "Interactive",
             "report_type": 1,
             "name": "",
             "description": "",
             "version": "",
-            "target_start": '1937-01-01',
-            "target_end": '1937-01-01',
+            "target_start": "1937-01-01",
+            "target_end": "1937-01-01",
             "reason": "",
             "test_strategy": "",
             "product": "1",
             "tags": ["mytag"],
         }
-        self.update_fields = {'version': 'latest'}
+        self.update_fields = {"version": "latest"}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Engagement
         self.permission_create = Permissions.Engagement_Add
@@ -992,12 +993,12 @@ def __init__(self, *args, **kwargs):

 class RiskAcceptanceTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Risk_Acceptance
-        self.endpoint_path = 'risk_acceptance'
-        self.viewname = 'risk_acceptance'
+        self.endpoint_path = "risk_acceptance"
+        self.viewname = "risk_acceptance"
         self.viewset = RiskAcceptanceViewSet
         self.payload = {
             "id": 2,
@@ -1021,7 +1022,7 @@ def __init__(self, *args, **kwargs):
             ],
             "notes": [],
         }
-        self.update_fields = {'name': 'newName'}
+        self.update_fields = {"name": "newName"}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Risk_Acceptance
         self.permission_create = Permissions.Risk_Acceptance
@@ -1058,90 +1059,90 @@ def test_update_forbidden_engagement(self):
             ],
             "notes": [],
         }
-        current_objects = self.client.get(self.url, format='json').data
-        relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
+        current_objects = self.client.get(self.url, format="json").data
+        relative_url = self.url + "{}/".format(current_objects["results"][0]["id"])
         response = self.client.put(relative_url, self.payload)
         self.assertEqual(403, response.status_code, response.content[:1000])


 class FindingRequestResponseTest(DojoAPITestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def setUp(self):
-        testuser = User.objects.get(username='admin')
+        testuser = User.objects.get(username="admin")
         token = Token.objects.get(user=testuser)
         self.client = APIClient()
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

     def test_request_response_post(self):
         length = BurpRawRequestResponse.objects.count()
         payload = {
             "req_resp": [{"request": "POST", "response": "200"}],
         }
-        response = self.client.post('/api/v2/findings/7/request_response/', dumps(payload), content_type='application/json')
+        response = self.client.post("/api/v2/findings/7/request_response/", dumps(payload), content_type="application/json")
         self.assertEqual(200, response.status_code, response.content[:1000])
         self.assertEqual(BurpRawRequestResponse.objects.count(), length + 1)

     def test_request_response_get(self):
-        response = self.client.get('/api/v2/findings/7/request_response/', format='json')
+        response = self.client.get("/api/v2/findings/7/request_response/", format="json")
         # print('response.data:')
         # print(response.data)
         self.assertEqual(200, response.status_code, response.content[:1000])


 class FilesTest(DojoAPITestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def setUp(self):
-        testuser = User.objects.get(username='admin')
+        testuser = User.objects.get(username="admin")
         token = Token.objects.get(user=testuser)
         self.client = APIClient()
-        self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
+        self.client.credentials(HTTP_AUTHORIZATION=f"Token {token.key}")
         self.path = pathlib.Path(__file__).parent.absolute()
         # model: file_id
         self.url_levels = {
-            'findings/7': 0,
-            'tests/3': 0,
-            'engagements/1': 0,
+            "findings/7": 0,
+            "tests/3": 0,
+            "engagements/1": 0,
         }

     def test_request_response_post_and_download(self):
         # Test the creation
         for level in self.url_levels.keys():
             length = FileUpload.objects.count()
-            with open(f'{str(self.path)}/scans/acunetix/one_finding.xml') as testfile:
+            with open(f"{str(self.path)}/scans/acunetix/one_finding.xml") as testfile:
                 payload = {
                     "title": level,
                     "file": testfile,
                 }
-                response = self.client.post(f'/api/v2/{level}/files/', payload)
+                response = self.client.post(f"/api/v2/{level}/files/", payload)
                 self.assertEqual(201, response.status_code, response.data)
                 self.assertEqual(FileUpload.objects.count(), length + 1)
                 # Save the ID of the newly created file object
-                self.url_levels[level] = response.data.get('id')
+                self.url_levels[level] = response.data.get("id")

         # Test the download
-        with open(f'{str(self.path)}/scans/acunetix/one_finding.xml') as file:
+        with open(f"{str(self.path)}/scans/acunetix/one_finding.xml") as file:
             file_data = file.read()
         for level, file_id in self.url_levels.items():
-            response = self.client.get(f'/api/v2/{level}/files/download/{file_id}/')
+            response = self.client.get(f"/api/v2/{level}/files/download/{file_id}/")
             self.assertEqual(200, response.status_code)
-            downloaded_file = b''.join(response.streaming_content).decode().replace('\\n', '\n')
+            downloaded_file = b"".join(response.streaming_content).decode().replace("\\n", "\n")
             self.assertEqual(file_data, downloaded_file)

     def test_request_response_get(self):
         for level in self.url_levels.keys():
-            response = self.client.get(f'/api/v2/{level}/files/')
+            response = self.client.get(f"/api/v2/{level}/files/")
             self.assertEqual(200, response.status_code)


 class FindingsTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Finding
-        self.endpoint_path = 'findings'
-        self.viewname = 'finding'
+        self.endpoint_path = "findings"
+        self.viewname = "finding"
         self.viewset = FindingViewSet
         self.payload = {
             "review_requested_by": 2,
@@ -1174,15 +1175,16 @@ def __init__(self, *args, **kwargs):
             "dynamic_finding": False,
             "endpoints": [1, 2],
             "files": [],
-            "tags": ['tag1', 'tag_2'],
+            "tags": ["tag1", "tag_2"],
         }
-        self.update_fields = {'duplicate': False, 'active': True, "push_to_jira": "True", 'tags': ['finding_tag_new']}
+        self.update_fields = {"duplicate": False, "active": True, "push_to_jira": "True", "tags": ["finding_tag_new"]}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Finding
         self.permission_create = Permissions.Finding_Add
         self.permission_update = Permissions.Finding_Edit
         self.permission_delete = Permissions.Finding_Delete
         self.deleted_objects = 2
+        self.delete_id = 3
         BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

     def test_duplicate(self):
@@ -1213,7 +1215,7 @@ def test_duplicate(self):

     def test_filter_steps_to_reproduce(self):
         # Confirm initial data
-        result = self.client.get(self.url + '?steps_to_reproduce=lorem')
+        result = self.client.get(self.url + "?steps_to_reproduce=lorem")
         self.assertEqual(result.status_code, status.HTTP_200_OK, "Could not filter on steps_to_reproduce")
         result_json = result.json()
         assert result_json["count"] == 0
@@ -1249,12 +1251,12 @@ def test_severity_validation(self):

 class FindingMetadataTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Finding
-        self.endpoint_path = 'findings'
-        self.viewname = 'finding'
+        self.endpoint_path = "findings"
+        self.viewname = "finding"
         self.viewset = FindingViewSet
         self.payload = {}
         self.test_type = TestType.STANDARD
@@ -1263,16 +1265,15 @@ def __init__(self, *args, **kwargs):

     def setUp(self):
         super().setUp()
-        testuser = User.objects.get(username='admin')
         token = Token.objects.get(user=testuser)
         self.client = APIClient()
-
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) - self.url = reverse(self.viewname + '-list') - - self.current_findings = self.client.get(self.url, format='json').data["results"] - finding = Finding.objects.get(id=self.current_findings[0]['id']) - - self.base_url = f"{self.url}{self.current_findings[0]['id']}/metadata/" + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) + self.url = reverse(self.viewname + "-list") + self.finding_id = 3 + self.delete_id = self.finding_id + finding = Finding.objects.get(id=self.finding_id) + self.base_url = f"{self.url}{self.finding_id}/metadata/" metadata = DojoMeta(finding=finding, name="test_meta", value="20") metadata.save() @@ -1311,12 +1312,12 @@ def test_delete(self): class FindingTemplatesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Finding_Template - self.endpoint_path = 'finding_templates' - self.viewname = 'finding_template' + self.endpoint_path = "finding_templates" + self.viewname = "finding_template" self.viewset = FindingTemplatesViewSet self.payload = { "title": "Test template", @@ -1327,19 +1328,19 @@ def __init__(self, *args, **kwargs): "impact": "MEDIUM", "references": "", } - self.update_fields = {'references': 'some reference'} + self.update_fields = {"references": "some reference"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class JiraInstancesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = JIRA_Instance - self.endpoint_path = 'jira_instances' - self.viewname = 'jira_instance' + self.endpoint_path = "jira_instances" + self.viewname = "jira_instance" self.viewset = JiraInstanceViewSet self.payload = { "url": "http://www.example.com", @@ -1357,26 +1358,26 @@ def __init__(self, *args, **kwargs): "finding_text": "", "global_jira_sla_notification": False, } - self.update_fields = {'epic_name_id': 1} + self.update_fields = {"epic_name_id": 1} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class JiraIssuesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = JIRA_Issue - self.endpoint_path = 'jira_finding_mappings' - self.viewname = 'jira_issue' + self.endpoint_path = "jira_finding_mappings" + self.viewname = "jira_issue" self.viewset = JiraIssuesViewSet self.payload = { "jira_id": "JIRA 1", "jira_key": "SOME KEY", "finding": 2, } - self.update_fields = {'jira_change': '2022-01-02T13:47:38.021481Z'} + self.update_fields = {"jira_change": "2022-01-02T13:47:38.021481Z"} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Finding self.permission_create = Permissions.Finding_Edit @@ -1387,12 +1388,12 @@ def __init__(self, *args, **kwargs): class JiraProjectTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = JIRA_Project - self.endpoint_path = 'jira_projects' - self.viewname = 'jira_project' + self.endpoint_path = "jira_projects" + self.viewname = "jira_project" self.viewset = JiraProjectViewSet self.payload = { "project_key": "TEST KEY", @@ -1403,7 +1404,7 @@ def 
__init__(self, *args, **kwargs): "product": 1, "jira_instance": 2, } - self.update_fields = {'jira_instance': 3} + self.update_fields = {"jira_instance": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product self.permission_create = Permissions.Product_Edit @@ -1414,31 +1415,31 @@ def __init__(self, *args, **kwargs): class SonarqubeIssueTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Sonarqube_Issue - self.endpoint_path = 'sonarqube_issues' - self.viewname = 'sonarqube_issue' + self.endpoint_path = "sonarqube_issues" + self.viewname = "sonarqube_issue" self.viewset = SonarqubeIssueViewSet self.payload = { "key": "AREwS5n5TxsFUNm31CxP", "status": "OPEN", "type": "VULNERABILITY", } - self.update_fields = {'key': 'AREwS5n5TxsFUNm31CxP'} + self.update_fields = {"key": "AREwS5n5TxsFUNm31CxP"} self.test_type = TestType.STANDARD self.deleted_objects = 2 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class SonarqubeIssuesTransitionTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Sonarqube_Issue_Transition - self.endpoint_path = 'sonarqube_transitions' - self.viewname = 'sonarqube_issue_transition' + self.endpoint_path = "sonarqube_transitions" + self.viewname = "sonarqube_issue_transition" self.viewset = SonarqubeIssuesTransitionTest self.payload = { "sonarqube_issue": 1, @@ -1446,25 +1447,25 @@ def __init__(self, *args, **kwargs): "sonarqube_status": "OPEN", "transitions": "confirm", } - self.update_fields = {'sonarqube_status': 'CLOSED'} + self.update_fields = {"sonarqube_status": "CLOSED"} self.test_type = TestType.STANDARD BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class Product_API_Scan_ConfigurationTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_API_Scan_Configuration - self.endpoint_path = 'product_api_scan_configurations' - self.viewname = 'product_api_scan_configuration' + self.endpoint_path = "product_api_scan_configurations" + self.viewname = "product_api_scan_configuration" self.viewset = ProductAPIScanConfigurationViewSet self.payload = { "product": 2, "service_key_1": "dojo_sonar_key", "tool_configuration": 3, } - self.update_fields = {'tool_configuration': 2} + self.update_fields = {"tool_configuration": 2} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_API_Scan_Configuration self.permission_create = Permissions.Product_API_Scan_Configuration_Add @@ -1475,12 +1476,12 @@ def __init__(self, *args, **kwargs): class ProductTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product - self.endpoint_path = 'products' - self.viewname = 'product' + self.endpoint_path = "products" + self.viewname = "product" self.viewset = ProductViewSet self.payload = { "product_manager": 2, @@ -1491,7 +1492,7 @@ def __init__(self, *args, **kwargs): "description": "test product", "tags": ["mytag", "yourtag"], } - self.update_fields = {'prod_type': 2} + self.update_fields = {"prod_type": 2} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product self.permission_create = Permissions.Product_Type_Add_Product @@ -1502,12 +1503,12 @@ def 
__init__(self, *args, **kwargs): class StubFindingsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Stub_Finding - self.endpoint_path = 'stub_findings' - self.viewname = 'stub_finding' + self.endpoint_path = "stub_findings" + self.viewname = "stub_finding" self.viewset = StubFindingsViewSet self.payload = { "title": "Stub Finding 1", @@ -1517,7 +1518,7 @@ def __init__(self, *args, **kwargs): "reporter": 3, "test": 3, } - self.update_fields = {'severity': 'Low'} + self.update_fields = {"severity": "Low"} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Stub_Finding self.permission_create = Permissions.Finding_Add @@ -1533,12 +1534,12 @@ def test_severity_validation(self): class TestsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Test - self.endpoint_path = 'tests' - self.viewname = 'test' + self.endpoint_path = "tests" + self.viewname = "test" self.viewset = TestsViewSet self.payload = { "test_type": 1, @@ -1556,7 +1557,7 @@ def __init__(self, *args, **kwargs): "branch_tag": "master", "commit_hash": "1234567890abcdefghijkl", } - self.update_fields = {'percent_complete': 100} + self.update_fields = {"percent_complete": 100} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Test self.permission_create = Permissions.Test_Add @@ -1568,12 +1569,12 @@ def __init__(self, *args, **kwargs): class ToolConfigurationsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Tool_Configuration - self.viewname = 'tool_configuration' - self.endpoint_path = 'tool_configurations' + self.viewname = "tool_configuration" + self.endpoint_path = "tool_configurations" self.viewset = ToolConfigurationsViewSet self.payload = { "url": "http://www.example.com", @@ -1587,19 +1588,19 @@ def __init__(self, *args, **kwargs): "api_key": "test key", "tool_type": 1, } - self.update_fields = {'ssh': 'test string'} + self.update_fields = {"ssh": "test string"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 2 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class ToolProductSettingsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Tool_Product_Settings - self.endpoint_path = 'tool_product_settings' - self.viewname = 'tool_product_settings' + self.endpoint_path = "tool_product_settings" + self.viewname = "tool_product_settings" self.viewset = ToolProductSettingsViewSet self.payload = { "setting_url": "http://www.example.com", @@ -1609,7 +1610,7 @@ def __init__(self, *args, **kwargs): "tool_configuration": 3, "product": 2, } - self.update_fields = {'tool_project_id': '2'} + self.update_fields = {"tool_project_id": "2"} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product self.permission_create = Permissions.Product_Edit @@ -1620,30 +1621,30 @@ def __init__(self, *args, **kwargs): class ToolTypesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Tool_Type - self.endpoint_path = 'tool_types' - self.viewname = 'tool_type' + self.endpoint_path = "tool_types" + 
self.viewname = "tool_type" self.viewset = ToolTypesViewSet self.payload = { "name": "Tool Type", "description": "test tool type", } - self.update_fields = {'description': 'changed description'} + self.update_fields = {"description": "changed description"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 3 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class NoteTypesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Note_Type - self.endpoint_path = 'note_type' - self.viewname = 'note_type' + self.endpoint_path = "note_type" + self.viewname = "note_type" self.viewset = NoteTypeViewSet self.payload = { "name": "Test Note", @@ -1652,19 +1653,19 @@ def __init__(self, *args, **kwargs): "is_active": True, "is_mandatory": False, } - self.update_fields = {'description': 'changed description'} + self.update_fields = {"description": "changed description"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class NotesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Notes - self.endpoint_path = 'notes' - self.viewname = 'notes' + self.endpoint_path = "notes" + self.viewname = "notes" self.viewset = NotesViewSet self.payload = { "id": 1, @@ -1672,18 +1673,18 @@ def __init__(self, *args, **kwargs): "author": '{"username": "admin"}', "editor": '{"username": "user1"}', } - self.update_fields = {'entry': 'changed entry'} + self.update_fields = {"entry": "changed entry"} self.test_type = TestType.STANDARD BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class UsersTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = User - self.endpoint_path = 'users' - self.viewname = 'user' + self.endpoint_path = "users" + self.viewname = "user" self.viewset = UsersViewSet self.payload = { "username": "test_user", @@ -1700,35 +1701,35 @@ def __init__(self, *args, **kwargs): def test_create_user_with_non_configuration_permissions(self): payload = self.payload.copy() - payload['configuration_permissions'] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" + payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" response = self.client.post(self.url, payload) self.assertEqual(response.status_code, 400) - self.assertIn('object does not exist', response.data['message']) + self.assertIn("object does not exist", response.data["message"]) def test_update_user_with_non_configuration_permissions(self): payload = {} - payload['configuration_permissions'] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" - response = self.client.patch(self.url + '3/', payload) + payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" + response = self.client.patch(self.url + "3/", payload) self.assertEqual(response.status_code, 400) - self.assertIn('object does not exist', response.data['message']) + self.assertIn("object does not exist", response.data["message"]) def 
     def test_update_user_other_permissions_will_not_leak_and_stay_untouched(self):
         payload = {}
-        payload['configuration_permissions'] = [217, 218, 219]
-        response = self.client.patch(self.url + '6/', payload)
+        payload["configuration_permissions"] = [217, 218, 219]
+        response = self.client.patch(self.url + "6/", payload)
         self.assertEqual(response.status_code, 200)
-        self.assertEqual(response.data['configuration_permissions'], payload['configuration_permissions'])
-        user_permissions = User.objects.get(username='user5').user_permissions.all().values_list('id', flat=True)
-        self.assertEqual(set(user_permissions), set(payload['configuration_permissions'] + [26, 28]))
+        self.assertEqual(response.data["configuration_permissions"], payload["configuration_permissions"])
+        user_permissions = User.objects.get(username="user5").user_permissions.all().values_list("id", flat=True)
+        self.assertEqual(set(user_permissions), set(payload["configuration_permissions"] + [26, 28]))


 class UserContactInfoTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = UserContactInfo
-        self.endpoint_path = 'user_contact_infos'
-        self.viewname = 'usercontactinfo'
+        self.endpoint_path = "user_contact_infos"
+        self.viewname = "usercontactinfo"
         self.viewset = UserContactInfoViewSet
         self.payload = {
             "user": 4,
@@ -1744,40 +1745,41 @@ def __init__(self, *args, **kwargs):


 class ProductPermissionTest(DojoAPITestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def setUp(self):
-        testuser = User.objects.get(username='user1')
+        testuser = User.objects.get(username="user1")
         token = Token.objects.get(user=testuser)
         self.client = APIClient()
-        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
+        self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)

     def test_user_should_not_have_access_to_product_3_in_list(self):
         response = self.client.get(
-            reverse('product-list'), format='json')
-        for obj in response.data['results']:
-            self.assertNotEqual(obj['id'], 3)
+            reverse("product-list"), format="json")
+        for obj in response.data["results"]:
+            self.assertNotEqual(obj["id"], 3)

     def test_user_should_not_have_access_to_product_3_in_detail(self):
-        response = self.client.get('http://testserver/api/v2/products/3/')
+        response = self.client.get("http://testserver/api/v2/products/3/")
         self.assertEqual(response.status_code, 404)


+@test_tag("non-parallel")
 class ImportScanTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Test
-        self.endpoint_path = 'import-scan'
-        self.viewname = 'importscan'
+        self.endpoint_path = "import-scan"
+        self.viewname = "importscan"
         self.viewset = ImportScanView
-        testfile = open('tests/zap_sample.xml')
+        testfile = open("tests/zap_sample.xml")
         self.payload = {
-            "minimum_severity": 'Low',
+            "minimum_severity": "Low",
             "active": False,
             "verified": True,
-            "scan_type": 'ZAP Scan',
+            "scan_type": "ZAP Scan",
             "file": testfile,
             "engagement": 1,
             "lead": 2,
@@ -1789,25 +1791,25 @@ def __init__(self, *args, **kwargs):
         BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

     def __del__(self: object):
-        self.payload['file'].close()
+        self.payload["file"].close()

-    @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan')
-    @patch('dojo.importers.default_importer.DefaultImporter.process_scan')
-    @patch('dojo.api_v2.permissions.user_has_permission')
+ 
@patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Python How-to', - "engagement_name": 'April monthly engagement', + "product_name": "Python How-to", + "engagement_name": "April monthly engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1815,29 +1817,29 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Engagement.objects.get(id=2), # engagement id found via product name and engagement name Permissions.Import_Scan_Result) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Python How-to', - "engagement_name": 'New engagement', + "product_name": "Python How-to", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1846,30 +1848,30 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Engagement_Add) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def 
test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1878,30 +1880,30 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product( response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Product_Type.objects.get(id=1), Permissions.Product_Type_Add_Product) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_global_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_global_permission") def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "more books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1910,14 +1912,14 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_ response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Permissions.Product_Type_Add) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): """ Test creating a new engagement should also check for import scan permission in the product @@ -1926,15 
+1928,15 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Python How-to', - "engagement_name": 'New engagement', + "product_name": "Python How-to", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1944,33 +1946,33 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) mock.assert_has_calls([ - call(User.objects.get(username='admin'), + call(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Engagement_Add), - call(User.objects.get(username='admin'), + call(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Import_Scan_Result), ]) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = True importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -1979,30 +1981,30 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Product_Type.objects.get(id=1), Permissions.Product_Type_Add_Product) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_global_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_global_permission") def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = True 
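# Two mock conventions used throughout these import/reimport tests, shown
# standalone. First, stacked @patch decorators apply bottom-up, so the decorator
# closest to the def supplies the first mock parameter -- which is why
# user_has_permission arrives as `mock`, DefaultImporter.process_scan as
# `importer_mock`, and DefaultReImporter.process_scan as `reimporter_mock`.
# Second, assert_called_with() only inspects the most recent call, while
# assert_has_calls() verifies an ordered sequence -- hence its use in the
# auto-create-engagement tests, where the permission helper runs twice.
# (The os.path targets below are arbitrary stand-ins, not DefectDojo code.)
from unittest import TestCase
from unittest.mock import call, patch

class PatchConventionsDemo(TestCase):
    @patch("os.path.exists")   # outermost decorator -> last parameter
    @patch("os.path.isfile")   # innermost decorator -> first parameter
    def test_order_and_calls(self, isfile_mock, exists_mock):
        import os.path
        isfile_mock.return_value = True
        exists_mock.return_value = False
        os.path.isfile("a")
        os.path.isfile("b")
        isfile_mock.assert_called_with("b")                    # checks last call only
        isfile_mock.assert_has_calls([call("a"), call("b")])   # checks the sequence
        exists_mock.assert_not_called()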
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "more books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2011,38 +2013,38 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Permissions.Product_Type_Add) importer_mock.assert_called_once() reimporter_mock.assert_not_called() class ReimportScanTest(DojoAPITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) - self.url = reverse('reimportscan' + '-list') + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) + self.url = reverse("reimportscan" + "-list") # Specific tests for reimport - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") def test_reimport_zap_xml(self, importer_mock, reimporter_mock): importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: length = Test.objects.all().count() response = self.client.post( - reverse('reimportscan-list'), { - "minimum_severity": 'Low', + reverse("reimportscan-list"), { + "minimum_severity": "Low", "active": True, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "test": 3, "version": "1.0.1", @@ -2053,23 +2055,23 @@ def test_reimport_zap_xml(self, importer_mock, reimporter_mock): importer_mock.assert_not_called() reimporter_mock.assert_called_once() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - 
"scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Security How-to', - "engagement_name": 'April monthly engagement', + "product_name": "Security How-to", + "engagement_name": "April monthly engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2077,45 +2079,45 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Test.objects.get(id=4), # test id found via product name and engagement name and scan_type Permissions.Import_Scan_Result) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_authorized_product_name_engagement_name_scan_type_title_auto_create(self, mock, importer_mock, reimporter_mock): mock.return_value = True importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Security How-to', - "engagement_name": 'April monthly engagement', - "test_title": 'My ZAP Scan NEW', + "product_name": "Security How-to", + "engagement_name": "April monthly engagement", + "test_title": "My ZAP Scan NEW", "version": "1.0.0", "auto_create_context": True, } response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Engagement.objects.get(id=4), Permissions.Import_Scan_Result) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): """ Test creating a new engagement should also check for import scan permission in the product @@ -2124,15 +2126,15 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + 
"scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Python How-to', - "engagement_name": 'New engagement', + "product_name": "Python How-to", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2142,34 +2144,34 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) mock.assert_has_calls([ - call(User.objects.get(username='admin'), + call(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Engagement_Add), - call(User.objects.get(username='admin'), + call(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Import_Scan_Result), ]) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = True importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2178,30 +2180,30 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Product_Type.objects.get(id=1), Permissions.Product_Type_Add_Product) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_global_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_global_permission") def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = True importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "more books", - "product_name": 'New 
Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2210,32 +2212,32 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type response = self.client.post(self.url, payload) self.assertEqual(201, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Permissions.Product_Type_Add) importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": True, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "test": 3, "version": "1.0.1", } response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Test.objects.get(id=3), Permissions.Import_Scan_Result) importer_mock.assert_not_called() @@ -2243,23 +2245,23 @@ def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_moc # copied tests from import, unsure how to use inheritance/mixins with test_ methods - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Python How-to', - "engagement_name": 'New engagement', + "product_name": "Python How-to", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2268,30 +2270,30 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + 
mock.assert_called_with(User.objects.get(username="admin"), Product.objects.get(id=1), Permissions.Engagement_Add) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2300,30 +2302,30 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product( response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Product_Type.objects.get(id=1), Permissions.Product_Type_Add_Product) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_global_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_global_permission") def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, "product_type_name": "more books", - "product_name": 'New Product', - "engagement_name": 'New engagement', + "product_name": "New Product", + "engagement_name": "New engagement", "lead": 2, "tags": ["ci/cd", "api"], "version": "1.0.0", @@ -2332,63 +2334,63 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_ response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Permissions.Product_Type_Add) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - 
@patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Security How-to', - "engagement_name": 'April monthly engagement', + "product_name": "Security How-to", + "engagement_name": "April monthly engagement", "version": "1.0.0", } response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Test.objects.get(id=4), # engagement id found via product name and engagement name Permissions.Import_Scan_Result) importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') - @patch('dojo.importers.default_importer.DefaultImporter.process_scan') - @patch('dojo.api_v2.permissions.user_has_permission') + @patch("dojo.importers.default_reimporter.DefaultReImporter.process_scan") + @patch("dojo.importers.default_importer.DefaultImporter.process_scan") + @patch("dojo.api_v2.permissions.user_has_permission") def test_create_not_authorized_product_name_engagement_name_scan_type_title(self, mock, importer_mock, reimporter_mock): mock.return_value = False importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE - with open('tests/zap_sample.xml') as testfile: + with open("tests/zap_sample.xml") as testfile: payload = { - "minimum_severity": 'Low', + "minimum_severity": "Low", "active": False, "verified": True, - "scan_type": 'ZAP Scan', + "scan_type": "ZAP Scan", "file": testfile, - "product_name": 'Security How-to', - "engagement_name": 'April monthly engagement', - "test_title": 'My ZAP Scan', + "product_name": "Security How-to", + "engagement_name": "April monthly engagement", + "test_title": "My ZAP Scan", "version": "1.0.0", } response = self.client.post(self.url, payload) self.assertEqual(403, response.status_code, response.content[:1000]) - mock.assert_called_with(User.objects.get(username='admin'), + mock.assert_called_with(User.objects.get(username="admin"), Test.objects.get(id=4), # test id found via product name and engagement name and scan_type and test_title Permissions.Import_Scan_Result) importer_mock.assert_not_called() @@ -2396,12 +2398,12 @@ def test_create_not_authorized_product_name_engagement_name_scan_type_title(self class ProductTypeTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_Type - self.endpoint_path = 'product_types' - self.viewname = 'product_type' + 
self.endpoint_path = "product_types" + self.viewname = "product_type" self.viewset = ProductTypeViewSet self.payload = { "name": "Test Product Type", @@ -2409,7 +2411,7 @@ def __init__(self, *args, **kwargs): "key_product": True, "critical_product": False, } - self.update_fields = {'description': "changed"} + self.update_fields = {"description": "changed"} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_Type self.permission_update = Permissions.Product_Type_Edit @@ -2437,19 +2439,19 @@ def test_create_authorized_owner(self): class DojoGroupsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Dojo_Group - self.endpoint_path = 'dojo_groups' - self.viewname = 'dojo_group' + self.endpoint_path = "dojo_groups" + self.viewname = "dojo_group" self.viewset = DojoGroupViewSet self.payload = { "name": "Test Group", "description": "Test", "configuration_permissions": [217, 218], } - self.update_fields = {'description': "changed", "configuration_permissions": [219, 220]} + self.update_fields = {"description": "changed", "configuration_permissions": [219, 220]} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Dojo_Group self.permission_update = Permissions.Group_Edit @@ -2460,14 +2462,14 @@ def __init__(self, *args, **kwargs): def test_list_object_not_authorized(self): self.setUp_not_authorized() - response = self.client.get(self.url, format='json') + response = self.client.get(self.url, format="json") self.assertEqual(403, response.status_code, response.content[:1000]) def test_detail_object_not_authorized(self): self.setUp_not_authorized() current_objects = self.endpoint_model.objects.all() - relative_url = self.url + f'{current_objects[0].id}/' + relative_url = self.url + f"{current_objects[0].id}/" response = self.client.get(relative_url) self.assertEqual(403, response.status_code, response.content[:1000]) @@ -2479,44 +2481,44 @@ def test_create_object_not_authorized(self): def test_create_group_with_non_configuration_permissions(self): payload = self.payload.copy() - payload['configuration_permissions'] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" + payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" response = self.client.post(self.url, payload) self.assertEqual(response.status_code, 400) - self.assertIn('object does not exist', response.data['message']) + self.assertIn("object does not exist", response.data["message"]) def test_update_group_with_non_configuration_permissions(self): payload = {} - payload['configuration_permissions'] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" - response = self.client.patch(self.url + '2/', payload) + payload["configuration_permissions"] = [25, 26] # these permissions exist but user can not assign them becaause they are not "configuration_permissions" + response = self.client.patch(self.url + "2/", payload) self.assertEqual(response.status_code, 400) - self.assertIn('object does not exist', response.data['message']) + self.assertIn("object does not exist", response.data["message"]) def test_update_group_other_permissions_will_not_leak_and_stay_untouched(self): - Dojo_Group.objects.get(name='Group 1 Testdata').auth_group.permissions.set([218, 220, 26, 28]) 
# I was trying to set this in 'dojo_testdata.json' but it hasn't sucessful + Dojo_Group.objects.get(name="Group 1 Testdata").auth_group.permissions.set([218, 220, 26, 28]) # I was trying to set this in 'dojo_testdata.json' but it hasn't sucessful payload = {} - payload['configuration_permissions'] = [217, 218, 219] - response = self.client.patch(self.url + '1/', payload) + payload["configuration_permissions"] = [217, 218, 219] + response = self.client.patch(self.url + "1/", payload) self.assertEqual(response.status_code, 200) - self.assertEqual(response.data['configuration_permissions'], payload['configuration_permissions']) - permissions = Dojo_Group.objects.get(name='Group 1 Testdata').auth_group.permissions.all().values_list('id', flat=True) - self.assertEqual(set(permissions), set(payload['configuration_permissions'] + [26, 28])) - Dojo_Group.objects.get(name='Group 1 Testdata').auth_group.permissions.clear() + self.assertEqual(response.data["configuration_permissions"], payload["configuration_permissions"]) + permissions = Dojo_Group.objects.get(name="Group 1 Testdata").auth_group.permissions.all().values_list("id", flat=True) + self.assertEqual(set(permissions), set(payload["configuration_permissions"] + [26, 28])) + Dojo_Group.objects.get(name="Group 1 Testdata").auth_group.permissions.clear() class DojoGroupsUsersTest(BaseClass.MemberEndpointTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Dojo_Group_Member - self.endpoint_path = 'dojo_group_members' - self.viewname = 'dojo_group_member' + self.endpoint_path = "dojo_group_members" + self.viewname = "dojo_group_member" self.viewset = DojoGroupMemberViewSet self.payload = { "group": 1, "user": 3, "role": 4, } - self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Dojo_Group_Member self.permission_create = Permissions.Group_Manage_Members @@ -2527,49 +2529,49 @@ def __init__(self, *args, **kwargs): class RolesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Role - self.endpoint_path = 'roles' - self.viewname = 'role' + self.endpoint_path = "roles" + self.viewname = "role" self.viewset = RoleViewSet self.test_type = TestType.STANDARD BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class GlobalRolesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Global_Role - self.endpoint_path = 'global_roles' - self.viewname = 'global_role' + self.endpoint_path = "global_roles" + self.viewname = "global_role" self.viewset = GlobalRoleViewSet self.payload = { "user": 2, "role": 2, } - self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.STANDARD self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class ProductTypeMemberTest(BaseClass.MemberEndpointTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_Type_Member - self.endpoint_path = 'product_type_members' - self.viewname = 'product_type_member' + self.endpoint_path = "product_type_members" + self.viewname = "product_type_member" self.viewset = ProductTypeMemberViewSet self.payload = { "product_type": 1, "user": 3, "role": 2, } - 
self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_Type_Member self.permission_create = Permissions.Product_Type_Manage_Members @@ -2580,19 +2582,19 @@ def __init__(self, *args, **kwargs): class ProductMemberTest(BaseClass.MemberEndpointTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_Member - self.endpoint_path = 'product_members' - self.viewname = 'product_member' + self.endpoint_path = "product_members" + self.viewname = "product_member" self.viewset = ProductMemberViewSet self.payload = { "product": 3, "user": 2, "role": 2, } - self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_Member self.permission_create = Permissions.Product_Manage_Members @@ -2603,19 +2605,19 @@ def __init__(self, *args, **kwargs): class ProductTypeGroupTest(BaseClass.MemberEndpointTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_Type_Group - self.endpoint_path = 'product_type_groups' - self.viewname = 'product_type_group' + self.endpoint_path = "product_type_groups" + self.viewname = "product_type_group" self.viewset = ProductTypeGroupViewSet self.payload = { "product_type": 1, "group": 2, "role": 2, } - self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_Type_Group self.permission_create = Permissions.Product_Type_Group_Add @@ -2626,19 +2628,19 @@ def __init__(self, *args, **kwargs): class ProductGroupTest(BaseClass.MemberEndpointTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Product_Group - self.endpoint_path = 'product_groups' - self.viewname = 'product_group' + self.endpoint_path = "product_groups" + self.viewname = "product_group" self.viewset = ProductGroupViewSet self.payload = { "product": 1, "group": 2, "role": 2, } - self.update_fields = {'role': 3} + self.update_fields = {"role": 3} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Product_Group self.permission_create = Permissions.Product_Group_Add @@ -2649,43 +2651,44 @@ def __init__(self, *args, **kwargs): class LanguageTypeTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Language_Type - self.endpoint_path = 'language_types' - self.viewname = 'language_type' + self.endpoint_path = "language_types" + self.viewname = "language_type" self.viewset = LanguageTypeViewSet self.payload = { - 'language': 'Test', - 'color': 'red', - 'created': '2018-08-16T16:58:23.908Z', + "language": "Test", + "color": "red", + "created": "2018-08-16T16:58:23.908Z", } - self.update_fields = {'color': 'blue'} + self.update_fields = {"color": "blue"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 + self.delete_id = 3 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class LanguageTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Languages - self.endpoint_path = 'languages' - self.viewname = 'languages' + 
self.endpoint_path = "languages" + self.viewname = "languages" self.viewset = LanguageViewSet self.payload = { - 'product': 1, - 'language': 2, - 'user': 1, - 'files': 2, - 'blank': 3, - 'comment': 4, - 'code': 5, - 'created': '2018-08-16T16:58:23.908Z', + "product": 1, + "language": 2, + "user": 1, + "files": 2, + "blank": 3, + "comment": 4, + "code": 5, + "created": "2018-08-16T16:58:23.908Z", } - self.update_fields = {'code': 10} + self.update_fields = {"code": 10} self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Languages self.permission_create = Permissions.Language_Add @@ -2695,17 +2698,18 @@ def __init__(self, *args, **kwargs): BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) +@test_tag("non-parallel") class ImportLanguagesTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Languages - self.endpoint_path = 'import-languages' - self.viewname = 'importlanguages' + self.endpoint_path = "import-languages" + self.viewname = "importlanguages" self.viewset = ImportLanguagesView self.payload = { - 'product': 1, - 'file': open("unittests/files/defectdojo_cloc.json"), + "product": 1, + "file": open("unittests/files/defectdojo_cloc.json"), } self.test_type = TestType.OBJECT_PERMISSIONS self.permission_check_class = Languages @@ -2713,12 +2717,12 @@ def __init__(self, *args, **kwargs): BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) def __del__(self: object): - self.payload['file'].close() + self.payload["file"].close() def test_create(self): BaseClass.CreateRequestTest.test_create(self) - languages = Languages.objects.filter(product=1).order_by('language') + languages = Languages.objects.filter(product=1).order_by("language") self.assertEqual(2, len(languages)) @@ -2738,119 +2742,119 @@ def test_create(self): class NotificationsTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Notifications - self.endpoint_path = 'notifications' - self.viewname = 'notifications' + self.endpoint_path = "notifications" + self.viewname = "notifications" self.viewset = NotificationsViewSet self.payload = { - 'product': 1, - 'user': 3, - 'product_type_added': ["alert", "msteams"], + "product": 1, + "user": 3, + "product_type_added": ["alert", "msteams"], } - self.update_fields = {'product_added': ["alert", "msteams"]} + self.update_fields = {"product_added": ["alert", "msteams"]} self.test_type = TestType.STANDARD self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class UserProfileTest(DojoAPITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): - testuser = User.objects.get(username='admin') + testuser = User.objects.get(username="admin") token = Token.objects.get(user=testuser) self.client = APIClient() - self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key) - self.url = reverse('user_profile') + self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) + self.url = reverse("user_profile") def test_profile(self): - response = self.client.get(reverse('user_profile')) + response = self.client.get(reverse("user_profile")) data = json.loads(response.content) - self.assertEqual(1, data['user']['id']) - self.assertEqual('admin', data['user']['username']) - self.assertTrue(data['user']['is_superuser']) - self.assertEqual(1, data['user_contact_info']['user']) - 
self.assertEqual('#admin', data['user_contact_info']['twitter_username']) - self.assertEqual(1, data['global_role']['user']) - self.assertEqual(4, data['global_role']['role']) - self.assertEqual(1, data['dojo_group_member'][0]['user']) - self.assertEqual(1, data['dojo_group_member'][0]['group']) - self.assertEqual(1, data['product_type_member'][0]['user']) - self.assertEqual(1, data['product_type_member'][0]['product_type']) - self.assertEqual(1, data['product_member'][1]['user']) - self.assertEqual(3, data['product_member'][1]['product']) + self.assertEqual(1, data["user"]["id"]) + self.assertEqual("admin", data["user"]["username"]) + self.assertTrue(data["user"]["is_superuser"]) + self.assertEqual(1, data["user_contact_info"]["user"]) + self.assertEqual("#admin", data["user_contact_info"]["twitter_username"]) + self.assertEqual(1, data["global_role"]["user"]) + self.assertEqual(4, data["global_role"]["role"]) + self.assertEqual(1, data["dojo_group_member"][0]["user"]) + self.assertEqual(1, data["dojo_group_member"][0]["group"]) + self.assertEqual(1, data["product_type_member"][0]["user"]) + self.assertEqual(1, data["product_type_member"][0]["product_type"]) + self.assertEqual(1, data["product_member"][1]["user"]) + self.assertEqual(3, data["product_member"][1]["product"]) class DevelopmentEnvironmentTest(BaseClass.AuthenticatedViewTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Development_Environment - self.endpoint_path = 'development_environments' - self.viewname = 'development_environment' + self.endpoint_path = "development_environments" + self.viewname = "development_environment" self.viewset = DevelopmentEnvironmentViewSet self.payload = { - 'name': 'Test_1', + "name": "Test_1", } - self.update_fields = {'name': 'Test_2'} + self.update_fields = {"name": "Test_2"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) def test_delete(self): - current_objects = self.client.get(self.url, format='json').data - relative_url = self.url + '{}/'.format(current_objects['results'][-1]['id']) + current_objects = self.client.get(self.url, format="json").data + relative_url = self.url + "{}/".format(current_objects["results"][-1]["id"]) response = self.client.delete(relative_url) self.assertEqual(409, response.status_code, response.content[:1000]) class TestTypeTest(BaseClass.AuthenticatedViewTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Test_Type - self.endpoint_path = 'test_types' - self.viewname = 'test_type' + self.endpoint_path = "test_types" + self.viewname = "test_type" self.viewset = TestTypesViewSet self.payload = { - 'name': 'Test_1', + "name": "Test_1", } - self.update_fields = {'name': 'Test_2'} + self.update_fields = {"name": "Test_2"} self.test_type = TestType.CONFIGURATION_PERMISSIONS self.deleted_objects = 1 BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs) class ConfigurationPermissionTest(BaseClass.BaseClassTest): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def __init__(self, *args, **kwargs): self.endpoint_model = Permission - self.endpoint_path = 'configuration_permissions' - self.viewname = 'permission' + self.endpoint_path = "configuration_permissions" + self.viewname = "permission" self.viewset = ConfigurationPermissionViewSet self.test_type = TestType.STANDARD 
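+        # no payload or update_fields are defined for this endpoint, so (as with
+        # RolesTest above) the shared base class presumably only exercises the
+        # read-only list/detail behaviour of this viewset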
BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)


 class CredentialMappingTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Cred_Mapping
-        self.endpoint_path = 'credential_mappings'
-        self.viewname = 'cred_mapping'
+        self.endpoint_path = "credential_mappings"
+        self.viewname = "cred_mapping"
         self.viewset = CredentialsMappingViewSet
         self.payload = {
-            'cred_id': 1,
-            'product': 1,
-            'url': 'https://google.com',
+            "cred_id": 1,
+            "product": 1,
+            "url": "https://google.com",
         }
-        self.update_fields = {'url': 'https://bing.com'}
+        self.update_fields = {"url": "https://bing.com"}
         self.test_type = TestType.OBJECT_PERMISSIONS
         self.permission_check_class = Product
         self.permission_create = Permissions.Credential_Add
@@ -2861,34 +2865,34 @@ def __init__(self, *args, **kwargs):


 class CredentialTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Cred_User
-        self.endpoint_path = 'credentials'
-        self.viewname = 'cred_user'
+        self.endpoint_path = "credentials"
+        self.viewname = "cred_user"
         self.viewset = CredentialsViewSet
         self.payload = {
-            'name': 'name',
-            'username': 'usernmae',
-            'password': 'password',
-            'role': 'role',
-            'url': 'https://some-url.com',
-            'environment': 1,
+            "name": "name",
+            "username": "username",
+            "password": "password",
+            "role": "role",
+            "url": "https://some-url.com",
+            "environment": 1,
         }
-        self.update_fields = {'name': 'newname'}
+        self.update_fields = {"name": "newname"}
         self.test_type = TestType.STANDARD
         self.deleted_objects = 2
         BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)


 class TextQuestionTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = TextQuestion
-        self.endpoint_path = 'questionnaire_questions'
-        self.viewname = 'question'
+        self.endpoint_path = "questionnaire_questions"
+        self.viewname = "question"
         self.viewset = QuestionnaireQuestionViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2896,12 +2900,12 @@ def __init__(self, *args, **kwargs):


 class ChoiceQuestionTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = ChoiceQuestion
-        self.endpoint_path = 'questionnaire_questions'
-        self.viewname = 'question'
+        self.endpoint_path = "questionnaire_questions"
+        self.viewname = "question"
         self.viewset = QuestionnaireQuestionViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2909,12 +2913,12 @@ def __init__(self, *args, **kwargs):


 class TextAnswerTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = TextAnswer
-        self.endpoint_path = 'questionnaire_answers'
-        self.viewname = 'answer'
+        self.endpoint_path = "questionnaire_answers"
+        self.viewname = "answer"
         self.viewset = QuestionnaireAnswerViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2922,12 +2926,12 @@ def __init__(self, *args, **kwargs):


 class ChoiceAnswerTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = ChoiceAnswer
-        self.endpoint_path = 'questionnaire_answers'
-        self.viewname = 'answer'
+        self.endpoint_path = "questionnaire_answers"
+        self.viewname = "answer"
         self.viewset = QuestionnaireAnswerViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2935,12 +2939,12 @@ def __init__(self, *args, **kwargs):


 class GeneralSurveyTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = General_Survey
-        self.endpoint_path = 'questionnaire_general_questionnaires'
-        self.viewname = 'general_survey'
+        self.endpoint_path = "questionnaire_general_questionnaires"
+        self.viewname = "general_survey"
         self.viewset = QuestionnaireGeneralSurveyViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2948,12 +2952,12 @@ def __init__(self, *args, **kwargs):


 class EngagementSurveyTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Engagement_Survey
-        self.endpoint_path = 'questionnaire_engagement_questionnaires'
-        self.viewname = 'engagement_survey'
+        self.endpoint_path = "questionnaire_engagement_questionnaires"
+        self.viewname = "engagement_survey"
         self.viewset = QuestionnaireEngagementSurveyViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2961,12 +2965,12 @@ def __init__(self, *args, **kwargs):


 class AnsweredSurveyTest(BaseClass.BaseClassTest):
-    fixtures = ['questionnaire_testdata.json']
+    fixtures = ["questionnaire_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Answered_Survey
-        self.endpoint_path = 'questionnaire_answered_questionnaires'
-        self.viewname = 'answered_survey'
+        self.endpoint_path = "questionnaire_answered_questionnaires"
+        self.viewname = "answered_survey"
         self.viewset = QuestionnaireAnsweredSurveyViewSet
         self.test_type = TestType.STANDARD
         self.deleted_objects = 5
@@ -2974,22 +2978,22 @@ def __init__(self, *args, **kwargs):


 class AnnouncementTest(BaseClass.BaseClassTest):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     def __init__(self, *args, **kwargs):
         self.endpoint_model = Announcement
-        self.endpoint_path = 'announcements'
-        self.viewname = 'announcement'
+        self.endpoint_path = "announcements"
+        self.viewname = "announcement"
         self.viewset = AnnouncementViewSet
         self.payload = {
             "message": "Test template",
             "style": "info",
             "dismissable": True,
         }
-        self.update_fields = {'style': 'warning'}
+        self.update_fields = {"style": "warning"}
         self.test_type = TestType.CONFIGURATION_PERMISSIONS
         self.deleted_objects = 7
         BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)

     def test_create(self):
-        self.skipTest('Only one Announcement can exists')
+        self.skipTest("Only one Announcement can exist")
diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py
index 6a7961affa..97afb3e1f7 100644
--- a/unittests/test_risk_acceptance.py
+++ b/unittests/test_risk_acceptance.py
@@ -22,25 +22,25 @@ class RiskAcceptanceTestUI(DojoTestCase):
-    fixtures = ['dojo_testdata.json']
+    fixtures = ["dojo_testdata.json"]

     data_risk_accceptance = {
-        'name': 'Accept: Unit test',
-        'accepted_findings': [72808],
-        'recommendation': 'A',
-        'recommendation_details': 'recommendation 1',
-        'decision': 'A',
-        'decision_details': 'it has been decided!',
-        'accepted_by': 'pointy haired boss',
+        "name": "Accept: Unit test",
+        "accepted_findings":
[72808], + "recommendation": "A", + "recommendation_details": "recommendation 1", + "decision": "A", + "decision_details": "it has been decided!", + "accepted_by": "pointy haired boss", # 'path: (binary) - 'owner': 1, - 'expiration_date': '2021-07-15', - 'reactivate_expired': True, + "owner": 1, + "expiration_date": "2021-07-15", + "reactivate_expired": True, } data_remove_finding_from_ra = { - 'remove_finding': 'Remove', - 'remove_finding_id': 666, + "remove_finding": "Remove", + "remove_finding_id": 666, } def __init__(self, *args, **kwargs): @@ -57,7 +57,7 @@ def add_risk_acceptance(self, eid, data_risk_accceptance, fid=None): else: args = (eid, ) - response = self.client.post(reverse('add_risk_acceptance', args=args), data_risk_accceptance) + response = self.client.post(reverse("add_risk_acceptance", args=args), data_risk_accceptance) self.assertEqual(302, response.status_code, response.content[:1000]) return response @@ -81,18 +81,18 @@ def assert_all_inactive_risk_accepted(self, findings): def test_add_risk_acceptance_single_findings_accepted(self): ra_data = copy.copy(self.data_risk_accceptance) - ra_data['accepted_findings'] = [2] - ra_data['return_url'] = reverse('view_finding', args=(2, )) + ra_data["accepted_findings"] = [2] + ra_data["return_url"] = reverse("view_finding", args=(2, )) response = self.add_risk_acceptance(1, ra_data, 2) - self.assertEqual('/finding/2', response.url) + self.assertEqual("/finding/2", response.url) ra = Risk_Acceptance.objects.last() self.assert_all_active_not_risk_accepted(ra.accepted_findings.all()) def test_add_risk_acceptance_multiple_findings_accepted(self): ra_data = copy.copy(self.data_risk_accceptance) - ra_data['accepted_findings'] = [2, 3] + ra_data["accepted_findings"] = [2, 3] response = self.add_risk_acceptance(1, ra_data) - self.assertEqual('/engagement/1', response.url) + self.assertEqual("/engagement/1", response.url) ra = Risk_Acceptance.objects.last() self.assert_all_active_not_risk_accepted(ra.accepted_findings.all()) @@ -102,13 +102,13 @@ def test_add_findings_to_risk_acceptance_findings_accepted(self): ra = Risk_Acceptance.objects.last() data_add_findings_to_ra = { - 'add_findings': 'Add Selected Findings', - 'accepted_findings': [4, 5], + "add_findings": "Add Selected Findings", + "accepted_findings": [4, 5], } - response = self.client.post(reverse('view_risk_acceptance', args=(1, ra.id)), + response = self.client.post(reverse("view_risk_acceptance", args=(1, ra.id)), urlencode(MultiValueDict(data_add_findings_to_ra), doseq=True), - content_type='application/x-www-form-urlencoded') + content_type="application/x-www-form-urlencoded") self.assertEqual(302, response.status_code, response.content[:1000]) self.assert_all_inactive_risk_accepted(Finding.objects.filter(id__in=[2, 3, 4, 5])) @@ -118,9 +118,9 @@ def test_remove_findings_from_risk_acceptance_findings_active(self): self.test_add_risk_acceptance_multiple_findings_accepted() data = copy.copy(self.data_remove_finding_from_ra) - data['remove_finding_id'] = 2 + data["remove_finding_id"] = 2 ra = Risk_Acceptance.objects.last() - response = self.client.post(reverse('view_risk_acceptance', args=(1, ra.id)), data) + response = self.client.post(reverse("view_risk_acceptance", args=(1, ra.id)), data) self.assertEqual(302, response.status_code, response.content[:1000]) self.assert_all_active_not_risk_accepted(Finding.objects.filter(id=2)) self.assert_all_inactive_risk_accepted(Finding.objects.filter(id=3)) @@ -131,9 +131,9 @@ def test_remove_risk_acceptance_findings_active(self): 
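+        # deleting the risk acceptance should reactivate the formerly accepted
+        # findings; the assertions below check the whole engagement, not only the
+        # findings that were attached to the deleted risk acceptance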
findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("delete_risk_acceptance", args=(1, ra.id)), data) self.assert_all_active_not_risk_accepted(findings) self.assert_all_active_not_risk_accepted(Finding.objects.filter(test__engagement=1)) @@ -146,9 +146,9 @@ def test_expire_risk_acceptance_findings_active(self): findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("expire_risk_acceptance", args=(1, ra.id)), data) ra.refresh_from_db() self.assert_all_active_not_risk_accepted(findings) @@ -168,9 +168,9 @@ def test_expire_risk_acceptance_findings_not_active(self): findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("expire_risk_acceptance", args=(1, ra.id)), data) ra.refresh_from_db() # no reactivation on expiry @@ -191,9 +191,9 @@ def test_expire_risk_acceptance_sla_not_reset(self): findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("expire_risk_acceptance", args=(1, ra.id)), data) ra.refresh_from_db() @@ -207,9 +207,9 @@ def test_expire_risk_acceptance_sla_reset(self): findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("expire_risk_acceptance", args=(1, ra.id)), data) ra.refresh_from_db() @@ -222,12 +222,12 @@ def test_reinstate_risk_acceptance_findings_accepted(self): findings = ra.accepted_findings.all() - data = {'id': ra.id} + data = {"id": ra.id} - self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id)), data) + self.client.post(reverse("reinstate_risk_acceptance", args=(1, ra.id)), data) ra.refresh_from_db() - expiration_delta_days = get_system_setting('risk_acceptance_form_default_days', 90) + expiration_delta_days = get_system_setting("risk_acceptance_form_default_days", 90) risk_acceptance_expiration_date = timezone.now() + relativedelta(days=expiration_delta_days) self.assertEqual(ra.expiration_date.date(), risk_acceptance_expiration_date.date()) @@ -240,20 +240,20 @@ def test_reinstate_risk_acceptance_findings_accepted(self): def create_multiple_ras(self): ra_data = copy.copy(self.data_risk_accceptance) - ra_data['accepted_findings'] = [2] - ra_data['return_url'] = reverse('view_finding', args=(2, )) + ra_data["accepted_findings"] = [2] + ra_data["return_url"] = reverse("view_finding", args=(2, )) self.add_risk_acceptance(1, ra_data, 2) ra1 = Risk_Acceptance.objects.last() ra_data = copy.copy(self.data_risk_accceptance) - ra_data['accepted_findings'] = [7] - ra_data['return_url'] = reverse('view_finding', args=(7, )) + ra_data["accepted_findings"] = [7] + ra_data["return_url"] = reverse("view_finding", args=(7, )) self.add_risk_acceptance(1, ra_data, 7) ra2 = Risk_Acceptance.objects.last() ra_data = copy.copy(self.data_risk_accceptance) - ra_data['accepted_findings'] = [22] - ra_data['return_url'] = reverse('view_finding', args=(22, )) + ra_data["accepted_findings"] = [22] + ra_data["return_url"] = reverse("view_finding", args=(22, )) self.add_risk_acceptance(3, ra_data, 22) ra3 = 
Risk_Acceptance.objects.last() diff --git a/unittests/test_sample_data.py b/unittests/test_sample_data.py index 481782bbcf..d1ebe4b5be 100644 --- a/unittests/test_sample_data.py +++ b/unittests/test_sample_data.py @@ -6,7 +6,7 @@ class TestSampleData(DojoTestCase): def test_loaddata(self): try: - call_command('loaddata', 'dojo/fixtures/defect_dojo_sample_data', verbosity=0) + call_command("loaddata", "dojo/fixtures/defect_dojo_sample_data", verbosity=0) except Exception as e: self.assertEqual(False, True, e) self.assertEqual(True, True) diff --git a/unittests/test_tags.py b/unittests/test_tags.py index 0e471d67ed..6c31ab1f5d 100644 --- a/unittests/test_tags.py +++ b/unittests/test_tags.py @@ -10,106 +10,106 @@ class TagTests(DojoAPITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self, *args, **kwargs): super().setUp() self.login_as_admin() - self.scans_path = '/scans/zap/' - self.zap_sample5_filename = self.scans_path + '5_zap_sample_one.xml' + self.scans_path = "/scans/zap/" + self.zap_sample5_filename = self.scans_path + "5_zap_sample_one.xml" def create_finding_with_tags(self, tags): finding_id = Finding.objects.all().first().id finding_details = self.get_finding_api(finding_id) - del finding_details['id'] + del finding_details["id"] - finding_details['title'] = 'tags test ' + str(random.randint(1, 9999)) - finding_details['tags'] = tags + finding_details["title"] = "tags test " + str(random.randint(1, 9999)) + finding_details["tags"] = tags response = self.post_new_finding_api(finding_details) - return response['id'] + return response["id"] def test_finding_get_tags(self): - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) response = self.get_finding_tags_api(finding_id) - self.assertEqual(len(tags), len(response.get('tags', None))) + self.assertEqual(len(tags), len(response.get("tags", None))) for tag in tags: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_filter_tags(self): - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] self.create_finding_with_tags(tags) - tags2 = ['tag1', 'tag3'] + tags2 = ["tag1", "tag3"] self.create_finding_with_tags(tags2) - response = self.get_finding_api_filter_tags('tag1') - self.assertEqual(response['count'], 2) + response = self.get_finding_api_filter_tags("tag1") + self.assertEqual(response["count"], 2) - response = self.get_finding_api_filter_tags('tag2') - self.assertEqual(response['count'], 1) + response = self.get_finding_api_filter_tags("tag2") + self.assertEqual(response["count"], 1) - response = self.get_finding_api_filter_tags('tag2,tag3') - self.assertEqual(response['count'], 2) + response = self.get_finding_api_filter_tags("tag2,tag3") + self.assertEqual(response["count"], 2) - response = self.get_finding_api_filter_tags('tag4') - self.assertEqual(response['count'], 0) + response = self.get_finding_api_filter_tags("tag4") + self.assertEqual(response["count"], 0) def test_finding_post_tags(self): # create finding - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) # post tags. POST will ADD tags to existing tags (which is possibly not REST compliant?) 
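+        # e.g. a finding tagged ["tag1", "tag2"] that receives a POST of ["tag3", "tag4"]
+        # should end up with the union ["tag1", "tag2", "tag3", "tag4"], which is what
+        # tags_merged models below via set(tags) | set(tags_new)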
- tags_new = ['tag3', 'tag4'] + tags_new = ["tag3", "tag4"] response = self.post_finding_tags_api(finding_id, tags_new) tags_merged = list(set(tags) | set(tags_new)) - self.assertEqual(len(tags_merged), len(response.get('tags'))) + self.assertEqual(len(tags_merged), len(response.get("tags"))) for tag in tags_merged: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_post_tags_overlap(self): # create finding - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) # post tags. POST will ADD tags to existing tags (which is possibly not REST compliant?) - tags_new = ['tag2', 'tag3'] + tags_new = ["tag2", "tag3"] response = self.post_finding_tags_api(finding_id, tags_new) tags_merged = list(set(tags) | set(tags_new)) - self.assertEqual(len(tags_merged), len(response.get('tags'))) + self.assertEqual(len(tags_merged), len(response.get("tags"))) for tag in tags_merged: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_put_remove_tags(self): # create finding - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) # post tags. PUT will remove any tags that exist - tags_remove = ['tag1'] + tags_remove = ["tag1"] response = self.put_finding_remove_tags_api(finding_id, tags_remove) # for some reason this method returns just a message, not the remaining tags - self.assertEqual(response['success'], 'Tag(s) Removed') + self.assertEqual(response["success"], "Tag(s) Removed") # retrieve finding and check tags_merged = list(set(tags) - set(tags_remove)) response = self.get_finding_tags_api(finding_id) - self.assertEqual(len(tags_merged), len(response.get('tags'))) + self.assertEqual(len(tags_merged), len(response.get("tags"))) for tag in tags_merged: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_put_remove_tags_all(self): # create finding - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) # post tags. PUT will remove any tags that exist @@ -117,35 +117,35 @@ def test_finding_put_remove_tags_all(self): response = self.put_finding_remove_tags_api(finding_id, tags_remove) # for some reason this method returns just a message, not the remaining tags - self.assertEqual(response['success'], 'Tag(s) Removed') + self.assertEqual(response["success"], "Tag(s) Removed") # retrieve finding and check tags_merged = list(set(tags) - set(tags_remove)) response = self.get_finding_tags_api(finding_id) - self.assertEqual(len(tags_merged), len(response.get('tags'))) + self.assertEqual(len(tags_merged), len(response.get("tags"))) for tag in tags_merged: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_put_remove_tags_non_existent(self): # create finding - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] finding_id = self.create_finding_with_tags(tags) # post tags. PUT will throw an error on non-existent tag to be removed (which is maybe not what we want?) 
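+        # e.g. removing "tag5" from a finding tagged ["tag1", "tag2"] should come back
+        # as HTTP 400 and leave the existing tags untouched, as asserted below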
- tags_remove = ['tag5'] + tags_remove = ["tag5"] response = self.put_finding_remove_tags_api(finding_id, tags_remove, expected_response_status_code=400) # for some reason this method returns just a message, not the remaining tags - self.assertEqual(response['error'], '\'tag5\' is not a valid tag in list') + self.assertEqual(response["error"], "'tag5' is not a valid tag in list") # retrieve finding and check tags_merged = list(set(tags) - set(tags_remove)) response = self.get_finding_tags_api(finding_id) - self.assertEqual(len(tags_merged), len(response.get('tags'))) + self.assertEqual(len(tags_merged), len(response.get("tags"))) for tag in tags_merged: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_patch_remove_tags(self): # has same logic as PUT @@ -158,13 +158,13 @@ def test_finding_patch_remove_tags_non_existent(self): return self.test_finding_put_remove_tags_non_existent() def test_finding_create_tags_with_commas(self): - tags = ['one,two'] + tags = ["one,two"] finding_id = self.create_finding_with_tags(tags) response = self.get_finding_tags_api(finding_id) - self.assertEqual(2, len(response.get('tags'))) - self.assertIn('one', str(response['tags'])) - self.assertIn('two', str(response['tags'])) + self.assertEqual(2, len(response.get("tags"))) + self.assertIn("one", str(response["tags"])) + self.assertIn("two", str(response["tags"])) def test_finding_create_tags_with_commas_quoted(self): tags = ['"one,two"'] @@ -172,15 +172,15 @@ def test_finding_create_tags_with_commas_quoted(self): response = self.get_finding_tags_api(finding_id) # no splitting due to quotes - self.assertEqual(len(tags), len(response.get('tags', None))) + self.assertEqual(len(tags), len(response.get("tags", None))) for tag in tags: - logger.debug('looking for tag %s in tag list %s', tag, response['tags']) + logger.debug("looking for tag %s in tag list %s", tag, response["tags"]) # with django-tagging the quotes were stripped, with tagulous they remain # self.assertIn(tag.strip('\"'), response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_finding_create_tags_with_spaces(self): - tags = ['one two'] + tags = ["one two"] finding_id = self.create_finding_with_tags(tags) response = self.get_finding_tags_api(finding_id) @@ -189,9 +189,9 @@ def test_finding_create_tags_with_spaces(self): # to keep doing the old behaviour. 
so this is a small incompatibility, but only for # tags with commas, so should be minor trouble # self.assertEqual(2, len(response.get('tags'))) - self.assertEqual(1, len(response.get('tags'))) - self.assertIn('one', str(response['tags'])) - self.assertIn('two', str(response['tags'])) + self.assertEqual(1, len(response.get("tags"))) + self.assertIn("one", str(response["tags"])) + self.assertIn("two", str(response["tags"])) # finding.tags: [, ] def test_finding_create_tags_with_spaces_quoted(self): @@ -200,59 +200,59 @@ def test_finding_create_tags_with_spaces_quoted(self): response = self.get_finding_tags_api(finding_id) # no splitting due to quotes - self.assertEqual(len(tags), len(response.get('tags', None))) + self.assertEqual(len(tags), len(response.get("tags", None))) for tag in tags: - logger.debug('looking for tag %s in tag list %s', tag, response['tags']) + logger.debug("looking for tag %s in tag list %s", tag, response["tags"]) # with django-tagging the quotes were stripped, with tagulous they remain # self.assertIn(tag.strip('\"'), response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) # finding.tags: ]> def test_finding_create_tags_with_slashes(self): - tags = ['a/b/c'] + tags = ["a/b/c"] finding_id = self.create_finding_with_tags(tags) response = self.get_finding_tags_api(finding_id) - self.assertEqual(len(tags), len(response.get('tags', None))) + self.assertEqual(len(tags), len(response.get("tags", None))) for tag in tags: # logger.debug('looking for tag %s in tag list %s', tag, response['tags']) - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) def test_import_and_reimport_with_tags(self): - tags = ['tag1', 'tag2'] + tags = ["tag1", "tag2"] import0 = self.import_scan_with_params(self.zap_sample5_filename, tags=tags) - test_id = import0['test'] + test_id = import0["test"] response = self.get_test_api(test_id) - self.assertEqual(len(tags), len(response.get('tags'))) + self.assertEqual(len(tags), len(response.get("tags"))) for tag in tags: - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) # reimport, do not specify tags: should retain tags self.reimport_scan_with_params(test_id, self.zap_sample5_filename) - self.assertEqual(len(tags), len(response.get('tags'))) + self.assertEqual(len(tags), len(response.get("tags"))) for tag in tags: - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) # reimport, specify tags others: currently reimport doesn't do anything with tags param and silently ignores them - self.reimport_scan_with_params(test_id, self.zap_sample5_filename, tags=['tag3', 'tag4']) - self.assertEqual(len(tags), len(response.get('tags'))) + self.reimport_scan_with_params(test_id, self.zap_sample5_filename, tags=["tag3", "tag4"]) + self.assertEqual(len(tags), len(response.get("tags"))) for tag in tags: - self.assertIn(tag, response['tags']) + self.assertIn(tag, response["tags"]) class InheritedTagsTests(DojoAPITestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self, *args, **kwargs): super().setUp() self.login_as_admin() self.system_settings(enable_product_tag_inehritance=True) self.product = self.create_product("Inherited Tags Test", tags=["inherit", "these", "tags"]) - self.scans_path = '/scans/zap/' - self.zap_sample5_filename = f'{self.scans_path}5_zap_sample_one.xml' + self.scans_path = "/scans/zap/" + self.zap_sample5_filename = f"{self.scans_path}5_zap_sample_one.xml" def 
_convert_instance_tags_to_list(self, instance) -> list: return [tag.name for tag in instance.tags.all()] @@ -265,7 +265,7 @@ def _import_and_return_objects(self, test_id=None, reimport=False, tags=None) -> else: response = self.import_scan_with_params(self.zap_sample5_filename, engagement=engagement.id, tags=tags) - test_id = response['test'] + test_id = response["test"] test = Test.objects.get(id=test_id) finding = Finding.objects.filter(test=test).first() endpoint = finding.endpoints.all().first() diff --git a/unittests/test_tool_config.py b/unittests/test_tool_config.py index f807e61cf5..c35bd3f460 100644 --- a/unittests/test_tool_config.py +++ b/unittests/test_tool_config.py @@ -8,59 +8,59 @@ class TestApiScanConfigEntry(DojoTestCase): def setUp(self): - tool_type, _ = Tool_Type.objects.get_or_create(name='SonarQube') - Tool_Configuration.objects.get_or_create(name='SonarQube', tool_type=tool_type, authentication_type="API") + tool_type, _ = Tool_Type.objects.get_or_create(name="SonarQube") + Tool_Configuration.objects.get_or_create(name="SonarQube", tool_type=tool_type, authentication_type="API") def test_base(self): acsh = get_api_scan_configuration_hints() self.assertEqual(len(acsh), 6, acsh) i = 0 - with self.subTest('BlackDuck API'): - self.assertEqual(acsh[i]['name'], 'BlackDuck API') - self.assertEqual(acsh[i]['tool_type_name'], 'BlackDuck API') - self.assertEqual(acsh[i]['hint'], 'the field Service key 1 has to be set to ID of the project from which to import findings. Service key 2 has to be set to the version of the project') + with self.subTest("BlackDuck API"): + self.assertEqual(acsh[i]["name"], "BlackDuck API") + self.assertEqual(acsh[i]["tool_type_name"], "BlackDuck API") + self.assertEqual(acsh[i]["hint"], "the field Service key 1 has to be set to ID of the project from which to import findings. Service key 2 has to be set to the version of the project") i += 1 - with self.subTest('Bugcrowd'): - self.assertEqual(acsh[i]['name'], 'Bugcrowd API Import') - self.assertEqual(acsh[i]['tool_type_name'], 'Bugcrowd API') - self.assertEqual(acsh[i]['hint'], 'the field Service key 1 has to be set with the Bugcrowd program code. Service key 2 can be set with the target in the Bugcrowd program (will be url encoded for the api call), if not supplied, will fetch all submissions in the program') + with self.subTest("Bugcrowd"): + self.assertEqual(acsh[i]["name"], "Bugcrowd API Import") + self.assertEqual(acsh[i]["tool_type_name"], "Bugcrowd API") + self.assertEqual(acsh[i]["hint"], "the field Service key 1 has to be set with the Bugcrowd program code. Service key 2 can be set with the target in the Bugcrowd program (will be url encoded for the api call), if not supplied, will fetch all submissions in the program") i += 1 - with self.subTest('Cobalt.io'): - self.assertEqual(acsh[i]['name'], 'Cobalt.io API Import') - self.assertEqual(acsh[i]['tool_type_name'], 'Cobalt.io') - self.assertEqual(acsh[i]['hint'], 'the field Service key 1 has to be set with the Cobalt.io asset id. Service key 2 will be populated with the asset name while saving the configuration.') + with self.subTest("Cobalt.io"): + self.assertEqual(acsh[i]["name"], "Cobalt.io API Import") + self.assertEqual(acsh[i]["tool_type_name"], "Cobalt.io") + self.assertEqual(acsh[i]["hint"], "the field Service key 1 has to be set with the Cobalt.io asset id. 
Service key 2 will be populated with the asset name while saving the configuration.") i += 1 - with self.subTest('Edgescan'): - self.assertEqual(acsh[i]['name'], 'Edgescan Scan') - self.assertEqual(acsh[i]['tool_type_name'], 'Edgescan') - self.assertEqual(acsh[i]['hint'], 'In the field Service key 1, provide the Edgescan asset ID(s). Leaving it blank will import all assets\' findings.') + with self.subTest("Edgescan"): + self.assertEqual(acsh[i]["name"], "Edgescan Scan") + self.assertEqual(acsh[i]["tool_type_name"], "Edgescan") + self.assertEqual(acsh[i]["hint"], "In the field Service key 1, provide the Edgescan asset ID(s). Leaving it blank will import all assets' findings.") i += 1 - with self.subTest('SonarQube'): - self.assertEqual(acsh[i]['name'], 'SonarQube API Import') - self.assertEqual(acsh[i]['tool_type_name'], 'SonarQube') - self.assertEqual(acsh[i]['hint'], 'the field Service key 1 has to be set with the SonarQube project key. Service key 2 can be used for the Organization ID if using SonarCloud.') + with self.subTest("SonarQube"): + self.assertEqual(acsh[i]["name"], "SonarQube API Import") + self.assertEqual(acsh[i]["tool_type_name"], "SonarQube") + self.assertEqual(acsh[i]["hint"], "the field Service key 1 has to be set with the SonarQube project key. Service key 2 can be used for the Organization ID if using SonarCloud.") i += 1 - with self.subTest('Vulners'): - self.assertEqual(acsh[i]['name'], 'Vulners') - self.assertEqual(acsh[i]['tool_type_name'], 'Vulners') - self.assertEqual(acsh[i]['hint'], 'the field Service key 1 has to be set with the Vulners API key.') + with self.subTest("Vulners"): + self.assertEqual(acsh[i]["name"], "Vulners") + self.assertEqual(acsh[i]["tool_type_name"], "Vulners") + self.assertEqual(acsh[i]["hint"], "the field Service key 1 has to be set with the Vulners API key.") def test_counts(self): acsh = get_api_scan_configuration_hints() - self.assertEqual(acsh[1]['tool_types'].count(), 0) - self.assertEqual(acsh[1]['tool_configurations'].count(), 0) - self.assertEqual(acsh[4]['tool_types'].count(), 1) - self.assertEqual(acsh[4]['tool_configurations'].count(), 1) + self.assertEqual(acsh[1]["tool_types"].count(), 0) + self.assertEqual(acsh[1]["tool_configurations"].count(), 0) + self.assertEqual(acsh[4]["tool_types"].count(), 1) + self.assertEqual(acsh[4]["tool_configurations"].count(), 1) def test_has_functions(self): for parser_name, parser in PARSERS.items(): - if parser.__module__.startswith('dojo.tools.api_'): + if parser.__module__.startswith("dojo.tools.api_"): with self.subTest(parser_name): self.assertTrue(hasattr(parser, "requires_tool_type"), "All API parsers should have function 'requires_tool_type'") diff --git a/unittests/test_user_queries.py b/unittests/test_user_queries.py index 591ff52307..b4477f0f50 100644 --- a/unittests/test_user_queries.py +++ b/unittests/test_user_queries.py @@ -12,36 +12,36 @@ class TestUserQueries(DojoTestCase): def setUp(self): super().setUp() - self.product_type_1 = Product_Type(name='product_type_1') + self.product_type_1 = Product_Type(name="product_type_1") self.product_type_1.save() - self.product_1 = Product(name='product_1', prod_type=self.product_type_1) + self.product_1 = Product(name="product_1", prod_type=self.product_type_1) self.product_1.save() - self.product_type_2 = Product_Type(name='product_type_2') + self.product_type_2 = Product_Type(name="product_type_2") self.product_type_2.save() - self.product_2 = Product(name='product_2', prod_type=self.product_type_2) + self.product_2 = 
Product(name="product_2", prod_type=self.product_type_2) self.product_2.save() - self.admin_user = Dojo_User(username='admin_user', is_superuser=True) + self.admin_user = Dojo_User(username="admin_user", is_superuser=True) self.admin_user.save() - self.global_permission_user = Dojo_User(username='global_permission_user') + self.global_permission_user = Dojo_User(username="global_permission_user") self.global_permission_user.save() - Global_Role(user=self.global_permission_user, role=Role.objects.get(name='Reader')).save() + Global_Role(user=self.global_permission_user, role=Role.objects.get(name="Reader")).save() - self.regular_user = Dojo_User(username='regular_user') + self.regular_user = Dojo_User(username="regular_user") self.regular_user.save() - Product_Member(user=self.regular_user, product=self.product_1, role=Role.objects.get(name='Owner')).save() - Product_Type_Member(user=self.regular_user, product_type=self.product_type_2, role=Role.objects.get(name='Writer')).save() + Product_Member(user=self.regular_user, product=self.product_1, role=Role.objects.get(name="Owner")).save() + Product_Type_Member(user=self.regular_user, product_type=self.product_type_2, role=Role.objects.get(name="Writer")).save() - self.product_user = Dojo_User(username='product_user') + self.product_user = Dojo_User(username="product_user") self.product_user.save() - Product_Member(user=self.product_user, product=self.product_1, role=Role.objects.get(name='Reader')).save() + Product_Member(user=self.product_user, product=self.product_1, role=Role.objects.get(name="Reader")).save() - self.product_type_user = Dojo_User(username='product_type_user') + self.product_type_user = Dojo_User(username="product_type_user") self.product_type_user.save() - Product_Member(user=self.product_type_user, product=self.product_2, role=Role.objects.get(name='Maintainer')).save() + Product_Member(user=self.product_type_user, product=self.product_2, role=Role.objects.get(name="Maintainer")).save() - self.invisible_user = Dojo_User(username='invisible_user') + self.invisible_user = Dojo_User(username="invisible_user") self.invisible_user.save() def tearDown(self): @@ -55,31 +55,31 @@ def tearDown(self): self.product_type_user.delete() self.invisible_user.delete() - @patch('dojo.user.queries.get_current_user') + @patch("dojo.user.queries.get_current_user") def test_user_none(self, mock_current_user): mock_current_user.return_value = None self.assertQuerySetEqual(Dojo_User.objects.none(), get_authorized_users(Permissions.Product_View)) - @patch('dojo.user.queries.get_current_user') + @patch("dojo.user.queries.get_current_user") def test_user_admin(self, mock_current_user): mock_current_user.return_value = self.admin_user - users = Dojo_User.objects.all().order_by('first_name', 'last_name', 'username') + users = Dojo_User.objects.all().order_by("first_name", "last_name", "username") self.assertQuerySetEqual(users, get_authorized_users(Permissions.Product_View)) - @patch('dojo.user.queries.get_current_user') + @patch("dojo.user.queries.get_current_user") def test_user_global_permission(self, mock_current_user): mock_current_user.return_value = self.global_permission_user - users = Dojo_User.objects.all().order_by('first_name', 'last_name', 'username') + users = Dojo_User.objects.all().order_by("first_name", "last_name", "username") self.assertQuerySetEqual(users, get_authorized_users(Permissions.Product_View)) - @patch('dojo.user.queries.get_current_user') - @patch('dojo.product.queries.get_current_user') + 
@patch("dojo.user.queries.get_current_user") + @patch("dojo.product.queries.get_current_user") def test_user_regular(self, mock_current_user_1, mock_current_user_2): mock_current_user_1.return_value = self.regular_user mock_current_user_2.return_value = self.regular_user - users = Dojo_User.objects.exclude(username='invisible_user').order_by('first_name', 'last_name', 'username') + users = Dojo_User.objects.exclude(username="invisible_user").order_by("first_name", "last_name", "username") self.assertQuerySetEqual(users, get_authorized_users(Permissions.Product_View)) diff --git a/unittests/test_user_validators.py b/unittests/test_user_validators.py index 265a529a45..2fd8afbbd7 100644 --- a/unittests/test_user_validators.py +++ b/unittests/test_user_validators.py @@ -51,113 +51,113 @@ def setUp(self): self.user.save() def test_validator_minimum_password_length(self): - with self.subTest(policy='minimum_password_length 1≥0'): + with self.subTest(policy="minimum_password_length 1≥0"): self.set_policy(minimum_password_length=0) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='minimum_password_length 1≥1'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="minimum_password_length 1≥1"): self.set_policy(minimum_password_length=1) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='minimum_password_length 1≱2'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="minimum_password_length 1≱2"): self.set_policy(minimum_password_length=2) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'Password must be at least 2 characters long.') + form.errors["new_password"][0], + "Password must be at least 2 characters long.") def test_validator_maximum_password_length(self): - with self.subTest(policy='maximum_password_length 1≤2'): + with self.subTest(policy="maximum_password_length 1≤2"): self.set_policy(maximum_password_length=2) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='maximum_password_length 1≤1'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="maximum_password_length 1≤1"): self.set_policy(maximum_password_length=1) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='maximum_password_length 2≰1'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="maximum_password_length 2≰1"): self.set_policy(maximum_password_length=0) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'Password must be less than 0 characters long.') + form.errors["new_password"][0], + "Password must be less than 0 characters long.") def test_validator_number_character_required(self): - with self.subTest(policy='number_character_required=False'): + with self.subTest(policy="number_character_required=False"): self.set_policy(number_character_required=False) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='number_character_required=True'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="number_character_required=True"): self.set_policy(number_character_required=True) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'Password must contain at least 1 digit, 0-9.') + 
form.errors["new_password"][0], + "Password must contain at least 1 digit, 0-9.") def test_validator_special_character_required(self): - with self.subTest(policy='special_character_required=False'): + with self.subTest(policy="special_character_required=False"): self.set_policy(special_character_required=False) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='special_character_required=True'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="special_character_required=True"): self.set_policy(special_character_required=True) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], + form.errors["new_password"][0], """The password must contain at least 1 special character, ()[]{}|`~!@#$%^&*_-+=;:'",<>./?.""") def test_validator_lowercase_character_required(self): - with self.subTest(policy='lowercase_character_required=False'): + with self.subTest(policy="lowercase_character_required=False"): self.set_policy(lowercase_character_required=False) - self.assertTrue(self.form_test('X').is_valid()) - with self.subTest(policy='lowercase_character_required=True'): + self.assertTrue(self.form_test("X").is_valid()) + with self.subTest(policy="lowercase_character_required=True"): self.set_policy(lowercase_character_required=True) - form = self.form_test('X') + form = self.form_test("X") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'Password must contain at least 1 lowercase letter, a-z.') + form.errors["new_password"][0], + "Password must contain at least 1 lowercase letter, a-z.") def test_validator_uppercase_character_required(self): - with self.subTest(policy='uppercase_character_required=False'): + with self.subTest(policy="uppercase_character_required=False"): self.set_policy(uppercase_character_required=False) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='uppercase_character_required=True'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="uppercase_character_required=True"): self.set_policy(uppercase_character_required=True) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'Password must contain at least 1 uppercase letter, A-Z.') + form.errors["new_password"][0], + "Password must contain at least 1 uppercase letter, A-Z.") def test_validator_non_common_password_required(self): - with self.subTest(policy='non_common_password_required=False'): + with self.subTest(policy="non_common_password_required=False"): self.set_policy(non_common_password_required=False) - self.assertTrue(self.form_test('x').is_valid()) - with self.subTest(policy='non_common_password_required=True'): + self.assertTrue(self.form_test("x").is_valid()) + with self.subTest(policy="non_common_password_required=True"): self.set_policy(non_common_password_required=True) - form = self.form_test('x') + form = self.form_test("x") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['new_password'][0], - 'This password is too common.') + form.errors["new_password"][0], + "This password is too common.") def test_form_invalid_current_pass(self): self.set_policy() - form = self.form_test('x', current_password='not current password') + form = self.form_test("x", current_password="not current password") self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['__all__'][0], - 
'Current password is incorrect.') + form.errors["__all__"][0], + "Current password is incorrect.") def test_form_same_pass_as_before(self): self.set_policy() form = self.form_test(self.current_password) self.assertFalse(form.is_valid()) self.assertEqual( - form.errors['__all__'][0], - 'New password must be different from current password.') + form.errors["__all__"][0], + "New password must be different from current password.") def test_form_diff_confirm_password(self): self.set_policy() - form = self.form_test(password='x', confirm_password='y') + form = self.form_test(password="x", confirm_password="y") self.assertFalse(form.is_valid()) - self.assertEqual(form.errors['__all__'][0], 'Passwords do not match.') + self.assertEqual(form.errors["__all__"][0], "Passwords do not match.") diff --git a/unittests/test_utils.py b/unittests/test_utils.py index 3bf031ba10..c0ce833149 100644 --- a/unittests/test_utils.py +++ b/unittests/test_utils.py @@ -55,9 +55,9 @@ def test_encryption(self): test_output = prepare_for_view(encrypt) self.assertEqual(test_input, test_output) - @patch('dojo.models.System_Settings.objects') - @patch('dojo.utils.Dojo_Group_Member') - @patch('dojo.utils.Notifications') + @patch("dojo.models.System_Settings.objects") + @patch("dojo.utils.Dojo_Group_Member") + @patch("dojo.utils.Notifications") def test_user_post_save_without_template(self, mock_notifications, mock_member, mock_settings): user = Dojo_User() user.id = 1 @@ -87,9 +87,9 @@ def test_user_post_save_without_template(self, mock_notifications, mock_member, mock_notifications.assert_called_with(user=user) save_mock_notifications.save.assert_called_once() - @patch('dojo.models.System_Settings.objects') - @patch('dojo.utils.Dojo_Group_Member') - @patch('dojo.utils.Notifications') + @patch("dojo.models.System_Settings.objects") + @patch("dojo.utils.Dojo_Group_Member") + @patch("dojo.utils.Notifications") def test_user_post_save_with_template(self, mock_notifications, mock_member, mock_settings): user = Dojo_User() user.id = 1 @@ -119,13 +119,13 @@ def test_user_post_save_with_template(self, mock_notifications, mock_member, moc mock_notifications.objects.get.assert_called_with(template=True) template.save.assert_called_once() - @patch('dojo.models.System_Settings.objects') - @patch('dojo.utils.Dojo_Group_Member') - @patch('dojo.utils.Notifications') + @patch("dojo.models.System_Settings.objects") + @patch("dojo.utils.Dojo_Group_Member") + @patch("dojo.utils.Notifications") def test_user_post_save_email_pattern_matches(self, mock_notifications, mock_member, mock_settings): user = Dojo_User() user.id = 1 - user.email = 'john.doe@example.com' + user.email = "john.doe@example.com" group = Dojo_Group() group.id = 1 @@ -135,7 +135,7 @@ def test_user_post_save_email_pattern_matches(self, mock_notifications, mock_mem system_settings_group = System_Settings() system_settings_group.default_group = group system_settings_group.default_group_role = role - system_settings_group.default_group_email_pattern = '.*@example.com' + system_settings_group.default_group_email_pattern = ".*@example.com" mock_settings.get.return_value = system_settings_group save_mock_member = Mock(return_value=Dojo_Group_Member()) @@ -149,13 +149,13 @@ def test_user_post_save_email_pattern_matches(self, mock_notifications, mock_mem mock_member.assert_called_with(group=group, user=user, role=role) save_mock_member.save.assert_called_once() - @patch('dojo.models.System_Settings.objects') - @patch('dojo.utils.Dojo_Group_Member') - @patch('dojo.utils.Notifications') 
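# Reminder on argument order for the stacked @patch decorators that follow:
# mock.patch applies bottom-up, so the innermost decorator (Notifications)
# supplies the first mock argument (mock_notifications), then Dojo_Group_Member
# (mock_member), and finally System_Settings.objects (mock_settings).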
+ @patch("dojo.models.System_Settings.objects") + @patch("dojo.utils.Dojo_Group_Member") + @patch("dojo.utils.Notifications") def test_user_post_save_email_pattern_does_not_match(self, mock_notifications, mock_member, mock_settings): user = Dojo_User() user.id = 1 - user.email = 'john.doe@partner.example.com' + user.email = "john.doe@partner.example.com" group = Dojo_Group() group.id = 1 @@ -165,7 +165,7 @@ def test_user_post_save_email_pattern_does_not_match(self, mock_notifications, m system_settings_group = System_Settings() system_settings_group.default_group = group system_settings_group.default_group_role = role - system_settings_group.default_group_email_pattern = '.*@example.com' + system_settings_group.default_group_email_pattern = ".*@example.com" save_mock_notifications = Mock(return_value=Notifications()) mock_notifications.return_value = save_mock_notifications mock_notifications.objects.get.side_effect = Exception("Mock no templates") @@ -198,7 +198,7 @@ def __exit__(self, exc_type, exc_value, exc_traceback): self.test_case.assertEqual( created_count, self.num, "%i %s objects created, %i expected. query: %s, first 100 objects: %s" % ( - created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by('-id')[:100], + created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by("-id")[:100], ), ) @@ -246,8 +246,8 @@ def assertImportModelsCreated(test_case, tests=0, engagements=0, products=0, pro class TestSettings(DojoTestCase): def test_settings_integrity(self): - with Path('dojo/settings/settings.dist.py').open('rb') as file: + with Path("dojo/settings/settings.dist.py").open("rb") as file: real_hash = hashlib.sha256(file.read()).hexdigest() - with Path('dojo/settings/.settings.dist.py.sha256sum').open('rb') as file: + with Path("dojo/settings/.settings.dist.py.sha256sum").open("rb") as file: expected_hash = file.read().decode().strip() self.assertEqual(expected_hash, real_hash, "File settings.dist.py was changed but checksum has not been updated. If this is part of a PR, update the sha256sum value in '.settings.dist.py.sha256sum'. 
If you are modifying this to configure your instance, revert your changes and use environment variables or 'local_settings.py'") diff --git a/unittests/test_utils_deduplication_reopen.py b/unittests/test_utils_deduplication_reopen.py index 47f7a8c3f4..1876deefe3 100644 --- a/unittests/test_utils_deduplication_reopen.py +++ b/unittests/test_utils_deduplication_reopen.py @@ -11,7 +11,7 @@ class TestDuplicationReopen(DojoTestCase): - fixtures = ['dojo_testdata.json'] + fixtures = ["dojo_testdata.json"] def setUp(self): self.finding_a = Finding.objects.get(id=2) @@ -36,13 +36,13 @@ def setUp(self): self.finding_c.active = False self.finding_c.duplicate_finding = None self.finding_c.pk = None - logger.debug('creating finding_c') + logger.debug("creating finding_c") self.finding_c.save() self.finding_d = Finding.objects.get(id=5) self.finding_d.duplicate = False self.finding_d.duplicate_finding = None self.finding_d.pk = None - logger.debug('creating finding_d') + logger.debug("creating finding_d") self.finding_d.save() def tearDown(self): @@ -85,27 +85,27 @@ def test_false_positive_reopen(self): self.assertFalse(self.finding_b.verified) def test_out_of_scope_reopen(self): - logger.debug('c: is_mitigated1: %s', self.finding_c.is_mitigated) - logger.debug('d: is_mitigated1: %s', self.finding_d.is_mitigated) + logger.debug("c: is_mitigated1: %s", self.finding_c.is_mitigated) + logger.debug("d: is_mitigated1: %s", self.finding_d.is_mitigated) self.finding_c.active = False self.finding_c.verified = False - logger.debug('set_duplicate(d,c)') + logger.debug("set_duplicate(d,c)") set_duplicate(self.finding_d, self.finding_c) - logger.debug('c: is_mitigated2: %s', self.finding_c.is_mitigated) - logger.debug('d: is_mitigated2: %s', self.finding_d.is_mitigated) + logger.debug("c: is_mitigated2: %s", self.finding_c.is_mitigated) + logger.debug("d: is_mitigated2: %s", self.finding_d.is_mitigated) # self.finding_d.duplicate = True # self.finding_d.duplicate_finding = self.finding_c - logger.debug('saving finding_c') + logger.debug("saving finding_c") super(Finding, self.finding_c).save() - logger.debug('saving finding_d') + logger.debug("saving finding_d") super(Finding, self.finding_d).save() - logger.debug('c: is_mitigated3: %s', self.finding_c.is_mitigated) - logger.debug('d: is_mitigated3: %s', self.finding_d.is_mitigated) + logger.debug("c: is_mitigated3: %s", self.finding_c.is_mitigated) + logger.debug("d: is_mitigated3: %s", self.finding_d.is_mitigated) candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count() self.assertEqual(candidates, 0) diff --git a/unittests/tools/test_acunetix_parser.py b/unittests/tools/test_acunetix_parser.py index eabe5e2322..21f83af527 100644 --- a/unittests/tools/test_acunetix_parser.py +++ b/unittests/tools/test_acunetix_parser.py @@ -31,10 +31,10 @@ def test_parse_file_with_one_finding(self): # check endpoints self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('https', endpoint.protocol) + self.assertEqual("https", endpoint.protocol) self.assertEqual(443, endpoint.port) - self.assertEqual('vijaytest.com', endpoint.host) - self.assertEqual('some/path', endpoint.path) + self.assertEqual("vijaytest.com", endpoint.host) + self.assertEqual("some/path", endpoint.path) def test_parse_file_with_multiple_finding(self): with open("unittests/scans/acunetix/many_findings.xml") as testfile: @@ -60,17 +60,17 @@ def test_parse_file_with_multiple_finding(self): endpoint = 
finding.unsaved_endpoints[0] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('www.itsecgames.com', endpoint.host) + self.assertEqual("www.itsecgames.com", endpoint.host) self.assertIsNone(endpoint.path) # check req/resp self.assertEqual(1, len(finding.unsaved_req_resp)) req_resp = finding.unsaved_req_resp[0] - self.assertIn('req', req_resp) - self.assertIsNotNone(req_resp['req']) - self.assertIsInstance(req_resp['req'], str) - self.assertIn('resp', req_resp) - self.assertIsNotNone(req_resp['resp']) - self.assertIsInstance(req_resp['resp'], str) + self.assertIn("req", req_resp) + self.assertIsNotNone(req_resp["req"]) + self.assertIsInstance(req_resp["req"], str) + self.assertIn("resp", req_resp) + self.assertIsNotNone(req_resp["resp"]) + self.assertIsInstance(req_resp["resp"], str) with self.subTest(i=1): finding = findings[1] @@ -90,17 +90,17 @@ def test_parse_file_with_multiple_finding(self): endpoint = finding.unsaved_endpoints[0] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('www.itsecgames.com', endpoint.host) + self.assertEqual("www.itsecgames.com", endpoint.host) self.assertIsNone(endpoint.path) # check req/resp self.assertEqual(1, len(finding.unsaved_req_resp)) req_resp = finding.unsaved_req_resp[0] - self.assertIn('req', req_resp) - self.assertIsNotNone(req_resp['req']) - self.assertIsInstance(req_resp['req'], str) - self.assertIn('resp', req_resp) - self.assertIsNotNone(req_resp['resp']) - self.assertIsInstance(req_resp['resp'], str) + self.assertIn("req", req_resp) + self.assertIsNotNone(req_resp["req"]) + self.assertIsInstance(req_resp["req"], str) + self.assertIn("resp", req_resp) + self.assertIsNotNone(req_resp["resp"]) + self.assertIsInstance(req_resp["resp"], str) with self.subTest(i=2): finding = findings[2] @@ -119,17 +119,17 @@ def test_parse_file_with_multiple_finding(self): endpoint = finding.unsaved_endpoints[0] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('www.itsecgames.com', endpoint.host) + self.assertEqual("www.itsecgames.com", endpoint.host) self.assertIsNone(endpoint.path) # check req/resp self.assertEqual(1, len(finding.unsaved_req_resp)) req_resp = finding.unsaved_req_resp[0] - self.assertIn('req', req_resp) - self.assertIsNotNone(req_resp['req']) - self.assertIsInstance(req_resp['req'], str) - self.assertIn('resp', req_resp) - self.assertIsNotNone(req_resp['resp']) - self.assertIsInstance(req_resp['resp'], str) + self.assertIn("req", req_resp) + self.assertIsNotNone(req_resp["req"]) + self.assertIsInstance(req_resp["req"], str) + self.assertIn("resp", req_resp) + self.assertIsNotNone(req_resp["resp"]) + self.assertIsInstance(req_resp["resp"], str) def test_parse_file_with_example_com(self): with open("unittests/scans/acunetix/XML_http_example_co_id_.xml") as testfile: @@ -158,22 +158,22 @@ def test_parse_file_with_example_com(self): endpoint = finding.unsaved_endpoints[0] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('example.co.id', endpoint.host) - self.assertEqual('h/search', endpoint.path) + self.assertEqual("example.co.id", endpoint.host) + self.assertEqual("h/search", endpoint.path) endpoint = finding.unsaved_endpoints[1] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('example.co.id', endpoint.host) - self.assertEqual('m/zmain', endpoint.path) + self.assertEqual("example.co.id", endpoint.host) + self.assertEqual("m/zmain", endpoint.path) # 
check req/resp self.assertEqual(3, len(finding.unsaved_req_resp)) for req_resp in finding.unsaved_req_resp: - self.assertIn('req', req_resp) - self.assertIsNotNone(req_resp['req']) - self.assertIsInstance(req_resp['req'], str) - self.assertIn('resp', req_resp) - self.assertIsNotNone(req_resp['resp']) - self.assertIsInstance(req_resp['resp'], str) + self.assertIn("req", req_resp) + self.assertIsNotNone(req_resp["req"]) + self.assertIsInstance(req_resp["req"], str) + self.assertIn("resp", req_resp) + self.assertIsNotNone(req_resp["resp"]) + self.assertIsInstance(req_resp["resp"], str) with self.subTest(i=6): finding = findings[6] @@ -191,17 +191,17 @@ def test_parse_file_with_example_com(self): endpoint = finding.unsaved_endpoints[0] self.assertIsNone(endpoint.protocol) self.assertIsNone(endpoint.port) - self.assertEqual('example.co.id', endpoint.host) + self.assertEqual("example.co.id", endpoint.host) self.assertIsNone(endpoint.path) # check req/resp self.assertEqual(1, len(finding.unsaved_req_resp)) req_resp = finding.unsaved_req_resp[0] - self.assertIn('req', req_resp) - self.assertIsNotNone(req_resp['req']) - self.assertIsInstance(req_resp['req'], str) - self.assertIn('resp', req_resp) - self.assertIsNotNone(req_resp['resp']) - self.assertIsInstance(req_resp['resp'], str) + self.assertIn("req", req_resp) + self.assertIsNotNone(req_resp["req"]) + self.assertIsInstance(req_resp["req"], str) + self.assertIn("resp", req_resp) + self.assertIsNotNone(req_resp["resp"]) + self.assertIsInstance(req_resp["resp"], str) def test_parse_file_with_one_finding_acunetix360(self): with open("unittests/scans/acunetix/acunetix360_one_finding.json") as testfile: diff --git a/unittests/tools/test_anchore_engine_parser.py b/unittests/tools/test_anchore_engine_parser.py index ee4f2ae509..8a94ee27e3 100644 --- a/unittests/tools/test_anchore_engine_parser.py +++ b/unittests/tools/test_anchore_engine_parser.py @@ -29,8 +29,8 @@ def test_anchore_engine_parser_has_many_findings_2_4_1(self): self.assertEqual(51, len(findings)) finding = findings[50] self.assertEqual("CVE-2020-13776", finding.vuln_id_from_tool) - self.assertEqual('systemd-pam', finding.component_name) - self.assertEqual('239-41.el8_3.1', finding.component_version) + self.assertEqual("systemd-pam", finding.component_name) + self.assertEqual("239-41.el8_3.1", finding.component_version) self.assertEqual(6.7, finding.cvssv3_score) self.assertEqual(1, len(finding.unsaved_vulnerability_ids)) self.assertEqual("CVE-2020-13776", finding.unsaved_vulnerability_ids[0]) diff --git a/unittests/tools/test_anchore_grype_parser.py b/unittests/tools/test_anchore_grype_parser.py index 4112837dca..c243654fa8 100644 --- a/unittests/tools/test_anchore_grype_parser.py +++ b/unittests/tools/test_anchore_grype_parser.py @@ -24,7 +24,7 @@ def test_parser_has_many_findings(self): if finding.vuln_id_from_tool == "CVE-2011-3389": vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('CVE-2011-3389', vulnerability_ids[0]) + self.assertEqual("CVE-2011-3389", vulnerability_ids[0]) self.assertEqual("Medium", finding.severity) self.assertEqual("libgnutls-openssl27", finding.component_name) self.assertEqual("3.6.7-4+deb10u5", finding.component_version) @@ -46,7 +46,7 @@ def test_grype_parser_with_one_criticle_vuln_has_one_findings(self): if finding.vuln_id_from_tool == "CVE-2019-9192": vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('CVE-2019-9192', 
vulnerability_ids[0]) + self.assertEqual("CVE-2019-9192", vulnerability_ids[0]) self.assertEqual("libc6-dev", finding.component_name) self.assertEqual("2.28-10", finding.component_version) self.assertEqual("Info", finding.severity) @@ -67,7 +67,7 @@ def test_grype_parser_with_many_vulns3(self): if finding.vuln_id_from_tool == "CVE-2011-3389": vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('CVE-2011-3389', vulnerability_ids[0]) + self.assertEqual("CVE-2011-3389", vulnerability_ids[0]) self.assertEqual("Medium", finding.severity) self.assertEqual("libgnutls30", finding.component_name) self.assertEqual("3.6.7-4+deb10u5", finding.component_version) @@ -88,7 +88,7 @@ def test_grype_parser_with_new_matcher_list(self): if finding.vuln_id_from_tool == "CVE-1999-1338": vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('CVE-1999-1338', vulnerability_ids[0]) + self.assertEqual("CVE-1999-1338", vulnerability_ids[0]) self.assertEqual("Medium", finding.severity) self.assertIn("javascript-matcher", finding.description) self.assertEqual("delegate", finding.component_name) @@ -103,7 +103,7 @@ def test_check_all_fields(self): self.assertEqual(5, len(findings)) finding = findings[0] - self.assertEqual('CVE-2004-0971 in libgssapi-krb5-2:1.17-3+deb10u3', finding.title) + self.assertEqual("CVE-2004-0971 in libgssapi-krb5-2:1.17-3+deb10u3", finding.title) description = """**Vulnerability Namespace:** debian:10 **Related Vulnerability Description:** The krb5-send-pr script in the kerberos5 (krb5) package in Trustix Secure Linux 1.5 through 2.1, and possibly other operating systems, allows local users to overwrite files via a symlink attack on temporary files. 
**Matcher:** dpkg-matcher @@ -111,12 +111,12 @@ def test_check_all_fields(self): self.assertEqual(description, finding.description) vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(2, len(vulnerability_ids)) - self.assertEqual('CVE-2004-0971', vulnerability_ids[0]) - self.assertEqual('CVE-2004-0971', vulnerability_ids[1]) + self.assertEqual("CVE-2004-0971", vulnerability_ids[0]) + self.assertEqual("CVE-2004-0971", vulnerability_ids[1]) self.assertEqual(1352, finding.cwe) self.assertIsNone(finding.cvssv3) self.assertIsNone(finding.cvssv3_score) - self.assertEqual('Info', finding.severity) + self.assertEqual("Info", finding.severity) self.assertIsNone(finding.mitigation) references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2004-0971 **Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2004-0971 @@ -130,14 +130,14 @@ def test_check_all_fields(self): - https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A10497 - https://lists.apache.org/thread.html/rc713534b10f9daeee2e0990239fa407e2118e4aa9e88a7041177497c@%3Cissues.guacamole.apache.org%3E""" self.assertEqual(references, finding.references) - self.assertEqual('libgssapi-krb5-2', finding.component_name) - self.assertEqual('1.17-3+deb10u3', finding.component_version) - self.assertEqual('CVE-2004-0971', finding.vuln_id_from_tool) - self.assertEqual(['dpkg'], finding.tags) + self.assertEqual("libgssapi-krb5-2", finding.component_name) + self.assertEqual("1.17-3+deb10u3", finding.component_version) + self.assertEqual("CVE-2004-0971", finding.vuln_id_from_tool) + self.assertEqual(["dpkg"], finding.tags) self.assertEqual(1, finding.nb_occurences) finding = findings[1] - self.assertEqual('CVE-2021-32626 in redis:4.0.2', finding.title) + self.assertEqual("CVE-2021-32626 in redis:4.0.2", finding.title) description = """**Vulnerability Namespace:** nvd **Vulnerability Description:** Redis is an open source, in-memory database that persists on disk. In affected versions specially crafted Lua scripts executing in Redis can cause the heap-based Lua stack to be overflowed, due to incomplete checks for this condition. This can result with heap corruption and potentially remote code execution. This problem exists in all versions of Redis with Lua scripting support, starting from 2.6. The problem is fixed in versions 6.2.6, 6.0.16 and 5.0.14. For users unable to update an additional workaround to mitigate the problem without patching the redis-server executable is to prevent users from executing Lua scripts. This can be done using ACL to restrict EVAL and EVALSHA commands. 
**Matchers:** @@ -147,10 +147,10 @@ def test_check_all_fields(self): self.assertEqual(description, finding.description) vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('CVE-2021-32626', vulnerability_ids[0]) + self.assertEqual("CVE-2021-32626", vulnerability_ids[0]) self.assertEqual(1352, finding.cwe) - self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3) - self.assertEqual('High', finding.severity) + self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3) + self.assertEqual("High", finding.severity) mitigation = """Upgrade to version: - fix_1 - fix_2""" @@ -166,14 +166,14 @@ def test_check_all_fields(self): - https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/WR5WKJWXD4D6S3DJCZ56V74ESLTDQRAB/ - https://www.debian.org/security/2021/dsa-5001""" self.assertEqual(references, finding.references) - self.assertEqual('redis', finding.component_name) - self.assertEqual('4.0.2', finding.component_version) - self.assertEqual('CVE-2021-32626', finding.vuln_id_from_tool) - self.assertEqual(['python', 'python2'], finding.tags) + self.assertEqual("redis", finding.component_name) + self.assertEqual("4.0.2", finding.component_version) + self.assertEqual("CVE-2021-32626", finding.vuln_id_from_tool) + self.assertEqual(["python", "python2"], finding.tags) self.assertEqual(1, finding.nb_occurences) finding = findings[2] - self.assertEqual('CVE-2021-33574 in libc-bin:2.28-10', finding.title) + self.assertEqual("CVE-2021-33574 in libc-bin:2.28-10", finding.title) description = """**Vulnerability Namespace:** debian:10 **Related Vulnerability Description:** The mq_notify function in the GNU C Library (aka glibc) versions 2.32 and 2.33 has a use-after-free. It may use the notification thread attributes object (passed through its struct sigevent parameter) after it has been freed by the caller, leading to a denial of service (application crash) or possibly unspecified other impact. 
**Matcher:** dpkg-matcher @@ -181,11 +181,11 @@ def test_check_all_fields(self): self.assertEqual(description, finding.description) vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(2, len(vulnerability_ids)) - self.assertEqual('CVE-2021-33574', vulnerability_ids[0]) - self.assertEqual('CVE-2021-33574', vulnerability_ids[1]) + self.assertEqual("CVE-2021-33574", vulnerability_ids[0]) + self.assertEqual("CVE-2021-33574", vulnerability_ids[1]) self.assertEqual(1352, finding.cwe) - self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3) - self.assertEqual('Critical', finding.severity) + self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3) + self.assertEqual("Critical", finding.severity) self.assertIsNone(finding.mitigation) references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574 **Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-33574 @@ -197,14 +197,14 @@ def test_check_all_fields(self): - https://security.gentoo.org/glsa/202107-07 - https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/""" self.assertEqual(references, finding.references) - self.assertEqual('libc-bin', finding.component_name) - self.assertEqual('2.28-10', finding.component_version) - self.assertEqual('CVE-2021-33574', finding.vuln_id_from_tool) - self.assertEqual(['dpkg'], finding.tags) + self.assertEqual("libc-bin", finding.component_name) + self.assertEqual("2.28-10", finding.component_version) + self.assertEqual("CVE-2021-33574", finding.vuln_id_from_tool) + self.assertEqual(["dpkg"], finding.tags) self.assertEqual(1, finding.nb_occurences) finding = findings[3] - self.assertEqual('CVE-2021-33574 in libc6:2.28-10', finding.title) + self.assertEqual("CVE-2021-33574 in libc6:2.28-10", finding.title) description = """**Vulnerability Namespace:** debian:10 **Related Vulnerability Description:** The mq_notify function in the GNU C Library (aka glibc) versions 2.32 and 2.33 has a use-after-free. It may use the notification thread attributes object (passed through its struct sigevent parameter) after it has been freed by the caller, leading to a denial of service (application crash) or possibly unspecified other impact. 
**Matcher:** dpkg-matcher @@ -212,11 +212,11 @@ def test_check_all_fields(self): self.assertEqual(description, finding.description) vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(2, len(vulnerability_ids)) - self.assertEqual('CVE-2021-33574', vulnerability_ids[0]) - self.assertEqual('CVE-2021-33574', vulnerability_ids[1]) + self.assertEqual("CVE-2021-33574", vulnerability_ids[0]) + self.assertEqual("CVE-2021-33574", vulnerability_ids[1]) self.assertEqual(1352, finding.cwe) - self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3) - self.assertEqual('Critical', finding.severity) + self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3) + self.assertEqual("Critical", finding.severity) self.assertIsNone(finding.mitigation) references = """**Vulnerability Datasource:** https://security-tracker.debian.org/tracker/CVE-2021-33574 **Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-33574 @@ -228,14 +228,14 @@ def test_check_all_fields(self): - https://security.gentoo.org/glsa/202107-07 - https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/KJYYIMDDYOHTP2PORLABTOHYQYYREZDD/""" self.assertEqual(references, finding.references) - self.assertEqual('libc6', finding.component_name) - self.assertEqual('2.28-10', finding.component_version) - self.assertEqual('CVE-2021-33574', finding.vuln_id_from_tool) - self.assertEqual(['dpkg'], finding.tags) + self.assertEqual("libc6", finding.component_name) + self.assertEqual("2.28-10", finding.component_version) + self.assertEqual("CVE-2021-33574", finding.vuln_id_from_tool) + self.assertEqual(["dpkg"], finding.tags) self.assertEqual(1, finding.nb_occurences) finding = findings[4] - self.assertEqual('GHSA-v6rh-hp5x-86rv in Django:3.2.9', finding.title) + self.assertEqual("GHSA-v6rh-hp5x-86rv in Django:3.2.9", finding.title) description = """**Vulnerability Namespace:** github:python **Vulnerability Description:** Potential bypass of an upstream access control based on URL paths in Django **Related Vulnerability Description:** In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths. 
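Side note for reviewers working through these Grype hunks: every assertion block in this file follows the same parser-driving pattern. Below is a minimal standalone sketch of that pattern; the AnchoreGrypeParser import path and the scan-file name are assumptions, since the surrounding hunks do not show the test module's imports or fixture paths.

from dojo.models import Test
from dojo.tools.anchore_grype.parser import AnchoreGrypeParser  # assumed import path

# Assumed fixture name; the real tests load files from unittests/scans/<tool>/.
with open("unittests/scans/anchore_grype/check_all_fields.json") as testfile:
    findings = AnchoreGrypeParser().get_findings(testfile, Test())

for finding in findings:
    # A finding can carry both its primary id (e.g. a GHSA id) and the related
    # CVE alias in unsaved_vulnerability_ids, as the assertions here verify.
    print(finding.title, finding.severity, finding.unsaved_vulnerability_ids)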
@@ -244,12 +244,12 @@ def test_check_all_fields(self): self.assertEqual(description, finding.description) vulnerability_ids = finding.unsaved_vulnerability_ids self.assertEqual(2, len(vulnerability_ids)) - self.assertEqual('GHSA-v6rh-hp5x-86rv', vulnerability_ids[0]) - self.assertEqual('CVE-2021-44420', vulnerability_ids[1]) + self.assertEqual("GHSA-v6rh-hp5x-86rv", vulnerability_ids[0]) + self.assertEqual("CVE-2021-44420", vulnerability_ids[1]) self.assertEqual(1352, finding.cwe) - self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:L', finding.cvssv3) - self.assertEqual('High', finding.severity) - mitigation = 'Upgrade to version: 3.2.10' + self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:L", finding.cvssv3) + self.assertEqual("High", finding.severity) + mitigation = "Upgrade to version: 3.2.10" self.assertEqual(mitigation, finding.mitigation) references = """**Vulnerability Datasource:** https://github.com/advisories/GHSA-v6rh-hp5x-86rv **Related Vulnerability Datasource:** https://nvd.nist.gov/vuln/detail/CVE-2021-44420 @@ -259,10 +259,10 @@ def test_check_all_fields(self): - https://www.djangoproject.com/weblog/2021/dec/07/security-releases/ - https://groups.google.com/forum/#!forum/django-announce""" self.assertEqual(references, finding.references) - self.assertEqual('Django', finding.component_name) - self.assertEqual('3.2.9', finding.component_version) - self.assertEqual('GHSA-v6rh-hp5x-86rv', finding.vuln_id_from_tool) - self.assertEqual(['python'], finding.tags) + self.assertEqual("Django", finding.component_name) + self.assertEqual("3.2.9", finding.component_version) + self.assertEqual("GHSA-v6rh-hp5x-86rv", finding.vuln_id_from_tool) + self.assertEqual(["python"], finding.tags) self.assertEqual(2, finding.nb_occurences) def test_grype_issue_9618(self): diff --git a/unittests/tools/test_anchorectl_policies_parser.py b/unittests/tools/test_anchorectl_policies_parser.py index 0bb48d0521..14aa9187c8 100644 --- a/unittests/tools/test_anchorectl_policies_parser.py +++ b/unittests/tools/test_anchorectl_policies_parser.py @@ -16,9 +16,9 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) singleFinding = findings[0] - self.assertEqual(singleFinding.severity, 'Medium') - self.assertEqual(singleFinding.title, 'RootUser - gate|dockerfile - trigger|b2605c2ddbdb02b8e2365c9248dada5a') - self.assertEqual(singleFinding.description, 'User root found as effective user, which is not on the allowed list') + self.assertEqual(singleFinding.severity, "Medium") + self.assertEqual(singleFinding.title, "RootUser - gate|dockerfile - trigger|b2605c2ddbdb02b8e2365c9248dada5a") + self.assertEqual(singleFinding.description, "User root found as effective user, which is not on the allowed list") def test_anchore_engine_parser_has_many_findings(self): with open("unittests/scans/anchorectl_policies/many_violations.json") as testfile: diff --git a/unittests/tools/test_anchorectl_vulns_parser.py b/unittests/tools/test_anchorectl_vulns_parser.py index 384de7aba8..d3d7276cd6 100644 --- a/unittests/tools/test_anchorectl_vulns_parser.py +++ b/unittests/tools/test_anchorectl_vulns_parser.py @@ -16,9 +16,9 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) singleFinding = findings[0] - self.assertEqual(singleFinding.title, 'CVE-2011-3389 - 
libgnutls30-3.5.8-5+deb9u4(dpkg)') - self.assertEqual(singleFinding.severity, 'Medium') - self.assertEqual(singleFinding.description, '**Image hash**: None\n\n**Package**: libgnutls30-3.5.8-5+deb9u4\n\n**Package path**: None\n\n**Package type**: dpkg\n\n**Feed**: vulnerabilities/debian:9\n\n**CPE**: None\n\n**Description**: That test description\n\n') + self.assertEqual(singleFinding.title, "CVE-2011-3389 - libgnutls30-3.5.8-5+deb9u4(dpkg)") + self.assertEqual(singleFinding.severity, "Medium") + self.assertEqual(singleFinding.description, "**Image hash**: None\n\n**Package**: libgnutls30-3.5.8-5+deb9u4\n\n**Package path**: None\n\n**Package type**: dpkg\n\n**Feed**: vulnerabilities/debian:9\n\n**CPE**: None\n\n**Description**: That test description\n\n") def test_anchore_engine_parser_has_many_findings(self): with open("unittests/scans/anchorectl_vulns/many_vulns.json") as testfile: diff --git a/unittests/tools/test_api_bugcrowd_parser.py b/unittests/tools/test_api_bugcrowd_parser.py index 4433ea61ee..dd60565b6c 100644 --- a/unittests/tools/test_api_bugcrowd_parser.py +++ b/unittests/tools/test_api_bugcrowd_parser.py @@ -151,7 +151,7 @@ def test_parse_file_with_not_reproducible_finding(self): def test_parse_file_with_broken_bug_url(self): with open("unittests/scans/api_bugcrowd/bugcrowd_broken_bug_url.json") as testfile: parser = ApiBugcrowdParser() - with self.assertLogs('dojo.tools.api_bugcrowd.parser', level='ERROR') as cm: + with self.assertLogs("dojo.tools.api_bugcrowd.parser", level="ERROR") as cm: parser.get_findings(testfile, Test()) - self.assertEqual(cm.output, ['ERROR:dojo.tools.api_bugcrowd.parser:' - 'Error parsing bugcrowd bug_url : curl https://example.com/']) + self.assertEqual(cm.output, ["ERROR:dojo.tools.api_bugcrowd.parser:" + "Error parsing bugcrowd bug_url : curl https://example.com/"]) diff --git a/unittests/tools/test_api_cobalt_importer.py b/unittests/tools/test_api_cobalt_importer.py index 8361ac9b10..4423f40850 100644 --- a/unittests/tools/test_api_cobalt_importer.py +++ b/unittests/tools/test_api_cobalt_importer.py @@ -16,12 +16,12 @@ def setUpTestData(cls): cls.tool_type = Tool_Type() cls.tool_configuration = Tool_Configuration() cls.tool_configuration.tool_type = cls.tool_type - cls.tool_configuration.authentication_type = 'API' - cls.tool_configuration.api_key = 'API_KEY' - cls.tool_configuration.extras = 'EXTRAS' + cls.tool_configuration.authentication_type = "API" + cls.tool_configuration.api_key = "API_KEY" + cls.tool_configuration.extras = "EXTRAS" cls.product = Product() - cls.product.name = 'Product' + cls.product.name = "Product" cls.engagement = Engagement() cls.engagement.product = cls.product cls.test = Test() @@ -33,7 +33,7 @@ def setUpTestData(cls): cls.api_scan_configuration.tool_configuration = cls.tool_configuration cls.product_2 = Product() - cls.product_2.name = 'Product_2' + cls.product_2.name = "Product_2" cls.engagement_2 = Engagement() cls.engagement_2.product = cls.product_2 cls.test_2 = Test() @@ -44,9 +44,9 @@ def setUpTestData(cls): cls.test_2.api_scan_configuration = cls.api_scan_configuration_2 cls.api_scan_configuration_2.product = cls.product_2 cls.api_scan_configuration_2.tool_configuration = cls.tool_configuration - cls.api_scan_configuration_2.service_key_1 = 'SERVICE_KEY_1' + cls.api_scan_configuration_2.service_key_1 = "SERVICE_KEY_1" - cls.findings = json.dumps({'a': 1, 'b': 2}) + cls.findings = json.dumps({"a": 1, "b": 2}) def test_prepare_client_do_not_match(self): product_3 = Product() @@ -62,7 +62,7 @@ def 
test_prepare_client_do_not_match(self): cobalt_api_importer = CobaltApiImporter() cobalt_api_importer.prepare_client(test_3) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_more_than_one_configuration(self, mock_foo): mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 2 @@ -73,7 +73,7 @@ def test_prepare_client_more_than_one_configuration(self, mock_foo): mock_foo.filter.assert_called_with(product=self.product) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_no_configuration(self, mock_foo): mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 0 @@ -84,7 +84,7 @@ def test_prepare_client_no_configuration(self, mock_foo): mock_foo.filter.assert_called_with(product=self.product) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_one_product_configuration(self, mock_foo): mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 1 @@ -93,25 +93,25 @@ def test_prepare_client_one_product_configuration(self, mock_foo): cobalt_api_importer = CobaltApiImporter() cobalt_api, api_scan_configuration = cobalt_api_importer.prepare_client(self.test) - mock_foo.filter.assert_called_with(product=self.product, tool_configuration__tool_type__name='Cobalt.io') + mock_foo.filter.assert_called_with(product=self.product, tool_configuration__tool_type__name="Cobalt.io") self.assertEqual(api_scan_configuration, self.api_scan_configuration) - self.assertEqual(cobalt_api.api_token, 'API_KEY') - self.assertEqual(cobalt_api.org_token, 'EXTRAS') + self.assertEqual(cobalt_api.api_token, "API_KEY") + self.assertEqual(cobalt_api.org_token, "EXTRAS") def test_prepare_client_one_test_configuration(self): cobalt_api_importer = CobaltApiImporter() cobalt_api, api_scan_configuration = cobalt_api_importer.prepare_client(self.test_2) self.assertEqual(api_scan_configuration, self.api_scan_configuration_2) - self.assertEqual(cobalt_api.api_token, 'API_KEY') - self.assertEqual(cobalt_api.org_token, 'EXTRAS') + self.assertEqual(cobalt_api.api_token, "API_KEY") + self.assertEqual(cobalt_api.org_token, "EXTRAS") - @patch('dojo.tools.api_cobalt.importer.CobaltAPI.get_findings') + @patch("dojo.tools.api_cobalt.importer.CobaltAPI.get_findings") def test_get_findings(self, mock_foo): mock_foo.return_value = self.findings cobalt_api_importer = CobaltApiImporter() my_findings = cobalt_api_importer.get_findings(self.test_2) - mock_foo.assert_called_with('SERVICE_KEY_1') + mock_foo.assert_called_with("SERVICE_KEY_1") self.assertEqual(my_findings, self.findings) diff --git a/unittests/tools/test_api_cobalt_parser.py b/unittests/tools/test_api_cobalt_parser.py index 644dec0e5f..8db2c23aac 100644 --- a/unittests/tools/test_api_cobalt_parser.py +++ b/unittests/tools/test_api_cobalt_parser.py @@ -266,14 +266,14 @@ def test_cobalt_api_parser_with_wont_fix_finding(self): self.assertFalse(finding.static_finding) self.assertTrue(finding.dynamic_finding) - @patch('dojo.tools.api_cobalt.importer.CobaltApiImporter.get_findings') + @patch("dojo.tools.api_cobalt.importer.CobaltApiImporter.get_findings") def test_cobalt_api_parser_with_api(self, mock): - with open(get_unit_tests_path() + '/scans/api_cobalt/cobalt_api_many_vul.json') as api_findings_file: + with open(get_unit_tests_path() + 
"/scans/api_cobalt/cobalt_api_many_vul.json") as api_findings_file: api_findings = json.load(api_findings_file) mock.return_value = api_findings test_type = Test_Type() - test_type.name = 'test_type' + test_type.name = "test_type" test = Test() test.test_type = test_type @@ -282,6 +282,6 @@ def test_cobalt_api_parser_with_api(self, mock): mock.assert_called_with(test) self.assertEqual(3, len(findings)) - self.assertEqual(findings[0].title, 'SQL Injection') - self.assertEqual(findings[1].title, 'Cross Site Scripting') - self.assertEqual(findings[2].title, 'Missing firewall') + self.assertEqual(findings[0].title, "SQL Injection") + self.assertEqual(findings[1].title, "Cross Site Scripting") + self.assertEqual(findings[2].title, "Missing firewall") diff --git a/unittests/tools/test_api_edgescan_importer.py b/unittests/tools/test_api_edgescan_importer.py index 2cd969affd..7d28d50627 100644 --- a/unittests/tools/test_api_edgescan_importer.py +++ b/unittests/tools/test_api_edgescan_importer.py @@ -16,12 +16,12 @@ def setUpTestData(cls): cls.tool_type = Tool_Type() cls.tool_configuration = Tool_Configuration() cls.tool_configuration.tool_type = cls.tool_type - cls.tool_configuration.authentication_type = 'API' - cls.tool_configuration.api_key = 'API_KEY' + cls.tool_configuration.authentication_type = "API" + cls.tool_configuration.api_key = "API_KEY" cls.tool_configuration.extras = '{"extras": "EXTRAS"}' cls.product = Product() - cls.product.name = 'Product' + cls.product.name = "Product" cls.engagement = Engagement() cls.engagement.product = cls.product cls.test = Test() @@ -33,7 +33,7 @@ def setUpTestData(cls): cls.api_scan_configuration.tool_configuration = cls.tool_configuration cls.product_2 = Product() - cls.product_2.name = 'Product_2' + cls.product_2.name = "Product_2" cls.engagement_2 = Engagement() cls.engagement_2.product = cls.product_2 cls.test_2 = Test() @@ -44,9 +44,9 @@ def setUpTestData(cls): cls.test_2.api_scan_configuration = cls.api_scan_configuration_2 cls.api_scan_configuration_2.product = cls.product_2 cls.api_scan_configuration_2.tool_configuration = cls.tool_configuration - cls.api_scan_configuration_2.service_key_1 = 'SERVICE_KEY_1' + cls.api_scan_configuration_2.service_key_1 = "SERVICE_KEY_1" - cls.findings = json.dumps({'a': 1, 'b': 2}) + cls.findings = json.dumps({"a": 1, "b": 2}) def test_prepare_client_do_not_match(self): product_3 = Product() @@ -62,7 +62,7 @@ def test_prepare_client_do_not_match(self): edgescan_importer = EdgescanImporter() edgescan_importer.prepare_client(test_3) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_more_than_one_configuration(self, mock_foo): mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 2 @@ -73,7 +73,7 @@ def test_prepare_client_more_than_one_configuration(self, mock_foo): mock_foo.filter.assert_called_with(product=self.product) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_no_configuration(self, mock_foo): mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 0 @@ -84,7 +84,7 @@ def test_prepare_client_no_configuration(self, mock_foo): mock_foo.filter.assert_called_with(product=self.product) - @patch('dojo.models.Product_API_Scan_Configuration.objects') + @patch("dojo.models.Product_API_Scan_Configuration.objects") def test_prepare_client_one_product_configuration(self, mock_foo): 
mock_foo.filter.return_value = mock_foo mock_foo.count.return_value = 1 @@ -95,21 +95,21 @@ def test_prepare_client_one_product_configuration(self, mock_foo): mock_foo.filter.assert_called_with(product=self.product) self.assertEqual(api_scan_configuration, self.api_scan_configuration) - self.assertEqual(edgescan_api.api_key, 'API_KEY') + self.assertEqual(edgescan_api.api_key, "API_KEY") def test_prepare_client_one_test_configuration(self): edgescan_importer = EdgescanImporter() edgescan_api, api_scan_configuration = edgescan_importer.prepare_client(self.test_2) self.assertEqual(api_scan_configuration, self.api_scan_configuration_2) - self.assertEqual(edgescan_api.api_key, 'API_KEY') + self.assertEqual(edgescan_api.api_key, "API_KEY") - @patch('dojo.tools.api_edgescan.importer.EdgescanAPI.get_findings') + @patch("dojo.tools.api_edgescan.importer.EdgescanAPI.get_findings") def test_get_findings(self, mock_foo): mock_foo.return_value = self.findings edgescan_importer = EdgescanImporter() my_findings = edgescan_importer.get_findings(self.test_2) - mock_foo.assert_called_with('SERVICE_KEY_1') + mock_foo.assert_called_with("SERVICE_KEY_1") self.assertEqual(my_findings, self.findings) diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py index f2a49cc20d..c7ff7e90dc 100644 --- a/unittests/tools/test_api_sonarqube_importer.py +++ b/unittests/tools/test_api_sonarqube_importer.py @@ -9,55 +9,55 @@ def dummy_product(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/product.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json") as json_file: data = json.load(json_file) return data def dummy_issues(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/issues.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json") as json_file: data = json.load(json_file) return data def dummy_rule(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/rule.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json") as json_file: data = json.load(json_file) return data def dummy_rule_wo_html_desc(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/rule_wo_html_desc.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json") as json_file: data = json.load(json_file) return data def dummy_no_hotspot(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/no_vuln.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json") as json_file: data = json.load(json_file) return data def dummy_one_hotspot(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/one_vuln.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json") as json_file: data = json.load(json_file) return data def dummy_many_hotspots(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/many_vulns.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json") as json_file: data = json.load(json_file) return data def dummy_hotspot_rule(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/rule.json') as json_file: + with open(get_unit_tests_path() + 
"/scans/api_sonarqube/hotspots/rule.json") as json_file: data = json.load(json_file) return data def dummy_hotspot_rule_wo_risk_description(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/rule_wo_risk_description.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json") as json_file: data = json.load(json_file) return data @@ -69,12 +69,12 @@ def empty_list(self, *args, **kwargs): class TestSonarqubeImporterNoSQToolConfig(DojoTestCase): # Testing case no 1. https://github.com/DefectDojo/django-DefectDojo/pull/4676 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) @@ -86,13 +86,13 @@ def test_parser(self): class TestSonarqubeImporterOneSQToolConfig(DojoTestCase): # Testing case no 2. https://github.com/DefectDojo/django-DefectDojo/pull/4676 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) @@ -104,14 +104,14 @@ def test_parser(self): class TestSonarqubeImporterMultipleSQToolConfig(DojoTestCase): # Testing case no 3. https://github.com/DefectDojo/django-DefectDojo/pull/4676 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) @@ -123,23 +123,23 @@ def test_parser(self): class TestSonarqubeImporterOneSQConfigNoKey(DojoTestCase): # Testing case no 4. 
https://github.com/DefectDojo/django-DefectDojo/pull/4676 without Project key fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcNoKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcNoKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -149,23 +149,23 @@ def test_parser(self): class TestSonarqubeImporterOneSQConfigWithKey(DojoTestCase): # Testing case no 5. 
https://github.com/DefectDojo/django-DefectDojo/pull/4676 without Project key fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcWithKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcWithKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -175,16 +175,16 @@ def test_parser(self): class TestSonarqubeImporterMultipleSQConfigs(DojoTestCase): # Testing case no 6. https://github.com/DefectDojo/django-DefectDojo/pull/4676 without Project key fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcNoKey.json', - 'unit_sonarqube_sqcWithKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcNoKey.json", + "unit_sonarqube_sqcWithKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) @@ -196,27 +196,27 @@ def test_parser(self): class TestSonarqubeImporterSelectedSQConfigsNoKey(DojoTestCase): # Testing case no 7. 
https://github.com/DefectDojo/django-DefectDojo/pull/4676 without Project key fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcNoKey.json', - 'unit_sonarqube_sqcWithKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcNoKey.json", + "unit_sonarqube_sqcWithKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test( engagement=engagement, api_scan_configuration=Product_API_Scan_Configuration.objects.all().first(), ) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -226,33 +226,33 @@ def test_parser(self): class TestSonarqubeImporterSelectedSQConfigsWithKey(DojoTestCase): # Testing case no 8. 
https://github.com/DefectDojo/django-DefectDojo/pull/4676 without Project key fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcNoKey.json', - 'unit_sonarqube_sqcWithKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcNoKey.json", + "unit_sonarqube_sqcWithKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test( engagement=engagement, api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(), ) - other_product = Product(name='other product') + other_product = Product(name="other product") other_engagement = Engagement(product=other_product) self.other_test = Test( engagement=other_engagement, api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(), ) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -267,27 +267,27 @@ class TestSonarqubeImporterExternalRule(DojoTestCase): # Test that finding governed by a rule without htmlDesc can be imported. # Custom (user defined) rules may have no htmlDesc field. 
fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_toolConfig2.json', - 'unit_sonarqube_product.json', - 'unit_sonarqube_sqcNoKey.json', - 'unit_sonarqube_sqcWithKey.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_toolConfig2.json", + "unit_sonarqube_product.json", + "unit_sonarqube_sqcNoKey.json", + "unit_sonarqube_sqcWithKey.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test( engagement=engagement, api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(), ) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule_wo_html_desc) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule_wo_html_desc) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -295,32 +295,32 @@ def test_parser(self): finding = findings[0] self.assertEqual('Remove this useless assignment to local variable "currentValue".', finding.title) self.assertEqual(None, finding.cwe) - self.assertEqual('', finding.description) - self.assertEqual('[Issue permalink](http://localhoproject/issues?issues=AWKWIl8pZpu0CyehMfc4&open=AWKWIl8pZpu0CyehMfc4&resolved=CONFIRMED&id=internal.dummy.project) \n', finding.references) - self.assertEqual('Medium', finding.severity) + self.assertEqual("", finding.description) + self.assertEqual("[Issue permalink](http://localhoproject/issues?issues=AWKWIl8pZpu0CyehMfc4&open=AWKWIl8pZpu0CyehMfc4&resolved=CONFIRMED&id=internal.dummy.project) \n", finding.references) + self.assertEqual("Medium", finding.severity) self.assertEqual(242, finding.line) - self.assertEqual('internal.dummy.project:src/main/javascript/TranslateDirective.ts', finding.file_path) + self.assertEqual("internal.dummy.project:src/main/javascript/TranslateDirective.ts", finding.file_path) class TestSonarqubeImporterTwoIssuesNoHotspots(DojoTestCase): # Testing case no 9. 
class TestSonarqubeImporterTwoIssuesNoHotspots(DojoTestCase):
    # Testing case no 9. https://github.com/DefectDojo/django-DefectDojo/pull/4107
    fixtures = [
-        'unit_sonarqube_toolType.json',
-        'unit_sonarqube_toolConfig1.json',
-        'unit_sonarqube_sqcWithKey.json',
-        'unit_sonarqube_product.json',
+        "unit_sonarqube_toolType.json",
+        "unit_sonarqube_toolConfig1.json",
+        "unit_sonarqube_sqcWithKey.json",
+        "unit_sonarqube_product.json",
    ]

    def setUp(self):
-        product = Product.objects.get(name='product')
+        product = Product.objects.get(name="product")
        engagement = Engagement(product=product)
        self.test = Test(engagement=engagement)

-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list)
    def test_parser(self):
        parser = SonarQubeApiImporter()
        findings = parser.get_findings(None, self.test)
@@ -330,22 +330,22 @@ def test_parser(self):

class TestSonarqubeImporterNoIssuesOneHotspot(DojoTestCase):
    # Testing case no 9. https://github.com/DefectDojo/django-DefectDojo/pull/4107
    fixtures = [
-        'unit_sonarqube_toolType.json',
-        'unit_sonarqube_toolConfig1.json',
-        'unit_sonarqube_sqcWithKey.json',
-        'unit_sonarqube_product.json',
+        "unit_sonarqube_toolType.json",
+        "unit_sonarqube_toolConfig1.json",
+        "unit_sonarqube_sqcWithKey.json",
+        "unit_sonarqube_product.json",
    ]

    def setUp(self):
-        product = Product.objects.get(name='product')
+        product = Product.objects.get(name="product")
        engagement = Engagement(product=product)
        self.test = Test(engagement=engagement)

-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', empty_list)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', dummy_one_hotspot)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", empty_list)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", dummy_one_hotspot)
    def test_parser(self):
        parser = SonarQubeApiImporter()
        findings = parser.get_findings(None, self.test)
@@ -355,22 +355,22 @@ def test_parser(self):

class TestSonarqubeImporterNoIssuesTwoHotspots(DojoTestCase):
    # Testing case no 11.
https://github.com/DefectDojo/django-DefectDojo/pull/4107 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_sqcWithKey.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_sqcWithKey.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', empty_list) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', dummy_many_hotspots) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", dummy_many_hotspots) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -380,22 +380,22 @@ def test_parser(self): class TestSonarqubeImporterTwoIssuesTwoHotspots(DojoTestCase): # Testing case no 12. https://github.com/DefectDojo/django-DefectDojo/pull/4107 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_sqcWithKey.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_sqcWithKey.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', dummy_many_hotspots) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", dummy_many_hotspots) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) @@ -405,63 +405,63 @@ def test_parser(self): class TestSonarqubeImporterValidateHotspotData(DojoTestCase): # Testing case no 13. 
https://github.com/DefectDojo/django-DefectDojo/pull/4107
    fixtures = [
-        'unit_sonarqube_toolType.json',
-        'unit_sonarqube_toolConfig1.json',
-        'unit_sonarqube_sqcWithKey.json',
-        'unit_sonarqube_product.json',
+        "unit_sonarqube_toolType.json",
+        "unit_sonarqube_toolConfig1.json",
+        "unit_sonarqube_sqcWithKey.json",
+        "unit_sonarqube_product.json",
    ]

    def setUp(self):
-        product = Product.objects.get(name='product')
+        product = Product.objects.get(name="product")
        engagement = Engagement(product=product)
        self.test = Test(engagement=engagement)

-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', empty_list)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule)
-    @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', dummy_one_hotspot)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", empty_list)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule)
+    @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", dummy_one_hotspot)
    def test_parser(self):
        parser = SonarQubeApiImporter()
        findings = parser.get_findings(None, self.test)
        self.assertEqual(findings[0].title, '"password" detected here, make sure this is not a hard-coded credential.')
        self.assertEqual(findings[0].cwe, 798)
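
Read together with case no 14 further down (the same hotspot, but a rule without a risk description, where the test expects cwe to be None), these expectations suggest the CWE comes from the first CWE link in the hotspot rule's risk-description references. A hedged sketch of that reading; the helper below is illustrative, not the importer's actual code:

    import re


    def first_cwe(references: str) -> int | None:
        # Return the number of the first "CWE-<n>" token in the rule's
        # references, or None when the rule carries no CWE link at all.
        match = re.search(r"CWE-(\d+)", references)
        return int(match.group(1)) if match else None


    assert first_cwe("[MITRE, CWE-798](http://cwe.mitre.org/data/definitions/798)") == 798
    assert first_cwe("[Hotspot permalink](http://example.invalid/hotspot)") is None
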
+ "\n\n", findings[0].description, ) - self.assertEqual(str(findings[0].severity), 'High') + self.assertEqual(str(findings[0].severity), "High") self.assertMultiLineEqual( - '[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) ' - '\n' - '[CVE-2019-13466](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13466)' - '\n' - '[CVE-2018-15389](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-15389)' - '\n' - '[OWASP Top 10 2017 Category A2](https://www.owasp.org/index.php/Top_10-2017_A2-Broken_Authentication)' - '\n' - '[MITRE, CWE-798](http://cwe.mitre.org/data/definitions/798)' - '\n' - '[MITRE, CWE-259](http://cwe.mitre.org/data/definitions/259)' - '\n' - '[CERT, MSC03-J.](https://wiki.sei.cmu.edu/confluence/x/OjdGBQ)' - '\n' - '[SANS Top 25](https://www.sans.org/top25-software-errors/#cat3)' - '\n' - '[Hard Coded Password](http://h3xstream.github.io/find-sec-bugs/bugs.htm#HARD_CODE_PASSWORD)' - '\n', + "[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) " + "\n" + "[CVE-2019-13466](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13466)" + "\n" + "[CVE-2018-15389](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-15389)" + "\n" + "[OWASP Top 10 2017 Category A2](https://www.owasp.org/index.php/Top_10-2017_A2-Broken_Authentication)" + "\n" + "[MITRE, CWE-798](http://cwe.mitre.org/data/definitions/798)" + "\n" + "[MITRE, CWE-259](http://cwe.mitre.org/data/definitions/259)" + "\n" + "[CERT, MSC03-J.](https://wiki.sei.cmu.edu/confluence/x/OjdGBQ)" + "\n" + "[SANS Top 25](https://www.sans.org/top25-software-errors/#cat3)" + "\n" + "[Hard Coded Password](http://h3xstream.github.io/find-sec-bugs/bugs.htm#HARD_CODE_PASSWORD)" + "\n", findings[0].references, ) - self.assertEqual(str(findings[0].file_path), 'internal.dummy.project:spec/support/user_fixture.rb') + self.assertEqual(str(findings[0].file_path), "internal.dummy.project:spec/support/user_fixture.rb") self.assertEqual(findings[0].line, 9) self.assertEqual(findings[0].active, True) self.assertEqual(findings[0].verified, False) @@ -470,49 +470,49 @@ def test_parser(self): self.assertEqual(findings[0].out_of_scope, False) self.assertEqual(findings[0].static_finding, True) self.assertEqual(findings[0].scanner_confidence, 1) - self.assertEqual(str(findings[0].sonarqube_issue), 'AXgm6Z-ophPPY0C1qhRq') + self.assertEqual(str(findings[0].sonarqube_issue), "AXgm6Z-ophPPY0C1qhRq") class TestSonarqubeImporterHotspotRule_WO_Risk_Description(DojoTestCase): # Testing case no 14. 
https://github.com/DefectDojo/django-DefectDojo/issues/6506 fixtures = [ - 'unit_sonarqube_toolType.json', - 'unit_sonarqube_toolConfig1.json', - 'unit_sonarqube_sqcWithKey.json', - 'unit_sonarqube_product.json', + "unit_sonarqube_toolType.json", + "unit_sonarqube_toolConfig1.json", + "unit_sonarqube_sqcWithKey.json", + "unit_sonarqube_product.json", ] def setUp(self): - product = Product.objects.get(name='product') + product = Product.objects.get(name="product") engagement = Engagement(product=product) self.test = Test(engagement=engagement) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule', dummy_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues', empty_list) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule_wo_risk_description) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', dummy_one_hotspot) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule_wo_risk_description) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", dummy_one_hotspot) def test_parser(self): parser = SonarQubeApiImporter() findings = parser.get_findings(None, self.test) self.assertEqual(findings[0].title, '"password" detected here, make sure this is not a hard-coded credential.') self.assertIsNone(findings[0].cwe) self.assertMultiLineEqual( - '**Ask Yourself Whether**' - '\n\n ' - '* Credentials allows access to a sensitive component like a database, a file storage, an API or a service. ' - '\n ' - '* Credentials are used in production environments. ' - '\n ' - '* Application re-distribution is required before updating the credentials. ' - '\n\n' - 'There is a risk if you answered yes to any of those questions.' - '\n\n', + "**Ask Yourself Whether**" + "\n\n " + "* Credentials allows access to a sensitive component like a database, a file storage, an API or a service. " + "\n " + "* Credentials are used in production environments. " + "\n " + "* Application re-distribution is required before updating the credentials. " + "\n\n" + "There is a risk if you answered yes to any of those questions." 
+ "\n\n", findings[0].description, ) - self.assertEqual(str(findings[0].severity), 'High') - self.assertEqual(findings[0].references, '[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) \n') - self.assertEqual(str(findings[0].file_path), 'internal.dummy.project:spec/support/user_fixture.rb') + self.assertEqual(str(findings[0].severity), "High") + self.assertEqual(findings[0].references, "[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) \n") + self.assertEqual(str(findings[0].file_path), "internal.dummy.project:spec/support/user_fixture.rb") self.assertEqual(findings[0].line, 9) self.assertEqual(findings[0].active, True) self.assertEqual(findings[0].verified, False) @@ -521,4 +521,4 @@ def test_parser(self): self.assertEqual(findings[0].out_of_scope, False) self.assertEqual(findings[0].static_finding, True) self.assertEqual(findings[0].scanner_confidence, 1) - self.assertEqual(str(findings[0].sonarqube_issue), 'AXgm6Z-ophPPY0C1qhRq') + self.assertEqual(str(findings[0].sonarqube_issue), "AXgm6Z-ophPPY0C1qhRq") diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py index d9963d1fb7..4a44b39b7e 100644 --- a/unittests/tools/test_api_sonarqube_parser.py +++ b/unittests/tools/test_api_sonarqube_parser.py @@ -33,7 +33,7 @@ def dummy_rule(self, *args, **kwargs): def dummy_hotspot_rule(self, *args, **kwargs): - with open(get_unit_tests_path() + '/scans/api_sonarqube/hotspots/rule.json') as json_file: + with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json") as json_file: data = json.load(json_file) return data @@ -50,18 +50,18 @@ def setUp(self): # build Sonarqube conf (the parser need it) tool_type, _ = Tool_Type.objects.get_or_create(name="SonarQube") tool_conf, _ = Tool_Configuration.objects.get_or_create( - name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url='http://dummy.url.foo.bar/api', + name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url="http://dummy.url.foo.bar/api", ) pasc, _ = Product_API_Scan_Configuration.objects.get_or_create( - product=product, tool_configuration=tool_conf, service_key_1='ABCD', + product=product, tool_configuration=tool_conf, service_key_1="ABCD", ) self.test = Test(engagement=engagement, api_scan_configuration=pasc) @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project", dummy_product) @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_rule", dummy_rule) @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_issues", dummy_issues) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule', dummy_hotspot_rule) - @mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots', empty_list) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_hotspot_rule", dummy_hotspot_rule) + @mock.patch("dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_hotspots", empty_list) def test_get_findings(self): parser = ApiSonarQubeParser() findings = parser.get_findings(None, self.test) diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py index 39191a7e3d..ea4e5ac837 100644 --- a/unittests/tools/test_aqua_parser.py +++ b/unittests/tools/test_aqua_parser.py @@ -18,16 +18,16 @@ def test_aqua_parser_has_one_finding(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) finding = findings[0] - 
self.assertEqual('CVE-2019-14697 - musl (1.1.20-r4) ', finding.title)
-        self.assertEqual('High', finding.severity)
-        self.assertEqual('CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', finding.cvssv3)
-        self.assertEqual('musl libc through 1.1.23 has an x87 floating-point stack adjustment imbalance, related to the math/i386/ directory. In some cases, use of this library could introduce out-of-bounds writes that are not present in an application\'s source code.', finding.description)
-        self.assertEqual('1.1.20-r5', finding.mitigation)
-        self.assertEqual('\nhttps://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-14697', finding.references)
-        self.assertEqual('musl', finding.component_name)
-        self.assertEqual('1.1.20-r4', finding.component_version)
+        self.assertEqual("CVE-2019-14697 - musl (1.1.20-r4) ", finding.title)
+        self.assertEqual("High", finding.severity)
+        self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3)
+        self.assertEqual("musl libc through 1.1.23 has an x87 floating-point stack adjustment imbalance, related to the math/i386/ directory. In some cases, use of this library could introduce out-of-bounds writes that are not present in an application's source code.", finding.description)
+        self.assertEqual("1.1.20-r5", finding.mitigation)
+        self.assertEqual("\nhttps://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-14697", finding.references)
+        self.assertEqual("musl", finding.component_name)
+        self.assertEqual("1.1.20-r4", finding.component_version)
        self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2019-14697', finding.unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2019-14697", finding.unsaved_vulnerability_ids[0])

    def test_aqua_parser_has_many_findings(self):
        with open("unittests/scans/aqua/many_vulns.json") as testfile:
@@ -41,12 +41,12 @@ def test_aqua_parser_v2_has_one_finding(self):
        findings = parser.get_findings(testfile, Test())
        self.assertEqual(1, len(findings))
        finding = findings[0]
-        self.assertEqual('CVE-2019-15601: curl', finding.title)
-        self.assertEqual('Medium', finding.severity)
-        self.assertEqual('CURL before 7.68.0 lacks proper input validation, which allows users to create a `FILE:` URL that can make the client access a remote file using SMB (Windows-only issue).', finding.description)
-        self.assertEqual('Upgrade to curl 7.68.0', finding.mitigation)
+        self.assertEqual("CVE-2019-15601: curl", finding.title)
+        self.assertEqual("Medium", finding.severity)
+        self.assertEqual("CURL before 7.68.0 lacks proper input validation, which allows users to create a `FILE:` URL that can make the client access a remote file using SMB (Windows-only issue).", finding.description)
+        self.assertEqual("Upgrade to curl 7.68.0", finding.mitigation)
        self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2019-15601', finding.unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2019-15601", finding.unsaved_vulnerability_ids[0])

    def test_aqua_parser_v2_has_many_findings(self):
        with open("unittests/scans/aqua/many_v2.json") as testfile:
@@ -86,8 +86,8 @@ def test_aqua_parser_for_aqua_severity(self):
            sevs.append(finding.severity)

        d = Counter(sevs)
-        self.assertEqual(1, d['Critical'])
-        self.assertEqual(1, d['High'])
-        self.assertEqual(2, d['Medium'])
-        self.assertEqual(2, d['Low'])
-        self.assertEqual(7, d['Info'])
+        self.assertEqual(1, d["Critical"])
+        self.assertEqual(1, d["High"])
+        self.assertEqual(2, d["Medium"])
+        self.assertEqual(2, d["Low"])
+        self.assertEqual(7, d["Info"])
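
The severity tally above leans on collections.Counter: counting a list of severity strings gives exact per-severity totals, and looking up a missing key returns 0 instead of raising, which keeps the assertions simple. A standalone illustration; the severity list is made up to mirror the counts asserted above:

    from collections import Counter

    sevs = ["Critical", "High", "Medium", "Medium", "Low", "Low",
            "Info", "Info", "Info", "Info", "Info", "Info", "Info"]
    d = Counter(sevs)

    assert d["Critical"] == 1
    assert d["Medium"] == 2
    assert d["Info"] == 7
    assert d["Unknown"] == 0  # absent keys count as zero, no KeyError
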
diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py
index ca75a6ed43..e175ec7a8c 100644
--- a/unittests/tools/test_arachni_parser.py
+++ b/unittests/tools/test_arachni_parser.py
@@ -39,9 +39,9 @@ def test_parser_has_many_finding(self):
            tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))), finding.date)
        self.assertEqual(1, len(finding.unsaved_endpoints))
        endpoint = finding.unsaved_endpoints[0]
-        self.assertEqual('demo.defectdojo.org', endpoint.host)
+        self.assertEqual("demo.defectdojo.org", endpoint.host)
        self.assertEqual(443, endpoint.port)
-        self.assertEqual('https', endpoint.protocol)
+        self.assertEqual("https", endpoint.protocol)
        # finding 2
        finding = findings[2]
        self.assertEqual("Interesting response", finding.title)
@@ -49,9 +49,9 @@ def test_parser_has_many_finding(self):
        self.assertEqual("Info", finding.severity)
        self.assertEqual(datetime.datetime(2021, 3, 17, 19, 41, 46,
            tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))), finding.date)
-        self.assertIn('interesting', finding.unsaved_tags)
-        self.assertIn('response', finding.unsaved_tags)
-        self.assertIn('server', finding.unsaved_tags)
+        self.assertIn("interesting", finding.unsaved_tags)
+        self.assertIn("response", finding.unsaved_tags)
+        self.assertIn("server", finding.unsaved_tags)

    def test_parser_has_many_finding2(self):
        with open("unittests/scans/arachni/js.com.afr.json") as testfile:
@@ -70,9 +70,9 @@ def test_parser_has_many_finding2(self):
            tzinfo=datetime.timezone(datetime.timedelta(seconds=3600))), finding.date)
        self.assertEqual(1, len(finding.unsaved_endpoints))
        endpoint = finding.unsaved_endpoints[0]
-        self.assertEqual('juice-shop.herokuapp.com', endpoint.host)
+        self.assertEqual("juice-shop.herokuapp.com", endpoint.host)
        self.assertEqual(443, endpoint.port)
-        self.assertEqual('https', endpoint.protocol)
+        self.assertEqual("https", endpoint.protocol)
        # finding 9
        finding = findings[9]
        self.assertEqual("Interesting response", finding.title)
@@ -83,6 +83,6 @@ def test_parser_has_many_finding2(self):
        self.assertEqual(25, finding.nb_occurences)
        self.assertEqual(25, len(finding.unsaved_endpoints))
        endpoint = finding.unsaved_endpoints[0]
-        self.assertEqual('juice-shop.herokuapp.com', endpoint.host)
+        self.assertEqual("juice-shop.herokuapp.com", endpoint.host)
        self.assertEqual(443, endpoint.port)
-        self.assertEqual('https', endpoint.protocol)
+        self.assertEqual("https", endpoint.protocol)
diff --git a/unittests/tools/test_aws_prowler_parser.py b/unittests/tools/test_aws_prowler_parser.py
index db567d00f0..567e963abd 100644
--- a/unittests/tools/test_aws_prowler_parser.py
+++ b/unittests/tools/test_aws_prowler_parser.py
@@ -80,19 +80,19 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
            open("unittests/scans/aws_prowler/one_vuln.json"))
        self.assertEqual(1, len(findings))
        self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title)
-        self.assertIn('012345678912', findings[0].description)
-        self.assertIn('Ensure hardware MFA is enabled for the root account', findings[0].description)
-        self.assertIn('check114', findings[0].description)
-        self.assertIn('1.14', findings[0].description)
-        self.assertIn('eu-central-1', findings[0].description)
-        self.assertIn('Software and Configuration Checks', findings[0].description)
-        self.assertIn('iam', findings[0].description)
-        self.assertIn('IAM', findings[0].description)
-        self.assertIn('MFA', findings[0].description)
-        self.assertEqual('Critical', findings[0].severity)
-        self.assertIn('The root 
account is the most privileged user in an AWS account. MFA adds an extra layer', findings[0].impact) - self.assertEqual('Using IAM console navigate to Dashboard and expand Activate MFA on your root account.', findings[0].mitigation) - self.assertEqual('https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa', findings[0].references) + self.assertIn("012345678912", findings[0].description) + self.assertIn("Ensure hardware MFA is enabled for the root account", findings[0].description) + self.assertIn("check114", findings[0].description) + self.assertIn("1.14", findings[0].description) + self.assertIn("eu-central-1", findings[0].description) + self.assertIn("Software and Configuration Checks", findings[0].description) + self.assertIn("iam", findings[0].description) + self.assertIn("IAM", findings[0].description) + self.assertIn("MFA", findings[0].description) + self.assertEqual("Critical", findings[0].severity) + self.assertIn("The root account is the most privileged user in an AWS account. MFA adds an extra layer", findings[0].impact) + self.assertEqual("Using IAM console navigate to Dashboard and expand Activate MFA on your root account.", findings[0].mitigation) + self.assertEqual("https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa", findings[0].references) self.assertEqual(datetime.date(2021, 8, 23), findings[0].date) def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): @@ -101,13 +101,13 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): self.assertEqual(4, len(findings)) with self.subTest(i=0): self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title) - self.assertEqual('Critical', findings[0].severity) + self.assertEqual("Critical", findings[0].severity) with self.subTest(i=1): self.assertEqual("eu-central-1: Cluster control plane access is not restricted for EKS cluster prod", findings[1].title) - self.assertEqual('High', findings[1].severity) + self.assertEqual("High", findings[1].severity) with self.subTest(i=2): self.assertEqual("eu-central-1: Control plane logging is not enabled for EKS cluster prod", findings[2].title) - self.assertEqual('Medium', findings[2].severity) + self.assertEqual("Medium", findings[2].severity) with self.subTest(i=3): self.assertEqual("eu-central-1: prod.config_read.iam has inline policy directly attached", findings[3].title) - self.assertEqual('Low', findings[3].severity) + self.assertEqual("Low", findings[3].severity) diff --git a/unittests/tools/test_aws_prowler_v3plus_parser.py b/unittests/tools/test_aws_prowler_v3plus_parser.py index 5096c6275d..6c7f2564e2 100644 --- a/unittests/tools/test_aws_prowler_v3plus_parser.py +++ b/unittests/tools/test_aws_prowler_v3plus_parser.py @@ -20,9 +20,9 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self): open("unittests/scans/aws_prowler_v3plus/one_vuln.json")) self.assertEqual(1, len(findings)) self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.sandbox.partner.teste.com", findings[0].unique_id_from_tool) - self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description) + self.assertIn("Check if ACM Certificates are about to expire in specific days or less", findings[0].description) self.assertEqual("arn:aws:acm:us-east-1:999999999999:certificate/ffffffff-0000-0000-0000-000000000000", findings[0].component_name) - 
self.assertIn('https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html', findings[0].references) + self.assertIn("https://docs.aws.amazon.com/config/latest/developerguide/acm-certificate-expiration-check.html", findings[0].references) def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): findings = self.setup( @@ -30,13 +30,13 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): self.assertEqual(3, len(findings)) with self.subTest(i=0): self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.teste.teste.com", findings[0].unique_id_from_tool) - self.assertIn('Check if ACM Certificates are about to expire in specific days or less', findings[0].description) + self.assertIn("Check if ACM Certificates are about to expire in specific days or less", findings[0].description) with self.subTest(i=1): self.assertEqual("prowler-aws-accessanalyzer_enabled-999999999999-us-east-1-999999999999", findings[1].unique_id_from_tool) - self.assertIn('Check if IAM Access Analyzer is enabled', findings[1].description) + self.assertIn("Check if IAM Access Analyzer is enabled", findings[1].description) with self.subTest(i=3): self.assertEqual("prowler-aws-account_maintain_current_contact_details-999999999999-us-east-1-999999999999", findings[2].unique_id_from_tool) - self.assertIn('Maintain current contact details.', findings[2].description) + self.assertIn("Maintain current contact details.", findings[2].description) def test_aws_prowler_parser_with_no_vuln_has_no_findings_ocsf_json(self): findings = self.setup( @@ -48,9 +48,9 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self): open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json")) self.assertEqual(1, len(findings)) self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) - self.assertIn('Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship', findings[0].description) + self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description) self.assertEqual("arn:aws:iam::123456789012:role/myAdministratorExecutionRole", findings[0].component_name) - self.assertIn('https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege', findings[0].references) + self.assertIn("https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege", findings[0].references) def test_aws_prowler_parser_with_many_vuln_has_many_findings_ocsf_json(self): findings = self.setup( @@ -58,10 +58,10 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings_ocsf_json(self): self.assertEqual(3, len(findings)) with self.subTest(i=0): self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) - self.assertIn('Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship', findings[0].description) + self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description) with self.subTest(i=1): self.assertEqual("prowler-aws-iam_role_cross_account_readonlyaccess_policy-123456789012-us-east-1-AuditRole", 
findings[1].unique_id_from_tool) - self.assertIn('Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts', findings[1].description) + self.assertIn("Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts", findings[1].description) with self.subTest(i=3): self.assertEqual("prowler-aws-iam_role_permissive_trust_relationship-123456789012-us-east-1-CrossAccountResourceAccessRole", findings[2].unique_id_from_tool) - self.assertIn('Ensure IAM Roles do not allow assume role from any role of a cross account', findings[2].description) + self.assertIn("Ensure IAM Roles do not allow assume role from any role of a cross account", findings[2].description) diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py index f287f8937b..f4eb990d0e 100644 --- a/unittests/tools/test_awssecurityhub_parser.py +++ b/unittests/tools/test_awssecurityhub_parser.py @@ -68,7 +68,7 @@ def test_inspector_ec2(self): self.assertEqual("CVE-2022-3643", finding.unsaved_vulnerability_ids[0]) self.assertEqual("- Update kernel-4.14.301\n\t- yum update kernel\n", finding.mitigation) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('AwsEc2Instance arn:aws:ec2:us-east-1:XXXXXXXXXXXX:i-11111111111111111', endpoint.host) + self.assertEqual("AwsEc2Instance arn:aws:ec2:us-east-1:XXXXXXXXXXXX:i-11111111111111111", endpoint.host) def test_inspector_ec2_with_no_vulnerabilities(self): with open(get_unit_tests_path() + sample_path("inspector_ec2_cve_no_vulnerabilities.json")) as test_file: @@ -91,7 +91,7 @@ def test_inspector_ec2_ghsa(self): self.assertSetEqual({"CVE-2023-34256", "GHSA-p98r-538v-jgw5"}, set(finding.unsaved_vulnerability_ids)) self.assertEqual("https://github.com/bottlerocket-os/bottlerocket/security/advisories/GHSA-p98r-538v-jgw5", finding.references) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('AwsEc2Instance arn:aws:ec2:eu-central-1:012345678912:instance/i-07c11cc535d830123', endpoint.host) + self.assertEqual("AwsEc2Instance arn:aws:ec2:eu-central-1:012345678912:instance/i-07c11cc535d830123", endpoint.host) def test_inspector_ecr(self): with open(get_unit_tests_path() + sample_path("inspector_ecr.json")) as test_file: @@ -108,7 +108,7 @@ def test_inspector_ecr(self): self.assertIn("Repository: repo-os", finding.impact) self.assertEqual(0.0014, finding.epss_score) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('AwsEcrContainerImage arn:aws:ecr:eu-central-1:123456789012:repository/repo-os/sha256:af965ef68c78374a5f987fce98c0ddfa45801df2395bf012c50b863e65978d74', endpoint.host) + self.assertEqual("AwsEcrContainerImage arn:aws:ecr:eu-central-1:123456789012:repository/repo-os/sha256:af965ef68c78374a5f987fce98c0ddfa45801df2395bf012c50b863e65978d74", endpoint.host) def test_guardduty(self): with open(get_unit_tests_path() + sample_path("guardduty.json")) as test_file: @@ -124,5 +124,5 @@ def test_guardduty(self): self.assertEqual("User AssumedRole : 123123123 is anomalously invoking APIs commonly used in Discovery tactics. 
- Resource: 123123123", finding.title) self.assertEqual("TTPs/Discovery/IAMUser-AnomalousBehavior\n[https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_finding-types-active.html](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_finding-types-active.html)", finding.mitigation) endpoint = findings[0].unsaved_endpoints[0] - self.assertEqual('AwsEc2Instance arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890', endpoint.host) + self.assertEqual("AwsEc2Instance arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890", endpoint.host) self.assertEqual("This is a GuardDuty Finding\nAPIs commonly used in Discovery tactics were invoked by user AssumedRole : 123123123, under anomalous circumstances. Such activity is not typically seen from this user.\n**AWS Finding ARN:** arn:aws:guardduty:us-east-1:123456789012:detector/123456789/finding/2123123123123\n**SourceURL:** [https://us-east-1.console.aws.amazon.com/guardduty/home?region=us-east-1#/findings?macros=current&fId=2123123123123](https://us-east-1.console.aws.amazon.com/guardduty/home?region=us-east-1#/findings?macros=current&fId=2123123123123)\n**AwsAccountId:** 123456789012\n**Region:** us-east-1\n**Generator ID:** arn:aws:guardduty:us-east-1:123456789012:detector/123456789\n", finding.description) diff --git a/unittests/tools/test_azure_security_center_recommendations_parser.py b/unittests/tools/test_azure_security_center_recommendations_parser.py index 5865ea02c9..0119f982e8 100644 --- a/unittests/tools/test_azure_security_center_recommendations_parser.py +++ b/unittests/tools/test_azure_security_center_recommendations_parser.py @@ -21,10 +21,10 @@ def test_parse_file_with_multiple_findings(self): self.assertEqual(3, len(findings)) finding = findings[0] - self.assertEqual('my_virtual_network - Virtual networks should be protected by Azure Firewall', finding.title) - self.assertEqual(date.fromisoformat('2021-09-28'), finding.date) + self.assertEqual("my_virtual_network - Virtual networks should be protected by Azure Firewall", finding.title) + self.assertEqual(date.fromisoformat("2021-09-28"), finding.date) self.assertEqual(1032, finding.cwe) - self.assertEqual('Low', finding.severity) + self.assertEqual("Low", finding.severity) description = """**Recommendation:** Virtual networks should be protected by Azure Firewall **Resource Name:** my_virtual_network **Resource Type:** virtualnetworks @@ -34,21 +34,21 @@ def test_parse_file_with_multiple_findings(self): **Subscription:** My first subscription **Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b""" self.assertEqual(description, finding.description) - mitigation = 'To protect your virtual networks with Azure Firewall: 1. From the list below, select a network. Or select Take action if you\'ve arrived here from a specific virtual network page. 2. Follow the Azure Firewall deployment instructions. Make sure to configure all default routes properly.Important: Azure Firewall is billed separately from Azure Security Center. Learn more about Azure Firewall pricing.' + mitigation = "To protect your virtual networks with Azure Firewall: 1. From the list below, select a network. Or select Take action if you've arrived here from a specific virtual network page. 2. Follow the Azure Firewall deployment instructions. Make sure to configure all default routes properly.Important: Azure Firewall is billed separately from Azure Security Center. Learn more about Azure Firewall pricing." 
self.assertEqual(mitigation, finding.mitigation) - references = 'https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/f67fb4ed-d481-44d7-91e5-efadf504f74a/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b%2fresourcegroups%2fmy_resource_group%2fproviders%2fmicrosoft.network%2fvirtualnetworks%2fmy_virtual_network' + references = "https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/f67fb4ed-d481-44d7-91e5-efadf504f74a/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b%2fresourcegroups%2fmy_resource_group%2fproviders%2fmicrosoft.network%2fvirtualnetworks%2fmy_virtual_network" self.assertEqual(references, finding.references) self.assertTrue(finding.static_finding) self.assertFalse(finding.dynamic_finding) - unique_id_from_tool = '/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/resourcegroups/my_resource_group/providers/microsoft.network/virtualnetworks/my_virtual_network/providers/Microsoft.Security/assessments/f67fb4ed-d481-44d7-91e5-efadf504f74a' + unique_id_from_tool = "/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/resourcegroups/my_resource_group/providers/microsoft.network/virtualnetworks/my_virtual_network/providers/Microsoft.Security/assessments/f67fb4ed-d481-44d7-91e5-efadf504f74a" self.assertEqual(unique_id_from_tool, finding.unique_id_from_tool) - self.assertEqual('f67fb4ed-d481-44d7-91e5-efadf504f74a', finding.vuln_id_from_tool) + self.assertEqual("f67fb4ed-d481-44d7-91e5-efadf504f74a", finding.vuln_id_from_tool) finding = findings[1] - self.assertEqual('My first subscription - Azure Defender for Resource Manager should be enabled', finding.title) - self.assertEqual(date.fromisoformat('2021-09-28'), finding.date) + self.assertEqual("My first subscription - Azure Defender for Resource Manager should be enabled", finding.title) + self.assertEqual(date.fromisoformat("2021-09-28"), finding.date) self.assertEqual(1032, finding.cwe) - self.assertEqual('High', finding.severity) + self.assertEqual("High", finding.severity) description = """**Recommendation:** Azure Defender for Resource Manager should be enabled **Resource Name:** My first subscription **Resource Type:** Subscription @@ -60,19 +60,19 @@ def test_parse_file_with_multiple_findings(self): self.assertEqual(description, finding.description) mitigation = 'To enable Azure Defender for Resource Manager on your subscription: 1. Open Security Center\'s Pricing & settings page. 2. Select the subscription on which you want to enable Azure Defender. 3. Under "Select Azure Defender plan by resource type", set "Resource Manager" to "On".' 
self.assertEqual(mitigation, finding.mitigation) - references = 'https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/f0fb2a7e-16d5-849f-be57-86db712e9bd0/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b' + references = "https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/f0fb2a7e-16d5-849f-be57-86db712e9bd0/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b" self.assertEqual(references, finding.references) self.assertTrue(finding.static_finding) self.assertFalse(finding.dynamic_finding) - unique_id_from_tool = '/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/providers/Microsoft.Security/assessments/f0fb2a7e-16d5-849f-be57-86db712e9bd0' + unique_id_from_tool = "/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/providers/Microsoft.Security/assessments/f0fb2a7e-16d5-849f-be57-86db712e9bd0" self.assertEqual(unique_id_from_tool, finding.unique_id_from_tool) - self.assertEqual('f0fb2a7e-16d5-849f-be57-86db712e9bd0', finding.vuln_id_from_tool) + self.assertEqual("f0fb2a7e-16d5-849f-be57-86db712e9bd0", finding.vuln_id_from_tool) finding = findings[2] - self.assertEqual('swe10032201245e263h - Storage account should use a private link connection', finding.title) - self.assertEqual(date.fromisoformat('2021-09-28'), finding.date) + self.assertEqual("swe10032201245e263h - Storage account should use a private link connection", finding.title) + self.assertEqual(date.fromisoformat("2021-09-28"), finding.date) self.assertEqual(1032, finding.cwe) - self.assertEqual('Medium', finding.severity) + self.assertEqual("Medium", finding.severity) description = """**Recommendation:** Storage account should use a private link connection **Resource Name:** swe10032201245e263h **Resource Type:** storageaccounts @@ -82,12 +82,12 @@ def test_parse_file_with_multiple_findings(self): **Subscription:** My first subscription **Subscription Id:** 9cfbad7a-7369-42e4-bcce-7677c5b3a44b""" self.assertEqual(description, finding.description) - mitigation = 'To enforce secure communications for your storage accounts, add a private endpoint as described here: https://aka.ms/connectprivatelytostorageaccount.' + mitigation = "To enforce secure communications for your storage accounts, add a private endpoint as described here: https://aka.ms/connectprivatelytostorageaccount." 
self.assertEqual(mitigation, finding.mitigation) - references = 'https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b%2fresourcegroups%2fcloud-shell-storage-westeurope%2fproviders%2fmicrosoft.storage%2fstorageaccounts%2fswe10032201245e263h' + references = "https://portal.azure.com/#blade/Microsoft_Azure_Security/RecommendationsBlade/assessmentKey/cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a/resourceId/%2fsubscriptions%2f9cfbad7a-7369-42e4-bcce-7677c5b3a44b%2fresourcegroups%2fcloud-shell-storage-westeurope%2fproviders%2fmicrosoft.storage%2fstorageaccounts%2fswe10032201245e263h" self.assertEqual(references, finding.references) self.assertTrue(finding.static_finding) self.assertFalse(finding.dynamic_finding) - unique_id_from_tool = '/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/resourcegroups/cloud-shell-storage-westeurope/providers/microsoft.storage/storageaccounts/swe10032201245e263h/providers/Microsoft.Security/assessments/cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a' + unique_id_from_tool = "/subscriptions/9cfbad7a-7369-42e4-bcce-7677c5b3a44b/resourcegroups/cloud-shell-storage-westeurope/providers/microsoft.storage/storageaccounts/swe10032201245e263h/providers/Microsoft.Security/assessments/cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a" self.assertEqual(unique_id_from_tool, finding.unique_id_from_tool) - self.assertEqual('cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a', finding.vuln_id_from_tool) + self.assertEqual("cdc78c07-02b0-4af0-1cb2-cb7c672a8b0a", finding.vuln_id_from_tool) diff --git a/unittests/tools/test_burp_graphql_parser.py b/unittests/tools/test_burp_graphql_parser.py index 1574d4f3e0..320f70e6a3 100644 --- a/unittests/tools/test_burp_graphql_parser.py +++ b/unittests/tools/test_burp_graphql_parser.py @@ -22,15 +22,15 @@ def test_burp_one_finding(self): self.assertIn("remediation 1", findings[0].mitigation) self.assertIn("issue description 1", findings[0].impact) self.assertIn("issue remediation 1", findings[0].mitigation) - self.assertEqual('High', findings[0].severity) + self.assertEqual("High", findings[0].severity) self.assertEqual(1, len(findings[0].unsaved_endpoints)) - self.assertEqual('www.test.com', findings[0].unsaved_endpoints[0].host) - self.assertEqual('path', findings[0].unsaved_endpoints[0].path) - self.assertEqual('https', findings[0].unsaved_endpoints[0].protocol) + self.assertEqual("www.test.com", findings[0].unsaved_endpoints[0].host) + self.assertEqual("path", findings[0].unsaved_endpoints[0].path) + self.assertEqual("https", findings[0].unsaved_endpoints[0].protocol) self.assertEqual(1, len(findings[0].unsaved_req_resp)) - self.assertEqual('request data 1/request data 2/request data 3/', findings[0].unsaved_req_resp[0]['req']) - self.assertIn('ref 1', findings[0].references) - self.assertIn('CWE-79', findings[0].references) + self.assertEqual("request data 1/request data 2/request data 3/", findings[0].unsaved_req_resp[0]["req"]) + self.assertIn("ref 1", findings[0].references) + self.assertIn("CWE-79", findings[0].references) def test_burp_two_findings(self): with open(path.join(path.dirname(__file__), "../scans/burp_graphql/two_findings.json")) as test_file: diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py index ba348b64e5..176ced0f33 100644 --- a/unittests/tools/test_checkmarx_osa_parser.py +++ b/unittests/tools/test_checkmarx_osa_parser.py @@ -59,7 +59,7 @@ def 
test_checkmarx_osa_parse_file_with_single_vulnerability_has_single_finding( self.assertEqual(float, type(item.cvssv3_score)) self.assertEqual(7.5, item.cvssv3_score) self.assertEqual(datetime, type(item.publish_date)) - self.assertEqual(datetime.strptime("2020-12-03T17:15:00", '%Y-%m-%dT%H:%M:%S'), item.publish_date) + self.assertEqual(datetime.strptime("2020-12-03T17:15:00", "%Y-%m-%dT%H:%M:%S"), item.publish_date) self.assertEqual(str, type(item.component_name)) self.assertEqual("com.fasterxml.jackson.core:jackson-databind", item.component_name) self.assertEqual(str, type(item.component_version)) diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py index 6bfbbc1304..55ad10be51 100644 --- a/unittests/tools/test_checkmarx_parser.py +++ b/unittests/tools/test_checkmarx_parser.py @@ -26,7 +26,7 @@ def teardown(self, my_file_handle): # no_finding # ---------------------------------------------------------------------------- # Default checkmarx scanner, aggregated by sink file_path - @patch('dojo.tools.checkmarx.parser.add_language') + @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self, mock): my_file_handle, _product, _engagement, test = self.init( get_unit_tests_path() + "/scans/checkmarx/no_finding.xml", @@ -36,19 +36,19 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings self.teardown(my_file_handle) self.assertEqual(0, len(findings)) - @patch('dojo.tools.checkmarx.parser.add_language') + @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock): """Checkmarx detailed scanner, with all vulnerabilities from checkmarx""" my_file_handle, _product, _engagement, test = self.init( get_unit_tests_path() + "/scans/checkmarx/no_finding.xml", ) parser = CheckmarxParser() - parser.set_mode('detailed') + parser.set_mode("detailed") findings = parser.get_findings(my_file_handle, test) self.teardown(my_file_handle) self.assertEqual(0, len(findings)) - @patch('dojo.tools.checkmarx.parser.add_language') + @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_finding(self, mock): my_file_handle, product, _engagement, test = self.init( get_unit_tests_path() + "/scans/checkmarx/single_finding.xml", @@ -74,15 +74,15 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi item.description, ) self.assertEqual(1, item.nb_occurences) - mock.assert_called_with(product, 'Java', files=1) + mock.assert_called_with(product, "Java", files=1) - @patch('dojo.tools.checkmarx.parser.add_language') + @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self, mock): my_file_handle, product, _engagement, test = self.init( get_unit_tests_path() + "/scans/checkmarx/single_finding.xml", ) parser = CheckmarxParser() - parser.set_mode('detailed') + parser.set_mode("detailed") findings = parser.get_findings(my_file_handle, test) self.teardown(my_file_handle) # Verifications common to both parsers @@ -177,7 +177,7 @@ def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self, item.sast_source_file_path, ) self.assertIsNone(item.nb_occurences) - mock.assert_called_with(product, 'Java', files=1) + mock.assert_called_with(product, "Java", files=1) def 
check_parse_file_with_single_vulnerability_has_single_finding(self, findings):
        self.assertEqual(1, len(findings))
@@ -210,7 +210,7 @@ def check_parse_file_with_single_vulnerability_has_single_finding(self, findings):
    # ----------------------------------------------------------------------------
    # single finding false positive
    # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
@@ -220,20 +220,20 @@ def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(s
        self.teardown(my_file_handle)
        # Verifications common to both parsers
        self.check_parse_file_with_false_positive_is_false_positive(findings)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_detailed_parse_file_with_false_positive_is_false_positive(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
        )
        parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
        findings = parser.get_findings(my_file_handle, test)
        self.teardown(my_file_handle)
        # Verifications common to both parsers
        self.check_parse_file_with_false_positive_is_false_positive(findings)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

    def check_parse_file_with_false_positive_is_false_positive(self, findings):
        self.assertEqual(1, len(findings))
@@ -251,7 +251,7 @@ def check_parse_file_with_false_positive_is_false_positive(self, findings):
    # the result should be one exploitable finding, even though the first one found was false positive
    # ----------------------------------------------------------------------------

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_false_p(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml",
@@ -271,13 +271,13 @@ def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_fal
        self.assertEqual(bool, type(item.false_p))
        # If at least one of the findings in the aggregate is exploitable, the defectdojo finding should not be "false positive"
        self.assertEqual(False, item.false_p)
-        mock.assert_called_with(product, 'Java', files=2)
+        mock.assert_called_with(product, "Java", files=2)
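
The hunk above encodes the aggregation rule its comment spells out: when several Checkmarx results collapse into one DefectDojo finding, the merged finding is false positive only if every member is, so a single exploitable member keeps the whole aggregate actionable. A hedged sketch of that rule in isolation; the helper and dict shape are illustrative, not the parser's internals:

    def aggregate_false_p(results: list) -> bool:
        # False positive only when every aggregated result is marked so;
        # any exploitable member keeps the merged finding actionable.
        return all(r["false_positive"] for r in results)


    assert aggregate_false_p([{"false_positive": True}, {"false_positive": False}]) is False
    assert aggregate_false_p([{"false_positive": True}, {"false_positive": True}]) is True
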
    # ----------------------------------------------------------------------------
    # multiple_findings : source filename = sink filename.
    # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
@@ -287,7 +287,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
        self.teardown(my_file_handle)
        # checkmarx says 3 but we're down to 2 due to the aggregation on sink filename rather than source filename + source line number + sink filename + sink line number
        self.assertEqual(2, len(findings))
-        mock.assert_called_with(product, 'Java', files=3)
+        mock.assert_called_with(product, "Java", files=3)
        with self.subTest(i=0):
            finding = findings[0]
            self.assertEqual("SQL Injection (Assignment5.java)", finding.title)
@@ -296,17 +296,17 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
            self.assertEqual(True, finding.static_finding)
            self.assertEqual("WebGoat/webgoat-lessons/challenge/src/main/java/org/owasp/webgoat/plugin/challenge5/challenge6/Assignment5.java", finding.file_path)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
        )
        parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
        findings = parser.get_findings(my_file_handle, test)
        self.teardown(my_file_handle)
        self.assertEqual(3, len(findings))
-        mock.assert_called_with(product, 'Java', files=3)
+        mock.assert_called_with(product, "Java", files=3)
        with self.subTest(i=0):
            finding = findings[0]
            self.assertEqual("SQL Injection (Assignment5.java)", finding.title)
@@ -319,7 +319,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
    # ----------------------------------------------------------------------------
    # multiple_findings : different sourceFilename but same sinkFilename
    # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sinkFilename_is_aggregated(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
@@ -331,26 +331,26 @@ def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sink
        self.assertEqual(1, len(findings))
        # nb_occurences counts the number of aggregated vulnerabilities from tool
        self.assertEqual(2, findings[0].nb_occurences)
-        mock.assert_called_with(product, 'Java', files=2)
+        mock.assert_called_with(product, "Java", files=2)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
    def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_not_aggregated(self, mock):
        my_file_handle, product, _engagement, test = self.init(
            get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
        )
        parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(2, len(findings))
         self.assertIsNone(findings[0].nb_occurences)
         self.assertIsNone(findings[1].nb_occurences)
-        mock.assert_called_with(product, 'Java', files=2)
+        mock.assert_called_with(product, "Java", files=2)

     # ----------------------------------------------------------------------------
     # multiple_findings : same sourceFilename but different sinkFilename
     # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
@@ -360,24 +360,24 @@ def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sink
         self.teardown(my_file_handle)
         # aggregation is on sink filename but sink filename differ -> not aggregated
         self.assertEqual(2, len(findings))
-        mock.assert_called_with(product, 'Java', files=2)
+        mock.assert_called_with(product, "Java", files=2)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
         )
         parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(2, len(findings))
-        mock.assert_called_with(product, 'Java', files=2)
+        mock.assert_called_with(product, "Java", files=2)

     # ----------------------------------------------------------------------------
     # utf-8 replacement char in various fields of the report. check all finding elements
     # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
@@ -403,15 +403,15 @@ def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
                 item.description,
             )
             self.assertIsNone(item.line)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_detailed_parse_file_with_utf8_replacement_char(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
         )
         parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         # Verifications common to both parsers
@@ -490,7 +490,7 @@ def test_detailed_parse_file_with_utf8_replacement_char(self, mock):
             )
             self.assertEqual(int, type(item.line))
             self.assertEqual(58, item.line)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

     def check_parse_file_with_utf8_replacement_char(self, findings):
         self.assertEqual(1, len(findings))
@@ -523,7 +523,7 @@ def check_parse_file_with_utf8_replacement_char(self, findings):
     # ----------------------------------------------------------------------------
     # more utf-8 non-ascii chars.
     # ----------------------------------------------------------------------------
-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
@@ -549,15 +549,15 @@ def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self,
                 item.description,
             )
             self.assertIsNone(item.line)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_detailed_parse_file_with_utf8_various_non_ascii_char(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
         )
         parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         # Verifications common to both parsers
@@ -636,7 +636,7 @@ def test_detailed_parse_file_with_utf8_various_non_ascii_char(self, mock):
             )
             self.assertEqual(int, type(item.line))
             self.assertEqual(58, item.line)
-        mock.assert_called_with(product, 'Java', files=1)
+        mock.assert_called_with(product, "Java", files=1)

     def check_parse_file_with_utf8_various_non_ascii_char(self, findings):
         self.assertEqual(1, len(findings))
@@ -669,7 +669,7 @@ def check_parse_file_with_utf8_various_non_ascii_char(self, findings):
         self.assertEqual(bool, type(item.static_finding))
         self.assertEqual(True, item.static_finding)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml",
@@ -678,7 +678,7 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(6, len(findings))
-        mock.assert_called_with(product, 'Java', files=4)
+        mock.assert_called_with(product, "Java", files=4)
         with self.subTest(i=0):
             finding = findings[0]
             # ScanStart
@@ -689,7 +689,7 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
             self.assertEqual(bool, type(finding.static_finding))
             self.assertEqual(True, finding.static_finding)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_with_empty_filename(self, mock):
         my_file_handle, product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml",
@@ -698,7 +698,7 @@ def test_file_with_empty_filename(self, mock):
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(1, len(findings))
-        mock.assert_called_with(product, 'PHP', files=1)
+        mock.assert_called_with(product, "PHP", files=1)
         with self.subTest(i=0):
             finding = findings[0]
             # ScanStart
@@ -709,7 +709,7 @@ def test_file_with_empty_filename(self, mock):
             self.assertEqual(bool, type(finding.static_finding))
             self.assertEqual(True, finding.static_finding)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_with_many_aggregated_findings(self, mock):
         my_file_handle, _product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml",
@@ -726,7 +726,7 @@ def test_file_with_many_aggregated_findings(self, mock):
             self.assertEqual(185, finding.nb_occurences)
             self.assertEqual("5273", finding.vuln_id_from_tool)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_with_many_findings_json(self, mock):
         my_file_handle, _product, _engagement, _test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
@@ -760,7 +760,7 @@ def test_file_with_many_findings_json(self, mock):
             self.assertEqual(54, finding.line)
             self.assertEqual("udB1urKobWKTYYlRQbAAub1yRAc=", finding.unique_id_from_tool)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_file_issue6956(self, mock):
         my_file_handle, _product, _engagement, _test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/sample_report.json",
@@ -823,24 +823,24 @@ def test_file_issue6956(self, mock):
             self.assertEqual("/webgoat-server/Dockerfile", finding.file_path)
             self.assertEqual(datetime.date(2022, 5, 6), finding.date)

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_finding_date_should_be_date_xml(self, mock):
         my_file_handle, _product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
         )
         parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(findings[0].date, datetime.date(2018, 2, 25))

-    @patch('dojo.tools.checkmarx.parser.add_language')
+    @patch("dojo.tools.checkmarx.parser.add_language")
     def test_finding_date_should_be_date_json(self, mock):
         my_file_handle, _product, _engagement, test = self.init(
             get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
         )
         parser = CheckmarxParser()
-        parser.set_mode('detailed')
+        parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
         self.teardown(my_file_handle)
         self.assertEqual(findings[0].date, datetime.date(2022, 2, 25))
diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py
index 49bc3d1a5f..f4379b0c0c 100644
--- a/unittests/tools/test_checkov_parser.py
+++ b/unittests/tools/test_checkov_parser.py
@@ -39,43 +39,43 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
         terraform_findings_amount = 0
         dockerfile_findings_amount = 0
         for finding in findings:
-            if 'Check Type: terraform' in finding.description:
+            if "Check Type: terraform" in finding.description:
                 terraform_findings_amount += 1
-            elif 'Check Type: dockerfile' in finding.description:
+            elif "Check Type: dockerfile" in finding.description:
                 dockerfile_findings_amount += 1
         self.assertEqual(11, terraform_findings_amount)
         self.assertEqual(2, dockerfile_findings_amount)

         # Terraform
         first_terraform_finding = findings[0]
-        self.assertEqual('Medium', first_terraform_finding.severity)
+        self.assertEqual("Medium", first_terraform_finding.severity)
         self.assertEqual(
-            'Check Type: terraform\n'
-            'Check Id: CKV_AWS_161\n'
-            'Ensure RDS database has IAM authentication enabled\n',
+            "Check Type: terraform\n"
+            "Check Id: CKV_AWS_161\n"
+            "Ensure RDS database has IAM authentication enabled\n",
             first_terraform_finding.description,
         )
-        self.assertEqual('/aws/db-app.tf', first_terraform_finding.file_path)
+        self.assertEqual("/aws/db-app.tf", first_terraform_finding.file_path)
         self.assertEqual(1, first_terraform_finding.line)
-        self.assertEqual('aws_db_instance.default', first_terraform_finding.component_name)
-        self.assertEqual('', first_terraform_finding.mitigation)
-        self.assertEqual('', first_terraform_finding.references)
+        self.assertEqual("aws_db_instance.default", first_terraform_finding.component_name)
+        self.assertEqual("", first_terraform_finding.mitigation)
+        self.assertEqual("", first_terraform_finding.references)

         # Dockerfile
         first_dockerfile_finding = findings[11]
-        self.assertEqual('Medium', first_dockerfile_finding.severity)
+        self.assertEqual("Medium", first_dockerfile_finding.severity)
         self.assertEqual(
-            'Check Type: dockerfile\n'
-            'Check Id: CKV_DOCKER_3\n'
-            'Ensure that a user for the container has been created\n',
+            "Check Type: dockerfile\n"
+            "Check Id: CKV_DOCKER_3\n"
+            "Ensure that a user for the container has been created\n",
             first_dockerfile_finding.description,
         )
-        self.assertEqual('/aws/resources/Dockerfile', first_dockerfile_finding.file_path)
+        self.assertEqual("/aws/resources/Dockerfile", first_dockerfile_finding.file_path)
         self.assertEqual(0, first_dockerfile_finding.line)
-        self.assertEqual('/aws/resources/Dockerfile.', first_dockerfile_finding.component_name)
-        self.assertEqual('', first_dockerfile_finding.mitigation)
+        self.assertEqual("/aws/resources/Dockerfile.", first_dockerfile_finding.component_name)
+        self.assertEqual("", first_dockerfile_finding.mitigation)
         self.assertEqual(
-            'https://docs.bridgecrew.io/docs/ensure-that-a-user-for-the-container-has-been-created',
+            "https://docs.bridgecrew.io/docs/ensure-that-a-user-for-the-container-has-been-created",
             first_dockerfile_finding.references,
         )
diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py
index 756ba4c780..6053d38e67 100644
--- a/unittests/tools/test_codechecker_parser.py
+++ b/unittests/tools/test_codechecker_parser.py
@@ -40,11 +40,11 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         self.assertEqual(94, len(findings), str(len(findings)))
         self.assertEqual(sum(1 for f in findings if f.duplicate), 0)

-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'HIGH'), 20)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'INFO'), 6)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'CRITICAL'), 0)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'LOW'), 5)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'MEDIUM'), 63)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "HIGH"), 20)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "INFO"), 6)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "CRITICAL"), 0)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "LOW"), 5)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "MEDIUM"), 63)

         finding = findings[0]
         self.assertEqual("clang-diagnostic-sign-compare", finding.title)
diff --git a/unittests/tools/test_contrast_parser.py b/unittests/tools/test_contrast_parser.py
index 2d48445cd5..6b6f3ae85f 100644
--- a/unittests/tools/test_contrast_parser.py
+++ b/unittests/tools/test_contrast_parser.py
@@ -29,9 +29,9 @@ def test_example_report(self):
             self.assertIsNotNone(finding.unsaved_endpoints)
             self.assertEqual(1, len(finding.unsaved_endpoints))
             endpoint = finding.unsaved_endpoints[0]
-            self.assertEqual('http', endpoint.protocol)
-            self.assertEqual('0.0.0.0', endpoint.host)
-            self.assertEqual('WebGoat/login.mvc', endpoint.path)
+            self.assertEqual("http", endpoint.protocol)
+            self.assertEqual("0.0.0.0", endpoint.host)
+            self.assertEqual("WebGoat/login.mvc", endpoint.path)
         with self.subTest(i=11):
             finding = findings[11]
             self.assertEqual(datetime.date(2018, 4, 23), finding.date.date())
@@ -44,13 +44,13 @@ def test_example_report(self):
             self.assertIsNotNone(finding.unsaved_endpoints)
             self.assertEqual(4, len(finding.unsaved_endpoints))
             endpoint = finding.unsaved_endpoints[0]
-            self.assertEqual('http', endpoint.protocol)
-            self.assertEqual('0.0.0.0', endpoint.host)
-            self.assertEqual('WebGoat/services/SoapRequest', endpoint.path)
+            self.assertEqual("http", endpoint.protocol)
+            self.assertEqual("0.0.0.0", endpoint.host)
+            self.assertEqual("WebGoat/services/SoapRequest", endpoint.path)
             endpoint = finding.unsaved_endpoints[1]
-            self.assertEqual('http', endpoint.protocol)
-            self.assertEqual('0.0.0.0', endpoint.host)
-            self.assertEqual('WebGoat/attack', endpoint.path)
+            self.assertEqual("http", endpoint.protocol)
+            self.assertEqual("0.0.0.0", endpoint.host)
+            self.assertEqual("WebGoat/attack", endpoint.path)

     def test_example2_report(self):
         test = Test()
diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py
index a87248ca3d..5201d7c291 100644
--- a/unittests/tools/test_crashtest_security_parser.py
+++ b/unittests/tools/test_crashtest_security_parser.py
@@ -21,7 +21,7 @@ def test_crashtest_security_json_parser_full_file_has_many_findings(self):
         self.assertIsNone(finding.unsaved_vulnerability_ids)
         finding = findings[4]
         self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2016-4072', finding.unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2016-4072", finding.unsaved_vulnerability_ids[0])

     def test_crashtest_security_json_parser_extracted_data_file_has_many_findings(self):
         testfile = open(
diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py
index 16a346ce16..4a548f6702 100644
--- a/unittests/tools/test_cyclonedx_parser.py
+++ b/unittests/tools/test_cyclonedx_parser.py
@@ -26,7 +26,7 @@ def test_grype_report(self):
             self.assertEqual("2.3.0-2+deb10u2", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(1, len(vulnerability_ids))
-            self.assertEqual('CVE-2019-6988', vulnerability_ids[0])
+            self.assertEqual("CVE-2019-6988", vulnerability_ids[0])
             self.assertEqual(datetime.date(2021, 4, 13), datetime.datetime.date(finding.date))

     def test_spec1_report(self):
@@ -41,7 +41,7 @@ def test_spec1_report(self):
         finding = findings[0]
         vulnerability_ids = finding.unsaved_vulnerability_ids
         self.assertEqual(1, len(vulnerability_ids))
-        self.assertEqual('CVE-2018-7489', vulnerability_ids[0])
+        self.assertEqual("CVE-2018-7489", vulnerability_ids[0])
         self.assertEqual("Critical", finding.severity)
         self.assertIn(finding.cwe, [184, 502])  # there is 2 CWE in the report
         self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3)
@@ -64,7 +64,7 @@ def test_spec1_report_low_first(self):
         finding = findings[0]
         vulnerability_ids = finding.unsaved_vulnerability_ids
         self.assertEqual(1, len(vulnerability_ids))
-        self.assertEqual('CVE-2018-7489', vulnerability_ids[0])
+        self.assertEqual("CVE-2018-7489", vulnerability_ids[0])
         self.assertEqual("Critical", finding.severity)
         self.assertIn(finding.cwe, [184, 502])  # there is 2 CWE in the report
         self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3)
@@ -126,7 +126,7 @@ def test_cyclonedx_grype_11_report(self):
             self.assertEqual("1.30+dfsg-6", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(1, len(vulnerability_ids))
-            self.assertEqual('CVE-2019-9923', vulnerability_ids[0])
+            self.assertEqual("CVE-2019-9923", vulnerability_ids[0])
             self.assertIn("urn:uuid:be0e9032-5b6b-4ce4-9be4-e5956a0309c1", finding.description)
             self.assertEqual("CVE-2019-9923", finding.vuln_id_from_tool)
         with self.subTest(i=380):
@@ -136,7 +136,7 @@ def test_cyclonedx_grype_11_report(self):
             self.assertEqual("1.30+dfsg-6", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(1, len(vulnerability_ids))
-            self.assertEqual('CVE-2021-20193', vulnerability_ids[0])
+            self.assertEqual("CVE-2021-20193", vulnerability_ids[0])
             self.assertIn("urn:uuid:17a8ccee-f13b-4d9d-abfc-f3964597df9a", finding.description)
             self.assertEqual("CVE-2021-20193", finding.vuln_id_from_tool)

@@ -180,9 +180,9 @@ def test_cyclonedx_1_4_xml(self):
             self.assertEqual("SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111", finding.vuln_id_from_tool)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(3, len(vulnerability_ids))
-            self.assertEqual('SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111', vulnerability_ids[0])
-            self.assertEqual('CVE-2018-7489', vulnerability_ids[1])
-            self.assertEqual('CVE-2018-7489', vulnerability_ids[2])
+            self.assertEqual("SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111", vulnerability_ids[0])
+            self.assertEqual("CVE-2018-7489", vulnerability_ids[1])
+            self.assertEqual("CVE-2018-7489", vulnerability_ids[2])
             self.assertTrue(finding.is_mitigated)
             self.assertFalse(finding.active)

@@ -225,8 +225,8 @@ def test_cyclonedx_1_4_json(self):
             self.assertEqual("SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111", finding.vuln_id_from_tool)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(2, len(vulnerability_ids))
-            self.assertEqual('SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111', vulnerability_ids[0])
-            self.assertEqual('CVE-2018-7489', vulnerability_ids[1])
+            self.assertEqual("SNYK-JAVA-COMFASTERXMLJACKSONCORE-32111", vulnerability_ids[0])
+            self.assertEqual("CVE-2018-7489", vulnerability_ids[1])
             self.assertTrue(finding.is_mitigated)
             self.assertFalse(finding.active)

@@ -247,7 +247,7 @@ def test_cyclonedx_1_4_jake_json(self):
             self.assertEqual("2.0", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(2, len(vulnerability_ids))
-            self.assertEqual('CVE-2021-33203', vulnerability_ids[1])
+            self.assertEqual("CVE-2021-33203", vulnerability_ids[1])
             self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
             self.assertIn(
                 "Django before 2.2.24, 3.x before 3.1.12, and 3.2.x before 3.2.4 has a potential directory traversal",
@@ -262,7 +262,7 @@ def test_cyclonedx_1_4_jake_json(self):
             self.assertEqual("2.0", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(2, len(vulnerability_ids))
-            self.assertEqual('CVE-2018-7536', vulnerability_ids[1])
+            self.assertEqual("CVE-2018-7536", vulnerability_ids[1])
             self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
             self.assertIn(
                 "An issue was discovered in Django 2.0 before 2.0.3, 1.11 before 1.11.11, and 1.8 before 1.8.19.",
@@ -276,7 +276,7 @@ def test_cyclonedx_1_4_jake_json(self):
             self.assertEqual("2.0", finding.component_version)
             vulnerability_ids = finding.unsaved_vulnerability_ids
             self.assertEqual(2, len(vulnerability_ids))
-            self.assertEqual('CVE-2018-6188', vulnerability_ids[1])
+            self.assertEqual("CVE-2018-6188", vulnerability_ids[1])
             self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
             self.assertIn(
                 "django.contrib.auth.forms.AuthenticationForm in Django 2.0 before 2.0.2, and 1.11.8 and 1.11.9, allows remote attackers to obtain potentially sensitive information",
diff --git a/unittests/tools/test_deepfence_threatmapper_parser.py b/unittests/tools/test_deepfence_threatmapper_parser.py
index 8e899a5f3a..e4e6070dfe 100644
--- a/unittests/tools/test_deepfence_threatmapper_parser.py
+++ b/unittests/tools/test_deepfence_threatmapper_parser.py
@@ -6,37 +6,37 @@ class TestDeepfenceThreatmapperParser(DojoTestCase):
     def test_parse_file_compliance_report(self):
-        testfile = open("unittests/scans/deepfence_threatmapper/compliance_report.xlsx", "rb")
-        parser = DeepfenceThreatmapperParser()
-        findings = parser.get_findings(testfile, Test())
-        self.assertEqual(7, len(findings))
-        self.assertEqual(findings[0].title, "Threatmapper_Compliance_Report-gdpr_3.6")
-        self.assertEqual(findings[0].severity, "Info")
+        with open("unittests/scans/deepfence_threatmapper/compliance_report.xlsx", "rb") as testfile:
+            parser = DeepfenceThreatmapperParser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(7, len(findings))
+            self.assertEqual(findings[0].title, "Threatmapper_Compliance_Report-gdpr_3.6")
+            self.assertEqual(findings[0].severity, "Info")

     def test_parse_file_malware_report(self):
-        testfile = open("unittests/scans/deepfence_threatmapper/malware_report.xlsx", "rb")
-        parser = DeepfenceThreatmapperParser()
-        findings = parser.get_findings(testfile, Test())
-        self.assertEqual(9, len(findings))
-        self.assertEqual(findings[0].title, "MD5_Constants")
-        self.assertEqual(findings[0].severity, "Low")
-        self.assertEqual(findings[0].file_path, "/tmp/Deepfence/YaraHunter/df_db09257b02e615049e0aecc05be2dc2401735e67db4ab74225df777c62c39753/usr/sbin/mkfs.cramfs")
+        with open("unittests/scans/deepfence_threatmapper/malware_report.xlsx", "rb") as testfile:
+            parser = DeepfenceThreatmapperParser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(9, len(findings))
+            self.assertEqual(findings[0].title, "MD5_Constants")
+            self.assertEqual(findings[0].severity, "Low")
+            self.assertEqual(findings[0].file_path, "/tmp/Deepfence/YaraHunter/df_db09257b02e615049e0aecc05be2dc2401735e67db4ab74225df777c62c39753/usr/sbin/mkfs.cramfs")

     def test_parse_file_secret_report(self):
-        testfile = open("unittests/scans/deepfence_threatmapper/secret_report.xlsx", "rb")
-        parser = DeepfenceThreatmapperParser()
-        findings = parser.get_findings(testfile, Test())
-        self.assertEqual(7, len(findings))
-        self.assertEqual(findings[0].title, "Username and password in URI")
-        self.assertEqual(findings[0].severity, "High")
-        self.assertEqual(findings[0].file_path, "usr/share/doc/curl-8.3.0/TheArtOfHttpScripting.md")
+        with open("unittests/scans/deepfence_threatmapper/secret_report.xlsx", "rb") as testfile:
+            parser = DeepfenceThreatmapperParser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(7, len(findings))
+            self.assertEqual(findings[0].title, "Username and password in URI")
+            self.assertEqual(findings[0].severity, "High")
+            self.assertEqual(findings[0].file_path, "usr/share/doc/curl-8.3.0/TheArtOfHttpScripting.md")

     def test_parse_file_vulnerability_report(self):
-        testfile = open("unittests/scans/deepfence_threatmapper/vulnerability_report.xlsx", "rb")
-        parser = DeepfenceThreatmapperParser()
-        findings = parser.get_findings(testfile, Test())
-        self.assertEqual(3, len(findings))
-        self.assertEqual(findings[0].title, "Threatmapper_Vuln_Report-CVE-2021-36084")
-        self.assertEqual(findings[0].severity, "Low")
-        self.assertEqual(findings[0].mitigation, "2.5-10.amzn2.0.1")
-        self.assertEqual(findings[0].cve, "CVE-2021-36084")
+        with open("unittests/scans/deepfence_threatmapper/vulnerability_report.xlsx", "rb") as testfile:
+            parser = DeepfenceThreatmapperParser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(3, len(findings))
+            self.assertEqual(findings[0].title, "Threatmapper_Vuln_Report-CVE-2021-36084")
+            self.assertEqual(findings[0].severity, "Low")
+            self.assertEqual(findings[0].mitigation, "2.5-10.amzn2.0.1")
+            self.assertEqual(findings[0].cve, "CVE-2021-36084")
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index f72db048a4..161a9f47d9 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -80,7 +80,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
                 items[0].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400)),
             )  # 2016-11-05T14:52:15.748-0400
             self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
-            self.assertEqual('CVE-0000-0001', items[0].unsaved_vulnerability_ids[0])
+            self.assertEqual("CVE-0000-0001", items[0].unsaved_vulnerability_ids[0])

         with self.subTest(i=1):
             self.assertEqual(items[1].title, "org.dom4j:dom4j:2.1.1.redhat-00001 | CVE-0000-0001")
@@ -102,7 +102,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
             )
             self.assertEqual(items[1].tags, "related")
             self.assertEqual(1, len(items[1].unsaved_vulnerability_ids))
-            self.assertEqual('CVE-0000-0001', items[1].unsaved_vulnerability_ids[0])
+            self.assertEqual("CVE-0000-0001", items[1].unsaved_vulnerability_ids[0])

         with self.subTest(i=2):
             self.assertEqual(items[2].title, "org.dom4j:dom4j:2.1.1.redhat-00001 | CVE-0000-0001")
@@ -123,7 +123,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
                 "Update org.dom4j:dom4j:2.1.1.redhat-00001 to at least the version recommended in the description",
             )
             self.assertEqual(1, len(items[2].unsaved_vulnerability_ids))
-            self.assertEqual('CVE-0000-0001', items[2].unsaved_vulnerability_ids[0])
+            self.assertEqual("CVE-0000-0001", items[2].unsaved_vulnerability_ids[0])

         with self.subTest(i=3):
             # identifier -> package url javascript, no vulnerabilitids, 3 vulnerabilities, relateddependencies without filename (pre v6.0.0)
@@ -166,7 +166,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
                 items[4].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
             )
             self.assertEqual(1, len(items[4].unsaved_vulnerability_ids))
-            self.assertEqual('CVE-2020-7608', items[4].unsaved_vulnerability_ids[0])
+            self.assertEqual("CVE-2020-7608", items[4].unsaved_vulnerability_ids[0])

         with self.subTest(i=5):
             self.assertEqual(
@@ -203,7 +203,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
                 "Update org.dom4j:dom4j:2.1.1.redhat-00001 to at least the version recommended in the description",
             )
             self.assertEqual(1, len(items[6].unsaved_vulnerability_ids))
-            self.assertEqual('CVE-0000-0001', items[6].unsaved_vulnerability_ids[0])
+            self.assertEqual("CVE-0000-0001", items[6].unsaved_vulnerability_ids[0])

         with self.subTest(i=7):
             # identifier -> maven java
@@ -302,4 +302,4 @@ def test_parse_file_pr6439(self):
             items[0].date, datetime(2022, 12, 14, 1, 35, 43, 684166, tzinfo=tzlocal()),
         )  # 2016-11-05T14:52:15.748-0400
         self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2015-3208', items[0].unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2015-3208", items[0].unsaved_vulnerability_ids[0])
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index 40fd0a8177..783d68441f 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -40,12 +40,12 @@ def test_dependency_track_parser_has_many_findings(self):
         self.assertIsNone(findings[0].unsaved_vulnerability_ids)
         self.assertIsNone(findings[1].unsaved_vulnerability_ids)
         self.assertEqual(1, len(findings[2].unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2016-2097', findings[2].unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2016-2097", findings[2].unsaved_vulnerability_ids[0])
         self.assertTrue(findings[2].false_p)
         self.assertTrue(findings[2].is_mitigated)
         self.assertFalse(findings[2].active)
         self.assertEqual(1, len(findings[3].unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2016-2097', findings[3].unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2016-2097", findings[3].unsaved_vulnerability_ids[0])

     def test_dependency_track_parser_has_one_finding(self):
         with open(
@@ -75,7 +75,7 @@ def test_dependency_track_parser_findings_with_alias(self):
         self.assertEqual(12, len(findings))
         self.assertTrue(all(item.file_path is not None for item in findings))
         self.assertTrue(all(item.vuln_id_from_tool is not None for item in findings))
-        self.assertIn('CVE-2022-42004', findings[0].unsaved_vulnerability_ids)
+        self.assertIn("CVE-2022-42004", findings[0].unsaved_vulnerability_ids)

     def test_dependency_track_parser_findings_with_empty_alias(self):
         with open(
@@ -85,7 +85,7 @@ def test_dependency_track_parser_findings_with_empty_alias(self):
             findings = parser.get_findings(testfile, Test())

         self.assertEqual(12, len(findings))
-        self.assertIn('CVE-2022-2053', findings[11].unsaved_vulnerability_ids)
+        self.assertIn("CVE-2022-2053", findings[11].unsaved_vulnerability_ids)

     def test_dependency_track_parser_findings_with_cvssV3_score(self):
         with open(f"{get_unit_tests_path()}/scans/dependency_track/many_findings_with_cvssV3_score.json") as testfile:
@@ -94,7 +94,7 @@ def test_dependency_track_parser_findings_with_cvssV3_score(self):
         self.assertEqual(12, len(findings))
         self.assertTrue(all(item.file_path is not None for item in findings))
         self.assertTrue(all(item.vuln_id_from_tool is not None for item in findings))
-        self.assertIn('CVE-2022-42004', findings[0].unsaved_vulnerability_ids)
+        self.assertIn("CVE-2022-42004", findings[0].unsaved_vulnerability_ids)
         self.assertEqual(8.3, findings[0].cvssv3_score)

     def test_dependency_track_parser_findings_with_epss_score(self):
@@ -105,4 +105,4 @@ def test_dependency_track_parser_findings_with_epss_score(self):
         self.assertEqual(0.00043, findings[0].epss_score)
         self.assertEqual(0.07756, findings[0].epss_percentile)
         self.assertEqual(4.2, findings[0].cvssv3_score)
-        self.assertIn('CVE-2023-45803', findings[0].unsaved_vulnerability_ids)
+        self.assertIn("CVE-2023-45803", findings[0].unsaved_vulnerability_ids)
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index b3d5f603f0..820972c076 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -34,10 +34,10 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         parser = DockerBenchParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(len(findings), 50)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'CRITICAL'), 0)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'HIGH'), 32)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'LOW'), 16)
-        self.assertEqual(sum(1 for f in findings if f.severity.upper() == 'INFO'), 2)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "CRITICAL"), 0)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "HIGH"), 32)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "LOW"), 16)
+        self.assertEqual(sum(1 for f in findings if f.severity.upper() == "INFO"), 2)

         finding = findings[3]
         self.assertEqual("High", finding.severity)
diff --git a/unittests/tools/test_fortify_parser.py b/unittests/tools/test_fortify_parser.py
index c65531fcf5..cba9a984a9 100644
--- a/unittests/tools/test_fortify_parser.py
+++ b/unittests/tools/test_fortify_parser.py
@@ -27,7 +27,7 @@ def test_fortify_few_findings(self):
             self.assertEqual("High", finding.severity)
             self.assertEqual("app/build/intermediates/bundle_manifest/developDebug/processDevelopDebugManifest/bundle-manifest/AndroidManifest.xml", finding.file_path)
             self.assertEqual(11, finding.line)
-            self.assertEqual('53C25D2FC6950554F16D3CEF9E41EF6F', finding.unique_id_from_tool)
+            self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)

     def test_fortify_few_findings_count_chart(self):
         with open("unittests/scans/fortify/fortify_few_findings_count_chart.xml") as testfile:
@@ -40,7 +40,7 @@ def test_fortify_few_findings_count_chart(self):
             self.assertEqual("High", finding.severity)
             self.assertEqual("app/build/intermediates/bundle_manifest/developDebug/processDevelopDebugManifest/bundle-manifest/AndroidManifest.xml", finding.file_path)
             self.assertEqual(11, finding.line)
-            self.assertEqual('53C25D2FC6950554F16D3CEF9E41EF6F', finding.unique_id_from_tool)
+            self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)

     def test_fortify_issue6260(self):
         with open("unittests/scans/fortify/issue6260.xml") as testfile:
@@ -53,7 +53,7 @@ def test_fortify_issue6260(self):
            self.assertEqual("Low", finding.severity)
            self.assertEqual("src/main/java/command.java", finding.file_path)
            self.assertEqual(40, finding.line)
-            self.assertEqual('7A2F1C728BDDBB17C7CB31CEDF5D8F85', finding.unique_id_from_tool)
+            self.assertEqual("7A2F1C728BDDBB17C7CB31CEDF5D8F85", finding.unique_id_from_tool)

     def test_fortify_issue6082(self):
         with open("unittests/scans/fortify/issue6082.xml") as testfile:
@@ -66,14 +66,14 @@ def test_fortify_issue6082(self):
             self.assertEqual("High", finding.severity)
             self.assertEqual("login.html", finding.file_path)
             self.assertEqual(19, finding.line)
-            self.assertEqual('F46C9EF7203D77D83D3486BCDC78565F', finding.unique_id_from_tool)
+            self.assertEqual("F46C9EF7203D77D83D3486BCDC78565F", finding.unique_id_from_tool)
         with self.subTest(i=1):
             finding = findings[1]
             self.assertEqual("Unreleased Resource: Database - MyContextListener.java: 28", finding.title)
             self.assertEqual("High", finding.severity)
             self.assertEqual("src/adrui/MyContextListener.java", finding.file_path)
             self.assertEqual(28, finding.line)
-            self.assertEqual('B5B15F27E10F4D7799BD0ED1E6D34C5D', finding.unique_id_from_tool)
+            self.assertEqual("B5B15F27E10F4D7799BD0ED1E6D34C5D", finding.unique_id_from_tool)

     def test_fortify_many_fdr_findings(self):
         with open("unittests/scans/fortify/many_findings.fpr") as testfile:
@@ -84,6 +84,6 @@ def test_fortify_many_fdr_findings(self):
         finding = findings[0]
         self.assertEqual("Cross-Site Request Forgery 114E5A67-3446-4DD5-B578-D0E6FDBB304E", finding.title)
         self.assertEqual("High", finding.severity)
-        self.assertEqual('114E5A67-3446-4DD5-B578-D0E6FDBB304E', finding.unique_id_from_tool)
+        self.assertEqual("114E5A67-3446-4DD5-B578-D0E6FDBB304E", finding.unique_id_from_tool)
         finding = findings[12]
         self.assertEqual("Critical", finding.severity)
diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py
index f8a3f467a7..8684efe4b5 100644
--- a/unittests/tools/test_generic_parser.py
+++ b/unittests/tools/test_generic_parser.py
@@ -18,9 +18,9 @@ def __init__(self, name, content):

 class TestGenericParser(DojoTestCase):
     def setUp(self):
-        self.product = Product(name='sample product',
-                               description='what a description')
-        self.engagement = Engagement(name='sample engagement',
+        self.product = Product(name="sample product",
+                               description="what a description")
+        self.engagement = Engagement(name="sample engagement",
                                      product=self.product)
         self.test = Test(engagement=self.engagement)

@@ -126,7 +126,7 @@ def test_parsed_finding_has_title(self):
         file = TestFile("findings.csv", content)
         parser = GenericParser()
         findings = parser.get_findings(file, self.test)
-        self.assertEqual('Potential XSS Vulnerability',
+        self.assertEqual("Potential XSS Vulnerability",
                          findings[0].title)
     def test_parsed_finding_has_cve(self):
@@ -173,10 +173,10 @@ def test_parsed_finding_has_url(self):
         finding = findings[0]
         self.assertEqual(1, len(finding.unsaved_endpoints))
         endpoint = finding.unsaved_endpoints[0]
-        self.assertEqual('localhost', endpoint.host)
+        self.assertEqual("localhost", endpoint.host)
         self.assertEqual(80, endpoint.port)
-        self.assertEqual('http', endpoint.protocol)
-        self.assertEqual('default.aspx', endpoint.path)
+        self.assertEqual("http", endpoint.protocol)
+        self.assertEqual("default.aspx", endpoint.path)
         self.assertIsNone(endpoint.query)
         self.assertIsNone(endpoint.fragment)
         self.assertEqual(True, finding.active)
@@ -195,7 +195,7 @@ def test_parsed_finding_has_severity(self):
         for finding in findings:
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
-        self.assertEqual('High', findings[0].severity)
+        self.assertEqual("High", findings[0].severity)

     def test_parsed_finding_with_invalid_severity_has_info_severity(self):
         content = """Date,Title,CweId,Url,Severity,Description,Mitigation,Impact,References,Active,Verified
@@ -210,7 +210,7 @@ def test_parsed_finding_with_invalid_severity_has_info_severity(self):
         for finding in findings:
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
-        self.assertEqual('Info', findings[0].severity)
+        self.assertEqual("Info", findings[0].severity)

     def test_parsed_finding_has_description(self):
         content = """Date,Title,CweId,Url,Severity,Description,Mitigation,Impact,References,Active,Verified
@@ -226,7 +226,7 @@ def test_parsed_finding_has_description(self):
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
         self.assertEqual(
-            'FileName: default.aspx.cs\nDescription: Potential XSS Vulnerability\nLine:18\nCode Line: Response.Write(output);',
+            "FileName: default.aspx.cs\nDescription: Potential XSS Vulnerability\nLine:18\nCode Line: Response.Write(output);",
             findings[0].description)

     def test_parsed_finding_has_mitigation(self):
@@ -242,7 +242,7 @@ def test_parsed_finding_has_mitigation(self):
         for finding in findings:
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
-        self.assertEqual('None Currently Available',
+        self.assertEqual("None Currently Available",
                          findings[0].mitigation)

     def test_parsed_finding_has_impact(self):
@@ -258,7 +258,7 @@ def test_parsed_finding_has_impact(self):
         for finding in findings:
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
-        self.assertEqual('Impact is currently unknown',
+        self.assertEqual("Impact is currently unknown",
                          findings[0].impact)

     def test_parsed_finding_has_references(self):
@@ -274,7 +274,7 @@ def test_parsed_finding_has_references(self):
         for finding in findings:
             for endpoint in finding.unsaved_endpoints:
                 endpoint.clean()
-        self.assertEqual('Finding has references.', findings[0].references)
+        self.assertEqual("Finding has references.", findings[0].references)

     def test_parsed_finding_has_positive_active_status(self):
         content = """Date,Title,CweId,Url,Severity,Description,Mitigation,Impact,References,Active,Verified
@@ -428,8 +428,8 @@ def test_column_order_is_flexible(self):
         finding1 = findings1[0]
         finding2 = findings2[0]

-        fields1 = {k: v for k, v in finding1.__dict__.items() if k != '_state'}
-        fields2 = {k: v for k, v in finding2.__dict__.items() if k != '_state'}
+        fields1 = {k: v for k, v in finding1.__dict__.items() if k != "_state"}
+        fields2 = {k: v for k, v in finding2.__dict__.items() if k != "_state"}

         self.assertEqual(fields1, fields2)
diff --git a/unittests/tools/test_harbor_vulnerability_parser.py b/unittests/tools/test_harbor_vulnerability_parser.py
index 14994db1fd..9d6aa3e578 100644
--- a/unittests/tools/test_harbor_vulnerability_parser.py
+++ b/unittests/tools/test_harbor_vulnerability_parser.py
@@ -25,13 +25,13 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
             finding.description, "This is a sample description for sample description from Harbor API.",
         )
-        self.assertEqual(finding.severity, 'Info')
-        self.assertEqual(finding.mitigation, 'Upgrade package to version unexploitable-version')
+        self.assertEqual(finding.severity, "Info")
+        self.assertEqual(finding.mitigation, "Upgrade package to version unexploitable-version")
         self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
-        self.assertEqual(finding.unsaved_vulnerability_ids[0], 'CVE-YYYY-NNN')
-        self.assertEqual(finding.component_name, 'package')
-        self.assertEqual(finding.component_version, 'exploitable-version')
-        self.assertEqual(finding.references, 'https://github.com/goharbor/harbor\n')
+        self.assertEqual(finding.unsaved_vulnerability_ids[0], "CVE-YYYY-NNN")
+        self.assertEqual(finding.component_name, "package")
+        self.assertEqual(finding.component_version, "exploitable-version")
+        self.assertEqual(finding.references, "https://github.com/goharbor/harbor\n")

     # Sample with Multiple Test
     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
@@ -41,7 +41,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         self.assertEqual(5, len(findings))

         finding = findings[1]
-        self.assertEqual(finding.severity, 'High')
+        self.assertEqual(finding.severity, "High")
         self.assertIsNone(finding.mitigation)
         self.assertIsNone(finding.references)

@@ -52,8 +52,8 @@ def test_parse_file_with_multiple_vuln_has_multiple_trivy_findings(self):
         findings = parser.get_findings(testfile, Test())

         finding = findings[0]
-        self.assertEqual(finding.severity, 'High')
-        self.assertEqual(finding.cwe, '125')
+        self.assertEqual(finding.severity, "High")
+        self.assertEqual(finding.cwe, "125")

     # Sample with harborapi pip
     def test_parse_file_with_multiple_vuln_has_harborapi_pip_package(self):
@@ -63,5 +63,5 @@ def test_parse_file_with_multiple_vuln_has_harborapi_pip_package(self):
         self.assertEqual(2, len(findings))

         finding = findings[0]
-        self.assertEqual(finding.severity, 'Medium')
-        self.assertEqual(finding.cwe, '787')
+        self.assertEqual(finding.severity, "Medium")
+        self.assertEqual(finding.cwe, "787")
diff --git a/unittests/tools/test_horusec_parser.py b/unittests/tools/test_horusec_parser.py
index 5353acdd79..ee56202524 100644
--- a/unittests/tools/test_horusec_parser.py
+++ b/unittests/tools/test_horusec_parser.py
@@ -13,7 +13,7 @@ def test_get_findings(self):
         parser = HorusecParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(267, len(findings))
-        self.assertEqual('2021-10-19', findings[0].date.strftime("%Y-%m-%d"))
+        self.assertEqual("2021-10-19", findings[0].date.strftime("%Y-%m-%d"))

     def test_get_tests(self):
         """Version 2.6.3 with big project in Python"""
@@ -22,7 +22,7 @@ def test_get_tests(self):
         tests = parser.get_tests("Horusec Scan", testfile)
         self.assertEqual(1, len(tests))
         test = tests[0]
-        self.assertEqual('2.6.3', test.version)
+        self.assertEqual("2.6.3", test.version)
         self.assertEqual(267, len(test.findings))
         findings = test.findings
         with self.subTest(i=0):
diff --git a/unittests/tools/test_ibm_app_parser.py b/unittests/tools/test_ibm_app_parser.py
index cc58e1b014..ecaa3f017e 100644
--- a/unittests/tools/test_ibm_app_parser.py
+++ b/unittests/tools/test_ibm_app_parser.py
@@ -16,10 +16,10 @@ def test_parse_file(self):
         self.assertEqual(27, len(findings))

         finding = findings[15]
-        self.assertEqual('High', finding.severity)
+        self.assertEqual("High", finding.severity)
         self.assertEqual(79, finding.cwe)
         self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
-        self.assertEqual('CVE-2022-00001', finding.unsaved_vulnerability_ids[0])
+        self.assertEqual("CVE-2022-00001", finding.unsaved_vulnerability_ids[0])

         finding = findings[1]
-        self.assertEqual('Info', finding.severity)
+        self.assertEqual("Info", finding.severity)
diff --git a/unittests/tools/test_intsights_parser.py b/unittests/tools/test_intsights_parser.py
index 79f367cfaf..0154a79138 100644
--- a/unittests/tools/test_intsights_parser.py
+++ b/unittests/tools/test_intsights_parser.py
@@ -15,12 +15,12 @@ def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
         finding = list(findings)[0]

         self.assertEqual(
-            '5c80dbf83b4a3900078b6be6',
+            "5c80dbf83b4a3900078b6be6",
             finding.unique_id_from_tool)
         self.assertEqual(
-            'HTTP headers weakness in initech.com web server',
+            "HTTP headers weakness in initech.com web server",
             finding.title)
-        self.assertEqual('Critical', finding.severity)
+        self.assertEqual("Critical", finding.severity)
         self.assertEqual(
             "https://dashboard.intsights.com/#/threat-command/alerts?search=5c80dbf83b4a3900078b6be6",
             finding.references)
diff --git a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
index b88bc87d23..b37f4e1b7b 100644
--- a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
+++ b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
@@ -77,7 +77,7 @@ def test_parse_file_with_many_vulns_pypi(self):
             self.assertFalse(findings[0].dynamic_finding)
             self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H", findings[0].cvssv3)
             self.assertEqual("XRAY-515353", findings[0].vuln_id_from_tool)
-            self.assertEqual(['CVE-2023-30608'], findings[0].unsaved_vulnerability_ids)
+            self.assertEqual(["CVE-2023-30608"], findings[0].unsaved_vulnerability_ids)

         with self.subTest(finding=1):
             self.assertIn("**Short description**\nA design problem in Django may lead to denial of service when processing multipart forms.\n", findings[1].severity_justification)
diff --git a/unittests/tools/test_jfrog_xray_unified_parser.py b/unittests/tools/test_jfrog_xray_unified_parser.py
index 914d3fb073..4acdf8bd89 100644
--- a/unittests/tools/test_jfrog_xray_unified_parser.py
+++ b/unittests/tools/test_jfrog_xray_unified_parser.py
@@ -325,7 +325,7 @@ def test_parse_file_with_very_many_vulns(self):
         self.assertEqual("TABLE statements.\n\nRed Hat Severity: Moderate", item.description[-45:])
         self.assertIsNone(item.mitigation)
         self.assertEqual("7:sqlite:0", item.component_name)
-        self.assertIn('packagetype_rpm', item.tags)
+        self.assertIn("packagetype_rpm", item.tags)
         self.assertEqual("3.7.17-8.el7_7.1", item.component_version)
         self.assertEqual("elastic-docker-remote/elasticsearch/elasticsearch/7.9.1-amd64/", item.file_path)
         self.assertIsNotNone(item.severity_justification)
diff --git a/unittests/tools/test_kics_parser.py b/unittests/tools/test_kics_parser.py
index 04078968b1..da9fdaeb45 100644
--- a/unittests/tools/test_kics_parser.py
+++ b/unittests/tools/test_kics_parser.py
@@ -31,7 +31,7 @@ def test_parse_many_findings(self):
 **Category:** Secret Management
 **Issue type:** RedundantAttribute"""
             self.assertEqual(description, finding.description)
-            self.assertEqual('https://kics.io/', finding.references)
+ self.assertEqual("https://kics.io/", finding.references) self.assertEqual(1, finding.nb_occurences) with self.subTest(i=1): @@ -50,7 +50,7 @@ def test_parse_many_findings(self): **Issue type:** IncorrectValue **Actual value:** aws_s3_bucket_policy[this].policy.Principal is equal to or contains \'*\'""" self.assertEqual(description, finding.description) - self.assertEqual('https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_policy', finding.references) + self.assertEqual("https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_policy", finding.references) self.assertEqual(1, finding.nb_occurences) with self.subTest(i=2): diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py index 2e732fef40..56a8efa1f9 100644 --- a/unittests/tools/test_kubebench_parser.py +++ b/unittests/tools/test_kubebench_parser.py @@ -41,9 +41,9 @@ def test_parse_file_with_controls_tag(self): medium_severities = 0 info_severities = 0 for finding in findings: - if finding.severity == 'Medium': + if finding.severity == "Medium": medium_severities += 1 - if finding.severity == 'Info': + if finding.severity == "Info": info_severities += 1 self.assertEqual(36, medium_severities) diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py index df5cf17334..4fee661bf4 100644 --- a/unittests/tools/test_kubehunter_parser.py +++ b/unittests/tools/test_kubehunter_parser.py @@ -25,7 +25,7 @@ def test_kubehunter_parser_with_one_criticle_vuln_has_one_findings(self): self.assertEqual(True, finding.active) self.assertEqual(False, finding.duplicate) - self.assertEqual(finding.severity, 'High') + self.assertEqual(finding.severity, "High") def test_kubehunter_parser_with_many_vuln_has_many_findings(self): with open("unittests/scans/kubehunter/kubehunter_many_vul.json") as testfile: diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py index 76869be044..dd21b4f610 100644 --- a/unittests/tools/test_microfocus_webinspect_parser.py +++ b/unittests/tools/test_microfocus_webinspect_parser.py @@ -81,8 +81,8 @@ def test_parse_file_version_18_20(self): endpoint.clean() self.assertEqual(4, len(findings)) item = findings[0] - self.assertEqual('Cache Management: Headers', item.title) - self.assertEqual('Info', item.severity) + self.assertEqual("Cache Management: Headers", item.title) + self.assertEqual("Info", item.severity) self.assertEqual(200, item.cwe) self.assertEqual(2, item.nb_occurences) self.assertEqual(2, len(item.unsaved_endpoints)) diff --git a/unittests/tools/test_mobsf_parser.py b/unittests/tools/test_mobsf_parser.py index 2cd4ed7363..4f4953a894 100644 --- a/unittests/tools/test_mobsf_parser.py +++ b/unittests/tools/test_mobsf_parser.py @@ -16,19 +16,19 @@ def test_parse_file(self): testfile.close() self.assertEqual(68, len(findings)) item = findings[0] - self.assertEqual('android.permission.WRITE_EXTERNAL_STORAGE', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("android.permission.WRITE_EXTERNAL_STORAGE", item.title) + self.assertEqual("High", item.severity) item = findings[2] - self.assertEqual('android.permission.INTERNET', item.title) - self.assertEqual('Info', item.severity) + self.assertEqual("android.permission.INTERNET", item.title) + self.assertEqual("Info", item.severity) item = findings[10] - self.assertEqual('This shared object does not have RELRO enabled', item.title) - 
self.assertEqual('High', item.severity) - self.assertEqual('lib/armeabi-v7a/libdivajni.so', item.file_path) + self.assertEqual("This shared object does not have RELRO enabled", item.title) + self.assertEqual("High", item.severity) + self.assertEqual("lib/armeabi-v7a/libdivajni.so", item.file_path) self.assertEqual(1, item.nb_occurences) item = findings[17] - self.assertEqual('This shared object does not have a stack canary value added to the stack', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("This shared object does not have a stack canary value added to the stack", item.title) + self.assertEqual("High", item.severity) self.assertEqual(1, item.nb_occurences) def test_parse_file2(self): @@ -42,8 +42,8 @@ def test_parse_file2(self): testfile.close() self.assertEqual(1022, len(findings)) item = findings[1] - self.assertEqual('Potential API Key found', item.title) - self.assertEqual('Info', item.severity) + self.assertEqual("Potential API Key found", item.title) + self.assertEqual("Info", item.severity) def test_parse_file_3_1_9_android(self): test = Test() @@ -55,17 +55,17 @@ def test_parse_file_3_1_9_android(self): findings = parser.get_findings(testfile, test) testfile.close() item = findings[1] - self.assertEqual('android.permission.ACCESS_GPS', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("android.permission.ACCESS_GPS", item.title) + self.assertEqual("High", item.severity) item = findings[4] - self.assertEqual('android.permission.ACCESS_LOCATION', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("android.permission.ACCESS_LOCATION", item.title) + self.assertEqual("High", item.severity) item = findings[7] - self.assertEqual('android.permission.READ_PHONE_STATE', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("android.permission.READ_PHONE_STATE", item.title) + self.assertEqual("High", item.severity) item = findings[70] - self.assertEqual('HTTPS Connection', item.title) - self.assertEqual('Info', item.severity) + self.assertEqual("HTTPS Connection", item.title) + self.assertEqual("Info", item.severity) self.assertEqual(1, item.nb_occurences) def test_parse_file_3_1_9_ios(self): @@ -79,14 +79,14 @@ def test_parse_file_3_1_9_ios(self): testfile.close() self.assertEqual(11, len(findings)) item = findings[2] - self.assertEqual('NSLocationAlwaysUsageDescription', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("NSLocationAlwaysUsageDescription", item.title) + self.assertEqual("High", item.severity) item = findings[3] - self.assertEqual('NSLocationWhenInUseUsageDescription', item.title) - self.assertEqual('High', item.severity) + self.assertEqual("NSLocationWhenInUseUsageDescription", item.title) + self.assertEqual("High", item.severity) item = findings[10] - self.assertEqual('App is compiled with Automatic Reference Counting (ARC) flag. ARC is a compiler feature that provides automatic memory management of Objective-C objects and is an exploit mitigation mechanism against memory corruption vulnerabilities.', item.title) - self.assertEqual('Info', item.severity) + self.assertEqual("App is compiled with Automatic Reference Counting (ARC) flag. 
ARC is a compiler feature that provides automatic memory management of Objective-C objects and is an exploit mitigation mechanism against memory corruption vulnerabilities.", item.title) + self.assertEqual("Info", item.severity) self.assertEqual(1, item.nb_occurences) def test_parse_file_mobsf_3_7_9(self): diff --git a/unittests/tools/test_nancy_parser.py b/unittests/tools/test_nancy_parser.py index 39baa59297..271a5c05e9 100644 --- a/unittests/tools/test_nancy_parser.py +++ b/unittests/tools/test_nancy_parser.py @@ -19,7 +19,7 @@ def test_nancy_parser_with_one_vuln_has_one_findings(self): self.assertEqual(1, len(findings)) with self.subTest(i=0): finding = findings[0] - self.assertEqual('Info', finding.severity) + self.assertEqual("Info", finding.severity) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual(None, finding.cve) diff --git a/unittests/tools/test_nexpose_parser.py b/unittests/tools/test_nexpose_parser.py index d3a9af3884..3242949cd4 100644 --- a/unittests/tools/test_nexpose_parser.py +++ b/unittests/tools/test_nexpose_parser.py @@ -126,13 +126,13 @@ def test_nexpose_parser_has_many_finding(self): finding = findings[37] self.assertEqual("Open port UDP/137", finding.title) self.assertIn('udp/137 port is open with "CIFS Name Service" service', finding.description) - self.assertIn('cifs-name-service', finding.unsaved_tags) + self.assertIn("cifs-name-service", finding.unsaved_tags) self.assertEqual(1, len(finding.unsaved_endpoints)) # vuln 37 - endpoint endpoint = finding.unsaved_endpoints[0] self.assertEqual(137, endpoint.port) - self.assertEqual('udp', endpoint.protocol) + self.assertEqual("udp", endpoint.protocol) def test_nexpose_parser_tests_outside_endpoint(self): with open("unittests/scans/nexpose/report_auth.xml") as testfile: @@ -180,31 +180,31 @@ def test_nexpose_parser_dns(self): finding = findings[1] self.assertEqual("DNS server allows cache snooping", finding.title) self.assertEqual(2, len(finding.unsaved_endpoints)) - self.assertEqual('dns', str(finding.unsaved_endpoints[0].protocol)) - self.assertEqual('tcp', str(finding.unsaved_endpoints[0].fragment)) - self.assertEqual('dns', str(finding.unsaved_endpoints[1].protocol)) - self.assertEqual('udp', str(finding.unsaved_endpoints[1].fragment)) - self.assertEqual('dns://192.168.1.1#tcp', str(finding.unsaved_endpoints[0])) - self.assertEqual('dns://192.168.1.1#udp', str(finding.unsaved_endpoints[1])) + self.assertEqual("dns", str(finding.unsaved_endpoints[0].protocol)) + self.assertEqual("tcp", str(finding.unsaved_endpoints[0].fragment)) + self.assertEqual("dns", str(finding.unsaved_endpoints[1].protocol)) + self.assertEqual("udp", str(finding.unsaved_endpoints[1].fragment)) + self.assertEqual("dns://192.168.1.1#tcp", str(finding.unsaved_endpoints[0])) + self.assertEqual("dns://192.168.1.1#udp", str(finding.unsaved_endpoints[1])) # vuln 2 finding = findings[2] self.assertEqual("Nameserver Processes Recursive Queries", finding.title) self.assertEqual(2, len(finding.unsaved_endpoints)) - self.assertEqual('dns', str(finding.unsaved_endpoints[0].protocol)) - self.assertEqual('tcp', str(finding.unsaved_endpoints[0].fragment)) - self.assertEqual('dns', str(finding.unsaved_endpoints[1].protocol)) - self.assertEqual('udp', str(finding.unsaved_endpoints[1].fragment)) - self.assertEqual('dns://192.168.1.1#tcp', str(finding.unsaved_endpoints[0])) - self.assertEqual('dns://192.168.1.1#udp', str(finding.unsaved_endpoints[1])) + self.assertEqual("dns", 
str(finding.unsaved_endpoints[0].protocol)) + self.assertEqual("tcp", str(finding.unsaved_endpoints[0].fragment)) + self.assertEqual("dns", str(finding.unsaved_endpoints[1].protocol)) + self.assertEqual("udp", str(finding.unsaved_endpoints[1].fragment)) + self.assertEqual("dns://192.168.1.1#tcp", str(finding.unsaved_endpoints[0])) + self.assertEqual("dns://192.168.1.1#udp", str(finding.unsaved_endpoints[1])) # vuln 4 finding = findings[4] self.assertEqual("DNS Traffic Amplification", finding.title) self.assertEqual(1, len(finding.unsaved_endpoints)) - self.assertEqual('dns', str(finding.unsaved_endpoints[0].protocol)) - self.assertEqual('udp', str(finding.unsaved_endpoints[0].fragment)) - self.assertEqual('dns://192.168.1.1#udp', str(finding.unsaved_endpoints[0])) + self.assertEqual("dns", str(finding.unsaved_endpoints[0].protocol)) + self.assertEqual("udp", str(finding.unsaved_endpoints[0].fragment)) + self.assertEqual("dns://192.168.1.1#udp", str(finding.unsaved_endpoints[0])) @override_settings(USE_FIRST_SEEN=True) def test_nexpose_parser_use_first_seen(self): diff --git a/unittests/tools/test_nmap_parser.py b/unittests/tools/test_nmap_parser.py index fa5efd00f8..26dffc2381 100644 --- a/unittests/tools/test_nmap_parser.py +++ b/unittests/tools/test_nmap_parser.py @@ -32,9 +32,9 @@ def test_parse_file_with_single_open_ports_has_single_finding(self): self.assertEqual(datetime.datetime(2014, 3, 29, 14, 46, 56), finding.date) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('localhost.localdomain', endpoint.host) + self.assertEqual("localhost.localdomain", endpoint.host) self.assertEqual(5432, endpoint.port) - self.assertEqual('tcp', endpoint.protocol) + self.assertEqual("tcp", endpoint.protocol) def test_parse_file_with_multiple_open_ports_has_multiple_finding(self): with open("unittests/scans/nmap/nmap_multiple_port.xml") as testfile: @@ -52,9 +52,9 @@ def test_parse_file_with_multiple_open_ports_has_multiple_finding(self): self.assertEqual(datetime.datetime(2016, 5, 16, 17, 56, 59), finding.date) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('mocha2005.mochahost.com', endpoint.host) + self.assertEqual("mocha2005.mochahost.com", endpoint.host) self.assertEqual(21, endpoint.port) - self.assertEqual('tcp', endpoint.protocol) + self.assertEqual("tcp", endpoint.protocol) def test_parse_file_with_script_vulner(self): with open("unittests/scans/nmap/nmap_script_vulners.xml") as testfile: @@ -118,9 +118,9 @@ def test_parse_issue4406(self): self.assertEqual(datetime.datetime(2021, 4, 29, 9, 26, 36), finding.date) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('ip-10-250-195-71.eu-west-1.compute.internal', endpoint.host) + self.assertEqual("ip-10-250-195-71.eu-west-1.compute.internal", endpoint.host) self.assertEqual(9100, endpoint.port) - self.assertEqual('tcp', endpoint.protocol) + self.assertEqual("tcp", endpoint.protocol) with self.subTest(i=66): finding = findings[66] self.assertEqual("Info", finding.severity) @@ -128,6 +128,6 @@ def test_parse_issue4406(self): self.assertEqual(datetime.datetime(2021, 4, 29, 9, 26, 36), finding.date) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('ip-10-250-195-71.eu-west-1.compute.internal', endpoint.host) + self.assertEqual("ip-10-250-195-71.eu-west-1.compute.internal", endpoint.host) 
self.assertEqual(31641, endpoint.port)
- self.assertEqual('tcp', endpoint.protocol)
+ self.assertEqual("tcp", endpoint.protocol)
diff --git a/unittests/tools/test_npm_audit_parser.py b/unittests/tools/test_npm_audit_parser.py
index 0793f0cfd4..5c11d848f0 100644
--- a/unittests/tools/test_npm_audit_parser.py
+++ b/unittests/tools/test_npm_audit_parser.py
@@ -34,8 +34,8 @@ def test_npm_audit_parser_with_many_vuln_has_many_findings(self):
if find.file_path == "express>fresh":
self.assertEqual(1, len(find.unsaved_vulnerability_ids))
self.assertEqual("CVE-2017-16119", find.unsaved_vulnerability_ids[0])
- self.assertEqual('mime', findings[4].component_name)
- self.assertEqual('1.3.4', findings[4].component_version)
+ self.assertEqual("mime", findings[4].component_name)
+ self.assertEqual("1.3.4", findings[4].component_version)
def test_npm_audit_parser_multiple_cwes_per_finding(self):
# cwes formatted as escaped list: "cwe": "[\"CWE-346\",\"CWE-453\"]",
diff --git a/unittests/tools/test_nuclei_parser.py b/unittests/tools/test_nuclei_parser.py
index 131694523e..fe8d81b26f 100644
--- a/unittests/tools/test_nuclei_parser.py
+++ b/unittests/tools/test_nuclei_parser.py
@@ -174,7 +174,7 @@ def test_parse_many_findings_new(self):
self.assertEqual("CVE-2018-15473", finding.vuln_id_from_tool)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(1, len(vulnerability_ids))
- self.assertIn('CVE-2018-15473', vulnerability_ids)
+ self.assertIn("CVE-2018-15473", vulnerability_ids)
self.assertEqual(362, finding.cwe)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N", finding.cvssv3)
self.assertEqual(5.3, finding.cvssv3_score)
@@ -184,7 +184,7 @@ def test_parse_many_findings_new(self):
self.assertEqual("Exposed Prometheus metrics", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(1, finding.nb_occurences)
- self.assertEqual('', finding.description)
+ self.assertEqual("", finding.description)
self.assertIn("config", finding.unsaved_tags)
self.assertIn("exposure", finding.unsaved_tags)
self.assertIn("prometheus", finding.unsaved_tags)
diff --git a/unittests/tools/test_pip_audit_parser.py b/unittests/tools/test_pip_audit_parser.py
index 679fe10590..22771c1d40 100644
--- a/unittests/tools/test_pip_audit_parser.py
+++ b/unittests/tools/test_pip_audit_parser.py
@@ -36,53 +36,53 @@ def test_parser_many_vulns(self):
self.assertEqual(7, len(findings))
finding = findings[0]
- self.assertEqual('PYSEC-2021-76 in aiohttp:3.6.2', finding.title)
+ self.assertEqual("PYSEC-2021-76 in aiohttp:3.6.2", finding.title)
description = 'aiohttp is an asynchronous HTTP client/server framework for asyncio and Python. In aiohttp before version 3.7.4 there is an open redirect vulnerability. A maliciously crafted link to an aiohttp-based web-server could redirect the browser to a different website. It is caused by a bug in the `aiohttp.web_middlewares.normalize_path_middleware` middleware. This security problem has been fixed in 3.7.4. Upgrade your dependency using pip as follows "pip install aiohttp >= 3.7.4". If upgrading is not an option for you, a workaround can be to avoid using `aiohttp.web_middlewares.normalize_path_middleware` in your applications.'
self.assertEqual(description, finding.description)
self.assertEqual(1395, finding.cwe)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(1, len(vulnerability_ids))
- self.assertEqual('PYSEC-2021-76', vulnerability_ids[0])
- self.assertEqual('Medium', finding.severity)
- self.assertEqual('Upgrade to version: 3.7.4', finding.mitigation)
- self.assertEqual('aiohttp', finding.component_name)
- self.assertEqual('3.6.2', finding.component_version)
- self.assertEqual('PYSEC-2021-76', finding.vuln_id_from_tool)
+ self.assertEqual("PYSEC-2021-76", vulnerability_ids[0])
+ self.assertEqual("Medium", finding.severity)
+ self.assertEqual("Upgrade to version: 3.7.4", finding.mitigation)
+ self.assertEqual("aiohttp", finding.component_name)
+ self.assertEqual("3.6.2", finding.component_version)
+ self.assertEqual("PYSEC-2021-76", finding.vuln_id_from_tool)
finding = findings[1]
- self.assertEqual('PYSEC-2021-439 in django:3.2.9', finding.title)
- description = 'In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths.'
+ self.assertEqual("PYSEC-2021-439 in django:3.2.9", finding.title)
+ description = "In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths."
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(1, len(vulnerability_ids))
- self.assertEqual('PYSEC-2021-439', vulnerability_ids[0])
+ self.assertEqual("PYSEC-2021-439", vulnerability_ids[0])
self.assertEqual(1395, finding.cwe)
- self.assertEqual('Medium', finding.severity)
- self.assertEqual('django', finding.component_name)
- self.assertEqual('3.2.9', finding.component_version)
- self.assertEqual('PYSEC-2021-439', finding.vuln_id_from_tool)
+ self.assertEqual("Medium", finding.severity)
+ self.assertEqual("django", finding.component_name)
+ self.assertEqual("3.2.9", finding.component_version)
+ self.assertEqual("PYSEC-2021-439", finding.vuln_id_from_tool)
finding = findings[2]
- self.assertEqual('PYSEC-2021-852 in lxml:4.6.4', finding.title)
- description = 'lxml is a library for processing XML and HTML in the Python language. Prior to version 4.6.5, the HTML Cleaner in lxml.html lets certain crafted script content pass through, as well as script content in SVG files embedded using data URIs. Users that employ the HTML cleaner in a security relevant context should upgrade to lxml 4.6.5 to receive a patch. There are no known workarounds available.'
+ self.assertEqual("PYSEC-2021-852 in lxml:4.6.4", finding.title)
+ description = "lxml is a library for processing XML and HTML in the Python language. Prior to version 4.6.5, the HTML Cleaner in lxml.html lets certain crafted script content pass through, as well as script content in SVG files embedded using data URIs. Users that employ the HTML cleaner in a security relevant context should upgrade to lxml 4.6.5 to receive a patch. There are no known workarounds available."
self.assertEqual(description, finding.description)
vulnerability_ids = finding.unsaved_vulnerability_ids
self.assertEqual(1, len(vulnerability_ids))
- self.assertEqual('PYSEC-2021-852', vulnerability_ids[0])
+ self.assertEqual("PYSEC-2021-852", vulnerability_ids[0])
self.assertEqual(1395, finding.cwe)
- self.assertEqual('Medium', finding.severity)
- self.assertEqual('lxml', finding.component_name)
- self.assertEqual('4.6.4', finding.component_version)
- self.assertEqual('PYSEC-2021-852', finding.vuln_id_from_tool)
+ self.assertEqual("Medium", finding.severity)
+ self.assertEqual("lxml", finding.component_name)
+ self.assertEqual("4.6.4", finding.component_version)
+ self.assertEqual("PYSEC-2021-852", finding.vuln_id_from_tool)
finding = findings[3]
- self.assertEqual('PYSEC-2019-128 in twisted:18.9.0', finding.title)
+ self.assertEqual("PYSEC-2019-128 in twisted:18.9.0", finding.title)
finding = findings[4]
- self.assertEqual('PYSEC-2020-260 in twisted:18.9.0', finding.title)
+ self.assertEqual("PYSEC-2020-260 in twisted:18.9.0", finding.title)
finding = findings[5]
- self.assertEqual('PYSEC-2019-129 in twisted:18.9.0', finding.title)
+ self.assertEqual("PYSEC-2019-129 in twisted:18.9.0", finding.title)
finding = findings[6]
- self.assertEqual('PYSEC-2020-259 in twisted:18.9.0', finding.title)
+ self.assertEqual("PYSEC-2020-259 in twisted:18.9.0", finding.title)
diff --git a/unittests/tools/test_popeye_parser.py b/unittests/tools/test_popeye_parser.py
index 04a553679b..1dac138770 100644
--- a/unittests/tools/test_popeye_parser.py
+++ b/unittests/tools/test_popeye_parser.py
@@ -22,7 +22,7 @@ def test_popeye_parser_with_one_warning_has_one_findings(self):
"**Group** : test-group" + "\n\n" + \
"**Severity** : Warning" + "\n\n" + \
"**Message** : [POP-106] No resources requests/limits defined"
- finding_vuln_id_from_tool = 'POP-106'
+ finding_vuln_id_from_tool = "POP-106"
testfile.close()
self.assertEqual(1, len(findings))
self.assertEqual("Low", findings[0].severity)
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index 941aee124c..e692eee050 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -76,4 +76,4 @@ def test_parse_file_with_finding_no_dns(self):
self.assertEqual(datetime(2019, 4, 2, 10, 14, 53, tzinfo=pytz.utc), finding.date)
self.assertEqual(1, len(finding.unsaved_endpoints))
unsaved_endpoint = finding.unsaved_endpoints[0]
- self.assertEqual('10.1.10.1', unsaved_endpoint.host)
+ self.assertEqual("10.1.10.1", unsaved_endpoint.host)
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 078e8f7dd0..221c7a8482 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -66,4 +66,4 @@ def test_discussion_10239(self):
testfile.close()
self.assertEqual(1, len(findings))
finding = findings[0]
- self.assertEqual(finding.unsaved_req_resp[0].get('req'), "POST: https://example.com/vulnerable/path\nReferer: https://example.com/\n\nHost: www.example.com\n\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15\n\nAccept: */*\n\nContent-Length: 39\n\nContent-Type: application/x-www-form-urlencoded REQUEST_ONE\n\nBODY: post_param=malicious_code_here\n")
+ self.assertEqual(finding.unsaved_req_resp[0].get("req"), "POST: https://example.com/vulnerable/path\nReferer: https://example.com/\n\nHost: www.example.com\n\nUser-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15\n\nAccept: */*\n\nContent-Length: 39\n\nContent-Type: application/x-www-form-urlencoded REQUEST_ONE\n\nBODY: post_param=malicious_code_here\n")
diff --git a/unittests/tools/test_scout_suite_parser.py b/unittests/tools/test_scout_suite_parser.py
index 1cab703c17..c8b04c4502 100644
--- a/unittests/tools/test_scout_suite_parser.py
+++ b/unittests/tools/test_scout_suite_parser.py
@@ -22,14 +22,14 @@ def test_scout_suite_parser_with_two_findings(self):
self.assertEqual("Bucket with Logging Disabled", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('gcp:cloudstorage-bucket-no-logging', finding.vuln_id_from_tool)
+ self.assertEqual("gcp:cloudstorage-bucket-no-logging", finding.vuln_id_from_tool)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Bucket with Versioning Disabled", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(1032, finding.cwe)
self.assertEqual(datetime.date(2021, 1, 8), finding.date)
- self.assertEqual('gcp:cloudstorage-bucket-no-versioning', finding.vuln_id_from_tool)
+ self.assertEqual("gcp:cloudstorage-bucket-no-versioning", finding.vuln_id_from_tool)
def test_get_findings(self):
with open("unittests/scans/scout_suite/new2.js") as test_file:
@@ -41,19 +41,19 @@ def test_get_findings(self):
self.assertEqual("CloudTrail Service Not Configured", finding.title)
self.assertEqual("Critical", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('aws:cloudtrail-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:cloudtrail-not-configured", finding.vuln_id_from_tool)
with self.subTest(i=15):
finding = findings[15]
self.assertEqual("CloudTrail Service Not Configured", finding.title)
self.assertEqual("Critical", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('aws:cloudtrail-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:cloudtrail-not-configured", finding.vuln_id_from_tool)
with self.subTest(i=29):
finding = findings[29]
self.assertEqual("AWS Config Not Enabled", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('aws:config-recorder-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:config-recorder-not-configured", finding.vuln_id_from_tool)
def test_get_tests(self):
with open("unittests/scans/scout_suite/new2.js") as test_file:
@@ -73,16 +73,16 @@ def test_get_tests(self):
self.assertEqual("Critical", finding.severity)
self.assertEqual(1032, finding.cwe)
self.assertEqual(datetime.date(2021, 10, 1), finding.date)
- self.assertEqual('aws:cloudtrail-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:cloudtrail-not-configured", finding.vuln_id_from_tool)
with self.subTest(i=15):
finding = findings[15]
self.assertEqual("CloudTrail Service Not Configured", finding.title)
self.assertEqual("Critical", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('aws:cloudtrail-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:cloudtrail-not-configured", finding.vuln_id_from_tool)
with self.subTest(i=29):
finding = findings[29]
self.assertEqual("AWS Config Not Enabled", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(1032, finding.cwe)
- self.assertEqual('aws:config-recorder-not-configured', finding.vuln_id_from_tool)
+ self.assertEqual("aws:config-recorder-not-configured", finding.vuln_id_from_tool)
diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py
index 4287fa7bdf..ce19977b90 100644
--- a/unittests/tools/test_semgrep_parser.py
+++ b/unittests/tools/test_semgrep_parser.py
@@ -129,5 +129,5 @@ def test_parse_sca_deployments_vulns(self):
finding = findings[0]
self.assertEqual("High", finding.severity)
self.assertEqual("requirements3.txt", finding.file_path)
- self.assertEqual('222', finding.line)
+ self.assertEqual("222", finding.line)
self.assertEqual(617, finding.cwe)
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index ba6f27cb3c..ab17657626 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -157,7 +157,7 @@ def test_snykParser_target_file(self):
# Mobile-Security-Framework-MobSF@0.0.0: SQL Injection
finding = findings[0]
self.assertEqual("Critical", finding.severity)
- self.assertIn('target_file:Mobile-Security-Framework-MobSF/requirements.txt', finding.unsaved_tags)
+ self.assertIn("target_file:Mobile-Security-Framework-MobSF/requirements.txt", finding.unsaved_tags)
def test_snykParser_update_libs_tag(self):
with open("unittests/scans/snyk/single_project_upgrade_libs.json") as testfile:
@@ -167,9 +167,9 @@ def test_snykParser_update_libs_tag(self):
# acme-review@1.0.0: Remote Code Execution (RCE)
finding = findings[227]
self.assertEqual("High", finding.severity)
- self.assertIn('target_file:package-lock.json', finding.unsaved_tags)
- self.assertIn('upgrade_to:react-scripts@5.0.0', finding.unsaved_tags)
- self.assertIn('shell-quote@1.7.2', finding.mitigation)
+ self.assertIn("target_file:package-lock.json", finding.unsaved_tags)
+ self.assertIn("upgrade_to:react-scripts@5.0.0", finding.unsaved_tags)
+ self.assertIn("shell-quote@1.7.2", finding.mitigation)
def test_snykcontainer_issue_9270(self):
with open("unittests/scans/snyk/snykcontainer_issue_9270.json") as testfile:
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index 57e110f59c..c80be607e1 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -34,7 +34,7 @@ def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(0, len(findings))
my_file_handle.close()
@@ -96,7 +96,7 @@ def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(1, len(findings))
@@ -144,7 +144,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(6, len(findings))
@@ -157,7 +157,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# common verifications
# (there is no aggregation to be done here)
@@ -170,7 +170,7 @@ def test_detailed_parse_file_with_table_in_table(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(1, len(findings))
@@ -250,7 +250,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(1, len(findings))
@@ -359,7 +359,7 @@ def test_detailed_parse_file_with_vuln_on_same_filename(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# specific verifications
self.assertEqual(4, len(findings))
@@ -371,7 +371,7 @@ def test_detailed_parse_file_with_vuln_issue_3725(self):
"""
my_file_handle, _product, _engagement, test = self.init(get_unit_tests_path() + "/scans/sonarqube/sonar.html")
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# specific verifications
self.assertEqual(322, len(findings))
@@ -387,7 +387,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(1, len(findings))
@@ -466,7 +466,7 @@ def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
self.assertEqual(0, len(findings))
my_file_handle.close()
@@ -476,7 +476,7 @@ def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(s
get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# common verifications
self.assertEqual(1, len(findings))
@@ -555,7 +555,7 @@ def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_fin
get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json",
)
parser = SonarQubeParser()
- parser.set_mode('detailed')
+ parser.set_mode("detailed")
findings = parser.get_findings(my_file_handle, test)
# common verifications
# (there is no aggregation to be done here)
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 24c930b6f2..4f1186f155 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -25,7 +25,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_old(self):
self.assertEqual(1, len(findings))
finding = findings[0]
- self.assertEqual('Problems in certificate deployments (www.example.com:443)', finding.title)
+ self.assertEqual("Problems in certificate deployments (www.example.com:443)", finding.title)
description = """www.example.com:443 has problems in certificate deployments:
- certificate has expired for trust store Android, version 9.0.0_r9
- certificate has expired for trust store Apple, version iOS 13, iPadOS 13, macOS 10.15, watchOS 6, and tvOS 13
@@ -33,11 +33,11 @@ def test_parse_json_file_with_one_target_has_one_vuln_old(self):
- certificate has expired for trust store Mozilla, version 2019-11-28
- certificate has expired for trust store Windows, version 2020-05-04"""
self.assertEqual(description, finding.description)
- self.assertEqual('Medium', finding.severity)
+ self.assertEqual("Medium", finding.severity)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
- self.assertEqual('www.example.com', endpoint.host)
+ self.assertEqual("www.example.com", endpoint.host)
self.assertEqual(443, endpoint.port)
def test_parse_json_file_with_one_target_has_four_vuln_old(self):
@@ -48,10 +48,10 @@ def test_parse_json_file_with_one_target_has_four_vuln_old(self):
self.assertEqual(4, len(findings))
self.assertEqual(1, len(findings[0].unsaved_vulnerability_ids))
- self.assertEqual('CVE-2014-0160', findings[0].unsaved_vulnerability_ids[0])
+ self.assertEqual("CVE-2014-0160", findings[0].unsaved_vulnerability_ids[0])
self.assertEqual(1, len(findings[1].unsaved_vulnerability_ids))
- self.assertEqual('CVE-2014-0224', findings[1].unsaved_vulnerability_ids[0])
+ self.assertEqual("CVE-2014-0224", findings[1].unsaved_vulnerability_ids[0])
def test_parse_json_file_with_two_target_has_many_vuln_old(self):
with open(path.join(path.dirname(__file__), "../scans/sslyze/two_targets_two_vuln_old.json")) as testfile:
@@ -73,7 +73,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
self.assertEqual(1, len(findings))
finding = findings[0]
- self.assertEqual('Unrecommended cipher suites for TLS 1.2 (example.com:443)', finding.title)
+ self.assertEqual("Unrecommended cipher suites for TLS 1.2 (example.com:443)", finding.title)
description = """example.com:443 accepts unrecommended cipher suites for TLS 1.2:
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_256_CCM_8
@@ -92,15 +92,15 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
- TLS_DHE_RSA_WITH_AES_128_CCM_8
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA"""
self.assertEqual(description, finding.description)
- self.assertEqual('Medium', finding.severity)
+ self.assertEqual("Medium", finding.severity)
self.assertEqual(
- 'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
+ "TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10",
finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
- self.assertEqual('example.com', endpoint.host)
+ self.assertEqual("example.com", endpoint.host)
self.assertEqual(443, endpoint.port)
def test_parse_json_file_with_one_target_has_three_vuln_new(self):
@@ -117,7 +117,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
# We look at 2 exemplary findings, the others are similar and don't give more test coverage
finding = findings[0]
- self.assertEqual('Unrecommended cipher suites for TLS 1.2 (example.com:443)', finding.title)
+ self.assertEqual("Unrecommended cipher suites for TLS 1.2 (example.com:443)", finding.title) description = """example.com:443 accepts unrecommended cipher suites for TLS 1.2: - TLS_RSA_WITH_AES_256_GCM_SHA384 - TLS_RSA_WITH_AES_256_CBC_SHA256 @@ -131,29 +131,29 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self): - TLS_DHE_RSA_WITH_AES_256_CBC_SHA - TLS_DHE_RSA_WITH_AES_128_CBC_SHA""" self.assertEqual(description, finding.description) - self.assertEqual('Medium', finding.severity) + self.assertEqual("Medium", finding.severity) self.assertEqual( - 'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10', + "TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10", finding.references, ) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('example.com', endpoint.host) + self.assertEqual("example.com", endpoint.host) self.assertEqual(443, endpoint.port) finding = findings[1] - self.assertEqual('TLS 1.0 not recommended (example2.com:443)', finding.title) - self.assertEqual('example2.com:443 accepts TLS 1.0 connections', finding.description) - self.assertEqual('Medium', finding.severity) + self.assertEqual("TLS 1.0 not recommended (example2.com:443)", finding.title) + self.assertEqual("example2.com:443 accepts TLS 1.0 connections", finding.description) + self.assertEqual("Medium", finding.severity) self.assertEqual( - 'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10', + "TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10", finding.references, ) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] - self.assertEqual('example2.com', endpoint.host) + self.assertEqual("example2.com", endpoint.host) self.assertEqual(443, endpoint.port) diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py index 94cf2de470..22bd9fb135 100644 --- a/unittests/tools/test_stackhawk_parser.py +++ b/unittests/tools/test_stackhawk_parser.py @@ -147,9 +147,9 @@ def test_that_a_scan_import_updates_the_test_description(self): parser.get_findings(testfile, test) self.assertEqual( test.description, - 'View scan details here: ' - + '[https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27]' - + '(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)', + "View scan details here: " + + "[https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27]" + + "(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)", ) def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self): @@ -231,7 +231,7 @@ def __assertFindingEquals( self.assertEqual(application_name, actual_finding.component_name) self.assertEqual(environment, actual_finding.component_version) self.assertEqual(severity, actual_finding.severity) - self.assertEqual("View this finding in the StackHawk platform at:\n[" + finding_url + '](' + finding_url + ')', + self.assertEqual("View this finding in the StackHawk platform at:\n[" + finding_url + "](" + finding_url + ")", 
actual_finding.description)
self.assertRegex(
actual_finding.steps_to_reproduce,
diff --git a/unittests/tools/test_tenable_parser.py b/unittests/tools/test_tenable_parser.py
index 29922a3f64..09291da9d4 100644
--- a/unittests/tools/test_tenable_parser.py
+++ b/unittests/tools/test_tenable_parser.py
@@ -152,7 +152,7 @@ def test_parse_some_findings_samples_nessus_legacy(self):
finding = findings[9]
self.assertEqual(7, len(finding.unsaved_vulnerability_ids))
for vulnerability_id in finding.unsaved_vulnerability_ids:
- self.assertEqual('CVE-2005-1794', vulnerability_id)
+ self.assertEqual("CVE-2005-1794", vulnerability_id)
def test_parse_some_findings_with_cvssv3_nessus_legacy(self):
"""test with cvssv3"""
@@ -180,11 +180,11 @@ def test_parse_many_findings_xml_nessus_was_legacy(self):
self.assertEqual(5, len(findings))
for i in [0, 1, 2, 3, 4]:
finding = findings[i]
- self.assertEqual('http', finding.unsaved_endpoints[0].protocol)
+ self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
self.assertIsNone(finding.cwe)
finding = findings[0]
- self.assertEqual('High', finding.severity)
- self.assertEqual('Cross-Site Scripting (XSS)', finding.title)
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("Cross-Site Scripting (XSS)", finding.title)
def test_parse_one_findings_xml_nessus_was_legacy(self):
with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.xml") as testfile:
@@ -195,10 +195,10 @@ def test_parse_one_findings_xml_nessus_was_legacy(self):
endpoint.clean()
self.assertEqual(1, len(findings))
finding = findings[0]
- self.assertEqual('http', finding.unsaved_endpoints[0].protocol)
+ self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
self.assertIsNone(finding.cwe)
- self.assertEqual('High', finding.severity)
- self.assertEqual('Cross-Site Scripting (XSS)', finding.title)
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("Cross-Site Scripting (XSS)", finding.title)
def test_parse_no_findings_xml_nessus_was_legacy(self):
with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.xml") as testfile:
@@ -220,12 +220,12 @@ def test_parse_many_findings_csv_nessus_was_legacy(self):
for i in [0, 1, 2, 3, 4]:
finding = findings[i]
self.assertIn(finding.severity, Finding.SEVERITIES)
- self.assertEqual('google.com', finding.unsaved_endpoints[0].host)
+ self.assertEqual("google.com", finding.unsaved_endpoints[0].host)
self.assertEqual(0, len(finding.unsaved_vulnerability_ids))
finding = findings[0]
- self.assertEqual('7.1', finding.cvssv3_score)
- self.assertEqual('High', finding.severity)
- self.assertEqual('http', finding.unsaved_endpoints[0].protocol)
+ self.assertEqual("7.1", finding.cvssv3_score)
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
def test_parse_one_findings_csv_nessus_was_legacy(self):
with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.csv") as testfile:
@@ -237,11 +237,11 @@ def test_parse_one_findings_csv_nessus_was_legacy(self):
self.assertEqual(1, len(findings))
finding = findings[0]
self.assertIn(finding.severity, Finding.SEVERITIES)
- self.assertEqual('google.com', finding.unsaved_endpoints[0].host)
+ self.assertEqual("google.com", finding.unsaved_endpoints[0].host)
self.assertEqual(0, len(finding.unsaved_vulnerability_ids))
- self.assertEqual('7.1', finding.cvssv3_score)
- self.assertEqual('High', finding.severity)
- self.assertEqual('http', finding.unsaved_endpoints[0].protocol)
+ self.assertEqual("7.1", finding.cvssv3_score)
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
def test_parse_no_findings_csv_nessus_was_legacy(self):
with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.csv") as testfile:
@@ -259,15 +259,15 @@ def test_parse_many_tenable_vulns(self):
self.assertEqual(9, len(findings))
finding = findings[0]
self.assertIn(finding.severity, Finding.SEVERITIES)
- self.assertEqual('High', finding.severity)
- self.assertEqual('ip-127-0-0-1.us-west-2.compute.internal', finding.unsaved_endpoints[0].host)
- self.assertEqual('Amazon Linux 2 : kernel (ALAS-2023-2050)', finding.title)
- self.assertEqual('tcp', finding.unsaved_endpoints[0].protocol)
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("ip-127-0-0-1.us-west-2.compute.internal", finding.unsaved_endpoints[0].host)
+ self.assertEqual("Amazon Linux 2 : kernel (ALAS-2023-2050)", finding.title)
+ self.assertEqual("tcp", finding.unsaved_endpoints[0].protocol)
self.assertEqual(None, finding.unsaved_endpoints[0].port)
- self.assertIn('https://alas.aws.amazon.com/AL2/ALAS-2023-2050.html', finding.references)
+ self.assertIn("https://alas.aws.amazon.com/AL2/ALAS-2023-2050.html", finding.references)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
for vulnerability_id in finding.unsaved_vulnerability_ids:
- self.assertEqual('CVE-2023-32233', vulnerability_id)
+ self.assertEqual("CVE-2023-32233", vulnerability_id)
def test_parse_issue_6992(self):
with open("unittests/scans/tenable/nessus/issue_6992.nessus") as testfile:
diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py
index 025127b704..2c13876bf2 100644
--- a/unittests/tools/test_trivy_parser.py
+++ b/unittests/tools/test_trivy_parser.py
@@ -44,7 +44,7 @@ def test_scheme_2_many_vulns(self):
self.assertEqual(len(findings), 5)
finding = findings[0]
self.assertEqual("Medium", finding.severity)
- self.assertEqual('CVE-2020-15999 freetype 2.9.1-r2', finding.title)
+ self.assertEqual("CVE-2020-15999 freetype 2.9.1-r2", finding.title)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2020-15999", finding.unsaved_vulnerability_ids[0])
self.assertEqual(787, finding.cwe)
@@ -53,13 +53,13 @@ def test_scheme_2_many_vulns(self):
self.assertEqual("2.9.1-r2", finding.component_version)
self.assertIsNotNone(finding.description)
self.assertIsNotNone(finding.references)
- self.assertEqual('2.9.1-r3', finding.mitigation)
- self.assertEqual('CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:N/A:H', finding.cvssv3)
+ self.assertEqual("2.9.1-r3", finding.mitigation)
+ self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:N/A:H", finding.cvssv3)
self.assertTrue(finding.static_finding)
self.assertFalse(finding.dynamic_finding)
finding = findings[1]
self.assertEqual("High", finding.severity)
- self.assertEqual('CVE-2020-28196 krb5-libs 1.15.5-r0', finding.title)
+ self.assertEqual("CVE-2020-28196 krb5-libs 1.15.5-r0", finding.title)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2020-28196", finding.unsaved_vulnerability_ids[0])
self.assertEqual(674, finding.cwe)
@@ -68,7 +68,7 @@ def test_scheme_2_many_vulns(self):
self.assertEqual("1.15.5-r0", finding.component_version)
self.assertIsNotNone(finding.description)
self.assertIsNotNone(finding.references)
- self.assertEqual('1.15.5-r1', finding.mitigation)
+ self.assertEqual("1.15.5-r1", finding.mitigation)
self.assertIsNone(finding.cvssv3)
self.assertTrue(finding.static_finding)
self.assertFalse(finding.dynamic_finding)
@@ -79,8 +79,8 @@ def test_misconfigurations_and_secrets(self):
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 5)
finding = findings[2]
- self.assertEqual('DS002 - Image user should not be \'root\'', finding.title)
- self.assertEqual('High', finding.severity)
+ self.assertEqual("DS002 - Image user should not be 'root'", finding.title)
+ self.assertEqual("High", finding.severity)
description = """**Target:** Dockerfile
**Type:** Dockerfile Security Check
@@ -88,22 +88,22 @@ def test_misconfigurations_and_secrets(self):
Specify at least 1 USER command in Dockerfile with non-root user as argument
"""
self.assertEqual(description, finding.description)
- self.assertEqual('Add \'USER \' line to the Dockerfile', finding.mitigation)
+ self.assertEqual("Add 'USER ' line to the Dockerfile", finding.mitigation)
references = """https://avd.aquasec.com/misconfig/ds002
https://docs.docker.com/develop/develop-images/dockerfile_best-practices/"""
self.assertEqual(references, finding.references)
- self.assertEqual(['config', 'dockerfile'], finding.tags)
+ self.assertEqual(["config", "dockerfile"], finding.tags)
finding = findings[3]
- self.assertEqual('Secret detected in Dockerfile - GitHub Personal Access Token', finding.title)
- self.assertEqual('Critical', finding.severity)
+ self.assertEqual("Secret detected in Dockerfile - GitHub Personal Access Token", finding.title)
+ self.assertEqual("Critical", finding.severity)
description = """GitHub Personal Access Token
**Category:** GitHub
**Match:** ENV GITHUB_PAT=*****
"""
self.assertEqual(description, finding.description)
- self.assertEqual('Dockerfile', finding.file_path)
+ self.assertEqual("Dockerfile", finding.file_path)
self.assertEqual(24, finding.line)
- self.assertEqual(['secret'], finding.tags)
+ self.assertEqual(["secret"], finding.tags)
def test_kubernetes(self):
with open(sample_path("kubernetes.json")) as test_file:
@@ -111,8 +111,8 @@ def test_kubernetes(self):
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 20)
finding = findings[0]
- self.assertEqual('CVE-2020-27350 apt 1.8.2.1', finding.title)
- self.assertEqual('Medium', finding.severity)
+ self.assertEqual("CVE-2020-27350 apt 1.8.2.1", finding.title)
+ self.assertEqual("Medium", finding.severity)
description = """apt: integer overflows and underflows while parsing .deb packages
**Target:** gcr.io/google_samples/gb-redis-follower:v2 (debian 10.4)
**Type:** debian
@@ -121,17 +121,17 @@ def test_kubernetes(self):
APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1;
"""
self.assertEqual(description, finding.description)
- self.assertEqual('1.8.2.2', finding.mitigation)
+ self.assertEqual("1.8.2.2", finding.mitigation)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2020-27350", finding.unsaved_vulnerability_ids[0])
- self.assertEqual(['debian', 'os-pkgs'], finding.tags)
- self.assertEqual('apt', finding.component_name)
- self.assertEqual('1.8.2.1', finding.component_version)
- self.assertEqual('default / Deployment / redis-follower', finding.service)
+ self.assertEqual(["debian", "os-pkgs"], finding.tags)
+ self.assertEqual("apt", finding.component_name)
+ self.assertEqual("1.8.2.1", finding.component_version)
+ self.assertEqual("default / Deployment / redis-follower", finding.service)
self.assertEqual(finding.file_path, "gcr.io/google_samples/gb-redis-follower:v2 (debian 10.4)")
finding = findings[5]
- self.assertEqual('CVE-2020-27350 apt 1.8.2.1', finding.title)
- self.assertEqual('Medium', finding.severity)
+ self.assertEqual("CVE-2020-27350 apt 1.8.2.1", finding.title)
+ self.assertEqual("Medium", finding.severity)
description = """apt: integer overflows and underflows while parsing .deb packages
**Target:** docker.io/redis:6.0.5 (debian 10.4)
**Type:** debian
@@ -140,16 +140,16 @@ def test_kubernetes(self):
APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1;
"""
self.assertEqual(description, finding.description)
- self.assertEqual('1.8.2.2', finding.mitigation)
+ self.assertEqual("1.8.2.2", finding.mitigation)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2020-27350", finding.unsaved_vulnerability_ids[0])
- self.assertEqual(['debian', 'os-pkgs'], finding.tags)
- self.assertEqual('apt', finding.component_name)
- self.assertEqual('1.8.2.1', finding.component_version)
- self.assertEqual('default / Deployment / redis-leader', finding.service)
+ self.assertEqual(["debian", "os-pkgs"], finding.tags)
+ self.assertEqual("apt", finding.component_name)
+ self.assertEqual("1.8.2.1", finding.component_version)
+ self.assertEqual("default / Deployment / redis-leader", finding.service)
finding = findings[10]
- self.assertEqual('KSV001 - Process can elevate its own privileges', finding.title)
- self.assertEqual('Medium', finding.severity)
+ self.assertEqual("KSV001 - Process can elevate its own privileges", finding.title)
+ self.assertEqual("Medium", finding.severity)
description = """**Target:** Deployment/redis-follower
**Type:** Kubernetes Security Check
@@ -169,12 +169,12 @@ def test_kubernetes(self):
re_description = re.sub(r"\s+", " ", description)
re_finding_description = re.sub(r"\s+", " ", finding.description)
self.assertEqual(re_description.strip(), re_finding_description.strip())
- self.assertEqual('Set \'set containers[].securityContext.allowPrivilegeEscalation\' to \'false\'.', finding.mitigation)
+ self.assertEqual("Set 'set containers[].securityContext.allowPrivilegeEscalation' to 'false'.", finding.mitigation)
self.assertIsNone(finding.unsaved_vulnerability_ids)
- self.assertEqual(['config', 'kubernetes'], finding.tags)
+ self.assertEqual(["config", "kubernetes"], finding.tags)
self.assertIsNone(finding.component_name)
self.assertIsNone(finding.component_version)
- self.assertEqual('default / Deployment / redis-follower', finding.service)
+ self.assertEqual("default / Deployment / redis-follower", finding.service)
def test_license_scheme(self):
with open(sample_path("license_scheme.json")) as test_file:
diff --git a/unittests/tools/test_trufflehog3_parser.py b/unittests/tools/test_trufflehog3_parser.py
index f80f9ae834..ac750194f0 100644
--- a/unittests/tools/test_trufflehog3_parser.py
+++ b/unittests/tools/test_trufflehog3_parser.py
@@ -38,7 +38,7 @@ def test_many_vulns_legacy(self):
finding = findings[0]
self.assertEqual("High", finding.severity)
self.assertEqual(798, finding.cwe)
- self.assertEqual('fixtures/users.json', finding.file_path)
+ self.assertEqual("fixtures/users.json", finding.file_path)
# FIXME for now the date in Finding is type datetime.date we need to switch to datetime
# self.assertEqual(datetime.datetime, type(finding.date))
# self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), finding.date)
@@ -53,7 +53,7 @@ def test_many_vulns2_legacy(self):
finding = findings[0]
self.assertEqual("High", finding.severity)
self.assertEqual(798, finding.cwe)
- self.assertEqual('test_all.py', finding.file_path)
+ self.assertEqual("test_all.py", finding.file_path)
self.assertEqual(8, finding.nb_occurences)
def test_many_vulns_current(self):
@@ -63,7 +63,7 @@ def test_many_vulns_current(self):
self.assertEqual(len(findings), 3)
finding = findings[0]
- self.assertEqual('High Entropy found in docker/Dockerfile', finding.title)
+ self.assertEqual("High Entropy found in docker/Dockerfile", finding.title)
self.assertEqual(798, finding.cwe)
description = """**Secret:** 964a1afa20dd4a3723002560124dd96f2a9e853f7ef5b86f5c2354af336fca37
**Context:**
@@ -74,22 +74,22 @@ def test_many_vulns_current(self):
**Commit date:** 2021-10-08T20:14:27+02:00"""
self.assertEqual(description, finding.description)
self.assertEqual("High", finding.severity)
- self.assertEqual('docker/Dockerfile', finding.file_path)
+ self.assertEqual("docker/Dockerfile", finding.file_path)
self.assertEqual(3, finding.line)
self.assertEqual(1, finding.nb_occurences)
finding = findings[1]
- self.assertEqual('High Entropy found in docker/Dockerfile', finding.title)
+ self.assertEqual("High Entropy found in docker/Dockerfile", finding.title)
self.assertEqual(798, finding.cwe)
self.maxDiff = None
- self.assertIn('\n\n***\n\n', finding.description)
+ self.assertIn("\n\n***\n\n", finding.description)
self.assertEqual("Medium", finding.severity)
- self.assertEqual('docker/Dockerfile', finding.file_path)
+ self.assertEqual("docker/Dockerfile", finding.file_path)
self.assertEqual(2, finding.line)
self.assertEqual(2, finding.nb_occurences)
finding = findings[2]
- self.assertEqual('High Entropy found in env-file.txt', finding.title)
+ self.assertEqual("High Entropy found in env-file.txt", finding.title)
self.assertEqual(798, finding.cwe)
description = """**Secret:** 44c45225cf94e58d0c86f0a31051eb7c52c8f78f
**Context:**
@@ -97,7 +97,7 @@ def test_many_vulns_current(self):
11: second line of context"""
self.assertEqual(description, finding.description)
self.assertEqual("Low", finding.severity)
- self.assertEqual('env-file.txt', finding.file_path)
+ self.assertEqual("env-file.txt", finding.file_path)
self.assertEqual(10, finding.line)
self.assertEqual(1, finding.nb_occurences)
diff --git a/unittests/tools/test_trufflehog_parser.py b/unittests/tools/test_trufflehog_parser.py
index 11aa8b55a3..5c7089a2a0 100644
--- a/unittests/tools/test_trufflehog_parser.py
+++ b/unittests/tools/test_trufflehog_parser.py
@@ -19,7 +19,7 @@ def test_many_vulns_v2(self):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(798, finding.cwe)
- self.assertEqual('test_all.py', finding.file_path)
+ self.assertEqual("test_all.py", finding.file_path)
def test_many_vulns_git_v3(self):
with open(sample_path("v3_git.json")) as test_file:
@@ -29,7 +29,7 @@ def test_many_vulns_git_v3(self):
finding = findings[0]
self.assertEqual("Critical", finding.severity)
self.assertEqual(798, finding.cwe)
- self.assertEqual('keys', finding.file_path)
+ self.assertEqual("keys", finding.file_path)
def test_many_vulns_github_v3(self):
with open(sample_path("v3_github.json")) as test_file:
@@ -39,4 +39,4 @@ def test_many_vulns_github_v3(self):
finding = findings[0]
self.assertEqual("Critical", finding.severity)
self.assertEqual(798, finding.cwe)
- self.assertEqual('keys', finding.file_path)
+ self.assertEqual("keys", finding.file_path)
diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py
index 3daed41862..e6cb7d83f9 100644
--- a/unittests/tools/test_veracode_parser.py
+++ b/unittests/tools/test_veracode_parser.py
@@ -81,12 +81,12 @@ def parse_file_with_multiple_finding(self):
self.assertEqual("sourcefilepathMyApp.java", finding.file_path)
self.assertEqual(2, finding.line)
self.assertEqual("app-1234_issue-1", finding.unique_id_from_tool)
- self.assertIn('sast', finding.unsaved_tags)
+ self.assertIn("sast", finding.unsaved_tags)
finding = findings[1]
self.assertEqual("Medium", finding.severity)
self.assertEqual(456, finding.cwe)
self.assertTrue(finding.dynamic_finding)
- self.assertIn('dast', finding.unsaved_tags)
+ self.assertIn("dast", finding.unsaved_tags)
finding = findings[2]
self.assertEqual("High", finding.severity)
self.assertIsNone(finding.cwe)
@@ -94,14 +94,14 @@ def parse_file_with_multiple_finding(self):
self.assertEqual("CVE-1234-1234", finding.unsaved_vulnerability_ids[0])
self.assertEqual("Vulnerable component: library:1234", finding.title)
self.assertFalse(finding.is_mitigated)
- self.assertIn('sca', finding.unsaved_tags)
+ self.assertIn("sca", finding.unsaved_tags)
finding = findings[3]
self.assertEqual("High", finding.severity)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-5678-5678", finding.unsaved_vulnerability_ids[0])
self.assertEqual("Vulnerable component: library1:1234", finding.title)
self.assertFalse(finding.is_mitigated)
- self.assertIn('sca', finding.unsaved_tags)
+ self.assertIn("sca", finding.unsaved_tags)
@override_settings(USE_FIRST_SEEN=True)
def test_parse_file_with_multiple_finding2_first_seen(self):
@@ -213,12 +213,12 @@ def parse_file_with_dynamic_finding(self):
self.assertEqual("catname", finding.title)
self.assertEqual("Description", finding.description)
self.assertFalse(finding.is_mitigated)
- self.assertIn('dast', finding.unsaved_tags)
+ self.assertIn("dast", finding.unsaved_tags)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
- self.assertEqual('https', endpoint.protocol)
- self.assertEqual('www.example.com', endpoint.host)
- self.assertEqual('index.html', endpoint.path)
+ self.assertEqual("https", endpoint.protocol)
+ self.assertEqual("www.example.com", endpoint.host)
+ self.assertEqual("index.html", endpoint.path)
return finding
@@ -275,41 +275,41 @@ def json_static_findings_test(self, file_name):
self.assertEqual(finding.severity, "Medium")
self.assertEqual(finding.cwe, 80)
self.assertEqual(finding.description, (
- '### Meta Information\n'
- '**Exploitability Predication**: Likely\n'
- '**Attack Vector**: page.html\n'
- '**Module**: CoolProduct.jsa\n'
- '### Details\n'
- 'This call to page.html() contains a cross-site scripting '
- '(XSS) flaw. The application populates the HTTP response with '
- 'untrusted input, allowing an attacker to embed malicious '
- 'content, such as Javascript code, which will be executed in '
+ "### Meta Information\n"
+ "**Exploitability Predication**: Likely\n"
+ "**Attack Vector**: page.html\n"
+ "**Module**: CoolProduct.jsa\n"
+ "### Details\n"
+ "This call to page.html() contains a cross-site scripting "
+ "(XSS) flaw. The application populates the HTTP response with "
+ "untrusted input, allowing an attacker to embed malicious "
+ "content, such as Javascript code, which will be executed in "
"the context of the victim's browser. XSS vulnerabilities are "
- 'commonly exploited to steal or manipulate cookies, modify '
- 'presentation of content, and compromise confidential '
- 'information, with new attack vectors being discovered on a '
- 'regular basis.'
+ "commonly exploited to steal or manipulate cookies, modify "
+ "presentation of content, and compromise confidential "
+ "information, with new attack vectors being discovered on a "
+ "regular basis."
))
self.assertEqual(finding.mitigation, (
- 'Use contextual escaping on all untrusted data before using it '
- 'to construct any portion of an HTTP response. The escaping '
- 'method should be chosen based on the specific use case of the '
- 'untrusted data, otherwise it may not protect fully against the '
- 'attack. For example, if the data is being written to the body '
- 'of an HTML page, use HTML entity escaping; if the data is '
- 'being written to an attribute, use attribute escaping; etc. '
- 'Both the OWASP Java Encoder library and the Microsoft AntiXSS '
- 'library provide contextual escaping methods. For more details '
- 'on contextual escaping, see '
- 'https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. '
- 'In addition, as a best practice, always validate untrusted '
- 'input to ensure that it conforms to the expected format, using '
- 'centralized data validation routines when possible.'
+ "Use contextual escaping on all untrusted data before using it "
+ "to construct any portion of an HTTP response. The escaping "
+ "method should be chosen based on the specific use case of the "
+ "untrusted data, otherwise it may not protect fully against the "
+ "attack. For example, if the data is being written to the body "
+ "of an HTML page, use HTML entity escaping; if the data is "
+ "being written to an attribute, use attribute escaping; etc. "
+ "Both the OWASP Java Encoder library and the Microsoft AntiXSS "
+ "library provide contextual escaping methods. For more details "
+ "on contextual escaping, see "
+ "https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.md. "
+ "In addition, as a best practice, always validate untrusted "
+ "input to ensure that it conforms to the expected format, using "
+ "centralized data validation routines when possible."
))
self.assertEqual(finding.references, (
- '- [CWE](https://cwe.mitre.org/data/definitions/79.html)\n'
- '- [OWASP](https://owasp.org/www-community/attacks/xss/)\n'
- '- [Supported Cleansers](https://docs.veracode.com/r/review_cleansers)\n'
+ "- [CWE](https://cwe.mitre.org/data/definitions/79.html)\n"
+ "- [OWASP](https://owasp.org/www-community/attacks/xss/)\n"
+ "- [Supported Cleansers](https://docs.veracode.com/r/review_cleansers)\n"
))
self.assertEqual(finding.line, 50)
self.assertEqual(finding.sast_source_line, 50)
@@ -351,38 +351,38 @@ def json_dynamic_findings_test(self, file_name):
self.assertEqual(finding.severity, "High")
self.assertEqual(finding.cwe, 74)
self.assertEqual(finding.description, (
- '### Meta Information\n'
- '**Plugin**: Code Injection\n'
- '**Attack Vector**: Improper Neutralization of Special '
- 'Elements in Output Used by a Downstream Component '
+ "### Meta Information\n"
+ "**Plugin**: Code Injection\n"
+ "**Attack Vector**: Improper Neutralization of Special "
+ "Elements in Output Used by a Downstream Component "
"('Injection')\n"
- '**Vulnerable Parameter**: api\n'
- '### Details\n'
- 'Injections happen when untrusted data is inserted into an '
- 'interpreted syntax and subsequently evaluated on the server '
- 'side. This syntax may be a SQL query, a parsed JSON or XML '
- 'document, an executed script or other syntax that may be in '
- 'use within the application. Although the target syntax has '
- 'not been identified, the application behavior demonstrates '
- 'that the input HTTP parameter may be inserted without proper '
- 'escaping. It was observed by sending valid and invalid '
- 'payloads that should throw or should not throw errors. By '
- 'inserting a proper and improper comments such as ``, `*/_/*`, '
- '`/*_*/` into the `api` parameter, the scanner was able to '
- 'spot a difference in the responses, which is a good indicator '
- 'of a potential vulnerability. Confidence: medium. Response '
- 'codes: `404`, `404`, `404`. Similarities: `` vs `*/_/*`: 0.0; '
- '`*/_/*` vs `/*_*/`: 0.0; `` vs `/*_*/`: 1.0.'
+ "**Vulnerable Parameter**: api\n"
+ "### Details\n"
+ "Injections happen when untrusted data is inserted into an "
+ "interpreted syntax and subsequently evaluated on the server "
+ "side. This syntax may be a SQL query, a parsed JSON or XML "
+ "document, an executed script or other syntax that may be in "
+ "use within the application. Although the target syntax has "
+ "not been identified, the application behavior demonstrates "
+ "that the input HTTP parameter may be inserted without proper "
+ "escaping. It was observed by sending valid and invalid "
+ "payloads that should throw or should not throw errors. By "
+ "inserting a proper and improper comments such as ``, `*/_/*`, "
+ "`/*_*/` into the `api` parameter, the scanner was able to "
+ "spot a difference in the responses, which is a good indicator "
+ "of a potential vulnerability. Confidence: medium. Response "
+ "codes: `404`, `404`, `404`. Similarities: `` vs `*/_/*`: 0.0; "
+ "`*/_/*` vs `/*_*/`: 0.0; `` vs `/*_*/`: 1.0."
))
self.assertEqual(finding.mitigation, (
- 'It is recommended to identify how the current parameter is '
- 'used in the application source code, and make sure it is '
- 'escaped before inserting into any syntax or query. You can add '
- 'valid values to an allowlist and invalid values to a '
- 'blocklist.'
+ "It is recommended to identify how the current parameter is " + "used in the application source code, and make sure it is " + "escaped before inserting into any syntax or query. You can add " + "valid values to an allowlist and invalid values to a " + "blocklist." )) self.assertEqual(finding.references, ( - '- [CWE](http://cwe.mitre.org/cgi-bin/jumpmenu.cgi?id=74)\n' + "- [CWE](http://cwe.mitre.org/cgi-bin/jumpmenu.cgi?id=74)\n" )) self.assertEqual(finding.unsaved_tags, ["policy-violation"]) self.assertEqual(finding.unsaved_endpoints[0], Endpoint( @@ -423,30 +423,30 @@ def json_sca_findings_test(self, file_name): self.assertEqual(finding.severity, "High") self.assertEqual(finding.cwe, 400) self.assertEqual(finding.description, ( - '### Meta Information\n' - '**Product ID**: abc123-bca321\n' - '**Component ID**: efg456-gfe654\n' - '**Language**: JAVA\n' - '#### Component Locations\n' - '- path/to/alpha/spring-boot-autoconfigure-2.5.14.jar\n' - '- path/to/beta/spring-boot-autoconfigure-2.5.14.jar\n' - '- path/to/charlie/spring-boot-autoconfigure-2.5.14.jar\n' - '- path/to/delta/spring-boot-autoconfigure-2.5.14.jar\n' - '#### Licenses\n' - '- apache-2.0: Low\n' - ' - Low-risk licenses are typically permissive licenses ' - 'that require you to preserve the copyright and license ' - 'notices, but allow distribution under different terms without ' - 'disclosing source code.\n' - '### Details\n' - 'spring-boot-autoconfigure is vulnerable to Denial Of Service ' - '(DoS). The vulnerability is applicable when the application ' - 'has Spring MVC auto-configuration enabled and uses the Spring ' - 'Boot welcome page, which can be either static or templated, ' - 'and the application is deployed behind a proxy which caches ' - 'the 404 responses. An attacker can cause the application to ' - 'crash by submitting a request to the welcome page which the ' - 'server is unable to properly respond to.' + "### Meta Information\n" + "**Product ID**: abc123-bca321\n" + "**Component ID**: efg456-gfe654\n" + "**Language**: JAVA\n" + "#### Component Locations\n" + "- path/to/alpha/spring-boot-autoconfigure-2.5.14.jar\n" + "- path/to/beta/spring-boot-autoconfigure-2.5.14.jar\n" + "- path/to/charlie/spring-boot-autoconfigure-2.5.14.jar\n" + "- path/to/delta/spring-boot-autoconfigure-2.5.14.jar\n" + "#### Licenses\n" + "- apache-2.0: Low\n" + " - Low-risk licenses are typically permissive licenses " + "that require you to preserve the copyright and license " + "notices, but allow distribution under different terms without " + "disclosing source code.\n" + "### Details\n" + "spring-boot-autoconfigure is vulnerable to Denial Of Service " + "(DoS). The vulnerability is applicable when the application " + "has Spring MVC auto-configuration enabled and uses the Spring " + "Boot welcome page, which can be either static or templated, " + "and the application is deployed behind a proxy which caches " + "the 404 responses. An attacker can cause the application to " + "crash by submitting a request to the welcome page which the " + "server is unable to properly respond to." 
)) self.assertEqual(finding.cvssv3, "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H") self.assertEqual(finding.component_name, "spring-boot-autoconfigure.jar") @@ -458,23 +458,23 @@ def json_sca_findings_test(self, file_name): self.assertEqual(finding.severity, "Medium") self.assertEqual(finding.cwe, 0) self.assertEqual(finding.description, ( - '### Meta Information\n' - '**Product ID**: abc123-bca321\n' - '**Component ID**: efg456-gfe654\n' - '**Language**: JAVASCRIPT\n' - '#### Component Locations\n' - '- path/to/alpha/node_modules:inflight\n' - '#### Licenses\n' - '- isc: Low\n' - ' - Low-risk licenses are typically permissive licenses ' - 'that require you to preserve the copyright and license ' - 'notices, but allow distribution under different terms without ' - 'disclosing source code.\n' - '### Details\n' - 'inflight is vulnerable to a Memory Leak. The vulnerability is ' - 'caused by improper memory management due to a lack of ' - 'resource freeing, which can result in Denial of Service ' - 'conditions.' + "### Meta Information\n" + "**Product ID**: abc123-bca321\n" + "**Component ID**: efg456-gfe654\n" + "**Language**: JAVASCRIPT\n" + "#### Component Locations\n" + "- path/to/alpha/node_modules:inflight\n" + "#### Licenses\n" + "- isc: Low\n" + " - Low-risk licenses are typically permissive licenses " + "that require you to preserve the copyright and license " + "notices, but allow distribution under different terms without " + "disclosing source code.\n" + "### Details\n" + "inflight is vulnerable to a Memory Leak. The vulnerability is " + "caused by improper memory management due to a lack of " + "resource freeing, which can result in Denial of Service " + "conditions." )) self.assertEqual(finding.cvssv3, "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H") self.assertEqual(finding.component_name, "inflight")