Release: Merge back 2.37.3 into dev from: master-into-dev/2.37.3-2.38.0-dev #10810

Merged 13 commits into dev from master-into-dev/2.37.3-2.38.0-dev on Aug 26, 2024
13 changes: 11 additions & 2 deletions Dockerfile.integration-tests-debian
@@ -25,8 +25,13 @@ RUN pip install --no-cache-dir selenium==4.9.0 requests

# Install the latest Google Chrome stable release
WORKDIR /opt/chrome

# TODO: figure out whatever fix is necessary to use Chrome >= 128 and put this back in the RUN below so we stay
# up-to-date
# chrome_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chrome[] | select(.platform == "linux64").url') && \

RUN \
chrome_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chrome[] | select(.platform == "linux64").url') && \
chrome_url="https://storage.googleapis.com/chrome-for-testing-public/127.0.6533.119/linux64/chrome-linux64.zip" && \
wget $chrome_url && \
unzip chrome-linux64.zip && \
rm -rf chrome-linux64.zip && \
@@ -49,8 +54,12 @@ RUN apt-get install -y libxi6 libgconf-2-4 jq libjq1 libonig5 libxkbcommon0 libx

# Installing the latest stable Google Chrome driver release
WORKDIR /opt/chrome-driver
# TODO: figure out whatever fix is necessary to use Chrome >= 128 and put this back in the RUN below so we stay
# up-to-date
# chromedriver_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chromedriver[] | select(.platform == "linux64").url') && \

RUN \
chromedriver_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chromedriver[] | select(.platform == "linux64").url') && \
chromedriver_url="https://storage.googleapis.com/chrome-for-testing-public/127.0.6533.119/linux64/chromedriver-linux64.zip" && \
wget $chromedriver_url && \
unzip -j chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
rm -rf chromedriver-linux64.zip && \
1 change: 1 addition & 0 deletions docs/content/en/integrations/api-v2-docs.md
@@ -47,6 +47,7 @@ For example: :

If you use [an alternative authentication method](../social-authentication/) for users, you may want to disable DefectDojo API tokens because it could bypass your authentication concept. \
Use of DefectDojo API tokens can be disabled by setting the environment variable `DD_API_TOKENS_ENABLED` to `False`.
Alternatively, only the `api/v2/api-token-auth/` endpoint can be disabled by setting `DD_API_TOKEN_AUTH_ENDPOINT_ENABLED` to `False`.

## Sample Code

1 change: 1 addition & 0 deletions dojo/context_processors.py
@@ -25,6 +25,7 @@ def globalize_vars(request):
"SAML2_LOGOUT_URL": settings.SAML2_LOGOUT_URL,
"DOCUMENTATION_URL": settings.DOCUMENTATION_URL,
"API_TOKENS_ENABLED": settings.API_TOKENS_ENABLED,
"API_TOKEN_AUTH_ENDPOINT_ENABLED": settings.API_TOKEN_AUTH_ENDPOINT_ENABLED,
}


12 changes: 11 additions & 1 deletion dojo/engagement/views.py
@@ -68,6 +68,7 @@
TypedNoteForm,
UploadThreatForm,
)
from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_importer import DefaultImporter
from dojo.models import (
Check_List,
@@ -922,6 +923,15 @@ def create_engagement(
# Return the engagement
return engagement

def get_importer(
self,
context: dict,
) -> BaseImporter:
"""
Gets the importer to use
"""
return DefaultImporter(**context)

def import_findings(
self,
context: dict,
@@ -930,7 +940,7 @@ def import_findings(
Attempt to import with all the supplied information
"""
try:
importer_client = DefaultImporter(**context)
importer_client = self.get_importer(context)
context["test"], _, finding_count, closed_finding_count, _, _, _ = importer_client.process_scan(
context.pop("scan", None),
)
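The new `get_importer()` hook makes the importer class swappable by subclassing instead of by editing `import_findings()`. A minimal sketch of how a fork might use it — the enclosing view class is assumed to be `ImportScanResultsView`, and the custom importer below is purely illustrative, not part of this PR:

```python
from dojo.engagement.views import ImportScanResultsView
from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_importer import DefaultImporter


class TaggingImporter(DefaultImporter):
    """Hypothetical importer that could, e.g., tag findings as it imports."""


class CustomImportScanResultsView(ImportScanResultsView):
    def get_importer(self, context: dict) -> BaseImporter:
        # Swap in the custom importer; import_findings() picks it up unchanged
        return TaggingImporter(**context)
```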
5 changes: 3 additions & 2 deletions dojo/importers/default_importer.py
@@ -108,7 +108,7 @@ def process_scan(
new_findings = self.determine_process_method(self.parsed_findings, **kwargs)
# Close any old findings in the processed list if the user specified for that
# to occur in the form that is then passed to the kwargs
closed_findings = self.close_old_findings(self.test.finding_set.values(), **kwargs)
closed_findings = self.close_old_findings(self.test.finding_set.all(), **kwargs)
# Update the timestamps of the test object by looking at the findings imported
self.update_timestamps()
# Update the test meta
@@ -247,11 +247,12 @@ def close_old_findings(
logger.debug("REIMPORT_SCAN: Closing findings no longer present in scan report")
# Close old active findings that are not reported by this scan.
# Refactoring this to only call test.finding_set.values() once.
findings = findings.values()
mitigated_hash_codes = []
new_hash_codes = []
for finding in findings:
new_hash_codes.append(finding["hash_code"])
if getattr(finding, "is_mitigated", None):
if finding.get("is_mitigated", None):
mitigated_hash_codes.append(finding["hash_code"])
for hash_code in new_hash_codes:
if hash_code == finding["hash_code"]:
9 changes: 8 additions & 1 deletion dojo/importers/default_reimporter.py
@@ -147,6 +147,13 @@ def process_scan(
test_import_history,
)

def determine_deduplication_algorithm(self) -> str:
"""
Determines what dedupe algorithm to use for the Test being processed.
:return: A string representing the dedupe algorithm to use.
"""
return self.test.deduplication_algorithm

def process_findings(
self,
parsed_findings: List[Finding],
@@ -160,7 +167,7 @@ def process_findings(
at import time
"""

self.deduplication_algorithm = self.test.deduplication_algorithm
self.deduplication_algorithm = self.determine_deduplication_algorithm()
self.original_items = list(self.test.finding_set.all())
self.new_items = []
self.reactivated_items = []
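Likewise, `determine_deduplication_algorithm()` gives reimporter subclasses a single override point for the dedupe-algorithm choice. A hedged sketch — the subclass and the hard-coded algorithm value are illustrative only:

```python
from dojo.importers.default_reimporter import DefaultReImporter


class HashCodeReImporter(DefaultReImporter):
    def determine_deduplication_algorithm(self) -> str:
        # Ignore the per-test configuration and always dedupe on hash_code;
        # "hash_code" is assumed here to be one of the algorithm names
        # DefectDojo recognizes
        return "hash_code"
```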
23 changes: 15 additions & 8 deletions dojo/models.py
@@ -2640,14 +2640,7 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru
except Exception as ex:
logger.error("Can't compute cvssv3 score for finding id %i. Invalid cvssv3 vector found: '%s'. Exception: %s", self.id, self.cvssv3, ex)

# Finding.save is called once from serializers.py with dedupe_option=False because the finding is not ready yet, for example the endpoints are not built
# It is then called a second time with dedupe_option defaulted to true; now we can compute the hash_code and run the deduplication
if dedupe_option:
if (self.hash_code is not None):
deduplicationLogger.debug("Hash_code already computed for finding")
else:
self.hash_code = self.compute_hash_code()
deduplicationLogger.debug("Hash_code computed for finding: %s", self.hash_code)
self.set_hash_code(dedupe_option)

if self.pk is None:
# We enter here during the first call from serializers.py
@@ -3346,6 +3339,20 @@ def inherit_tags(self, potentially_existing_tags):
def violates_sla(self):
return (self.sla_expiration_date and self.sla_expiration_date < timezone.now().date())

def set_hash_code(self, dedupe_option):
from dojo.utils import get_custom_method
if hash_method := get_custom_method("FINDING_HASH_METHOD"):
hash_method(self, dedupe_option)
else:
# Finding.save is called once from serializers.py with dedupe_option=False because the finding is not ready yet, for example the endpoints are not built
# It is then called a second time with dedupe_option defaulted to true; now we can compute the hash_code and run the deduplication
if dedupe_option:
if self.hash_code is not None:
deduplicationLogger.debug("Hash_code already computed for finding")
else:
self.hash_code = self.compute_hash_code()
deduplicationLogger.debug("Hash_code computed for finding: %s", self.hash_code)


class FindingAdmin(admin.ModelAdmin):
# For efficiency with large databases, display many-to-many fields with raw
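`set_hash_code()` now consults `get_custom_method("FINDING_HASH_METHOD")` before falling back to the stock logic, so a deployment can replace hash computation wholesale. A minimal sketch of such a hook, assuming a fork points the new `FINDING_HASH_METHOD` setting at it; the module path and hashing policy below are made up for illustration:

```python
import hashlib

# In a fork's settings module (hypothetical value):
#   FINDING_HASH_METHOD = "mycompany.dojo_ext.hashing.title_severity_hash"


def title_severity_hash(finding, dedupe_option):
    """Hypothetical replacement: hash on title + severity only."""
    # Mirror the stock behavior: only compute when dedupe is requested
    # and no hash_code has been set yet
    if dedupe_option and finding.hash_code is None:
        raw = f"{finding.title}|{finding.severity}".encode("utf-8")
        finding.hash_code = hashlib.sha256(raw).hexdigest()
```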
2 changes: 1 addition & 1 deletion dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
7a605674ff68576fef116e62103d11d55f25fb8dc15c87b93e850dde56604639
38096a82c7cdeec6ca9c663c1ec3d6a5692a0e7bbfdea8fd2f05c58f753430d4
5 changes: 5 additions & 0 deletions dojo/settings/settings.dist.py
@@ -282,6 +282,9 @@
# When disabled, existing user tokens will not be removed but it will not be
# possible to create new ones and it will not be possible to use existing ones.
DD_API_TOKENS_ENABLED=(bool, True),
# Enable the endpoint which allows a user to get an API token when username+password is provided
# It is useful to disable this when non-local authentication (like SAML, Azure, ...) is in place
DD_API_TOKEN_AUTH_ENDPOINT_ENABLED=(bool, True),
# You can set extra Jira headers by supplying a dictionary in header: value format (pass as env var like "header_name=value,another_header=another_value")
DD_ADDITIONAL_HEADERS=(dict, {}),
# Set fields used by the hashcode generator for deduplication, via en env variable that contains a JSON string
@@ -747,6 +750,8 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param

API_TOKENS_ENABLED = env("DD_API_TOKENS_ENABLED")

API_TOKEN_AUTH_ENDPOINT_ENABLED = env("DD_API_TOKEN_AUTH_ENDPOINT_ENABLED")

REST_FRAMEWORK = {
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
"DEFAULT_AUTHENTICATION_CLASSES": (
2 changes: 2 additions & 0 deletions dojo/templates/dojo/api_v2_key.html
@@ -15,9 +15,11 @@ <h2> {{ name }}</h2>
<input class="btn btn-primary" type="submit" value="{% trans "Generate New Key" %}"/>
</form>
<hr/>
{% if API_TOKEN_AUTH_ENDPOINT_ENABLED %}
<p>{% trans "Alternatively, you can use /api/v2/api-token-auth/ to get your token. Example:" %}</p>
<pre>
curl -X POST -H 'content-type: application/json' {% if request.is_secure %}https{% else %}http{% endif %}://{{ request.META.HTTP_HOST }}/api/v2/api-token-auth/ -d '{"username": "&lt;YOURUSERNAME&gt;", "password": "&lt;YOURPASSWORD&gt;"}'</pre>
{% endif %}
<p>{% trans "To use your API Key you need to specify an Authorization header. Example:" %}</p>
<pre>
# As a header
12 changes: 11 additions & 1 deletion dojo/test/views.py
@@ -41,6 +41,7 @@
TestForm,
TypedNoteForm,
)
from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_reimporter import DefaultReImporter
from dojo.models import (
IMPORT_UNTOUCHED_FINDING,
@@ -986,6 +987,15 @@ def process_jira_form(
context["push_to_jira"] = push_all_jira_issues or (form and form.cleaned_data.get("push_to_jira"))
return None

def get_reimporter(
self,
context: dict,
) -> BaseImporter:
"""
Gets the reimporter to use
"""
return DefaultReImporter(**context)

def reimport_findings(
self,
context: dict,
@@ -994,7 +1004,7 @@ def reimport_findings(
Attempt to import with all the supplied information
"""
try:
importer_client = DefaultReImporter(**context)
importer_client = self.get_reimporter(context)
(
context["test"],
finding_count,
16 changes: 10 additions & 6 deletions dojo/tools/appcheck_web_application_scanner/engines/appcheck.py
@@ -14,14 +14,18 @@ class AppCheckScanningEngineParser(BaseEngineParser):
"""
SCANNING_ENGINE = "NewAppCheckScannerMultiple"

REQUEST_RESPONSE_PATTERN = re.compile(r"^--->\n\n(.+)\n\n<---\n\n(.+)$", re.DOTALL)
HTTP_1_REQUEST_RESPONSE_PATTERN = re.compile(r"^--->\n\n(.+)\n\n<---\n\n(.+)$", re.DOTALL)
HTTP_2_REQUEST_RESPONSE_PATTERN = re.compile(
r"^HTTP/2 Request Headers:\n\n(.+)\r\nHTTP/2 Response Headers:\n\n(.+)$", re.DOTALL)

def extract_request_response(self, finding: Finding, value: dict[str, [str]]) -> None:
if rr_details := self.REQUEST_RESPONSE_PATTERN.findall(value.get("Messages") or ""):
# Remove the 'Messages' entry since we've parsed it as a request/response pair; don't need to add it to the
# Finding description
value.pop("Messages")
finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0])
if messages := value.get("Messages"):
# If we match either HTTP/1 or HTTP/2 request/response entries, remove the 'Messages' entry since we'll have
# parsed it as a request/response pair; don't need to add it to the Finding description
if rr_details := self.HTTP_1_REQUEST_RESPONSE_PATTERN.findall(messages)\
or self.HTTP_2_REQUEST_RESPONSE_PATTERN.findall(messages):
value.pop("Messages")
finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0])

def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, [str]]]]) -> None:
self.extract_request_response(finding, value)
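For reference, sample `Messages` payloads that the two patterns match, reconstructed from the regexes themselves (real AppCheck report content may of course differ):

```python
from dojo.tools.appcheck_web_application_scanner.engines.appcheck import (
    AppCheckScanningEngineParser,
)

# HTTP/1-style: request and response delimited by ---> / <--- markers
http1 = "--->\n\nGET / HTTP/1.1\nHost: example.com\n\n<---\n\nHTTP/1.1 200 OK"
# HTTP/2-style: labeled header blocks separated by \r\n
http2 = ("HTTP/2 Request Headers:\n\n:method: GET\n:path: /\r\n"
         "HTTP/2 Response Headers:\n\n:status: 200")

assert AppCheckScanningEngineParser.HTTP_1_REQUEST_RESPONSE_PATTERN.findall(http1) \
    == [("GET / HTTP/1.1\nHost: example.com", "HTTP/1.1 200 OK")]
assert AppCheckScanningEngineParser.HTTP_2_REQUEST_RESPONSE_PATTERN.findall(http2) \
    == [(":method: GET\n:path: /", ":status: 200")]
```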
21 changes: 18 additions & 3 deletions dojo/tools/appcheck_web_application_scanner/engines/base.py
@@ -26,6 +26,21 @@ def strip_markup(value: str) -> str:
return value


def escape_non_printable(s: str) -> str:
"""
Replaces non-printable characters in a string, for some definition of non-printable that probably differs from the
uncountable other available definitions of non-printable, with a more-printable version.
"""
def escape_if_needed(x):
# Accept isprintable() stuff (which includes space) and common whitespaces that can be rendered
if x.isprintable() or x in {"\r", "\n", "\t"}:
return x
# Anything else -- including other weird whitespaces -- use repr() to give the string representation; also
# remove the surrounding single quotes
return repr(x)[1:-1]
return "".join([escape_if_needed(c) for c in s])


#######
# Field parsing helper classes
#######
@@ -67,10 +82,10 @@ def check(self, engine_parser):

class DeMarkupedAttribute(Attribute):
"""
Class for an Attribute (as above) but whose value is stripped of markup prior to being set.
Class for an Attribute (as above) but whose value is stripped of markup and non-printable chars prior to being set.
"""
def handle(self, engine_class, finding, value):
super().handle(engine_class, finding, strip_markup(value))
super().handle(engine_class, finding, escape_non_printable(strip_markup(value)))


class Method(FieldType):
@@ -210,7 +225,7 @@ def parse_components(self, finding: Finding, value: [str]) -> None:
# For parsing additional description-related entries (description, notes, and details)
#####
def format_additional_description(self, section: str, value: str) -> str:
return f"**{section}**: {strip_markup(value)}"
return f"**{section}**: {escape_non_printable(strip_markup(value))}"

def append_description(self, finding: Finding, addendum: dict[str, str]) -> None:
if addendum:
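A quick illustration of what `escape_non_printable()` does and does not touch, assuming the import path below:

```python
from dojo.tools.appcheck_web_application_scanner.engines.base import (
    escape_non_printable,
)

# Printable characters and common renderable whitespace pass through untouched
assert escape_non_printable("plain text\r\n\tok") == "plain text\r\n\tok"
# Control characters are replaced by their repr() escape sequences
assert escape_non_printable("null\x00byte") == "null\\x00byte"
# So are unusual Unicode whitespace characters like the zero-width space
assert escape_non_printable("zero\u200bwidth") == "zero\\u200bwidth"
```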
4 changes: 2 additions & 2 deletions dojo/urls.py
@@ -215,8 +215,8 @@
re_path(r"^{}api/v2/user_profile/".format(get_system_setting("url_prefix")), UserProfileView.as_view(), name="user_profile"),
]

if hasattr(settings, "API_TOKENS_ENABLED"):
if settings.API_TOKENS_ENABLED:
if hasattr(settings, "API_TOKENS_ENABLED") and hasattr(settings, "API_TOKEN_AUTH_ENDPOINT_ENABLED"):
if settings.API_TOKENS_ENABLED and settings.API_TOKEN_AUTH_ENDPOINT_ENABLED:
api_v2_urls += [
re_path(
f"^{get_system_setting('url_prefix')}api/v2/api-token-auth/",
22 changes: 22 additions & 0 deletions dojo/utils.py
@@ -1,13 +1,15 @@
import binascii
import calendar as tcalendar
import hashlib
import importlib
import logging
import mimetypes
import os
import re
from calendar import monthrange
from datetime import date, datetime, timedelta
from math import pi, sqrt
from typing import Callable, Optional

import bleach
import crum
@@ -295,6 +297,9 @@ def do_dedupe_finding_task(new_finding, *args, **kwargs):


def do_dedupe_finding(new_finding, *args, **kwargs):
if dedupe_method := get_custom_method("FINDING_DEDUPE_METHOD"):
return dedupe_method(new_finding, *args, **kwargs)

try:
enabled = System_Settings.objects.get(no_cache=True).enable_deduplication
except System_Settings.DoesNotExist:
@@ -2587,6 +2592,23 @@ def get_open_findings_burndown(product):
return past_90_days


def get_custom_method(setting_name: str) -> Optional[Callable]:
"""
Attempts to load and return the method specified by fully-qualified name at the given setting.

:param setting_name: The name of the setting that holds the fqname of the Python method we want to load
:return: The callable if it was able to be loaded, else None
"""
if fq_name := getattr(settings, setting_name, None):
try:
mn, _, fn = fq_name.rpartition(".")
m = importlib.import_module(mn)
return getattr(m, fn)
except ModuleNotFoundError:
pass
return None


def generate_file_response(file_object: FileUpload) -> FileResponse:
"""Serve an uploaded file in a uniformed way.

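`do_dedupe_finding()` gains the same kind of escape hatch via `FINDING_DEDUPE_METHOD`. A hedged sketch of a custom dedupe hook that `get_custom_method()` could load — the setting value, module path, and dedupe policy are all hypothetical:

```python
# In a fork's settings module (hypothetical value):
#   FINDING_DEDUPE_METHOD = "mycompany.dojo_ext.dedupe.engagement_scoped_dedupe"


def engagement_scoped_dedupe(new_finding, *args, **kwargs):
    """Hypothetical policy: mark duplicates only within the same engagement."""
    from dojo.models import Finding

    match = (
        Finding.objects.filter(
            test__engagement=new_finding.test.engagement,
            hash_code=new_finding.hash_code,
        )
        .exclude(pk=new_finding.pk)
        .first()
    )
    if match:
        new_finding.duplicate = True
        new_finding.duplicate_finding = match
        # dedupe_option=False avoids re-entering the dedupe machinery on save
        new_finding.save(dedupe_option=False)
```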
2 changes: 1 addition & 1 deletion helm/defectdojo/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
appVersion: "2.38.0-dev"
description: A Helm chart for Kubernetes to install DefectDojo
name: defectdojo
version: 1.6.147-dev
version: 1.6.148-dev
icon: https://www.defectdojo.org/img/favicon.ico
maintainers:
- name: madchap
6 changes: 3 additions & 3 deletions helm/defectdojo/templates/_helpers.tpl
@@ -141,7 +141,7 @@ Create chart name and version as used by the chart label.
imagePullPolicy: {{ .Values.imagePullPolicy }}
{{- if .Values.securityContext.enabled }}
securityContext:
{{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }}
{{- toYaml .Values.securityContext.djangoSecurityContext | nindent 4 }}
{{- end }}
envFrom:
- configMapRef:
@@ -165,8 +165,8 @@
key: postgresql-postgres-password
{{- end }}
{{- if .Values.extraEnv }}
{{- toYaml .Values.extraEnv | nindent 8 }}
{{- toYaml .Values.extraEnv | nindent 2 }}
{{- end }}
resources:
{{- toYaml .Values.dbMigrationChecker.resources | nindent 10 }}
{{- toYaml .Values.dbMigrationChecker.resources | nindent 4 }}
{{- end -}}