Merge remote-tracking branch 'upstream/dev' into dev
Maffooch committed Aug 26, 2024
2 parents 4c9fd1a + 096bd04 commit 38b203b
Showing 26 changed files with 737 additions and 212 deletions.
13 changes: 11 additions & 2 deletions Dockerfile.integration-tests-debian
@@ -25,8 +25,13 @@ RUN pip install --no-cache-dir selenium==4.9.0 requests

# Install the latest Google Chrome stable release
WORKDIR /opt/chrome

+# TODO: figure out whatever fix is necessary to use Chrome >= 128 and put this back in the RUN below so we stay
+# up-to-date
+# chrome_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chrome[] | select(.platform == "linux64").url') && \

RUN \
-chrome_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chrome[] | select(.platform == "linux64").url') && \
+chrome_url="https://storage.googleapis.com/chrome-for-testing-public/127.0.6533.119/linux64/chrome-linux64.zip" && \
wget $chrome_url && \
unzip chrome-linux64.zip && \
rm -rf chrome-linux64.zip && \
@@ -49,8 +54,12 @@ RUN apt-get install -y libxi6 libgconf-2-4 jq libjq1 libonig5 libxkbcommon0 libx

# Installing the latest stable Google Chrome driver release
WORKDIR /opt/chrome-driver
+# TODO: figure out whatever fix is necessary to use Chrome >= 128 and put this back in the RUN below so we stay
+# up-to-date
+# chromedriver_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chromedriver[] | select(.platform == "linux64").url') && \

RUN \
-chromedriver_url=$(curl https://googlechromelabs.github.io/chrome-for-testing/last-known-good-versions-with-downloads.json | jq -r '.channels[] | select(.channel == "Stable") | .downloads.chromedriver[] | select(.platform == "linux64").url') && \
+chromedriver_url="https://storage.googleapis.com/chrome-for-testing-public/127.0.6533.119/linux64/chromedriver-linux64.zip" && \
wget $chromedriver_url && \
unzip -j chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
rm -rf chromedriver-linux64.zip && \
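Note: the commented-out curl | jq pipeline above resolves the latest stable Chrome-for-Testing build; the commit pins 127.0.6533.119 instead until the Chrome >= 128 breakage is fixed. A sketch of the same stable-channel lookup in Python, for clarity (not part of the commit):

```python
# Equivalent of the jq query:
# .channels[] | select(.channel == "Stable") | .downloads.chrome[]
#   | select(.platform == "linux64").url
import json
import urllib.request

URL = ("https://googlechromelabs.github.io/chrome-for-testing/"
       "last-known-good-versions-with-downloads.json")

with urllib.request.urlopen(URL) as resp:
    data = json.load(resp)

chrome_url = next(
    d["url"]
    for d in data["channels"]["Stable"]["downloads"]["chrome"]
    if d["platform"] == "linux64"
)
print(chrome_url)
```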
60 changes: 0 additions & 60 deletions docker-compose.override.debug.yml

This file was deleted.

12 changes: 8 additions & 4 deletions docker-compose.override.dev.yml
@@ -5,26 +5,30 @@ services:
volumes:
- '.:/app:z'
environment:
-PYTHONWARNINGS: always # We are strict during development so Warnings needs to be more verbose
+PYTHONWARNINGS: error # We are strict about Warnings during development
DD_DEBUG: 'True'
DD_ADMIN_USER: "${DD_ADMIN_USER:-admin}"
DD_ADMIN_PASSWORD: "${DD_ADMIN_PASSWORD:-admin}"
DD_EMAIL_URL: "smtp://mailhog:1025"
celeryworker:
volumes:
- '.:/app:z'
environment:
-PYTHONWARNINGS: always # We are strict during development so Warnings needs to be more verbose
+PYTHONWARNINGS: error # We are strict about Warnings during development
DD_DEBUG: 'True'
DD_EMAIL_URL: "smtp://mailhog:1025"
celerybeat:
volumes:
- '.:/app:z'
environment:
-PYTHONWARNINGS: always # We are strict during development so Warnings needs to be more verbose
+PYTHONWARNINGS: error # We are strict about Warnings during development
DD_DEBUG: 'True'
initializer:
volumes:
- '.:/app:z'
environment:
-PYTHONWARNINGS: always # We are strict during development so Warnings needs to be more verbose
+PYTHONWARNINGS: error # We are strict about Warnings during development
DD_DEBUG: 'True'
DD_ADMIN_USER: "${DD_ADMIN_USER:-admin}"
DD_ADMIN_PASSWORD: "${DD_ADMIN_PASSWORD:-admin}"
nginx:
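These overrides switch PYTHONWARNINGS from `always` (print every warning) to `error`, so any warning raised in development aborts loudly instead of scrolling by. A minimal illustration of the difference (not part of the commit):

```python
import warnings

# Simulate PYTHONWARNINGS=error within the process.
warnings.simplefilter("error")

try:
    warnings.warn("this API is deprecated", DeprecationWarning)
except DeprecationWarning as exc:
    print(f"raised as an exception: {exc}")  # under 'always' this would only print
```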
14 changes: 0 additions & 14 deletions docker/setEnv.sh
@@ -5,7 +5,6 @@
target_dir="${0%/*}/.."
override_link='docker-compose.override.yml'
override_file_dev='docker-compose.override.dev.yml'
-override_file_debug='docker-compose.override.debug.yml'
override_file_unit_tests='docker-compose.override.unit_tests.yml'
override_file_unit_tests_cicd='docker-compose.override.unit_tests_cicd.yml'
override_file_integration_tests='docker-compose.override.integration_tests.yml'
@@ -77,19 +76,6 @@ function set_dev {
fi
}

-function set_debug {
-get_current
-if [ "${current_env}" != debug ]
-then
-docker compose down
-rm -f ${override_link}
-ln -s ${override_file_debug} ${override_link}
-echo "Now using 'debug' configuration."
-else
-echo "Already using 'debug' configuration."
-fi
-}

function set_unit_tests {
get_current
if [ "${current_env}" != unit_tests ]
1 change: 1 addition & 0 deletions docs/content/en/integrations/api-v2-docs.md
@@ -47,6 +47,7 @@ For example: :

If you use [an alternative authentication method](../social-authentication/) for users, you may want to disable DefectDojo API tokens because it could bypass your authentication concept. \
Using of DefectDojo API tokens can be disabled by specifying the environment variable `DD_API_TOKENS_ENABLED` to `False`.
+Or only `api/v2/api-token-auth/` endpoint can be disabled by setting `DD_API_TOKEN_AUTH_ENDPOINT_ENABLED` to `False`.

## Sample Code

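For context: while the endpoint is enabled, a client can exchange credentials for a token; setting `DD_API_TOKEN_AUTH_ENDPOINT_ENABLED=False` removes only that route. A sketch of the flow being gated (host and credentials are placeholders; the `{"token": ...}` response shape follows the DRF token-auth convention this endpoint uses):

```python
import requests

BASE = "https://defectdojo.example.com"  # placeholder host

resp = requests.post(
    f"{BASE}/api/v2/api-token-auth/",
    json={"username": "admin", "password": "admin"},
    timeout=30,
)
resp.raise_for_status()  # this call stops working once the endpoint is disabled
token = resp.json()["token"]

# Existing tokens remain usable (subject to DD_API_TOKENS_ENABLED):
findings = requests.get(
    f"{BASE}/api/v2/findings/",
    headers={"Authorization": f"Token {token}"},
    timeout=30,
).json()
```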
1 change: 1 addition & 0 deletions dojo/context_processors.py
@@ -25,6 +25,7 @@ def globalize_vars(request):
"SAML2_LOGOUT_URL": settings.SAML2_LOGOUT_URL,
"DOCUMENTATION_URL": settings.DOCUMENTATION_URL,
"API_TOKENS_ENABLED": settings.API_TOKENS_ENABLED,
"API_TOKEN_AUTH_ENDPOINT_ENABLED": settings.API_TOKEN_AUTH_ENDPOINT_ENABLED,
}


12 changes: 11 additions & 1 deletion dojo/engagement/views.py
@@ -68,6 +68,7 @@
TypedNoteForm,
UploadThreatForm,
)
+from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_importer import DefaultImporter
from dojo.models import (
Check_List,
@@ -922,6 +923,15 @@ def create_engagement(
# Return the engagement
return engagement

+def get_importer(
+self,
+context: dict,
+) -> BaseImporter:
+"""
+Gets the importer to use
+"""
+return DefaultImporter(**context)

def import_findings(
self,
context: dict,
@@ -930,7 +940,7 @@ def import_findings(
Attempt to import with all the supplied information
"""
try:
-importer_client = DefaultImporter(**context)
+importer_client = self.get_importer(context)
context["test"], _, finding_count, closed_finding_count, _, _, _ = importer_client.process_scan(
context.pop("scan", None),
)
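Extracting the importer construction into `get_importer()` turns the import path into an extension point: a subclass can swap in its own importer without re-implementing `import_findings()`. The same pattern is applied to `get_reimporter()` in dojo/test/views.py below. A sketch under assumed names (the view class and the custom importer are illustrative, not part of the commit):

```python
from dojo.engagement.views import ImportScanResultsView  # assumed view class name
from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_importer import DefaultImporter


class AuditingImporter(DefaultImporter):
    """Hypothetical importer that logs every scan it processes."""

    def process_scan(self, scan, *args, **kwargs):
        print("AuditingImporter: processing scan")
        return super().process_scan(scan, *args, **kwargs)


class AuditedImportView(ImportScanResultsView):
    def get_importer(self, context: dict) -> BaseImporter:
        # import_findings() picks this up without further changes
        return AuditingImporter(**context)
```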
5 changes: 3 additions & 2 deletions dojo/importers/default_importer.py
@@ -108,7 +108,7 @@ def process_scan(
new_findings = self.determine_process_method(self.parsed_findings, **kwargs)
# Close any old findings in the processed list if the the user specified for that
# to occur in the form that is then passed to the kwargs
-closed_findings = self.close_old_findings(self.test.finding_set.values(), **kwargs)
+closed_findings = self.close_old_findings(self.test.finding_set.all(), **kwargs)
# Update the timestamps of the test object by looking at the findings imported
self.update_timestamps()
# Update the test meta
@@ -247,11 +247,12 @@ def close_old_findings(
logger.debug("REIMPORT_SCAN: Closing findings no longer present in scan report")
# Close old active findings that are not reported by this scan.
# Refactoring this to only call test.finding_set.values() once.
+findings = findings.values()
mitigated_hash_codes = []
new_hash_codes = []
for finding in findings:
new_hash_codes.append(finding["hash_code"])
-if getattr(finding, "is_mitigated", None):
+if finding.get("is_mitigated", None):
mitigated_hash_codes.append(finding["hash_code"])
for hash_code in new_hash_codes:
if hash_code == finding["hash_code"]:
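The accessor change is the substantive fix here: `close_old_findings()` now receives a QuerySet and calls `.values()` itself, and on the resulting dicts `getattr()` consults attributes rather than keys, so the old `getattr(finding, "is_mitigated", None)` always returned None and mitigated findings were never collected. A minimal reproduction:

```python
row = {"hash_code": "abc123", "is_mitigated": True}  # what QuerySet.values() yields

print(getattr(row, "is_mitigated", None))  # None -- dict keys are invisible to getattr
print(row.get("is_mitigated", None))       # True -- dict lookup works
```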
9 changes: 8 additions & 1 deletion dojo/importers/default_reimporter.py
@@ -147,6 +147,13 @@ def process_scan(
test_import_history,
)

+def determine_deduplication_algorithm(self) -> str:
+"""
+Determines what dedupe algorithm to use for the Test being processed.
+:return: A string representing the dedupe algorithm to use.
+"""
+return self.test.deduplication_algorithm

def process_findings(
self,
parsed_findings: List[Finding],
@@ -160,7 +167,7 @@ def process_findings(
at import time
"""

-self.deduplication_algorithm = self.test.deduplication_algorithm
+self.deduplication_algorithm = self.determine_deduplication_algorithm()
self.original_items = list(self.test.finding_set.all())
self.new_items = []
self.reactivated_items = []
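As with `get_importer()` above, `determine_deduplication_algorithm()` gives subclasses a single override point for the dedupe algorithm. A sketch (the `"hash_code"` literal is assumed to match one of DefectDojo's supported algorithm names):

```python
from dojo.importers.default_reimporter import DefaultReImporter


class HashCodeOnlyReImporter(DefaultReImporter):
    def determine_deduplication_algorithm(self) -> str:
        # Pin the algorithm instead of reading it from the Test
        return "hash_code"
```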
23 changes: 15 additions & 8 deletions dojo/models.py
@@ -2640,14 +2640,7 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru
except Exception as ex:
logger.error("Can't compute cvssv3 score for finding id %i. Invalid cvssv3 vector found: '%s'. Exception: %s", self.id, self.cvssv3, ex)

-# Finding.save is called once from serializers.py with dedupe_option=False because the finding is not ready yet, for example the endpoints are not built
-# It is then called a second time with dedupe_option defaulted to true; now we can compute the hash_code and run the deduplication
-if dedupe_option:
-if (self.hash_code is not None):
-deduplicationLogger.debug("Hash_code already computed for finding")
-else:
-self.hash_code = self.compute_hash_code()
-deduplicationLogger.debug("Hash_code computed for finding: %s", self.hash_code)
+self.set_hash_code(dedupe_option)

if self.pk is None:
# We enter here during the first call from serializers.py
@@ -3346,6 +3339,20 @@ def inherit_tags(self, potentially_existing_tags):
def violates_sla(self):
return (self.sla_expiration_date and self.sla_expiration_date < timezone.now().date())

+def set_hash_code(self, dedupe_option):
+from dojo.utils import get_custom_method
+if hash_method := get_custom_method("FINDING_HASH_METHOD"):
+hash_method(self, dedupe_option)
+else:
+# Finding.save is called once from serializers.py with dedupe_option=False because the finding is not ready yet, for example the endpoints are not built
+# It is then called a second time with dedupe_option defaulted to true; now we can compute the hash_code and run the deduplication
+if dedupe_option:
+if self.hash_code is not None:
+deduplicationLogger.debug("Hash_code already computed for finding")
+else:
+self.hash_code = self.compute_hash_code()
+deduplicationLogger.debug("Hash_code computed for finding: %s", self.hash_code)


class FindingAdmin(admin.ModelAdmin):
# For efficiency with large databases, display many-to-many fields with raw
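The new `set_hash_code()` first consults `get_custom_method("FINDING_HASH_METHOD")` and delegates if a callable is configured, keeping the previous inline logic as the fallback. A sketch of a pluggable hash method (how the dotted path gets wired to the setting is assumed; only the call signature above is given by the commit):

```python
import hashlib


def title_severity_hash(finding, dedupe_option):
    """Illustrative custom FINDING_HASH_METHOD: hash on title + severity only."""
    if dedupe_option and finding.hash_code is None:
        finding.hash_code = hashlib.sha256(
            f"{finding.title}|{finding.severity}".encode()
        ).hexdigest()
```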
2 changes: 1 addition & 1 deletion dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-7a605674ff68576fef116e62103d11d55f25fb8dc15c87b93e850dde56604639
+38096a82c7cdeec6ca9c663c1ec3d6a5692a0e7bbfdea8fd2f05c58f753430d4
5 changes: 5 additions & 0 deletions dojo/settings/settings.dist.py
@@ -282,6 +282,9 @@
# When disabled, existing user tokens will not be removed but it will not be
# possible to create new and it will not be possible to use exising.
DD_API_TOKENS_ENABLED=(bool, True),
+# Enable endpoint which allow user to get API token when user+pass is provided
+# It is useful to disable when non-local authentication (like SAML, Azure, ...) is in place
+DD_API_TOKEN_AUTH_ENDPOINT_ENABLED=(bool, True),
# You can set extra Jira headers by suppling a dictionary in header: value format (pass as env var like "headr_name=value,another_header=anohter_value")
DD_ADDITIONAL_HEADERS=(dict, {}),
# Set fields used by the hashcode generator for deduplication, via en env variable that contains a JSON string
@@ -747,6 +750,8 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param

API_TOKENS_ENABLED = env("DD_API_TOKENS_ENABLED")

+API_TOKEN_AUTH_ENDPOINT_ENABLED = env("DD_API_TOKEN_AUTH_ENDPOINT_ENABLED")

REST_FRAMEWORK = {
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
"DEFAULT_AUTHENTICATION_CLASSES": (
2 changes: 2 additions & 0 deletions dojo/templates/dojo/api_v2_key.html
@@ -15,9 +15,11 @@ <h2> {{ name }}</h2>
<input class="btn btn-primary" type="submit" value="{% trans "Generate New Key" %}"/>
</form>
<hr/>
+{% if API_TOKEN_AUTH_ENDPOINT_ENABLED %}
<p>{% trans "Alternatively, you can use /api/v2/api-token-auth/ to get your token. Example:" %}</p>
<pre>
curl -X POST -H 'content-type: application/json' {% if request.is_secure %}https{% else %}http{% endif %}://{{ request.META.HTTP_HOST }}/api/v2/api-token-auth/ -d '{"username": "&lt;YOURUSERNAME&gt;", "password": "&lt;YOURPASSWORD&gt;"}'</pre>
+{% endif %}
<p>{% trans "To use your API Key you need to specify an Authorization header. Example:" %}</p>
<pre>
# As a header
12 changes: 11 additions & 1 deletion dojo/test/views.py
@@ -41,6 +41,7 @@
TestForm,
TypedNoteForm,
)
+from dojo.importers.base_importer import BaseImporter
from dojo.importers.default_reimporter import DefaultReImporter
from dojo.models import (
IMPORT_UNTOUCHED_FINDING,
@@ -986,6 +987,15 @@ def process_jira_form(
context["push_to_jira"] = push_all_jira_issues or (form and form.cleaned_data.get("push_to_jira"))
return None

+def get_reimporter(
+self,
+context: dict,
+) -> BaseImporter:
+"""
+Gets the reimporter to use
+"""
+return DefaultReImporter(**context)

def reimport_findings(
self,
context: dict,
@@ -994,7 +1004,7 @@ def reimport_findings(
Attempt to import with all the supplied information
"""
try:
-importer_client = DefaultReImporter(**context)
+importer_client = self.get_reimporter(context)
(
context["test"],
finding_count,
16 changes: 10 additions & 6 deletions dojo/tools/appcheck_web_application_scanner/engines/appcheck.py
@@ -14,14 +14,18 @@ class AppCheckScanningEngineParser(BaseEngineParser):
"""
SCANNING_ENGINE = "NewAppCheckScannerMultiple"

-REQUEST_RESPONSE_PATTERN = re.compile(r"^--->\n\n(.+)\n\n<---\n\n(.+)$", re.DOTALL)
+HTTP_1_REQUEST_RESPONSE_PATTERN = re.compile(r"^--->\n\n(.+)\n\n<---\n\n(.+)$", re.DOTALL)
+HTTP_2_REQUEST_RESPONSE_PATTERN = re.compile(
+r"^HTTP/2 Request Headers:\n\n(.+)\r\nHTTP/2 Response Headers:\n\n(.+)$", re.DOTALL)

def extract_request_response(self, finding: Finding, value: dict[str, [str]]) -> None:
-if rr_details := self.REQUEST_RESPONSE_PATTERN.findall(value.get("Messages") or ""):
-# Remove the 'Messages' entry since we've parsed it as a request/response pair; don't need to add it to the
-# Finding description
-value.pop("Messages")
-finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0])
+if messages := value.get("Messages"):
+# If we match either HTTP/1 or HTTP/2 request/response entries, remove the 'Messages' entry since we'll have
+# parsed it as a request/response pair; don't need to add it to the Finding description
+if rr_details := self.HTTP_1_REQUEST_RESPONSE_PATTERN.findall(messages)\
+or self.HTTP_2_REQUEST_RESPONSE_PATTERN.findall(messages):
+value.pop("Messages")
+finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0])

def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, [str]]]]) -> None:
self.extract_request_response(finding, value)
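The parser previously recognized only the HTTP/1-style `---> ... <---` framing; it now also matches an HTTP/2 headers layout, and pops `Messages` only when one of the two patterns matches. A self-contained check of both patterns (the sample payloads are assumptions about AppCheck's message format, for illustration only):

```python
import re

HTTP_1 = re.compile(r"^--->\n\n(.+)\n\n<---\n\n(.+)$", re.DOTALL)
HTTP_2 = re.compile(
    r"^HTTP/2 Request Headers:\n\n(.+)\r\nHTTP/2 Response Headers:\n\n(.+)$",
    re.DOTALL,
)

h1 = "--->\n\nGET / HTTP/1.1\nHost: example.com\n\n<---\n\nHTTP/1.1 200 OK"
h2 = ("HTTP/2 Request Headers:\n\n:method: GET\n:path: /\r\n"
      "HTTP/2 Response Headers:\n\n:status: 200")

for messages in (h1, h2):
    if pair := HTTP_1.findall(messages) or HTTP_2.findall(messages):
        request, response = (d.strip() for d in pair[0])
        print(request, "|", response)
```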