Skip to content

Commit

Permalink
Merge branch 'bugfix' into rm_mysqlleftover
Browse files Browse the repository at this point in the history
  • Loading branch information
manuel-sommer authored Nov 29, 2024
2 parents ab14a0f + 06ff84f commit 3b6863e
Show file tree
Hide file tree
Showing 31 changed files with 1,027 additions and 183 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/release-drafter.yml
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ jobs:
steps:
- name: Load OAS files from artifacts
uses: actions/download-artifact@v4
with:
pattern: oas-*

- name: Upload Release Asset - OpenAPI Specification - YAML
id: upload-release-asset-yaml
Expand Down
2 changes: 1 addition & 1 deletion dojo/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401

__version__ = "2.40.1"
__version__ = "2.40.3"
__url__ = "https://github.com/DefectDojo/django-DefectDojo"
__docs__ = "https://documentation.defectdojo.com"
5 changes: 3 additions & 2 deletions dojo/api_v2/serializers.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import collections
import json
import logging
import re
Expand Down Expand Up @@ -280,10 +281,10 @@ def _pop_tags(self, validated_data):
return (to_be_tagged, validated_data)


class RequestResponseDict(list):
class RequestResponseDict(collections.UserList):
def __init__(self, *args, **kwargs):
pretty_print = kwargs.pop("pretty_print", True)
list.__init__(self, *args, **kwargs)
collections.UserList.__init__(self, *args, **kwargs)
self.pretty_print = pretty_print

def __add__(self, rhs):
Expand Down
3 changes: 2 additions & 1 deletion dojo/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -4613,7 +4613,7 @@ def __str__(self):
auditlog.register(Dojo_User, exclude_fields=["password"])
auditlog.register(Endpoint)
auditlog.register(Engagement)
auditlog.register(Finding)
auditlog.register(Finding, m2m_fields={"reviewers"})
auditlog.register(Finding_Group)
auditlog.register(Product_Type)
auditlog.register(Product)
Expand All @@ -4623,6 +4623,7 @@ def __str__(self):
auditlog.register(Cred_User, exclude_fields=["password"])
auditlog.register(Notification_Webhooks, exclude_fields=["header_name", "header_value"])


from dojo.utils import calculate_grade, to_str_typed # noqa: E402 # there is issue due to a circular import

tagulous.admin.register(Product.tags)
Expand Down
2 changes: 1 addition & 1 deletion dojo/settings/.settings.dist.py.sha256sum
Original file line number Diff line number Diff line change
@@ -1 +1 @@
54333d96827fa4d0c3fd43805659daaa648ee6b2886359b492e6eb32aea2ae24
93f0a72eaae484814b5b38dba8dc57d529ea3c414b7fa4da8b2e347851dba46e
3 changes: 2 additions & 1 deletion dojo/settings/settings.dist.py
Original file line number Diff line number Diff line change
Expand Up @@ -1213,7 +1213,7 @@ def saml2_attrib_map_format(dict):
"Dependency Check Scan": ["title", "cwe", "file_path"],
"Dockle Scan": ["title", "description", "vuln_id_from_tool"],
"Dependency Track Finding Packaging Format (FPF) Export": ["component_name", "component_version", "vulnerability_ids"],
"Mobsfscan Scan": ["title", "severity", "cwe"],
"Mobsfscan Scan": ["title", "severity", "cwe", "file_path", "description"],
"Tenable Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"],
"Nexpose Scan": ["title", "severity", "vulnerability_ids", "cwe"],
# possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity
Expand Down Expand Up @@ -1764,6 +1764,7 @@ def saml2_attrib_map_format(dict):
"TEMP": "https://security-tracker.debian.org/tracker/", # e.g. https://security-tracker.debian.org/tracker/TEMP-0841856-B18BAF
"DSA": "https://security-tracker.debian.org/tracker/", # e.g. https://security-tracker.debian.org/tracker/DSA-5791-1
"RLSA": "https://errata.rockylinux.org/", # e.g. https://errata.rockylinux.org/RLSA-2024:7001
"RLBA": "https://errata.rockylinux.org/", # e.g. https://errata.rockylinux.org/RLBA-2024:6968
}
# List of acceptable file types that can be uploaded to a given object via arbitrary file upload
FILE_UPLOAD_TYPES = env("DD_FILE_UPLOAD_TYPES")
Expand Down
5 changes: 4 additions & 1 deletion dojo/templatetags/display_tags.py
Original file line number Diff line number Diff line change
Expand Up @@ -328,7 +328,10 @@ def action_log_entry(value, autoescape=None):
history = json.loads(value)
text = ""
for k in history.keys():
text += k.capitalize() + ' changed from "' + \
if isinstance(history[k], dict):
text += k.capitalize() + " operation: " + history[k].get("operation", "unknown") + ": " + str(history[k].get("objects", "unknown"))
else:
text += k.capitalize() + ' changed from "' + \
history[k][0] + '" to "' + history[k][1] + '"\n'
return text

Expand Down
29 changes: 24 additions & 5 deletions dojo/tools/anchorectl_policies/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def get_findings(self, filename, test):
image_name = result["tag"]
trigger_id = result["triggerId"]
repo, tag = image_name.split(":", 2)
severity = map_gate_action_to_severity(status)
severity, active = get_severity(status, description)
vulnerability_id = extract_vulnerability_id(trigger_id)
title = (
policy_id
Expand All @@ -54,6 +54,7 @@ def get_findings(self, filename, test):
test=test,
description=description,
severity=severity,
active=active,
references=f"Policy ID: {policy_id}\nTrigger ID: {trigger_id}",
file_path=search_filepath(description),
component_name=repo,
Expand All @@ -77,14 +78,32 @@ def get_findings(self, filename, test):
return items


def map_gate_action_to_severity(gate):
def map_gate_action_to_severity(status):
    """Translate an Anchore policy gate action into a (severity, active) pair.

    A "stop" gate maps to Critical and "warn" to Medium; any other gate
    action falls back to Low. The active flag is always True here.
    """
    severity_by_gate = {
        "stop": "Critical",
        "warn": "Medium",
    }
    return severity_by_gate.get(status, "Low"), True


def get_severity(status, description):
    """Derive a (severity, active) pair for an AnchoreCTL policy result.

    The description conventionally begins with the severity word reported by
    the scanner (e.g. "HIGH Vulnerability found ..."). When such a leading
    token is recognized it takes precedence; otherwise the gate action
    decides via map_gate_action_to_severity(). A "go" gate means the policy
    check passed, so the finding is reported as Info and inactive.

    Args:
        status: gate action string ("stop", "warn", "go", ...).
        description: finding description, possibly prefixed with a severity.

    Returns:
        tuple: (DefectDojo severity label, active flag)
    """
    # "HIGH" was missing from the original list, which silently routed HIGH
    # findings through the gate-action fallback (e.g. "warn" -> Medium).
    valid_severities = ("LOW", "INFO", "UNKNOWN", "CRITICAL", "MEDIUM", "HIGH")
    tokens = description.split()
    # Guard against an empty description (previously raised IndexError).
    parsed_severity = tokens[0] if tokens else ""
    if parsed_severity in valid_severities:
        active = status != "go"
        if parsed_severity == "UNKNOWN" or not active:
            # Unknown severity, or a passing ("go") gate: informational only.
            severity = "Info"
        else:
            severity = parsed_severity.capitalize()  # "MEDIUM" -> "Medium"
        return severity, active
    # No recognized severity prefix: fall back to the gate action mapping.
    return map_gate_action_to_severity(status)


def policy_name(policies, policy_id):
Expand Down
3 changes: 2 additions & 1 deletion dojo/tools/aws_prowler_v3plus/prowler_v4.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,8 @@ def process_ocsf_json(self, file, test):
documentation = deserialized.get("remediation", {}).get("references", "")
documentation = str(documentation) + "\n" + str(deserialized.get("unmapped", {}).get("related_url", ""))
security_domain = deserialized.get("resources", [{}])[0].get("type", "")
timestamp = deserialized.get("event_time")
# Prowler v4.5.0 changed 'event_time' key in report with 'time_dt'
timestamp = deserialized.get("time_dt") or deserialized.get("event_time")
resource_arn = deserialized.get("resources", [{}])[0].get("uid", "")
resource_id = deserialized.get("resources", [{}])[0].get("name", "")
unique_id_from_tool = deserialized.get("finding_info", {}).get("uid", "")
Expand Down
9 changes: 9 additions & 0 deletions dojo/tools/checkmarx_one/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,9 @@ def get_results_sast(
description = vulnerability.get("description")
file_path = vulnerability.get("data").get("nodes")[0].get("fileName")
unique_id_from_tool = vulnerability.get("id", vulnerability.get("similarityId"))
if description is None:
description = vulnerability.get("severity").title() + " " + vulnerability.get("data").get("queryName").replace("_", " ")

return Finding(
description=description,
title=description,
Expand All @@ -280,6 +283,9 @@ def get_results_kics(
description = vulnerability.get("description")
file_path = vulnerability.get("data").get("filename", vulnerability.get("data").get("fileName"))
unique_id_from_tool = vulnerability.get("id", vulnerability.get("similarityId"))
if description is None:
description = vulnerability.get("severity").title() + " " + vulnerability.get("data").get("queryName").replace("_", " ")

return Finding(
title=description,
description=description,
Expand All @@ -298,6 +304,9 @@ def get_results_sca(
) -> Finding:
description = vulnerability.get("description")
unique_id_from_tool = vulnerability.get("id", vulnerability.get("similarityId"))
if description is None:
description = vulnerability.get("severity").title() + " " + vulnerability.get("data").get("queryName").replace("_", " ")

finding = Finding(
title=description,
description=description,
Expand Down
52 changes: 33 additions & 19 deletions dojo/tools/mobsfscan/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,29 +51,43 @@ def get_findings(self, filename, test):
else:
severity = "Info"

finding = Finding(
title=f"{key}",
test=test,
severity=severity,
nb_occurences=1,
cwe=cwe,
description=description,
references=references,
)
files = []

if item.get("files"):
for file in item.get("files"):
file_path = file.get("file_path")
line = file.get("match_lines")[0]
file_path = file.get("file_path", "")
line = file.get("match_lines", [0])[0]
snippet = file.get("match_string", "")

files.append((file_path, line, snippet))
else:
files.append(("", 0, ""))

for file_path, line, snippet in files:

finding = Finding(
title=f"{key}",
test=test,
severity=severity,
nb_occurences=1,
cwe=cwe,
description=description,
references=references,
)

if file_path:
finding.file_path = file_path
finding.line = line
finding.description = f"{description}\n**Snippet:** `{snippet}`"

dupe_key = hashlib.sha256(
(key + str(cwe) + masvs + owasp_mobile).encode("utf-8"),
).hexdigest()
dupe_key = hashlib.sha256(
(key + str(cwe) + masvs + owasp_mobile + file_path).encode("utf-8"),
).hexdigest()

if dupe_key in dupes:
finding = dupes[dupe_key]
finding.nb_occurences += 1
else:
dupes[dupe_key] = finding

if dupe_key in dupes:
finding = dupes[dupe_key]
finding.nb_occurences += 1
else:
dupes[dupe_key] = finding
return list(dupes.values())
Loading

0 comments on commit 3b6863e

Please sign in to comment.