
Commit

Ruff: add PERF
kiblik committed Sep 17, 2024
1 parent 22a0ffe commit d3b5aa0
Showing 22 changed files with 52 additions and 122 deletions.
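
For context, PERF is Ruff's Perflint-derived group of performance lint rules, and the hunks below are dominated by three of its fixes. The sketch that follows is a hypothetical illustration, not code from this repository, and assumes the rule codes documented for Ruff: PERF401 (manual-list-comprehension), PERF402 (manual-list-copy), and PERF102 (incorrect-dict-iterator).

# Hypothetical before/after pairs showing the rewrites applied throughout this commit.

def active_ids(records):
    # PERF401: a loop that only appends to a list...
    ids = []
    for record in records:
        if record["active"]:
            ids.append(record["id"])
    return ids


def active_ids_fixed(records):
    # ...becomes a list comprehension.
    return [record["id"] for record in records if record["active"]]


def copy_rows(reader):
    # PERF402: a loop that merely copies an iterable into a list...
    rows = []
    for row in reader:
        rows.append(row)
    return rows


def copy_rows_fixed(reader):
    # ...becomes a single list() call.
    return list(reader)


def titles(findings_by_key):
    # PERF102: iterating .items() while ignoring the key...
    result = []
    for _key, finding in findings_by_key.items():
        result.append(finding["title"])
    return result


def titles_fixed(findings_by_key):
    # ...becomes iteration over .values() (combined here with a PERF401 comprehension).
    return [finding["title"] for finding in findings_by_key.values()]

In the hunks below, lines prefixed with "-" are the removed loop form and lines prefixed with "+" are the replacement.
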
3 changes: 1 addition & 2 deletions docker/install_chrome_dependencies.py
@@ -57,7 +57,6 @@ def ldd(file_path):
         p.endswith(suffix) for suffix in ["-dbg", "-test", "tests", "-dev", "-mesa"]
     )
 ]
-for p in packages:
-    missing_packages.append(p)
+missing_packages += packages
 
 logger.info("missing_packages: " + (" ".join(missing_packages)))
8 changes: 3 additions & 5 deletions dojo/api_v2/serializers.py
@@ -1057,9 +1057,7 @@ class EngagementToFilesSerializer(serializers.Serializer):
     def to_representation(self, data):
         engagement = data.get("engagement_id")
         files = data.get("files")
-        new_files = []
-        for file in files:
-            new_files.append(
+        new_files = [
                 {
                     "id": file.id,
                     "file": "{site_url}/{file_access_url}".format(
@@ -1069,8 +1067,8 @@ def to_representation(self, data):
                         ),
                     ),
                     "title": file.title,
-                },
-            )
+                }
+            for file in files]
         return {"engagement_id": engagement.id, "files": new_files}


12 changes: 4 additions & 8 deletions dojo/metrics/views.py
@@ -601,10 +601,8 @@ def view_engineer(request, eid):
                              tzinfo=timezone.get_current_timezone())],
         owner=user)
         for finding in ra.accepted_findings.all()]
-    closed_month = []
-    for f in closed_findings:
-        if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month:
-            closed_month.append(f)
+    closed_month = [f for f in closed_findings
+                    if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month]
 
     o_dict, open_count = count_findings(open_month)
     c_dict, closed_count = count_findings(closed_month)
@@ -618,7 +616,6 @@ def view_engineer(request, eid):
         day_list.append(now)
 
     q_objects = (Q(date=d) for d in day_list)
-    closed_week = []
    open_week = findings.filter(reduce(operator.or_, q_objects))
 
     accepted_week = [finding for ra in Risk_Acceptance.objects.filter(
@@ -627,9 +624,8 @@ def view_engineer(request, eid):
 
     q_objects = (Q(mitigated=d) for d in day_list)
     # closed_week= findings.filter(reduce(operator.or_, q_objects))
-    for f in closed_findings:
-        if f.mitigated and f.mitigated >= day_list[0]:
-            closed_week.append(f)
+    closed_week = [f for f in closed_findings
+                   if f.mitigated and f.mitigated >= day_list[0]]
 
     o_week_dict, open_week_count = count_findings(open_week)
     c_week_dict, closed_week_count = count_findings(closed_week)
15 changes: 3 additions & 12 deletions dojo/models.py
@@ -1249,10 +1249,7 @@ def get_product_type(self):
     def open_findings_list(self):
         findings = Finding.objects.filter(test__engagement__product=self,
                                           active=True)
-        findings_list = []
-        for i in findings:
-            findings_list.append(i.id)
-        return findings_list
+        return [i.id for i in findings]
 
     @property
     def has_jira_configured(self):
@@ -3296,10 +3293,7 @@ def get_references_with_links(self):
     def vulnerability_ids(self):
         # Get vulnerability ids from database and convert to list of strings
         vulnerability_ids_model = self.vulnerability_id_set.all()
-        vulnerability_ids = []
-        for vulnerability_id in vulnerability_ids_model:
-            vulnerability_ids.append(vulnerability_id.vulnerability_id)
-
+        vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
         # Synchronize the cve field with the unsaved_vulnerability_ids
         # We do this to be as flexible as possible to handle the fields until
         # the cve field is not needed anymore and can be removed.
@@ -3506,10 +3500,7 @@ def get_breadcrumbs(self):
     def vulnerability_ids(self):
         # Get vulnerability ids from database and convert to list of strings
         vulnerability_ids_model = self.vulnerability_id_template_set.all()
-        vulnerability_ids = []
-        for vulnerability_id in vulnerability_ids_model:
-            vulnerability_ids.append(vulnerability_id.vulnerability_id)
-
+        vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
         # Synchronize the cve field with the unsaved_vulnerability_ids
         # We do this to be as flexible as possible to handle the fields until
         # the cve field is not needed anymore and can be removed.
4 changes: 1 addition & 3 deletions dojo/notes/views.py
@@ -183,9 +183,7 @@ def find_available_notetypes(finding, editing_note):
     notes = finding.notes.all()
     single_note_types = Note_Type.objects.filter(is_single=True, is_active=True).values_list("id", flat=True)
     multiple_note_types = Note_Type.objects.filter(is_single=False, is_active=True).values_list("id", flat=True)
-    available_note_types = []
-    for note_type_id in multiple_note_types:
-        available_note_types.append(note_type_id)
+    available_note_types = list(multiple_note_types)
     for note_type_id in single_note_types:
         for note in notes:
             if note_type_id == note.note_type_id:
5 changes: 1 addition & 4 deletions dojo/templatetags/display_tags.py
@@ -796,10 +796,7 @@ def first_vulnerability_id(finding):
 def additional_vulnerability_ids(finding):
     vulnerability_ids = finding.vulnerability_ids
     if vulnerability_ids and len(vulnerability_ids) > 1:
-        references = []
-        for vulnerability_id in vulnerability_ids[1:]:
-            references.append(vulnerability_id)
-        return references
+        return vulnerability_ids[1:]
     return None
 
 
7 changes: 6 additions & 1 deletion dojo/tools/cobalt/parser.py
@@ -28,7 +28,12 @@ def get_findings(self, filename, test):
             io.StringIO(content), delimiter=",", quotechar='"',
         )
         dupes = {}
-        for row in reader:
+
+        # FIXME double loop, could lead to performance pb if the number of
+        # issues is big
+        csvarray = list(reader)
+
+        for row in csvarray:
             finding = Finding(test=test)
             finding.title = (
                 row["Title"] if row["Title"][0] != "'" else row["Title"][1:]
4 changes: 1 addition & 3 deletions dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -154,9 +154,7 @@ def get_item_set(vulnerability):
     # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502.
     cves = get_cve(vulnerability)
     if len(cves) > 0:
-        for item in cves:
-            if item.get("cve"):
-                vulnerability_ids.append(item.get("cve"))
+        vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
         if "cvss_v3_vector" in cves[0]:
             cvss_v3 = cves[0]["cvss_v3_vector"]
             cvssv3 = CVSS3(cvss_v3).clean_vector()
4 changes: 1 addition & 3 deletions dojo/tools/jfrogxray/parser.py
@@ -83,9 +83,7 @@ def get_item(vulnerability, test):
     # CVE-2017-1000502.
     cves = vulnerability["component_versions"]["more_details"].get("cves", [])
     if len(cves) > 0:
-        for item in cves:
-            if item.get("cve"):
-                vulnerability_ids.append(item.get("cve"))
+        vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
         # take only the first one for now, limitation of DD model.
         if len(cves[0].get("cwe", [])) > 0:
             cwe = decode_cwe_number(cves[0].get("cwe", [])[0])
5 changes: 1 addition & 4 deletions dojo/tools/kiuwan/parser.py
@@ -42,10 +42,7 @@ def get_findings(self, filename, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         dupes = {}
         for row in csvarray:
2 changes: 1 addition & 1 deletion dojo/tools/legitify/parser.py
@@ -39,7 +39,7 @@ def get_findings(self, file, test):
         report_tree = self.parse_json(file)
 
         findings = []
-        for content_key, content_value in report_tree.get("content", {}).items():
+        for content_value in report_tree.get("content", {}).values():
             policy_info = content_value.get("policyInfo", {})
             is_finding = False
             endpoints = set()
2 changes: 1 addition & 1 deletion dojo/tools/npm_audit/parser.py
@@ -51,7 +51,7 @@ def parse_json(self, json_output):
     def get_items(self, tree, test):
         items = {}
 
-        for key, node in tree.items():
+        for node in tree.values():
             item = get_item(node, test)
             unique_key = str(node["id"]) + str(node["module_name"])
             items[unique_key] = item
9 changes: 5 additions & 4 deletions dojo/tools/npm_audit_7_plus/parser.py
@@ -82,7 +82,7 @@ def get_items(self, tree, test):
         """Return the individual items found in report."""
         items = {}
 
-        for key, node in tree.items():
+        for node in tree.values():
             item = get_item(node, tree, test)
             unique_key = item.title + item.severity
             items[unique_key] = item
@@ -143,9 +143,10 @@ def get_item(item_node, tree, test):
             and len(item_node["via"]) > 1):
         # we have a multiple CWE vuln which we will capture in the
        # vulnerability_ids and references
-        for vuln in item_node["via"][1:]: # have to decide if str or object
-            if isinstance(vuln, dict):
-                references.append(vuln["url"])
+        references.extend([vuln["url"]
+                           for vuln in item_node["via"][1:] # have to decide if str or object
+                           if isinstance(vuln, dict)
+                           ])
 
     dojo_finding = Finding(
         title=title,
8 changes: 2 additions & 6 deletions dojo/tools/nuclei/parser.py
@@ -30,18 +30,14 @@ def get_findings(self, filename, test):
         filecontent = filename.read()
         if isinstance(filecontent, bytes):
             filecontent = filecontent.decode("utf-8")
-        data = []
         if filecontent == "" or len(filecontent) == 0:
             return []
         if filecontent[0] == "[":
             content = json.loads(filecontent)
-            for template in content:
-                data.append(template)
+            data = content
         elif filecontent[0] == "{":
             file = filecontent.split("\n")
-            for line in file:
-                if line != "":
-                    data.append(json.loads(line))
+            data = [json.loads(line) for line in file if line != ""]
         dupes = {}
         for item in data:
             logger.debug("Item %s.", str(item))
15 changes: 4 additions & 11 deletions dojo/tools/openscap/parser.py
@@ -40,12 +40,8 @@ def get_findings(self, file, test):
         }
         # go to test result
         test_result = tree.find(f"./{namespace}TestResult")
-        ips = []
-        # append all target in a list.
-        for ip in test_result.findall(f"./{namespace}target"):
-            ips.append(ip.text)
-        for ip in test_result.findall(f"./{namespace}target-address"):
-            ips.append(ip.text)
+        ips = [ip.text for ip in test_result.findall(f"./{namespace}target")] \
+            + [ip.text for ip in test_result.findall(f"./{namespace}target-address")]
 
         dupes = {}
         # run both rule, and rule-result in parallel so that we can get title
@@ -65,11 +61,8 @@ def get_findings(self, file, test):
                     "**Title:** `" + title + "`",
                 ],
             )
-            vulnerability_ids = []
-            for vulnerability_id in rule_result.findall(
-                f"./{namespace}ident[@system='http://cve.mitre.org']",
-            ):
-                vulnerability_ids.append(vulnerability_id.text)
+            vulnerability_ids = [vulnerability_id.text
+                                 for vulnerability_id in rule_result.findall(f"./{namespace}ident[@system='http://cve.mitre.org']")]
             # get severity.
             severity = (
                 rule_result.attrib.get("severity", "medium")
28 changes: 8 additions & 20 deletions dojo/tools/ort/parser.py
@@ -63,11 +63,8 @@ def get_items(self, evaluatedModel, test):
 
 
 def get_unresolved_rule_violations(rule_violations):
-    rule_violations_unresolved = []
-    for violation in rule_violations:
-        if is_rule_violation_unresolved(violation):
-            rule_violations_unresolved.append(violation)
-    return rule_violations_unresolved
+    return [violation for violation in rule_violations
+            if is_rule_violation_unresolved(violation)]
 
 
 def is_rule_violation_unresolved(rule_violation):
@@ -88,11 +85,9 @@ def find_in_dependency_tree(tree, package_id):
 
 
 def get_project_ids_for_package(dependency_trees, package_id):
-    project_ids = []
-    for project in dependency_trees:
-        if find_in_dependency_tree(project, package_id):
-            project_ids.append(project["pkg"])
-    return project_ids
+    return [project["pkg"]
+            for project in dependency_trees
+            if find_in_dependency_tree(project, package_id)]
 
 
 def get_name_id_for_package(packages, package__id):
@@ -107,14 +102,9 @@ def get_name_id_for_package(packages, package__id):
 def get_rule_violation_models(
     rule_violations_unresolved, packages, licenses, dependency_trees,
 ):
-    models = []
-    for violation in rule_violations_unresolved:
-        models.append(
-            get_rule_violation_model(
+    return [get_rule_violation_model(
                 violation, packages, licenses, dependency_trees,
-            ),
-        )
-    return models
+    ) for violation in rule_violations_unresolved]
 
 
 def get_rule_violation_model(
@@ -123,9 +113,7 @@
     project_ids = get_project_ids_for_package(
         dependency_trees, rule_violation_unresolved["pkg"],
     )
-    project_names = []
-    for id in project_ids:
-        project_names.append(get_name_id_for_package(packages, id))
+    project_names = [get_name_id_for_package(packages, id) for id in project_ids]
     package = find_package_by_id(packages, rule_violation_unresolved["pkg"])
     if "license" in rule_violation_unresolved:
         license_tmp = rule_violation_unresolved["license"]
5 changes: 1 addition & 4 deletions dojo/tools/solar_appscreener/parser.py
@@ -28,10 +28,7 @@ def get_findings(self, filename, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         items = []
         for row in csvarray:
5 changes: 1 addition & 4 deletions dojo/tools/veracode_sca/parser.py
@@ -146,10 +146,7 @@ def get_findings_csv(self, file, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         findings = []
         for row in csvarray:
10 changes: 1 addition & 9 deletions dojo/tools/whitehat_sentinel/parser.py
@@ -177,15 +177,7 @@ def _convert_attack_vectors_to_endpoints(
         Returns: A list of Defect Dojo Endpoints
         """
 
-        endpoints_list = []
-
-        # This should be in the Endpoint class should it not?
-        for attack_vector in attack_vectors:
-            endpoints_list.append(
-                Endpoint.from_uri(attack_vector["request"]["url"]),
-            )
-
-        return endpoints_list
+        return [Endpoint.from_uri(attack_vector["request"]["url"]) for attack_vector in attack_vectors]
 
     def _convert_whitehat_sentinel_vulns_to_dojo_finding(
         self, whitehat_sentinel_vulns: [dict], test: str,