Skip to content

Commit

Permalink
Ruff: add PERF
Browse files Browse the repository at this point in the history
  • Loading branch information
kiblik committed Aug 15, 2024
1 parent e9da652 commit 8806f11
Show file tree
Hide file tree
Showing 22 changed files with 53 additions and 117 deletions.
3 changes: 1 addition & 2 deletions docker/install_chrome_dependencies.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,6 @@ def ldd(file_path):
p.endswith(suffix) for suffix in ["-dbg", "-test", "tests", "-dev", "-mesa"]
)
]
for p in packages:
missing_packages.append(p)
missing_packages += packages

logger.info("missing_packages: " + (" ".join(missing_packages)))
8 changes: 3 additions & 5 deletions dojo/api_v2/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1059,9 +1059,7 @@ class EngagementToFilesSerializer(serializers.Serializer):
def to_representation(self, data):
engagement = data.get("engagement_id")
files = data.get("files")
new_files = []
for file in files:
new_files.append(
new_files = [
{
"id": file.id,
"file": "{site_url}/{file_access_url}".format(
Expand All @@ -1071,8 +1069,8 @@ def to_representation(self, data):
),
),
"title": file.title,
},
)
}
for file in files]
new_data = {"engagement_id": engagement.id, "files": new_files}
return new_data

Expand Down
12 changes: 4 additions & 8 deletions dojo/metrics/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -601,10 +601,8 @@ def view_engineer(request, eid):
tzinfo=timezone.get_current_timezone())],
owner=user)
for finding in ra.accepted_findings.all()]
closed_month = []
for f in closed_findings:
if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month:
closed_month.append(f)
closed_month = [f for f in closed_findings
if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month]

o_dict, open_count = count_findings(open_month)
c_dict, closed_count = count_findings(closed_month)
Expand All @@ -618,7 +616,6 @@ def view_engineer(request, eid):
day_list.append(now)

q_objects = (Q(date=d) for d in day_list)
closed_week = []
open_week = findings.filter(reduce(operator.or_, q_objects))

accepted_week = [finding for ra in Risk_Acceptance.objects.filter(
Expand All @@ -627,9 +624,8 @@ def view_engineer(request, eid):

q_objects = (Q(mitigated=d) for d in day_list)
# closed_week= findings.filter(reduce(operator.or_, q_objects))
for f in closed_findings:
if f.mitigated and f.mitigated >= day_list[0]:
closed_week.append(f)
closed_week = [f for f in closed_findings
if f.mitigated and f.mitigated >= day_list[0]]

o_week_dict, open_week_count = count_findings(open_week)
c_week_dict, closed_week_count = count_findings(closed_week)
Expand Down
14 changes: 3 additions & 11 deletions dojo/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1243,9 +1243,7 @@ def get_product_type(self):
def open_findings_list(self):
findings = Finding.objects.filter(test__engagement__product=self,
active=True)
findings_list = []
for i in findings:
findings_list.append(i.id)
findings_list = [i.id for i in findings]
return findings_list

@property
Expand Down Expand Up @@ -3318,10 +3316,7 @@ def get_references_with_links(self):
def vulnerability_ids(self):
# Get vulnerability ids from database and convert to list of strings
vulnerability_ids_model = self.vulnerability_id_set.all()
vulnerability_ids = []
for vulnerability_id in vulnerability_ids_model:
vulnerability_ids.append(vulnerability_id.vulnerability_id)

vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
# Synchronize the cve field with the unsaved_vulnerability_ids
# We do this to be as flexible as possible to handle the fields until
# the cve field is not needed anymore and can be removed.
Expand Down Expand Up @@ -3517,10 +3512,7 @@ def get_breadcrumbs(self):
def vulnerability_ids(self):
# Get vulnerability ids from database and convert to list of strings
vulnerability_ids_model = self.vulnerability_id_template_set.all()
vulnerability_ids = []
for vulnerability_id in vulnerability_ids_model:
vulnerability_ids.append(vulnerability_id.vulnerability_id)

vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
# Synchronize the cve field with the unsaved_vulnerability_ids
# We do this to be as flexible as possible to handle the fields until
# the cve field is not needed anymore and can be removed.
Expand Down
4 changes: 1 addition & 3 deletions dojo/notes/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,9 +184,7 @@ def find_available_notetypes(finding, editing_note):
notes = finding.notes.all()
single_note_types = Note_Type.objects.filter(is_single=True, is_active=True).values_list("id", flat=True)
multiple_note_types = Note_Type.objects.filter(is_single=False, is_active=True).values_list("id", flat=True)
available_note_types = []
for note_type_id in multiple_note_types:
available_note_types.append(note_type_id)
available_note_types = list(multiple_note_types)
for note_type_id in single_note_types:
for note in notes:
if note_type_id == note.note_type_id:
Expand Down
5 changes: 1 addition & 4 deletions dojo/templatetags/display_tags.py
Original file line number Diff line number Diff line change
Expand Up @@ -787,10 +787,7 @@ def first_vulnerability_id(finding):
def additional_vulnerability_ids(finding):
vulnerability_ids = finding.vulnerability_ids
if vulnerability_ids and len(vulnerability_ids) > 1:
references = []
for vulnerability_id in vulnerability_ids[1:]:
references.append(vulnerability_id)
return references
return vulnerability_ids[1:]
else:
return None

Expand Down
7 changes: 6 additions & 1 deletion dojo/tools/cobalt/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,12 @@ def get_findings(self, filename, test):
io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
for row in reader:

# FIXME double loop, could lead to performance problems if the number of
# issues is big
csvarray = list(reader)

for row in csvarray:
finding = Finding(test=test)
finding.title = (
row["Title"] if row["Title"][0] != "'" else row["Title"][1:]
Expand Down
4 changes: 1 addition & 3 deletions dojo/tools/drheader/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,4 @@ def get_findings(self, filename, test):
items.append(self.return_finding(test=test, finding=finding, url=url))
return items
else:
for finding in data:
items.append(self.return_finding(test=test, finding=finding))
return items
return [self.return_finding(test=test, finding=finding) for finding in data]
4 changes: 1 addition & 3 deletions dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,9 +156,7 @@ def get_item_set(vulnerability):
# Some entries have no CVE entries, even though they exist. Example CVE-2017-1000502.
cves = get_cve(vulnerability)
if len(cves) > 0:
for item in cves:
if item.get("cve"):
vulnerability_ids.append(item.get("cve"))
vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
if "cvss_v3_vector" in cves[0]:
cvss_v3 = cves[0]["cvss_v3_vector"]
cvssv3 = CVSS3(cvss_v3).clean_vector()
Expand Down
4 changes: 1 addition & 3 deletions dojo/tools/jfrogxray/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,7 @@ def get_item(vulnerability, test):
# CVE-2017-1000502.
cves = vulnerability["component_versions"]["more_details"].get("cves", [])
if len(cves) > 0:
for item in cves:
if item.get("cve"):
vulnerability_ids.append(item.get("cve"))
vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
# take only the first one for now, limitation of DD model.
if len(cves[0].get("cwe", [])) > 0:
cwe = decode_cwe_number(cves[0].get("cwe", [])[0])
Expand Down
5 changes: 1 addition & 4 deletions dojo/tools/kiuwan/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,7 @@ def get_findings(self, filename, test):
reader = csv.DictReader(
io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []

for row in reader:
csvarray.append(row)
csvarray = list(reader)

dupes = {}
for row in csvarray:
Expand Down
2 changes: 1 addition & 1 deletion dojo/tools/npm_audit/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def parse_json(self, json_output):
def get_items(self, tree, test):
items = {}

for key, node in tree.items():
for node in tree.values():
item = get_item(node, test)
unique_key = str(node["id"]) + str(node["module_name"])
items[unique_key] = item
Expand Down
9 changes: 5 additions & 4 deletions dojo/tools/npm_audit_7_plus/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def get_items(self, tree, test):
"""Return the individual items found in report."""
items = {}

for key, node in tree.items():
for node in tree.values():
item = get_item(node, tree, test)
unique_key = item.title + item.severity
items[unique_key] = item
Expand Down Expand Up @@ -140,9 +140,10 @@ def get_item(item_node, tree, test):
and len(item_node["via"]) > 1):
# we have a multiple CWE vuln which we will capture in the
# vulnerability_ids and references
for vuln in item_node["via"][1:]: # have to decide if str or object
if isinstance(vuln, dict):
references.append(vuln["url"])
references.extend([vuln["url"]
for vuln in item_node["via"][1:] # have to decide if str or object
if isinstance(vuln, dict)
])

if len(cwe):
cwe = int(cwe.split("-")[1])
Expand Down
8 changes: 2 additions & 6 deletions dojo/tools/nuclei/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,18 +30,14 @@ def get_findings(self, filename, test):
filecontent = filename.read()
if isinstance(filecontent, bytes):
filecontent = filecontent.decode("utf-8")
data = []
if filecontent == "" or len(filecontent) == 0:
return []
elif filecontent[0] == "[":
content = json.loads(filecontent)
for template in content:
data.append(template)
data = content
elif filecontent[0] == "{":
file = filecontent.split("\n")
for line in file:
if line != "":
data.append(json.loads(line))
data = [json.loads(line) for line in file if line != ""]
dupes = {}
for item in data:
logger.debug("Item %s.", str(item))
Expand Down
15 changes: 4 additions & 11 deletions dojo/tools/openscap/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,8 @@ def get_findings(self, file, test):
}
# go to test result
test_result = tree.find(f"./{namespace}TestResult")
ips = []
# append all target in a list.
for ip in test_result.findall(f"./{namespace}target"):
ips.append(ip.text)
for ip in test_result.findall(f"./{namespace}target-address"):
ips.append(ip.text)
ips = [ip.text for ip in test_result.findall(f"./{namespace}target")] \
+ [ip.text for ip in test_result.findall(f"./{namespace}target-address")]

dupes = {}
# run both rule, and rule-result in parallel so that we can get title
Expand All @@ -65,11 +61,8 @@ def get_findings(self, file, test):
"**Title:** `" + title + "`",
],
)
vulnerability_ids = []
for vulnerability_id in rule_result.findall(
f"./{namespace}ident[@system='http://cve.mitre.org']",
):
vulnerability_ids.append(vulnerability_id.text)
vulnerability_ids = [vulnerability_id.text
for vulnerability_id in rule_result.findall(f"./{namespace}ident[@system='http://cve.mitre.org']")]
# get severity.
severity = (
rule_result.attrib.get("severity", "medium")
Expand Down
26 changes: 9 additions & 17 deletions dojo/tools/ort/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,10 +64,8 @@ def get_items(self, evaluatedModel, test):


def get_unresolved_rule_violations(rule_violations):
rule_violations_unresolved = []
for violation in rule_violations:
if is_rule_violation_unresolved(violation):
rule_violations_unresolved.append(violation)
rule_violations_unresolved = [violation for violation in rule_violations
if is_rule_violation_unresolved(violation)]
return rule_violations_unresolved


Expand All @@ -92,10 +90,10 @@ def find_in_dependency_tree(tree, package_id):


def get_project_ids_for_package(dependency_trees, package_id):
project_ids = []
for project in dependency_trees:
if find_in_dependency_tree(project, package_id):
project_ids.append(project["pkg"])
project_ids = [project["pkg"]
for project in dependency_trees
if find_in_dependency_tree(project, package_id)]

return project_ids


Expand All @@ -111,13 +109,9 @@ def get_name_id_for_package(packages, package__id):
def get_rule_violation_models(
rule_violations_unresolved, packages, licenses, dependency_trees,
):
models = []
for violation in rule_violations_unresolved:
models.append(
get_rule_violation_model(
models = [get_rule_violation_model(
violation, packages, licenses, dependency_trees,
),
)
) for violation in rule_violations_unresolved]
return models


Expand All @@ -127,9 +121,7 @@ def get_rule_violation_model(
project_ids = get_project_ids_for_package(
dependency_trees, rule_violation_unresolved["pkg"],
)
project_names = []
for id in project_ids:
project_names.append(get_name_id_for_package(packages, id))
project_names = [get_name_id_for_package(packages, id) for id in project_ids]
package = find_package_by_id(packages, rule_violation_unresolved["pkg"])
if "license" in rule_violation_unresolved:
license_tmp = rule_violation_unresolved["license"]
Expand Down
5 changes: 1 addition & 4 deletions dojo/tools/solar_appscreener/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,7 @@ def get_findings(self, filename, test):
reader = csv.DictReader(
io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []

for row in reader:
csvarray.append(row)
csvarray = list(reader)

items = []
for row in csvarray:
Expand Down
5 changes: 1 addition & 4 deletions dojo/tools/veracode_sca/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,10 +146,7 @@ def get_findings_csv(self, file, test):
reader = csv.DictReader(
io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []

for row in reader:
csvarray.append(row)
csvarray = list(reader)

findings = []
for row in csvarray:
Expand Down
9 changes: 1 addition & 8 deletions dojo/tools/whitehat_sentinel/parser.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,14 +177,7 @@ def _convert_attack_vectors_to_endpoints(
Returns: A list of Defect Dojo Endpoints
"""

endpoints_list = []

# This should be in the Endpoint class should it not?
for attack_vector in attack_vectors:
endpoints_list.append(
Endpoint.from_uri(attack_vector["request"]["url"]),
)

endpoints_list = [Endpoint.from_uri(attack_vector["request"]["url"]) for attack_vector in attack_vectors]
return endpoints_list

def _convert_whitehat_sentinel_vulns_to_dojo_finding(
Expand Down
15 changes: 4 additions & 11 deletions dojo/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,9 +225,7 @@ def is_deduplication_on_engagement_mismatch(new_finding, to_duplicate_finding):


def get_endpoints_as_url(finding):
list1 = []
for e in finding.endpoints.all():
list1.append(hyperlink.parse(str(e)))
list1 = [hyperlink.parse(str(e)) for e in finding.endpoints.all()]
return list1


Expand Down Expand Up @@ -885,9 +883,7 @@ def get_punchcard_data(objs, start_date, weeks, view="Finding"):


def get_week_data(week_start_date, tick, day_counts):
data = []
for i in range(len(day_counts)):
data.append([tick, i, day_counts[i]])
data = [[tick, i, day_counts[i]] for i in range(len(day_counts))]
label = [tick, week_start_date.strftime("<span class='small'>%m/%d<br/>%Y</span>")]
return data, label

Expand Down Expand Up @@ -2144,7 +2140,7 @@ def add_error_message_to_response(message):

def add_field_errors_to_response(form):
if form and get_current_request():
for field, error in form.errors.items():
for error in form.errors.values():
add_error_message_to_response(error)


Expand Down Expand Up @@ -2267,10 +2263,7 @@ def get_file_images(obj, return_objects=False):

def get_enabled_notifications_list():
# Alerts need to enabled by default
enabled = ["alert"]
for choice in NOTIFICATION_CHOICES:
if get_system_setting(f"enable_{choice[0]}_notifications"):
enabled.append(choice[0])
enabled = ["alert"] + [choice[0] for choice in NOTIFICATION_CHOICES if get_system_setting(f"enable_{choice[0]}_notifications")]
return enabled


Expand Down
Loading

0 comments on commit 8806f11

Please sign in to comment.