diff --git a/docker/install_chrome_dependencies.py b/docker/install_chrome_dependencies.py
index 1b8f29585ea..b156c6c4516 100644
--- a/docker/install_chrome_dependencies.py
+++ b/docker/install_chrome_dependencies.py
@@ -57,7 +57,6 @@ def ldd(file_path):
             p.endswith(suffix)
             for suffix in ["-dbg", "-test", "tests", "-dev", "-mesa"]
         )
     ]
-    for p in packages:
-        missing_packages.append(p)
+    missing_packages += packages
     logger.info("missing_packages: " + (" ".join(missing_packages)))
diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 5bed7935f94..b40dd68c5da 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -1059,9 +1059,7 @@ class EngagementToFilesSerializer(serializers.Serializer):
     def to_representation(self, data):
         engagement = data.get("engagement_id")
         files = data.get("files")
-        new_files = []
-        for file in files:
-            new_files.append(
+        new_files = [
                 {
                     "id": file.id,
                     "file": "{site_url}/{file_access_url}".format(
@@ -1071,8 +1069,8 @@ def to_representation(self, data):
                         ),
                     ),
                     "title": file.title,
-                },
-            )
+                }
+            for file in files]
         new_data = {"engagement_id": engagement.id, "files": new_files}
         return new_data
 
diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py
index 631130b58da..ccca2264d74 100644
--- a/dojo/metrics/views.py
+++ b/dojo/metrics/views.py
@@ -601,10 +601,8 @@ def view_engineer(request, eid):
                                  tzinfo=timezone.get_current_timezone())],
         owner=user)
         for finding in ra.accepted_findings.all()]
-    closed_month = []
-    for f in closed_findings:
-        if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month:
-            closed_month.append(f)
+    closed_month = [f for f in closed_findings
+                    if f.mitigated and f.mitigated.year == now.year and f.mitigated.month == now.month]
 
     o_dict, open_count = count_findings(open_month)
     c_dict, closed_count = count_findings(closed_month)
@@ -618,7 +616,6 @@ def view_engineer(request, eid):
         day_list.append(now)
 
     q_objects = (Q(date=d) for d in day_list)
-    closed_week = []
     open_week = findings.filter(reduce(operator.or_, q_objects))
 
     accepted_week = [finding for ra in Risk_Acceptance.objects.filter(
@@ -627,9 +624,8 @@ def view_engineer(request, eid):
 
     q_objects = (Q(mitigated=d) for d in day_list)
     # closed_week= findings.filter(reduce(operator.or_, q_objects))
-    for f in closed_findings:
-        if f.mitigated and f.mitigated >= day_list[0]:
-            closed_week.append(f)
+    closed_week = [f for f in closed_findings
+                   if f.mitigated and f.mitigated >= day_list[0]]
 
     o_week_dict, open_week_count = count_findings(open_week)
     c_week_dict, closed_week_count = count_findings(closed_week)
diff --git a/dojo/models.py b/dojo/models.py
index 299abf4a88d..1799e6c7c3a 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -1243,9 +1243,7 @@ def get_product_type(self):
     def open_findings_list(self):
         findings = Finding.objects.filter(test__engagement__product=self,
                                           active=True)
-        findings_list = []
-        for i in findings:
-            findings_list.append(i.id)
+        findings_list = [i.id for i in findings]
         return findings_list
 
     @property
@@ -3318,10 +3316,7 @@ def get_references_with_links(self):
     def vulnerability_ids(self):
         # Get vulnerability ids from database and convert to list of strings
        vulnerability_ids_model = self.vulnerability_id_set.all()
-        vulnerability_ids = []
-        for vulnerability_id in vulnerability_ids_model:
-            vulnerability_ids.append(vulnerability_id.vulnerability_id)
-
+        vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
         # Synchronize the cve field with the unsaved_vulnerability_ids
         # We do this to be as flexible as possible to handle the fields until
         # the cve field is not needed anymore and can be removed.
@@ -3517,10 +3512,7 @@ def get_breadcrumbs(self):
     def vulnerability_ids(self):
         # Get vulnerability ids from database and convert to list of strings
         vulnerability_ids_model = self.vulnerability_id_template_set.all()
-        vulnerability_ids = []
-        for vulnerability_id in vulnerability_ids_model:
-            vulnerability_ids.append(vulnerability_id.vulnerability_id)
-
+        vulnerability_ids = [vulnerability_id.vulnerability_id for vulnerability_id in vulnerability_ids_model]
         # Synchronize the cve field with the unsaved_vulnerability_ids
         # We do this to be as flexible as possible to handle the fields until
         # the cve field is not needed anymore and can be removed.
diff --git a/dojo/notes/views.py b/dojo/notes/views.py
index a5947971b8a..3e9b86e25fe 100644
--- a/dojo/notes/views.py
+++ b/dojo/notes/views.py
@@ -184,9 +184,7 @@ def find_available_notetypes(finding, editing_note):
     notes = finding.notes.all()
     single_note_types = Note_Type.objects.filter(is_single=True, is_active=True).values_list("id", flat=True)
     multiple_note_types = Note_Type.objects.filter(is_single=False, is_active=True).values_list("id", flat=True)
-    available_note_types = []
-    for note_type_id in multiple_note_types:
-        available_note_types.append(note_type_id)
+    available_note_types = list(multiple_note_types)
     for note_type_id in single_note_types:
         for note in notes:
             if note_type_id == note.note_type_id:
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index c755249fad0..b4d0cbd5f5a 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -787,10 +787,7 @@ def first_vulnerability_id(finding):
 def additional_vulnerability_ids(finding):
     vulnerability_ids = finding.vulnerability_ids
     if vulnerability_ids and len(vulnerability_ids) > 1:
-        references = []
-        for vulnerability_id in vulnerability_ids[1:]:
-            references.append(vulnerability_id)
-        return references
+        return vulnerability_ids[1:]
     else:
         return None
 
diff --git a/dojo/tools/cobalt/parser.py b/dojo/tools/cobalt/parser.py
index 4ac5c43b731..a603afcfd94 100644
--- a/dojo/tools/cobalt/parser.py
+++ b/dojo/tools/cobalt/parser.py
@@ -28,7 +28,12 @@ def get_findings(self, filename, test):
             io.StringIO(content), delimiter=",", quotechar='"',
         )
         dupes = {}
-        for row in reader:
+
+        # FIXME double loop, could lead to performance problems if the number
+        # of issues is big
+        csvarray = list(reader)
+
+        for row in csvarray:
             finding = Finding(test=test)
             finding.title = (
                 row["Title"] if row["Title"][0] != "'" else row["Title"][1:]
diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py
index 158da541bd3..e8e46318723 100644
--- a/dojo/tools/drheader/parser.py
+++ b/dojo/tools/drheader/parser.py
@@ -51,6 +51,4 @@ def get_findings(self, filename, test):
                 items.append(self.return_finding(test=test, finding=finding, url=url))
             return items
         else:
-            for finding in data:
-                items.append(self.return_finding(test=test, finding=finding))
-            return items
+            return [self.return_finding(test=test, finding=finding) for finding in data]
diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
index 053df04aa0e..5bead3f5886 100644
--- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
+++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -156,9 +156,7 @@ def get_item_set(vulnerability):
     # Some entries have no CVE entries, despite they exist. Example CVE-2017-1000502.
     cves = get_cve(vulnerability)
     if len(cves) > 0:
-        for item in cves:
-            if item.get("cve"):
-                vulnerability_ids.append(item.get("cve"))
+        vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
         if "cvss_v3_vector" in cves[0]:
             cvss_v3 = cves[0]["cvss_v3_vector"]
             cvssv3 = CVSS3(cvss_v3).clean_vector()
diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py
index a1351dc0777..25424e371fb 100644
--- a/dojo/tools/jfrogxray/parser.py
+++ b/dojo/tools/jfrogxray/parser.py
@@ -83,9 +83,7 @@ def get_item(vulnerability, test):
     # CVE-2017-1000502.
     cves = vulnerability["component_versions"]["more_details"].get("cves", [])
     if len(cves) > 0:
-        for item in cves:
-            if item.get("cve"):
-                vulnerability_ids.append(item.get("cve"))
+        vulnerability_ids = [item.get("cve") for item in cves if item.get("cve")]
         # take only the first one for now, limitation of DD model.
         if len(cves[0].get("cwe", [])) > 0:
             cwe = decode_cwe_number(cves[0].get("cwe", [])[0])
diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py
index 5d91e5a315e..464511b1310 100644
--- a/dojo/tools/kiuwan/parser.py
+++ b/dojo/tools/kiuwan/parser.py
@@ -42,10 +42,7 @@ def get_findings(self, filename, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         dupes = {}
         for row in csvarray:
diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py
index fc07e281007..254fb69b193 100644
--- a/dojo/tools/npm_audit/parser.py
+++ b/dojo/tools/npm_audit/parser.py
@@ -53,7 +53,7 @@ def parse_json(self, json_output):
     def get_items(self, tree, test):
         items = {}
 
-        for key, node in tree.items():
+        for node in tree.values():
             item = get_item(node, test)
             unique_key = str(node["id"]) + str(node["module_name"])
             items[unique_key] = item
diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py
index 77d3b77c0fd..19388f7a462 100644
--- a/dojo/tools/npm_audit_7_plus/parser.py
+++ b/dojo/tools/npm_audit_7_plus/parser.py
@@ -82,7 +82,7 @@ def get_items(self, tree, test):
         """Return the individual items found in report."""
         items = {}
 
-        for key, node in tree.items():
+        for node in tree.values():
             item = get_item(node, tree, test)
             unique_key = item.title + item.severity
             items[unique_key] = item
@@ -140,9 +140,10 @@ def get_item(item_node, tree, test):
             and len(item_node["via"]) > 1):
         # we have a multiple CWE vuln which we will capture in the
         # vulnerability_ids and references
-        for vuln in item_node["via"][1:]:  # have to decide if str or object
-            if isinstance(vuln, dict):
-                references.append(vuln["url"])
+        references.extend([vuln["url"]
+                           for vuln in item_node["via"][1:]  # have to decide if str or object
+                           if isinstance(vuln, dict)
+                           ])
 
     if len(cwe):
         cwe = int(cwe.split("-")[1])
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index dc79eacaf65..0c98e07ad25 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -30,18 +30,14 @@ def get_findings(self, filename, test):
         filecontent = filename.read()
         if isinstance(filecontent, bytes):
             filecontent = filecontent.decode("utf-8")
-        data = []
         if filecontent == "" or len(filecontent) == 0:
             return []
         elif filecontent[0] == "[":
             content = json.loads(filecontent)
-            for template in content:
-                data.append(template)
+            data = content
         elif filecontent[0] == "{":
             file = filecontent.split("\n")
-            for line in file:
-                if line != "":
-                    data.append(json.loads(line))
+            data = [json.loads(line) for line in file if line != ""]
         dupes = {}
         for item in data:
             logger.debug("Item %s.", str(item))
diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py
index 186243526b8..c7054875b09 100644
--- a/dojo/tools/openscap/parser.py
+++ b/dojo/tools/openscap/parser.py
@@ -40,12 +40,8 @@ def get_findings(self, file, test):
         }
         # go to test result
         test_result = tree.find(f"./{namespace}TestResult")
-        ips = []
-        # append all target in a list.
-        for ip in test_result.findall(f"./{namespace}target"):
-            ips.append(ip.text)
-        for ip in test_result.findall(f"./{namespace}target-address"):
-            ips.append(ip.text)
+        ips = [ip.text for ip in test_result.findall(f"./{namespace}target")] \
+            + [ip.text for ip in test_result.findall(f"./{namespace}target-address")]
         dupes = {}
 
         # run both rule, and rule-result in parallel so that we can get title
@@ -65,11 +61,8 @@ def get_findings(self, file, test):
                     "**Title:** `" + title + "`",
                 ],
             )
-            vulnerability_ids = []
-            for vulnerability_id in rule_result.findall(
-                f"./{namespace}ident[@system='http://cve.mitre.org']",
-            ):
-                vulnerability_ids.append(vulnerability_id.text)
+            vulnerability_ids = [vulnerability_id.text
+                                 for vulnerability_id in rule_result.findall(f"./{namespace}ident[@system='http://cve.mitre.org']")]
             # get severity.
             severity = (
                 rule_result.attrib.get("severity", "medium")
diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py
index f314365ed88..0748eb70979 100644
--- a/dojo/tools/ort/parser.py
+++ b/dojo/tools/ort/parser.py
@@ -64,10 +64,8 @@ def get_items(self, evaluatedModel, test):
 
 
 def get_unresolved_rule_violations(rule_violations):
-    rule_violations_unresolved = []
-    for violation in rule_violations:
-        if is_rule_violation_unresolved(violation):
-            rule_violations_unresolved.append(violation)
+    rule_violations_unresolved = [violation for violation in rule_violations
+                                  if is_rule_violation_unresolved(violation)]
     return rule_violations_unresolved
 
 
@@ -92,10 +90,10 @@ def find_in_dependency_tree(tree, package_id):
 
 
 def get_project_ids_for_package(dependency_trees, package_id):
-    project_ids = []
-    for project in dependency_trees:
-        if find_in_dependency_tree(project, package_id):
-            project_ids.append(project["pkg"])
+    project_ids = [project["pkg"]
+                   for project in dependency_trees
+                   if find_in_dependency_tree(project, package_id)]
+
     return project_ids
 
 
@@ -111,13 +109,9 @@ def get_name_id_for_package(packages, package__id):
 def get_rule_violation_models(
     rule_violations_unresolved, packages, licenses, dependency_trees,
 ):
-    models = []
-    for violation in rule_violations_unresolved:
-        models.append(
-            get_rule_violation_model(
+    models = [get_rule_violation_model(
                 violation, packages, licenses, dependency_trees,
-            ),
-        )
+            ) for violation in rule_violations_unresolved]
     return models
 
 
@@ -127,9 +121,7 @@ def get_rule_violation_model(
     project_ids = get_project_ids_for_package(
         dependency_trees, rule_violation_unresolved["pkg"],
     )
-    project_names = []
-    for id in project_ids:
-        project_names.append(get_name_id_for_package(packages, id))
+    project_names = [get_name_id_for_package(packages, id) for id in project_ids]
     package = find_package_by_id(packages, rule_violation_unresolved["pkg"])
     if "license" in rule_violation_unresolved:
         license_tmp = rule_violation_unresolved["license"]
diff --git a/dojo/tools/solar_appscreener/parser.py b/dojo/tools/solar_appscreener/parser.py
index fc6110ebcd0..463f5d4f8da 100644
--- a/dojo/tools/solar_appscreener/parser.py
+++ b/dojo/tools/solar_appscreener/parser.py
@@ -28,10 +28,7 @@ def get_findings(self, filename, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         items = []
         for row in csvarray:
diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py
index 15de6393300..5ba429637fc 100644
--- a/dojo/tools/veracode_sca/parser.py
+++ b/dojo/tools/veracode_sca/parser.py
@@ -146,10 +146,7 @@ def get_findings_csv(self, file, test):
         reader = csv.DictReader(
             io.StringIO(content), delimiter=",", quotechar='"',
         )
-        csvarray = []
-
-        for row in reader:
-            csvarray.append(row)
+        csvarray = list(reader)
 
         findings = []
         for row in csvarray:
diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py
index eeb97ee8f5e..a0ff5068d83 100644
--- a/dojo/tools/whitehat_sentinel/parser.py
+++ b/dojo/tools/whitehat_sentinel/parser.py
@@ -177,14 +177,7 @@ def _convert_attack_vectors_to_endpoints(
 
         Returns: A list of Defect Dojo Endpoints
         """
-        endpoints_list = []
-
-        # This should be in the Endpoint class should it not?
-        for attack_vector in attack_vectors:
-            endpoints_list.append(
-                Endpoint.from_uri(attack_vector["request"]["url"]),
-            )
-
+        endpoints_list = [Endpoint.from_uri(attack_vector["request"]["url"]) for attack_vector in attack_vectors]
         return endpoints_list
 
     def _convert_whitehat_sentinel_vulns_to_dojo_finding(
diff --git a/dojo/utils.py b/dojo/utils.py
index 7d4d59907f5..23c1823e60b 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -225,9 +225,7 @@ def is_deduplication_on_engagement_mismatch(new_finding, to_duplicate_finding):
 
 
 def get_endpoints_as_url(finding):
-    list1 = []
-    for e in finding.endpoints.all():
-        list1.append(hyperlink.parse(str(e)))
+    list1 = [hyperlink.parse(str(e)) for e in finding.endpoints.all()]
     return list1
 
 
@@ -885,9 +883,7 @@ def get_punchcard_data(objs, start_date, weeks, view="Finding"):
 
 
 def get_week_data(week_start_date, tick, day_counts):
-    data = []
-    for i in range(len(day_counts)):
-        data.append([tick, i, day_counts[i]])
+    data = [[tick, i, day_counts[i]] for i in range(len(day_counts))]
     label = [tick, week_start_date.strftime("<span class='small'>%m/%d<br/>%Y</span>")]
     return data, label
 
@@ -2144,7 +2140,7 @@ def add_error_message_to_response(message):
 
 def add_field_errors_to_response(form):
     if form and get_current_request():
-        for field, error in form.errors.items():
+        for error in form.errors.values():
             add_error_message_to_response(error)
 
 
@@ -2267,10 +2263,7 @@ def get_file_images(obj, return_objects=False):
 
 def get_enabled_notifications_list():
     # Alerts need to enabled by default
-    enabled = ["alert"]
-    for choice in NOTIFICATION_CHOICES:
-        if get_system_setting(f"enable_{choice[0]}_notifications"):
-            enabled.append(choice[0])
+    enabled = ["alert"] + [choice[0] for choice in NOTIFICATION_CHOICES if get_system_setting(f"enable_{choice[0]}_notifications")]
     return enabled
 
 
diff --git a/ruff.toml b/ruff.toml
index ade1fc7c345..36480b36462 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -73,6 +73,7 @@ select = [
     "NPY",
     "FAST",
     "AIR",
+    "PERF",
     "FURB",
     "RUF1","RUF2",
     "RUF001","RUF002", "RUF003", "RUF005",
diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py
index 9b2279cfa75..e610c787764 100644
--- a/unittests/tools/test_aqua_parser.py
+++ b/unittests/tools/test_aqua_parser.py
@@ -80,10 +80,7 @@ def test_aqua_parser_for_aqua_severity(self):
         with open("unittests/scans/aqua/vulns_with_aqua_severity.json") as testfile:
             parser = AquaParser()
             findings = parser.get_findings(testfile, Test())
-            sevs = []
-
-            for finding in findings:
-                sevs.append(finding.severity)
+            sevs = [finding.severity for finding in findings]
 
             d = Counter(sevs)
             self.assertEqual(1, d["Critical"])