diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index 571f4989ec2..d3483b4636d 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -327,12 +327,12 @@ def edit_meta_data(request, eid):
     endpoint = Endpoint.objects.get(id=eid)
     if request.method == "POST":
-        for key, value in request.POST.items():
+        for key, orig_value in request.POST.items():
             if key.startswith("cfv_"):
                 cfv_id = int(key.split("_")[1])
                 cfv = get_object_or_404(DojoMeta, id=cfv_id)
-                value = value.strip()
+                value = orig_value.strip()
                 if value:
                     cfv.value = value
                     cfv.save()
diff --git a/dojo/forms.py b/dojo/forms.py
index acf3546285b..cf9132d0b15 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -2386,8 +2386,10 @@ def get_jira_issue_template_dir_choices():

         for dirname in dirnames:
             if base_dir.startswith(settings.TEMPLATE_DIR_PREFIX):
-                base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):]
-            template_dir_list.append((os.path.join(base_dir, dirname), dirname))
+                clean_base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):]
+            else:
+                clean_base_dir = base_dir
+            template_dir_list.append((os.path.join(clean_base_dir, dirname), dirname))

     logger.debug("templates: %s", template_dir_list)
     return template_dir_list
diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py
index 41e91bc12de..b7d88fda0c2 100644
--- a/dojo/importers/default_importer.py
+++ b/dojo/importers/default_importer.py
@@ -155,9 +155,9 @@ def process_findings(
         logger.debug("starting import of %i parsed findings.", len(parsed_findings) if parsed_findings else 0)
         group_names_to_findings_dict = {}

-        for unsaved_finding in parsed_findings:
+        for non_clean_unsaved_finding in parsed_findings:
             # make sure the severity is something is digestible
-            unsaved_finding = self.sanitize_severity(unsaved_finding)
+            unsaved_finding = self.sanitize_severity(non_clean_unsaved_finding)
             # Filter on minimum severity if applicable
             if Finding.SEVERITIES[unsaved_finding.severity] > Finding.SEVERITIES[self.minimum_severity]:
                 # finding's severity is below the configured threshold : ignoring the finding
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index 290e13f6ac5..09552383c14 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -178,9 +178,9 @@ def process_findings(
         logger.debug("STEP 1: looping over findings from the reimported report and trying to match them to existing findings")
         deduplicationLogger.debug(f"Algorithm used for matching new findings to existing findings: {self.deduplication_algorithm}")

-        for unsaved_finding in parsed_findings:
+        for non_clean_unsaved_finding in parsed_findings:
             # make sure the severity is something is digestible
-            unsaved_finding = self.sanitize_severity(unsaved_finding)
+            unsaved_finding = self.sanitize_severity(non_clean_unsaved_finding)
             # Filter on minimum severity if applicable
             if Finding.SEVERITIES[unsaved_finding.severity] > Finding.SEVERITIES[self.minimum_severity]:
                 # finding's severity is below the configured threshold : ignoring the finding
diff --git a/dojo/product/views.py b/dojo/product/views.py
index e887938d450..0ef53640ce8 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -1241,11 +1241,11 @@ def add_meta_data(request, pid):
 def edit_meta_data(request, pid):
     prod = Product.objects.get(id=pid)
     if request.method == "POST":
-        for key, value in request.POST.items():
+        for key, orig_value in request.POST.items():
             if key.startswith("cfv_"):
                 cfv_id = int(key.split("_")[1])
                 cfv = get_object_or_404(DojoMeta, id=cfv_id)
-                value = value.strip()
+                value = orig_value.strip()
                 if value:
                     cfv.value = value
                     cfv.save()
diff --git a/dojo/search/views.py b/dojo/search/views.py
index 3e3a75923ca..fe3d7460ee4 100644
--- a/dojo/search/views.py
+++ b/dojo/search/views.py
@@ -500,15 +500,15 @@ def apply_tag_filters(qs, operators, skip_relations=False):

     # negative search based on not- prefix (not-tags, not-test-tags, not-engagement-tags, not-product-tags, etc)
-    for tag_filter in tag_filters:
-        tag_filter = "not-" + tag_filter
+    for base_tag_filter in tag_filters:
+        tag_filter = "not-" + base_tag_filter
         if tag_filter in operators:
             value = operators[tag_filter]
             value = ",".join(value)  # contains needs a single value
             qs = qs.exclude(**{"{}tags__name__contains".format(tag_filters[tag_filter.replace("not-", "")]): value})

-    for tag_filter in tag_filters:
-        tag_filter = "not-" + tag_filter
+    for base_tag_filter in tag_filters:
+        tag_filter = "not-" + base_tag_filter
         if tag_filter + "s" in operators:
             value = operators[tag_filter + "s"]
             qs = qs.exclude(**{"{}tags__name__in".format(tag_filters[tag_filter.replace("not-", "")]): value})

diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py
index 00124b3f6c4..4d225415996 100644
--- a/dojo/tools/hcl_appscan/parser.py
+++ b/dojo/tools/hcl_appscan/parser.py
@@ -102,7 +102,7 @@ def get_findings(self, file, test):
                     case "port":
                         port = self.xmltreehelper(item)
                         description = description + "Port:" + port + "\n"
-                finding = Finding(
+                prepared_finding = Finding(
                     title=title,
                     description=description,
                     severity=severity,
@@ -111,11 +111,11 @@ def get_findings(self, file, test):
                     dynamic_finding=True,
                     static_finding=False,
                 )
-                findings.append(finding)
+                findings.append(prepared_finding)
                 try:
-                    finding.unsaved_endpoints = []
+                    prepared_finding.unsaved_endpoints = []
                     endpoint = Endpoint(host=host, port=port)
-                    finding.unsaved_endpoints.append(endpoint)
+                    prepared_finding.unsaved_endpoints.append(endpoint)
                 except UnboundLocalError:
                     pass
         return findings
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index e49c61b852f..73caade2511 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
             raise ValueError(msg)
         for alert in alerts:
             dupe_key = alert["alert_id"]
-            alert = Finding(
+            uniq_alert = Finding(
                title=alert["title"],
                test=test,
                active=False if alert["status"] == "Closed" else True,
@@ -68,7 +68,7 @@ def get_findings(self, file, test):
                dynamic_finding=True,
                unique_id_from_tool=alert["alert_id"],
            )
-            duplicates[dupe_key] = alert
+            duplicates[dupe_key] = uniq_alert
             if dupe_key not in duplicates:
                 duplicates[dupe_key] = True
         return duplicates.values()
diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
index 053df04aa0e..f9429c723ca 100644
--- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
+++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -163,8 +163,8 @@ def get_item_set(vulnerability):
             cvss_v3 = cves[0]["cvss_v3_vector"]
             cvssv3 = CVSS3(cvss_v3).clean_vector()

-    for component_name, component in vulnerability.get("components", {}).items():
-        component_name, component_version = get_component_name_version(component_name)
+    for component_name_with_version, component in vulnerability.get("components", {}).items():
+        component_name, component_version = get_component_name_version(component_name_with_version)
         mitigation, impact = process_component(component)

         title = clean_title(vulnerability["summary"])
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index 9e8ccf91029..2a14ed3f96e 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -73,8 +73,8 @@ def get_findings(self, filename, test):
         if "urls" in data:
             curl = ""
             for url in data["urls"]:
-                for curl in url["urls"]:
-                    curl = f"{curl}\n"
+                for durl in url["urls"]:
+                    curl = f"{durl}\n"
             if curl:
                 test_description = f"{test_description}\n**URL's:**\n {curl}\n"

diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 59c0d2b855c..793139d2c92 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -367,12 +367,14 @@ def get_unique_items(
         qid = int(finding.vuln_id_from_tool)
         if qid in g_qid_list:
             index = g_qid_list.index(qid)
-            finding = get_glossary_item(
+            final_finding = get_glossary_item(
                 glossary[index], finding, is_info=True, enable_weakness=enable_weakness,
             )
+        else:
+            final_finding = finding
         if qid in ig_qid_list:
             index = ig_qid_list.index(qid)
-            findings[unique_id] = get_info_item(info_gathered[index], finding)
+            findings[unique_id] = get_info_item(info_gathered[index], final_finding)

     return findings
@@ -404,12 +406,14 @@ def get_items(
     ).items():
         if qid in g_qid_list:
             index = g_qid_list.index(qid)
-            finding = get_glossary_item(
+            final_finding = get_glossary_item(
                 glossary[index], finding, is_info=True, enable_weakness=enable_weakness,
             )
+        else:
+            final_finding = finding
         if qid in ig_qid_list:
             index = ig_qid_list.index(qid)
-            findings[qid] = get_info_item(info_gathered[index], finding)
+            findings[qid] = get_info_item(info_gathered[index], final_finding)

     return findings
diff --git a/ruff.toml b/ruff.toml
index 5d3eecbe4d5..0e603899a6b 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -52,11 +52,11 @@ select = [
     "LOG",
     "G001", "G002", "G1", "G2",
     "INP",
-    "SLOT",
     "PIE",
     "T20",
     "Q",
     "RSE",
+    "SLOT",
     "TID",
     "TCH",
     "INT",
@@ -67,7 +67,7 @@ select = [
     "PGH",
     "PLE",
     "PLR0915",
-    "PLW15",
+    "PLW1", "PLW2", "PLW3",
     "TRY003",
     "TRY004",
     "TRY2",
diff --git a/unittests/test_apiv2_scan_import_options.py b/unittests/test_apiv2_scan_import_options.py
index 8d296ca20f2..5f3b5e6707e 100644
--- a/unittests/test_apiv2_scan_import_options.py
+++ b/unittests/test_apiv2_scan_import_options.py
@@ -32,12 +32,14 @@ def setUp(self):
     def import_zap_scan(self, upload_empty_scan=False):
         with open("tests/zap_sample.xml", encoding="utf-8") as file:
             if upload_empty_scan:
-                file = SimpleUploadedFile("zap_sample.xml", self.EMPTY_ZAP_SCAN.encode("utf-8"))
+                tested_file = SimpleUploadedFile("zap_sample.xml", self.EMPTY_ZAP_SCAN.encode("utf-8"))
+            else:
+                tested_file = file

             self.payload = {
                 "engagement": 1,
                 "scan_type": "ZAP Scan",
-                "file": file,
+                "file": tested_file,
             }
             test_ids = list(Test.objects.values_list("id", flat=True))
             r = self.client.post(reverse("importscan-list"), self.payload)
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index e4c4ef361e6..e6f9d2ebcd5 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -519,8 +519,10 @@ def test_list_prefetch(self):

                 for value in values:
                     if not isinstance(value, int):
-                        value = value["id"]
-                    self.assertIn(value, objs["prefetch"][field])
+                        clean_value = value["id"]
+                    else:
+                        clean_value = value
+                    self.assertIn(clean_value, objs["prefetch"][field])

         # TODO: add schema check
@@ -603,12 +605,14 @@ def test_update(self):
             if key not in ["push_to_jira", "ssh", "password", "api_key"]:
                 # Convert data to sets to avoid problems with lists
                 if isinstance(value, list):
-                    value = set(value)
+                    clean_value = set(value)
+                else:
+                    clean_value = value
                 if isinstance(response.data[key], list):
                     response_data = set(response.data[key])
                 else:
                     response_data = response.data[key]
-                self.assertEqual(value, response_data)
+                self.assertEqual(clean_value, response_data)

         self.assertNotIn("push_to_jira", response.data)
         self.assertNotIn("ssh", response.data)
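Reviewer note: every rename in this patch is the same mechanical fix for ruff's PLW2901 (redefined-loop-name) check, which the ruff.toml hunk appears to enable by widening "PLW15" to the "PLW1"/"PLW2"/"PLW3" selectors. A minimal sketch of the before/after pattern; the `posted` dict and the print() call are hypothetical stand-ins, not code from the repository:

    # Before: the loop variable is rebound inside the body, so later code
    # cannot tell whether it holds the raw item or the derived copy:
    #
    #     for key, value in posted.items():
    #         value = value.strip()  # PLW2901: redefined-loop-name
    #
    # After: the raw item keeps the loop name and the derived value gets its
    # own binding, mirroring the edit_meta_data() changes above.
    posted = {"cfv_1": "  keep me  ", "cfv_2": "   "}  # hypothetical POST data
    for key, orig_value in posted.items():
        value = orig_value.strip()
        if value:  # entries that are empty after stripping are skipped
            print(f"{key}: {value!r}")

The rename-instead-of-rebind approach keeps the diffs small and behavior-neutral, which is why the same shape repeats across views, importers, parsers, and tests.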