diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index 1e1882d4135..421b3bcd20f 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -33,4 +33,4 @@ jobs:
         run: pip install -r requirements-lint.txt

       - name: Run Ruff Linter
-        run: ruff dojo
\ No newline at end of file
+        run: ruff .
\ No newline at end of file
diff --git a/tests/Import_scanner_test.py b/tests/Import_scanner_test.py
index f0d1eb6d537..fd5ee3af0b6 100644
--- a/tests/Import_scanner_test.py
+++ b/tests/Import_scanner_test.py
@@ -17,7 +17,7 @@ def setUp(self):
         if os.path.isdir(self.repo_path):
             shutil.rmtree(self.repo_path)
         os.mkdir(self.repo_path)
-        scan_types = git.Repo.clone_from('https://github.com/DefectDojo/sample-scan-files', self.repo_path)
+        git.Repo.clone_from('https://github.com/DefectDojo/sample-scan-files', self.repo_path)
         self.remove_items = ['__init__.py', '__init__.pyc', 'factory.py', 'factory.pyc', 'factory.py',
                             'LICENSE', 'README.md', '.gitignore', '.git', '__pycache__']
         tool_path = dir_path[:-5] + 'dojo/tools'
diff --git a/tests/close_old_findings_dedupe_test.py b/tests/close_old_findings_dedupe_test.py
index ae0e3dafc3a..08d9b462ae1 100644
--- a/tests/close_old_findings_dedupe_test.py
+++ b/tests/close_old_findings_dedupe_test.py
@@ -26,12 +26,11 @@ class CloseOldDedupeTest(BaseTestCase):
     # --------------------------------------------------------------------------------------------------------
     def setUp(self):
         super().setUp()
-        self.relative_path = dir_path = os.path.dirname(os.path.realpath(__file__))
+        self.relative_path = os.path.dirname(os.path.realpath(__file__))

     def check_nb_duplicates(self, expected_number_of_duplicates):
         logger.debug("checking duplicates...")
         driver = self.driver
-        retries = 0
         for i in range(0, 18):
             time.sleep(5)  # wait bit for celery dedupe task which can be slow on travis
             self.goto_all_findings_list(driver)
diff --git a/tests/close_old_findings_test.py b/tests/close_old_findings_test.py
index b776c892181..c2b2fb4c47c 100644
--- a/tests/close_old_findings_test.py
+++ b/tests/close_old_findings_test.py
@@ -24,7 +24,7 @@ class CloseOldTest(BaseTestCase):
     # --------------------------------------------------------------------------------------------------------
     def setUp(self):
         super().setUp()
-        self.relative_path = dir_path = os.path.dirname(os.path.realpath(__file__))
+        self.relative_path = os.path.dirname(os.path.realpath(__file__))

     @on_exception_html_source_logger
     def test_delete_findings(self):
diff --git a/tests/dedupe_test.py b/tests/dedupe_test.py
index 1a75f34f2a4..1199159dba2 100644
--- a/tests/dedupe_test.py
+++ b/tests/dedupe_test.py
@@ -24,12 +24,11 @@ class DedupeTest(BaseTestCase):
     # --------------------------------------------------------------------------------------------------------
     def setUp(self):
         super().setUp()
-        self.relative_path = dir_path = os.path.dirname(os.path.realpath(__file__))
+        self.relative_path = os.path.dirname(os.path.realpath(__file__))

     def check_nb_duplicates(self, expected_number_of_duplicates):
         logger.debug("checking duplicates...")
         driver = self.driver
-        retries = 0
         for i in range(0, 18):
             time.sleep(5)  # wait bit for celery dedupe task which can be slow on travis
             self.goto_all_findings_list(driver)
diff --git a/tests/false_positive_history_test.py b/tests/false_positive_history_test.py
index 5b0b36a244d..5e37022b73e 100644
--- a/tests/false_positive_history_test.py
+++ b/tests/false_positive_history_test.py
@@ -96,7 +96,6 @@ def bulk_edit(self, finding_url, status_id):
         driver.find_element(By.CSS_SELECTOR, "input[type='submit']").click()

     def test_retroactive_edit_finding(self):
-        driver = self.driver
         # Create two equal findings on different engagements
         finding_1 = self.create_finding(
             product_name='QA Test',
@@ -125,7 +124,6 @@ def test_retroactive_edit_finding(self):
         self.assert_is_active(finding_2)

     def test_retroactive_bulk_edit_finding(self):
-        driver = self.driver
         # Create two equal findings on different engagements
         finding_1 = self.create_finding(
             product_name='QA Test',
diff --git a/tests/finding_test.py b/tests/finding_test.py
index 57f512362c7..d9e305b8b3e 100644
--- a/tests/finding_test.py
+++ b/tests/finding_test.py
@@ -312,7 +312,7 @@ def test_simple_accept_finding(self):
         # Select and click on the particular finding to edit
         driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click()
         # Get the status of the current endpoint
-        pre_status = driver.find_element(By.XPATH, '//*[@id="vuln_endpoints"]/tbody/tr/td[3]').text
+        driver.find_element(By.XPATH, '//*[@id="vuln_endpoints"]/tbody/tr/td[3]').text
         # Click on the 'dropdownMenu1 button'
         driver.find_element(By.ID, "dropdownMenu1").click()
         # Click on `Close Finding`
@@ -336,7 +336,7 @@ def test_unaccept_finding(self):
         # Select and click on the particular finding to edit
         driver.find_element(By.LINK_TEXT, "App Vulnerable to XSS").click()
         # Get the status of the current endpoint
-        pre_status = driver.find_element(By.XPATH, '//*[@id="remd_endpoints"]/tbody/tr/td[3]').text
+        driver.find_element(By.XPATH, '//*[@id="remd_endpoints"]/tbody/tr/td[3]').text
         # Click on the 'dropdownMenu1 button'
         driver.find_element(By.ID, "dropdownMenu1").click()
         # Click on `Close Finding`
diff --git a/tests/report_builder_test.py b/tests/report_builder_test.py
index eaa0cae3db0..c7eca1dc3b1 100644
--- a/tests/report_builder_test.py
+++ b/tests/report_builder_test.py
@@ -31,7 +31,7 @@ def enter_values(self, driver):
                 for field in inputs:
                     field.send_keys('cover words')
             if 'wysiwyg-content' in class_names:
-                content = widget.find_element(By.CLASS_NAME, "editor").send_keys('wysiwyg')
+                widget.find_element(By.CLASS_NAME, "editor").send_keys('wysiwyg')

     def generate_HTML_report(self):
         driver = self.driver
diff --git a/tests/search_test.py b/tests/search_test.py
index 37f1fe0d1ff..c48a2fa71ac 100644
--- a/tests/search_test.py
+++ b/tests/search_test.py
@@ -6,9 +6,6 @@

 class SearchTests(BaseTestCase):

-    def test_login(self):
-        driver = self.driver
-
     def test_search(self):
         # very basic search test to see if it doesn't 500
         driver = self.goto_some_page()
diff --git a/tests/zap.py b/tests/zap.py
index 8fe45611ca5..18a5d7b6f8a 100755
--- a/tests/zap.py
+++ b/tests/zap.py
@@ -19,7 +19,7 @@ class Main:

     try:
         s.connect((address, port))
-    except socket.error as e:
+    except socket.error:
         print("Error connecting to ZAP, exiting.")
         sys.exit(0)

diff --git a/unittests/authorization/test_authorization_tags.py b/unittests/authorization/test_authorization_tags.py
index 5c98971aff0..0ee3a31f3c1 100644
--- a/unittests/authorization/test_authorization_tags.py
+++ b/unittests/authorization/test_authorization_tags.py
@@ -46,7 +46,7 @@ def test_has_object_permission_has_permission(self, mock_current_user, mock_has_

     def test_has_object_permission_wrong_permission(self):
         with self.assertRaises(KeyError):
-            result = has_object_permission(self.product_type, 'Test')
+            has_object_permission(self.product_type, 'Test')

     @patch('dojo.templatetags.authorization_tags.configuration_permission')
     @patch('crum.get_current_user')
diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py
index 7cb89799618..e3302d0df61 100644
--- a/unittests/test_apply_finding_template.py
+++ b/unittests/test_apply_finding_template.py
@@ -161,7 +161,7 @@ def test_apply_template_to_finding_with_data_saves_success(self):
         test_mitigation = 'template mitigation'
         test_impact = 'template impact'

-        result = self.make_request(True, 1, 1,
+        self.make_request(True, 1, 1,
                                    {'title': test_title,
                                     'cwe': test_cwe,
                                     'severity': test_severity,
@@ -191,11 +191,11 @@ def test_unauthorized_apply_template_to_finding_fails(self):

     def test_apply_template_to_finding_with_illegal_finding_fails(self):
         with self.assertRaises(Exception):
-            result = self.make_request(True, None, 1)
+            self.make_request(True, None, 1)

     def test_apply_template_to_finding_with_illegal_template_fails(self):
         with self.assertRaises(Exception):
-            result = self.make_request(True, 1, None)
+            self.make_request(True, 1, None)

     def test_apply_template_to_finding_with_no_data_returns_view_success(self):
         result = self.make_request(True, 1, 1, None)
diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py
index e42344ad31e..8937104d92d 100644
--- a/unittests/test_deduplication_logic.py
+++ b/unittests/test_deduplication_logic.py
@@ -214,7 +214,7 @@ def test_identical_except_filepath_legacy(self):
         finding_new, finding_24 = self.copy_and_reset_finding(id=24)
         finding_new.file_path = '/dev/null'

-        finding_22 = Finding.objects.get(id=22)
+        Finding.objects.get(id=22)

         finding_new.save(dedupe_option=True)

diff --git a/unittests/test_endpoint_meta_import.py b/unittests/test_endpoint_meta_import.py
index b7b25542a0f..e0ec437c42f 100644
--- a/unittests/test_endpoint_meta_import.py
+++ b/unittests/test_endpoint_meta_import.py
@@ -27,7 +27,7 @@ def test_endpoint_meta_import_endpoint_create_tag_create_meta_create(self):
         meta_count_before = self.db_dojo_meta_count()

         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=True, create_dojo_meta=True)

         self.assertEqual(endpoint_count_before + 3, self.db_endpoint_count())
@@ -36,20 +36,20 @@ def test_endpoint_meta_import_endpoint_create_tag_create_meta_create(self):
     def test_endpoint_meta_import_endpoint_missing_hostname(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_no_hostname, create_endpoints=True, create_tags=True, create_dojo_meta=True,
                 expected_http_status_code=400)

     def test_endpoint_meta_import_tag_remove_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
         endpoint_tag_count_before = self.db_endpoint_tag_count()
         # Import again with one column missing
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_removed, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # See that nothing has been removed
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
@@ -58,14 +58,14 @@ def test_endpoint_meta_import_tag_added_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
         endpoint_tag_count_before = self.db_endpoint_tag_count()
         # Import again with one column added
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_added, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # See that nothing has been removed
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
@@ -75,7 +75,7 @@ def test_endpoint_meta_import_tag_added_column(self):
     def test_endpoint_meta_import_tag_changed_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
@@ -85,7 +85,7 @@ def test_endpoint_meta_import_tag_changed_column(self):
         human_resource_tag = endpoint['tags'][endpoint['tags'].index('team:human resources')]
         # Import again with one column missing
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_changed, create_endpoints=True, create_tags=True, create_dojo_meta=False)
         # See that nothing has been added or removed
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
@@ -99,14 +99,14 @@ def test_endpoint_meta_import_tag_changed_column(self):
     def test_endpoint_meta_import_meta_remove_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
         meta_count_before = self.db_dojo_meta_count()
         # Import again with one column missing
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_removed, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # See that nothing has been removed
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
@@ -115,14 +115,14 @@ def test_endpoint_meta_import_meta_added_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
         meta_count_before = self.db_dojo_meta_count()
         # Import again with one column added
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_added, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # 1 meta x 3 endpoints = 3 tags
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
@@ -131,7 +131,7 @@ def test_endpoint_meta_import_meta_changed_column(self):
         # Import full scan first
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=3):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_full, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # Record numbers
         endpoint_count_before = self.db_endpoint_count()
@@ -141,7 +141,7 @@ def test_endpoint_meta_import_meta_changed_column(self):
         meta_value = self.get_endpoints_meta_api(endpoint_id, 'team')['results'][0]['value']
         # Import again with one column missing
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(
+            self.endpoint_meta_import_scan_with_params(
                 self.meta_import_updated_changed, create_endpoints=True, create_tags=False, create_dojo_meta=True)
         # See that nothing has been added or removed
         self.assertEqual(endpoint_count_before, self.db_endpoint_count())
diff --git a/unittests/test_factory.py b/unittests/test_factory.py
index f5bf436a475..a1e1d20fd69 100644
--- a/unittests/test_factory.py
+++ b/unittests/test_factory.py
@@ -9,25 +9,25 @@ def test_get_parser(self):
             scan_type = "Acunetix Scan"
             testfile = open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml")
             parser = get_parser(scan_type)
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()
         with self.subTest(scan_type="Anchore Engine Scan"):
             scan_type = "Anchore Engine Scan"
             testfile = open(get_unit_tests_path() + "/scans/anchore_engine/one_vuln.json")
             parser = get_parser(scan_type)
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()
         with self.subTest(scan_type="Tenable Scan"):
             scan_type = "Tenable Scan"
             testfile = open(get_unit_tests_path() + "/scans/tenable/nessus/nessus_v_unknown.xml")
             parser = get_parser(scan_type)
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()
         with self.subTest(scan_type="ZAP Scan"):
             scan_type = "ZAP Scan"
             testfile = open(get_unit_tests_path() + "/scans/zap/some_2.9.0.xml")
             parser = get_parser(scan_type)
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()

     def test_get_parser_error(self):
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index d765377b8f6..92bcb0097d3 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -714,7 +714,7 @@ def test_import_0_reimport_1_active_not_verified(self):
         test_id = reimport1['test']
         self.assertEqual(test_id, test_id)

-        test = self.get_test_api(test_id)
+        self.get_test_api(test_id)

         findings = self.get_test_findings_api(test_id)
         self.log_finding_summary_json_api(findings)
@@ -754,7 +754,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self):
         findings = self.get_test_findings_api(test_id)
         self.log_finding_summary_json_api(findings)

-        finding_count_before = self.db_finding_count()
+        self.db_finding_count()
         endpoint_count_before = self.db_endpoint_count()
         endpoint_status_count_before_active = self.db_endpoint_status_count(mitigated=False)
         endpoint_status_count_before_mitigated = self.db_endpoint_status_count(mitigated=True)
@@ -770,12 +770,12 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified(self):
         endpoint_status_count_before_mitigated = self.db_endpoint_status_count(mitigated=True)

         with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, closed=1, reactivated=1, untouched=3):
-            reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename)
+            self.reimport_scan_with_params(test_id, self.zap_sample0_filename)

         test_id = reimport1['test']
         self.assertEqual(test_id, test_id)

-        test = self.get_test_api(test_id)
+        self.get_test_api(test_id)

         findings = self.get_test_findings_api(test_id)
         self.log_finding_summary_json_api(findings)
@@ -928,7 +928,7 @@ def test_import_0_reimport_3_active_verified(self):
         test_id = reimport1['test']
         self.assertEqual(test_id, test_id)

-        test = self.get_test_api(test_id)
+        self.get_test_api(test_id)
         findings = self.get_test_findings_api(test_id)
         self.log_finding_summary_json_api(findings)
         self.assert_finding_count_json(4 + 2, findings)
@@ -1023,7 +1023,7 @@ def test_import_0_reimport_0_anchore_file_path(self):

         # reimport exact same report
         with assertTestImportModelsCreated(self, reimports=1, affected_findings=0, untouched=4):
-            reimport0 = self.reimport_scan_with_params(test_id, self.anchore_file_name, scan_type=self.scan_type_anchore)
+            self.reimport_scan_with_params(test_id, self.anchore_file_name, scan_type=self.scan_type_anchore)

         active_findings_after = self.get_test_findings_api(test_id, active=True)
         self.log_finding_summary_json_api(active_findings_after)
@@ -1152,7 +1152,7 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self):
         self.assert_finding_count_json(6, active_findings_before)

         with assertTestImportModelsCreated(self, reimports=1, affected_findings=0, created=0, untouched=6):
-            reimport0 = self.reimport_scan_with_params(test_id,
+            self.reimport_scan_with_params(test_id,
                                                        self.gitlab_dep_scan_components_filename,
                                                        scan_type=self.scan_type_gtlab_dep_scan,
                                                        minimum_severity='Info')
@@ -1435,7 +1435,7 @@ def test_import_reimport_vulnerability_ids(self):
         )
         reimport_test.save()

-        reimport0 = self.reimport_scan_with_params(reimport_test.id, self.anchore_grype_file_name, scan_type=self.anchore_grype_scan_type)
+        self.reimport_scan_with_params(reimport_test.id, self.anchore_grype_file_name, scan_type=self.anchore_grype_scan_type)
         findings = Finding.objects.filter(test=reimport_test)
         self.assertEqual(4, len(findings))
         self.assertEqual('GHSA-v6rh-hp5x-86rv', findings[3].cve)
diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py
index 6566b7f555c..5318c64e165 100644
--- a/unittests/test_importers_importer.py
+++ b/unittests/test_importers_importer.py
@@ -208,7 +208,7 @@ def test_import_by_product_name_exists_engagement_name_exists(self, mock):

     def test_import_by_product_name_exists_engagement_name_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
+            self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                                          engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
@@ -227,7 +227,7 @@ def test_import_by_product_name_exists_engagement_name_not_exists_auto_create(se

     def test_import_by_product_name_not_exists_engagement_name(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
+            self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                                          engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
@@ -259,11 +259,11 @@ def test_import_by_product_type_name_not_exists_product_name_not_exists_engageme

     def test_endpoint_meta_import_by_product_name_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(ENDPOINT_META_IMPORTER_FILENAME, product=None, product_name=PRODUCT_NAME_DEFAULT, expected_http_status_code=201)
+            self.endpoint_meta_import_scan_with_params(ENDPOINT_META_IMPORTER_FILENAME, product=None, product_name=PRODUCT_NAME_DEFAULT, expected_http_status_code=201)

     def test_endpoint_meta_import_by_product_name_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, endpoints=0):
-            import0 = self.endpoint_meta_import_scan_with_params(ENDPOINT_META_IMPORTER_FILENAME, product=None, product_name=PRODUCT_NAME_NEW, expected_http_status_code=400)
+            self.endpoint_meta_import_scan_with_params(ENDPOINT_META_IMPORTER_FILENAME, product=None, product_name=PRODUCT_NAME_NEW, expected_http_status_code=400)

     def test_import_with_invalid_parameters(self):
         with self.subTest('scan_date in the future'):
@@ -379,7 +379,7 @@ def test_reimport_by_product_name_exists_engagement_name_exists_no_title(self):

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
                                            engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT, expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
@@ -394,7 +394,7 @@ def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_ex

     def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT,
                                            engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title='bogus title', expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
@@ -419,7 +419,7 @@ def test_reimport_by_product_name_exists_engagement_name_exists_test_title_exist

     def test_reimport_by_product_name_exists_engagement_name_not_exists(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT,
                                            engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
@@ -438,7 +438,7 @@ def test_reimport_by_product_name_exists_engagement_name_not_exists_auto_create(

     def test_reimport_by_product_name_not_exists_engagement_name(self):
         with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0):
-            import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
+            self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW,
                                            engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400)

     @patch('dojo.jira_link.helper.get_jira_project')
diff --git a/unittests/test_jira_config_engagement.py b/unittests/test_jira_config_engagement.py
index 19ccaaa3aa1..d457a1cfa27 100644
--- a/unittests/test_jira_config_engagement.py
+++ b/unittests/test_jira_config_engagement.py
@@ -247,21 +247,21 @@ def test_add_jira_project_to_engagement_without_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
         # TODO: add engagement also via API, but let's focus on JIRA here
         engagement = self.add_engagement_without_jira_project(expected_delta_jira_project_db=0)
-        response = self.edit_jira_project_for_engagement(engagement, expected_delta_jira_project_db=1)
+        self.edit_jira_project_for_engagement(engagement, expected_delta_jira_project_db=1)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_add_empty_jira_project_to_engagement_without_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
         engagement = self.add_engagement_without_jira_project(expected_delta_jira_project_db=0)
-        response = self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0)
+        self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 0)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_edit_jira_project_to_engagement_with_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
         engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1)
-        response = self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 2)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -274,14 +274,14 @@ def test_edit_empty_jira_project_to_engagement_with_jira_project(self, jira_mock
         # - so prevent clearing out these values
         # response = self.empty_jira_project_for_engagement(Engagement.objects.get(id=3), -1)
         # expecting ValueError as we can't delete existing JIRA Projects
-        response = self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0, expect_error=True)
+        self.empty_jira_project_for_engagement(engagement, expected_delta_jira_project_db=0, expect_error=True)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_add_jira_project_to_engagement_without_jira_project_invalid_project(self, jira_mock):
         jira_mock.return_value = False  # cannot set return_value in decorated AND have the mock into the method
         # errors means it won't redirect to view_engagement, but returns a 200 and redisplays the edit engagement page
-        response = self.edit_jira_project_for_engagement(Engagement.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True)
+        self.edit_jira_project_for_engagement(Engagement.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -290,7 +290,7 @@ def test_edit_jira_project_to_engagement_with_jira_project_invalid_project(self,
         engagement = self.add_engagement_with_jira_project(expected_delta_jira_project_db=1)
         jira_mock.return_value = False
         # jira key is changed, so jira project will be checked
-        response = self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0, expect_200=True)
+        self.edit_jira_project_for_engagement2(engagement, expected_delta_jira_project_db=0, expect_200=True)
         self.assertEqual(jira_mock.call_count, 2)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -329,8 +329,8 @@ def test_add_engagement_with_jira_project_to_engagement_jira_disabled(self, jira

     def test_edit_jira_project_to_engagement_with_jira_project_invalid_project_jira_disabled(self, jira_mock):
         self.system_settings(enable_jira=False)
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
-        response = self.edit_jira_project_for_engagement(Engagement.objects.get(id=3), expected_delta_jira_project_db=0)
-        response = self.edit_jira_project_for_engagement2(Engagement.objects.get(id=3), expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_engagement(Engagement.objects.get(id=3), expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_engagement2(Engagement.objects.get(id=3), expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 0)

diff --git a/unittests/test_jira_config_product.py b/unittests/test_jira_config_product.py
index ab6d378ae36..0c30867a32e 100644
--- a/unittests/test_jira_config_product.py
+++ b/unittests/test_jira_config_product.py
@@ -86,7 +86,7 @@ def test_add_jira_instance_unknown_host(self):

         # test raw connection error
         with self.assertRaises(requests.exceptions.RequestException):
-            jira = jira_helper.get_jira_connection_raw(data['url'], data['username'], data['password'])
+            jira_helper.get_jira_connection_raw(data['url'], data['username'], data['password'])

     @patch('dojo.jira_link.views.jira_helper.get_jira_connection_raw')
     def test_add_jira_instance_invalid_credentials(self, jira_mock):
@@ -110,21 +110,21 @@ def test_add_jira_project_to_product_without_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
         # TODO: add product also via API, but let's focus on JIRA here
         product = self.add_product_without_jira_project(expected_delta_jira_project_db=0)
-        response = self.edit_jira_project_for_product(product, expected_delta_jira_project_db=1)
+        self.edit_jira_project_for_product(product, expected_delta_jira_project_db=1)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_add_empty_jira_project_to_product_without_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorater AND have the mock into the method
         product = self.add_product_without_jira_project(expected_delta_jira_project_db=0)
-        response = self.empty_jira_project_for_product(product, expected_delta_jira_project_db=0)
+        self.empty_jira_project_for_product(product, expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 0)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_edit_jira_project_to_product_with_jira_project(self, jira_mock):
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
         product = self.add_product_with_jira_project(expected_delta_jira_project_db=1)
-        response = self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 2)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -137,14 +137,14 @@ def test_edit_empty_jira_project_to_product_with_jira_project(self, jira_mock):
         # - so prevent clearing out these values
         # response = self.empty_jira_project_for_product(Product.objects.get(id=3), -1)
         # errors means it won't redirect to view_product, but returns a 200 and redisplays the edit product page
-        response = self.empty_jira_project_for_product(product, expected_delta_jira_project_db=0, expect_200=True)
+        self.empty_jira_project_for_product(product, expected_delta_jira_project_db=0, expect_200=True)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
     def test_add_jira_project_to_product_without_jira_project_invalid_project(self, jira_mock):
         jira_mock.return_value = False  # cannot set return_value in decorated AND have the mock into the method
         # errors means it won't redirect to view_product, but returns a 200 and redisplays the edit product page
-        response = self.edit_jira_project_for_product(Product.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True)
+        self.edit_jira_project_for_product(Product.objects.get(id=3), expected_delta_jira_project_db=0, expect_200=True)
         self.assertEqual(jira_mock.call_count, 1)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -153,7 +153,7 @@ def test_edit_jira_project_to_product_with_jira_project_invalid_project(self, ji
         product = self.add_product_with_jira_project(expected_delta_jira_project_db=1)
         jira_mock.return_value = False
         # jira key is changed, so jira project will be checked
-        response = self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0, expect_200=True)
+        self.edit_jira_project_for_product2(product, expected_delta_jira_project_db=0, expect_200=True)
         self.assertEqual(jira_mock.call_count, 2)

     @patch('dojo.jira_link.views.jira_helper.is_jira_project_valid')
@@ -192,8 +192,8 @@ def test_add_product_with_jira_project_to_product_jira_disabled(self, jira_mock)

     def test_edit_jira_project_to_product_with_jira_project_invalid_project_jira_disabled(self, jira_mock):
         self.system_settings(enable_jira=False)
         jira_mock.return_value = True  # cannot set return_value in decorated AND have the mock into the method
-        response = self.edit_jira_project_for_product(Product.objects.get(id=3), expected_delta_jira_project_db=0)
-        response = self.edit_jira_project_for_product2(Product.objects.get(id=3), expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_product(Product.objects.get(id=3), expected_delta_jira_project_db=0)
+        self.edit_jira_project_for_product2(Product.objects.get(id=3), expected_delta_jira_project_db=0)
         self.assertEqual(jira_mock.call_count, 0)

diff --git a/unittests/test_jira_import_and_pushing_api.py b/unittests/test_jira_import_and_pushing_api.py
index 801f697e208..dd50794c0a2 100644
--- a/unittests/test_jira_import_and_pushing_api.py
+++ b/unittests/test_jira_import_and_pushing_api.py
@@ -152,7 +152,7 @@ def test_import_no_push_to_jira_reimport_no_push_to_jira(self):
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
@@ -162,7 +162,7 @@ def test_import_no_push_to_jira_reimport_push_to_jira_false(self):
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=False, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=False, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
@@ -172,7 +172,7 @@ def test_import_no_push_to_jira_reimport_with_push_to_jira(self):
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=True, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=True, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
         # by asserting full cassette is played we know issues have been updated in JIRA
@@ -184,7 +184,7 @@ def test_import_with_groups_no_push_to_jira_reimport_with_push_to_jira(self):
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

-        reimport = self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True)
+        self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=True, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 3)
         # by asserting full cassette is played we know issues have been updated in JIRA
@@ -197,7 +197,7 @@ def test_import_no_push_to_jira_reimport_no_push_to_jira_but_push_all_issues(sel
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)

-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
         # by asserting full cassette is played we know issues have been updated in JIRA
@@ -210,7 +210,7 @@ def test_import_with_groups_no_push_to_jira_reimport_no_push_to_jira_but_push_al
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 3)

-        reimport = self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True)
+        self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', verified=True)
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 3)
         # by asserting full cassette is played we know issues have been updated in JIRA
@@ -222,9 +222,9 @@ def test_import_no_push_to_jira_reimport_push_to_jira_is_false_but_push_all_issu
         test_id = import0['test']
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
-        updated_map = self.get_jira_issue_updated_map(test_id)
+        self.get_jira_issue_updated_map(test_id)

-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=False, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=False, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
         # when sending in identical data to JIRA, JIRA does NOT update the updated timestamp....
@@ -240,7 +240,7 @@ def test_import_with_groups_no_push_to_jira_reimport_push_to_jira_is_false_but_p
         self.assert_jira_group_issue_count_in_test(test_id, 3)
         updated_map = self.get_jira_issue_updated_map(test_id)

-        reimport = self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=False, verified=True)
+        self.reimport_scan_with_params(test_id, self.npm_groups_sample_filename, scan_type='NPM Audit Scan', group_by='component_name+component_version', push_to_jira=False, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 0)
         self.assert_jira_group_issue_count_in_test(test_id, 3)
         # when sending in identical data to JIRA, JIRA does NOT update the updated timestamp....
@@ -256,12 +256,12 @@ def test_import_push_to_jira_reimport_with_push_to_jira(self):
         self.assert_jira_group_issue_count_in_test(test_id, 0)
         # Get one of the findings from the test
         finding_id = Finding.objects.filter(test__id=test_id).first().id
-        pre_jira_status = self.get_jira_issue_updated(finding_id)
+        self.get_jira_issue_updated(finding_id)
         # re-import and see status change
-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=True, verified=True)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, push_to_jira=True, verified=True)
         self.assert_jira_issue_count_in_test(test_id, 2)
         self.assert_jira_group_issue_count_in_test(test_id, 0)
-        post_jira_status = self.get_jira_issue_updated(finding_id)
+        self.get_jira_issue_updated(finding_id)
         # when sending in identical data to JIRA, JIRA does NOT update the updated timestamp....
         # self.assert_jira_updated_change(pre_jira_status, post_jira_status)
         # by asserting full cassette is played we know issues have been updated in JIRA
@@ -468,7 +468,7 @@ def test_import_with_push_to_jira_add_comment(self):

         finding_id = findings['results'][0]['id']

-        response = self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA')
+        self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA')
         self.patch_finding_api(finding_id, {"push_to_jira": True})
         # Make sure the number of comments match
         self.assertEqual(len(self.get_jira_comments(finding_id)), 1)
@@ -483,8 +483,8 @@ def test_import_add_comments_then_push_to_jira(self):

         finding_id = findings['results'][0]['id']

-        response = self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA')
-        response = self.post_finding_notes_api(finding_id, 'testing second note. creating it and pushing it to JIRA')
+        self.post_finding_notes_api(finding_id, 'testing note. creating it and pushing it to JIRA')
+        self.post_finding_notes_api(finding_id, 'testing second note. creating it and pushing it to JIRA')
         self.patch_finding_api(finding_id, {"push_to_jira": True})

         self.assert_jira_issue_count_in_test(test_id, 1)
@@ -505,7 +505,7 @@ def test_import_with_push_to_jira_add_tags(self):
         finding = Finding.objects.get(id=findings['results'][0]['id'])

         tags = ['tag1', 'tag2']
-        response = self.post_finding_tags_api(finding.id, tags)
+        self.post_finding_tags_api(finding.id, tags)
         self.patch_finding_api(finding.id, {"push_to_jira": True})

         # Connect to jira to get the new issue
@@ -530,7 +530,7 @@ def test_import_with_push_to_jira_update_tags(self):
         finding = Finding.objects.get(id=findings['results'][0]['id'])

         tags = ['tag1', 'tag2']
-        response = self.post_finding_tags_api(finding.id, tags)
+        self.post_finding_tags_api(finding.id, tags)
         self.patch_finding_api(finding.id, {"push_to_jira": True})

         # Connect to jira to get the new issue
@@ -542,7 +542,7 @@ def test_import_with_push_to_jira_update_tags(self):
         self.assertEqual(issue.fields.labels, tags)

         tags_new = tags + ['tag3', 'tag4']
-        response = self.post_finding_tags_api(finding.id, tags_new)
+        self.post_finding_tags_api(finding.id, tags_new)
         self.patch_finding_api(finding.id, {"push_to_jira": True})

         # Connect to jira to get the new issue
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index c5e10179cb0..51bb0a17eed 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -487,7 +487,7 @@ def test_detail_prefetch(self):
                     continue
                 self.assertTrue(field in obj["prefetch"])

-                values = field_value if type(field_value) is list else [field_value]
+                values = field_value if isinstance(field_value, list) else [field_value]
                 for value in values:
                     self.assertTrue(value in obj["prefetch"][field])
@@ -514,10 +514,10 @@ def test_list_prefetch(self):
                     continue
                 self.assertTrue(field in objs["prefetch"])

-                values = field_value if type(field_value) is list else [field_value]
+                values = field_value if isinstance(field_value, list) else [field_value]
                 for value in values:
-                    if type(value) is not int:
+                    if not isinstance(value, int):
                         value = value['id']
                     self.assertTrue(value in objs["prefetch"][field])

@@ -588,7 +588,7 @@ def test_delete_object_not_authorized(self, mock):

         current_objects = self.client.get(self.url, format='json').data
         relative_url = self.url + '%s/' % current_objects['results'][0]['id']
-        response = self.client.delete(relative_url)
+        self.client.delete(relative_url)

         if self.endpoint_model == Endpoint_Status:
             permission_object = Endpoint.objects.get(id=current_objects['results'][0]['endpoint'])
diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py
index 4de529721a3..7e9a1a5d41b 100644
--- a/unittests/test_risk_acceptance.py
+++ b/unittests/test_risk_acceptance.py
@@ -128,7 +128,7 @@ def test_remove_risk_acceptance_findings_active(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id, )), data)

         self.assert_all_active_not_risk_accepted(findings)
         self.assert_all_active_not_risk_accepted(Finding.objects.filter(test__engagement=1))
@@ -143,7 +143,7 @@ def test_expire_risk_acceptance_findings_active(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)

         ra.refresh_from_db()
         self.assert_all_active_not_risk_accepted(findings)
@@ -165,7 +165,7 @@ def test_expire_risk_acceptance_findings_not_active(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)

         ra.refresh_from_db()
         # no reactivation on expiry
@@ -188,7 +188,7 @@ def test_expire_risk_acceptance_sla_not_reset(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)

         ra.refresh_from_db()

@@ -204,7 +204,7 @@ def test_expire_risk_acceptance_sla_reset(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)

         ra.refresh_from_db()

@@ -219,7 +219,7 @@ def test_reinstate_risk_acceptance_findings_accepted(self):

         data = {'id': ra.id}

-        response = self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id, )), data)
+        self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id, )), data)

         ra.refresh_from_db()
         expiration_delta_days = get_system_setting('risk_acceptance_form_default_days', 90)
@@ -237,19 +237,19 @@ def create_multiple_ras(self):
         ra_data = copy.copy(self.data_risk_accceptance)
         ra_data['accepted_findings'] = [2]
         ra_data['return_url'] = reverse('view_finding', args=(2, ))
-        response = self.add_risk_acceptance(1, ra_data, 2)
+        self.add_risk_acceptance(1, ra_data, 2)
         ra1 = Risk_Acceptance.objects.last()

         ra_data = copy.copy(self.data_risk_accceptance)
         ra_data['accepted_findings'] = [7]
         ra_data['return_url'] = reverse('view_finding', args=(7, ))
-        response = self.add_risk_acceptance(1, ra_data, 7)
+        self.add_risk_acceptance(1, ra_data, 7)
         ra2 = Risk_Acceptance.objects.last()

         ra_data = copy.copy(self.data_risk_accceptance)
         ra_data['accepted_findings'] = [22]
         ra_data['return_url'] = reverse('view_finding', args=(22, ))
-        response = self.add_risk_acceptance(3, ra_data, 22)
+        self.add_risk_acceptance(3, ra_data, 22)
         ra3 = Risk_Acceptance.objects.last()

         return ra1, ra2, ra3
diff --git a/unittests/test_tags.py b/unittests/test_tags.py
index ccb8eb69dae..0ea19b678a8 100644
--- a/unittests/test_tags.py
+++ b/unittests/test_tags.py
@@ -40,10 +40,10 @@ def test_finding_get_tags(self):

     def test_finding_filter_tags(self):
         tags = ['tag1', 'tag2']
-        finding_id = self.create_finding_with_tags(tags)
+        self.create_finding_with_tags(tags)

         tags2 = ['tag1', 'tag3']
-        finding_id2 = self.create_finding_with_tags(tags2)
+        self.create_finding_with_tags(tags2)

         response = self.get_finding_api_filter_tags('tag1')
         self.assertEqual(response['count'], 2)
@@ -236,13 +236,13 @@ def test_import_and_reimport_with_tags(self):
             self.assertTrue(tag in response['tags'])

         # reimport, do not specify tags: should retain tags
-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename)
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename)
         self.assertEqual(len(tags), len(response.get('tags')))
         for tag in tags:
             self.assertTrue(tag in response['tags'])

         # reimport, specify tags others: currently reimport doesn't do anything with tags param and silently ignores them
-        reimport = self.reimport_scan_with_params(test_id, self.zap_sample5_filename, tags=['tag3', 'tag4'])
+        self.reimport_scan_with_params(test_id, self.zap_sample5_filename, tags=['tag3', 'tag4'])
         self.assertEqual(len(tags), len(response.get('tags')))
         for tag in tags:
             self.assertTrue(tag in response['tags'])
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index 0009d5be22d..81a35caa956 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -31,7 +31,7 @@ def test_anchore_policy_check_parser_invalid_format(self):
         with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/invalid_checks_format.json")) as testfile:
             with self.assertRaises(Exception):
                 parser = AnchoreEnterpriseParser()
-                findings = parser.get_findings(testfile, Test())
+                parser.get_findings(testfile, Test())

     def test_anchore_policy_check_extract_vulnerability_id(self):
         vulnerability_id = extract_vulnerability_id("CVE-2019-14540+openapi-generator-cli-4.0.0.jar:jackson-databind")
diff --git a/unittests/tools/test_burp_api_parser.py b/unittests/tools/test_burp_api_parser.py
index ae7850a57ca..7006de7b7f5 100644
--- a/unittests/tools/test_burp_api_parser.py
+++ b/unittests/tools/test_burp_api_parser.py
@@ -49,7 +49,6 @@ def test_convert_severity(self):
         self.assertEqual("Info", convert_severity({}))

     def test_convert_confidence(self):
-        confidence = None
         with self.subTest(confidence="certain"):
             self.assertGreater(3, convert_confidence({"confidence": "certain"}))
         with self.subTest(confidence="firm"):
diff --git a/unittests/tools/test_burp_graphql_parser.py b/unittests/tools/test_burp_graphql_parser.py
index 394469625d3..7c5dbb53072 100644
--- a/unittests/tools/test_burp_graphql_parser.py
+++ b/unittests/tools/test_burp_graphql_parser.py
@@ -60,7 +60,7 @@ def test_burp_null_title(self):

             with self.assertRaises(ValueError):
                 parser = BurpGraphQLParser()
-                findings = parser.get_findings(test_file, Test())
+                parser.get_findings(test_file, Test())

     def test_burp_null_request_segments(self):
         with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_request_segments.json")) as test_file:
diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py
index c3d01aa7ac9..c43e24fb572 100644
--- a/unittests/tools/test_checkmarx_parser.py
+++ b/unittests/tools/test_checkmarx_parser.py
@@ -330,7 +330,6 @@ def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sink
         self.teardown(my_file_handle)
         # aggregation is on sink filename so all vuln with different source filenames are aggregated
         self.assertEqual(1, len(findings))
-        item = findings[0]
         # nb_occurences counts the number of aggregated vulnerabilities from tool
         self.assertEqual(2, findings[0].nb_occurences)
         mock.assert_called_with(product, 'Java', files=2)
diff --git a/unittests/tools/test_coverity_api_parser.py b/unittests/tools/test_coverity_api_parser.py
index 3da6f91fc81..3ec4423e621 100644
--- a/unittests/tools/test_coverity_api_parser.py
+++ b/unittests/tools/test_coverity_api_parser.py
@@ -7,10 +7,10 @@ class TestZapParser(DojoTestCase):

     def test_parse_wrong_file(self):
-        with self.assertRaises(ValueError) as ve:
+        with self.assertRaises(ValueError):
             testfile = open("unittests/scans/coverity_api/wrong.json")
             parser = CoverityApiParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())

     def test_parse_no_findings(self):
         testfile = open("unittests/scans/coverity_api/empty.json")
diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py
index 7fc12e6d20f..de4b8252ece 100644
--- a/unittests/tools/test_generic_parser.py
+++ b/unittests/tools/test_generic_parser.py
@@ -399,7 +399,7 @@ def test_missing_columns_is_fine(self):
         content = """Date,Title,Url,Severity,Description,References,Active,Verified"""
         file = TestFile("findings.csv", content)
         parser = GenericParser()
-        findings = parser.get_findings(file, self.test)
+        parser.get_findings(file, self.test)

     def test_column_order_is_flexible(self):
         content1 = """\
@@ -640,11 +640,11 @@ def test_parse_json_empty_finding(self):
         parser = GenericParser()
         with self.assertRaisesMessage(ValueError,
                                       "Required fields are missing: ['description', 'severity', 'title']"):
-            findings = parser.get_findings(file, Test())
+            parser.get_findings(file, Test())

     def test_parse_json_invalid_finding(self):
         file = open("unittests/scans/generic/generic_invalid.json")
         parser = GenericParser()
         with self.assertRaisesMessage(ValueError,
                                       "Not allowed fields are present: ['invalid_field', 'last_status_update']"):
-            findings = parser.get_findings(file, Test())
+            parser.get_findings(file, Test())
diff --git a/unittests/tools/test_govulncheck_parser.py b/unittests/tools/test_govulncheck_parser.py
index 55dc9bf84f0..ca21203a083 100644
--- a/unittests/tools/test_govulncheck_parser.py
+++ b/unittests/tools/test_govulncheck_parser.py
@@ -9,7 +9,7 @@ def test_parse_empty(self):
         with self.assertRaises(ValueError) as exp:
             testfile = open("unittests/scans/govulncheck/empty.json")
             parser = GovulncheckParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
         self.assertTrue(
             "Invalid JSON format" in str(exp.exception)
         )
diff --git a/unittests/tools/test_intsights_parser.py b/unittests/tools/test_intsights_parser.py
index 7e7e469bbcf..c091a00c2eb 100644
--- a/unittests/tools/test_intsights_parser.py
+++ b/unittests/tools/test_intsights_parser.py
@@ -62,7 +62,7 @@ def test_intsights_parser_invalid_text_with_error_csv(self):
             testfile = open(
                 "unittests/scans/intsights/intsights_invalid_file.txt")
             parser = IntSightsParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())

     def test_intsights_parser_with_no_alerts_json(self):
         testfile = open("unittests/scans/intsights/intsights_zero_vuln.json")
diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py
index 63e74daf573..033881c96ed 100644
--- a/unittests/tools/test_kubehunter_parser.py
+++ b/unittests/tools/test_kubehunter_parser.py
@@ -40,7 +40,7 @@ def test_kubehunter_parser_empty_with_error(self):
         with self.assertRaises(ValueError) as context:
             testfile = open("unittests/scans/kubehunter/empty.json")
             parser = KubeHunterParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()

         self.assertTrue(
diff --git a/unittests/tools/test_meterian_parser.py b/unittests/tools/test_meterian_parser.py
index 7108b2c16a1..3a015e5c6f1 100644
--- a/unittests/tools/test_meterian_parser.py
+++ b/unittests/tools/test_meterian_parser.py
@@ -9,7 +9,7 @@ def test_meterianParser_invalid_security_report_raise_ValueError_exception(self)
         with self.assertRaises(ValueError):
             testfile = open("unittests/scans/meterian/report_invalid.json")
             parser = MeterianParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())

     def test_meterianParser_report_has_no_finding(self):
         testfile = open("unittests/scans/meterian/report_no_vulns.json")
diff --git a/unittests/tools/test_npm_audit_parser.py b/unittests/tools/test_npm_audit_parser.py
index 189d8a7879f..b3de17d6468 100644
--- a/unittests/tools/test_npm_audit_parser.py
+++ b/unittests/tools/test_npm_audit_parser.py
@@ -74,7 +74,7 @@ def test_npm_audit_parser_empty_with_error(self):
         with self.assertRaises(ValueError) as context:
             testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit/empty_with_error.json"))
             parser = NpmAuditParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())
             testfile.close()
         self.assertTrue("npm audit report contains errors:" in str(context.exception))
         self.assertTrue("ENOAUDIT" in str(context.exception))
diff --git a/unittests/tools/test_risk_recon_parser.py b/unittests/tools/test_risk_recon_parser.py
index 62ec6306364..d2394d1dfa5 100644
--- a/unittests/tools/test_risk_recon_parser.py
+++ b/unittests/tools/test_risk_recon_parser.py
@@ -11,13 +11,13 @@ def test_api_with_bad_url(self):
         testfile = open("unittests/scans/risk_recon/bad_url.json")
         with self.assertRaises(Exception):
             parser = RiskReconParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())

     def test_api_with_bad_key(self):
         testfile = open("unittests/scans/risk_recon/bad_key.json")
         with self.assertRaises(Exception):
             parser = RiskReconParser()
-            findings = parser.get_findings(testfile, Test())
+            parser.get_findings(testfile, Test())

     def test_parser_without_api(self):
         testfile = open("unittests/scans/risk_recon/findings.json")
diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py
index 0fb5c64d496..89945c6b881 100644
--- a/unittests/tools/test_yarn_audit_parser.py
+++ b/unittests/tools/test_yarn_audit_parser.py
@@ -69,7 +69,7 @@ def test_yarn_audit_parser_empty_with_error(self):
         with self.assertRaises(ValueError) as context:
             testfile = open("unittests/scans/yarn_audit/empty_with_error.json")
             parser = YarnAuditParser()
-            findings = parser.get_findings(testfile, self.get_test())
+            parser.get_findings(testfile, self.get_test())
             testfile.close()
         self.assertTrue(
             "yarn audit report contains errors:" in str(context.exception)