From 6ad06a9b64485dd71b09466b0937d5dacd4dc3f1 Mon Sep 17 00:00:00 2001
From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com>
Date: Thu, 2 May 2024 20:56:47 -0500
Subject: [PATCH] Importer + Reimport: Reorg, cleanup, comment (#10011)

* First pass at base importer class
* Further implementation and comments
* Further implementation and comments
* (untested) complete default importer
* Importer clean up process_finding function
* Complete import CBV (untested)
* Importer: passing manual testing
* Reimporter + auto create context manager (untested)
* Completed testing by hand
* Add missing tags patch
* Correct some unit tests
* Pass API tests
* Complete reimport CBV
* Fix URL mappings
* Remove old importers
* Correct missed import error
* Remove testing cruft
* Fix ruff
* Remove extraneous comment
* Fix flake 8
* Fix some copy/paste errors
* Correcting unit tests
* Remove lookups for a function called excessively
* Fix ruff errors
* More unit test fun
* Fix ruff stuff
* Flake8
* Fix typo
* Fix conflicts
* Fix quoting in unit test
* Fix tests for real...
* Some feedback
* Fix oopsies
* Fix some feedback
* Typo
* testing before committing..
---
 dojo/api_v2/permissions.py              | 206 +++---
 dojo/api_v2/serializers.py              | 563 ++++++--------
 dojo/api_v2/views.py                    | 114 +--
 dojo/engagement/urls.py                 |   6 +-
 dojo/engagement/views.py                | 611 ++++++++++-----
 dojo/forms.py                           |  53 +-
 dojo/importers/auto_create_context.py   | 353 +++++++++
 dojo/importers/base_importer.py         | 940 ++++++++++++++++++++++++
 dojo/importers/default_importer.py      | 492 +++++++++++++
 dojo/importers/default_reimporter.py    | 877 ++++++++++++++++++++++
 dojo/importers/endpoint_manager.py      | 144 ++++
 dojo/importers/importer/importer.py     | 409 -----------
 dojo/importers/reimporter/reimporter.py | 779 --------------------
 dojo/importers/reimporter/utils.py      | 263 -------
 dojo/importers/utils.py                 | 209 ------
 dojo/product/urls.py                    |   6 +-
 dojo/test/urls.py                       |   5 +-
 dojo/test/views.py                      | 440 +++++++----
 unittests/test_importers_closeold.py    | 109 +--
 unittests/test_importers_importer.py    | 176 ++---
 unittests/test_rest_framework.py        |  78 +-
 21 files changed, 4087 insertions(+), 2746 deletions(-)
 create mode 100644 dojo/importers/auto_create_context.py
 create mode 100644 dojo/importers/base_importer.py
 create mode 100644 dojo/importers/default_importer.py
 create mode 100644 dojo/importers/default_reimporter.py
 create mode 100644 dojo/importers/endpoint_manager.py
 delete mode 100644 dojo/importers/importer/importer.py
 delete mode 100644 dojo/importers/reimporter/reimporter.py
 delete mode 100644 dojo/importers/reimporter/utils.py
 delete mode 100644 dojo/importers/utils.py

diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py
index fc8e34d7626..bf999ac6357 100644
--- a/dojo/api_v2/permissions.py
+++ b/dojo/api_v2/permissions.py
@@ -8,23 +8,13 @@
     ValidationError,
 )
 
-from dojo.api_v2.serializers import (
-    get_import_meta_data_from_dict,
-    get_product_id_from_dict,
-)
 from dojo.authorization.authorization import (
     user_has_configuration_permission,
     user_has_global_permission,
     user_has_permission,
 )
 from dojo.authorization.roles_permissions import Permissions
-from dojo.importers.reimporter.utils import (
-    get_target_engagement_if_exists,
-    get_target_product_by_id_if_exists,
-    get_target_product_if_exists,
-    get_target_product_type_if_exists,
-    get_target_test_if_exists,
-)
+from dojo.importers.auto_create_context import AutoCreateContextManager
 from dojo.models import (
    Cred_Mapping,
    Dojo_Group,
@@ -427,45 +417,39 @@ class
UserHasImportPermission(permissions.BasePermission): def has_permission(self, request, view): # permission check takes place before validation, so we don't have access to serializer.validated_data() # and we have to validate ourselves unfortunately - - ( - _, - _, - _, - engagement_id, - engagement_name, - product_name, - product_type_name, - auto_create_context, - _deduplication_on_engagement, - _do_not_reactivate, - ) = get_import_meta_data_from_dict(request.data) - product_type = get_target_product_type_if_exists(product_type_name) - product = get_target_product_if_exists(product_name, product_type_name) - engagement = get_target_engagement_if_exists( - engagement_id, engagement_name, product - ) - - if engagement: + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. Catch any exceptions + # in this case and wrap them in a DRF exception + try: + converted_dict = auto_create.convert_querydict_to_dict(request.data) + auto_create.process_import_meta_data_from_dict(converted_dict) + # Get an existing product + converted_dict["product_type"] = auto_create.get_target_product_type_if_exists(**converted_dict) + converted_dict["product"] = auto_create.get_target_product_if_exists(**converted_dict) + converted_dict["engagement"] = auto_create.get_target_engagement_if_exists(**converted_dict) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(e) + if engagement := converted_dict.get("engagement"): # existing engagement, nothing special to check return user_has_permission( request.user, engagement, Permissions.Import_Scan_Result ) - elif engagement_id: + elif engagement_id := converted_dict.get("engagement_id"): # engagement_id doesn't exist - msg = f"Engagement '{engagement_id}' doesn't exist" + msg = f"Engagement \"{engagement_id}\" does not exist" raise serializers.ValidationError(msg) - if not auto_create_context: + if not converted_dict.get("auto_create_context"): raise_no_auto_create_import_validation_error( None, None, - engagement_name, - product_name, - product_type_name, - engagement, - product, - product_type, + converted_dict.get("engagement_name"), + converted_dict.get("product_name"), + converted_dict.get("product_type_name"), + converted_dict.get("engagement"), + converted_dict.get("product"), + converted_dict.get("product_type"), "Need engagement_id or product_name + engagement_name to perform import", ) else: @@ -473,12 +457,12 @@ def has_permission(self, request, view): # requested and is allowed to use auto_create return check_auto_create_permission( request.user, - product, - product_name, - engagement, - engagement_name, - product_type, - product_type_name, + converted_dict.get("product"), + converted_dict.get("product_name"), + converted_dict.get("engagement"), + converted_dict.get("engagement_name"), + converted_dict.get("product_type"), + converted_dict.get("product_type_name"), "Need engagement_id or product_name + engagement_name to perform import", ) @@ -487,32 +471,28 @@ class UserHasMetaImportPermission(permissions.BasePermission): def has_permission(self, request, view): # permission check takes place before validation, so we don't have access to serializer.validated_data() # and we have to validate ourselves unfortunately - - ( - _, - _, - _, - _, - _, - product_name, - _, - _, - _, - _, - ) = get_import_meta_data_from_dict(request.data) - product = get_target_product_if_exists(product_name) - if not product: - product_id = get_product_id_from_dict(request.data) - product 
= get_target_product_by_id_if_exists(product_id) + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. Catch any exceptions + # in this case and wrap them in a DRF exception + try: + converted_dict = auto_create.convert_querydict_to_dict(request.data) + auto_create.process_import_meta_data_from_dict(converted_dict) + # Get an existing product + product = auto_create.get_target_product_if_exists(**converted_dict) + if not product: + product = auto_create.get_target_product_by_id_if_exists(**converted_dict) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(e) if product: # existing product, nothing special to check return user_has_permission( request.user, product, Permissions.Import_Scan_Result ) - elif product_id: + elif product_id := converted_dict.get("product_id"): # product_id doesn't exist - msg = f"product '{product_id}' doesn't exist" + msg = f"Product \"{product_id}\" does not exist" raise serializers.ValidationError(msg) else: msg = "Need product_id or product_name to perform import" @@ -631,49 +611,41 @@ class UserHasReimportPermission(permissions.BasePermission): def has_permission(self, request, view): # permission check takes place before validation, so we don't have access to serializer.validated_data() # and we have to validate ourselves unfortunately - - ( - test_id, - test_title, - scan_type, - _, - engagement_name, - product_name, - product_type_name, - auto_create_context, - _deduplication_on_engagement, - _do_not_reactivate, - ) = get_import_meta_data_from_dict(request.data) - - product_type = get_target_product_type_if_exists(product_type_name) - product = get_target_product_if_exists(product_name, product_type_name) - engagement = get_target_engagement_if_exists( - None, engagement_name, product - ) - test = get_target_test_if_exists( - test_id, test_title, scan_type, engagement - ) - - if test: + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. 
Catch any exceptions + # in this case and wrap them in a DRF exception + try: + converted_dict = auto_create.convert_querydict_to_dict(request.data) + auto_create.process_import_meta_data_from_dict(converted_dict) + # Get an existing product + converted_dict["product_type"] = auto_create.get_target_product_type_if_exists(**converted_dict) + converted_dict["product"] = auto_create.get_target_product_if_exists(**converted_dict) + converted_dict["engagement"] = auto_create.get_target_engagement_if_exists(**converted_dict) + converted_dict["test"] = auto_create.get_target_test_if_exists(**converted_dict) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(e) + + if test := converted_dict.get("test"): # existing test, nothing special to check return user_has_permission( request.user, test, Permissions.Import_Scan_Result ) - elif test_id: + elif test_id := converted_dict.get("test_id"): # test_id doesn't exist - msg = f"Test '{test_id}' doesn't exist" + msg = f"Test \"{test_id}\" does not exist" raise serializers.ValidationError(msg) - if not auto_create_context: + if not converted_dict.get("auto_create_context"): raise_no_auto_create_import_validation_error( - test_title, - scan_type, - engagement_name, - product_name, - product_type_name, - engagement, - product, - product_type, + converted_dict.get("test_title"), + converted_dict.get("scan_type"), + converted_dict.get("engagement_name"), + converted_dict.get("product_name"), + converted_dict.get("product_type_name"), + converted_dict.get("engagement"), + converted_dict.get("product"), + converted_dict.get("product_type"), "Need test_id or product_name + engagement_name + scan_type to perform reimport", ) else: @@ -681,12 +653,12 @@ def has_permission(self, request, view): # requested and is allowed to use auto_create return check_auto_create_permission( request.user, - product, - product_name, - engagement, - engagement_name, - product_type, - product_type_name, + converted_dict.get("product"), + converted_dict.get("product_name"), + converted_dict.get("engagement"), + converted_dict.get("engagement_name"), + converted_dict.get("product_type"), + converted_dict.get("product_type_name"), "Need test_id or product_name + engagement_name + scan_type to perform reimport", ) @@ -955,28 +927,28 @@ def raise_no_auto_create_import_validation_error( raise ValidationError(msg) if product_type_name and not product_type: - msg = f"Product Type '{product_type_name}' doesn't exist" + msg = f"Product Type \"{product_type_name}\" does not exist" raise serializers.ValidationError(msg) if product_name and not product: if product_type_name: - msg = f"Product '{product_name}' doesn't exist in Product_Type '{product_type_name}'" + msg = f"Product \"{product_name}\" does not exist in Product_Type \"{product_type_name}\"" raise serializers.ValidationError(msg) else: - msg = f"Product '{product_name}' doesn't exist" + msg = f"Product \"{product_name}\" does not exist" raise serializers.ValidationError(msg) if engagement_name and not engagement: - msg = f"Engagement '{engagement_name}' doesn't exist in Product '{product_name}'" + msg = f"Engagement \"{engagement_name}\" does not exist in Product \"{product_name}\"" raise serializers.ValidationError(msg) # these are only set for reimport if test_title: - msg = f"Test '{test_title}' with scan_type '{scan_type}' doesn't exist in Engagement '{engagement_name}'" + msg = f"Test \"{test_title}\" with scan_type \"{scan_type}\" does not exist in Engagement 
\"{engagement_name}\"" raise serializers.ValidationError(msg) if scan_type: - msg = f"Test with scan_type '{scan_type}' doesn't exist in Engagement '{engagement_name}'" + msg = f"Test with scan_type \"{scan_type}\" does not exist in Engagement \"{engagement_name}\"" raise serializers.ValidationError(msg) raise ValidationError(error_message) @@ -1023,13 +995,13 @@ def check_auto_create_permission( if product and product_name and engagement_name: if not user_has_permission(user, product, Permissions.Engagement_Add): - msg = f"No permission to create engagements in product '{product_name}'" + msg = f"No permission to create engagements in product \"{product_name}\"" raise PermissionDenied(msg) if not user_has_permission( user, product, Permissions.Import_Scan_Result ): - msg = f"No permission to import scans into product '{product_name}'" + msg = f"No permission to import scans into product \"{product_name}\"" raise PermissionDenied(msg) # all good @@ -1037,14 +1009,14 @@ def check_auto_create_permission( if not product and product_name: if not product_type_name: - msg = f"Product '{product_name}' doesn't exist and no product_type_name provided to create the new product in" + msg = f"Product \"{product_name}\" does not exist and no product_type_name provided to create the new product in" raise serializers.ValidationError(msg) if not product_type: if not user_has_global_permission( user, Permissions.Product_Type_Add ): - msg = f"No permission to create product_type '{product_type_name}'" + msg = f"No permission to create product_type \"{product_type_name}\"" raise PermissionDenied(msg) # new product type can be created with current user as owner, so # all objects in it can be created as well @@ -1053,7 +1025,7 @@ def check_auto_create_permission( if not user_has_permission( user, product_type, Permissions.Product_Type_Add_Product ): - msg = f"No permission to create products in product_type '{product_type}'" + msg = f"No permission to create products in product_type \"{product_type}\"" raise PermissionDenied(msg) # product can be created, so objects in it can be created as well diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 3eb65b18bc1..c8ab20cc33e 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -29,17 +29,10 @@ ) from dojo.finding.queries import get_authorized_findings from dojo.group.utils import get_auth_group_name -from dojo.importers.importer.importer import DojoDefaultImporter as Importer -from dojo.importers.reimporter.reimporter import ( - DojoDefaultReImporter as ReImporter, -) -from dojo.importers.reimporter.utils import ( - get_or_create_engagement, - get_target_engagement_if_exists, - get_target_product_by_id_if_exists, - get_target_product_if_exists, - get_target_test_if_exists, -) +from dojo.importers.auto_create_context import AutoCreateContextManager +from dojo.importers.base_importer import BaseImporter +from dojo.importers.default_importer import DefaultImporter +from dojo.importers.default_reimporter import DefaultReImporter from dojo.models import ( DEFAULT_NOTIFICATION, IMPORT_ACTIONS, @@ -126,52 +119,6 @@ deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") -def get_import_meta_data_from_dict(data): - test_id = data.get("test", None) - if test_id: - if isinstance(test_id, Test): - test_id = test_id.id - elif isinstance(test_id, str) and not test_id.isdigit(): - msg = "test must be an integer" - raise serializers.ValidationError(msg) - - scan_type = data.get("scan_type", None) - - test_title 
= data.get("test_title", None) - - engagement_id = data.get("engagement", None) - if engagement_id: - if isinstance(engagement_id, Engagement): - engagement_id = engagement_id.id - elif isinstance(engagement_id, str) and not engagement_id.isdigit(): - msg = "engagement must be an integer" - raise serializers.ValidationError(msg) - - engagement_name = data.get("engagement_name", None) - - product_name = data.get("product_name", None) - product_type_name = data.get("product_type_name", None) - - auto_create_context = data.get("auto_create_context", None) - - deduplication_on_engagement = data.get( - "deduplication_on_engagement", False - ) - do_not_reactivate = data.get("do_not_reactivate", False) - return ( - test_id, - test_title, - scan_type, - engagement_id, - engagement_name, - product_name, - product_type_name, - auto_create_context, - deduplication_on_engagement, - do_not_reactivate, - ) - - def get_product_id_from_dict(data): product_id = data.get("product", None) if product_id: @@ -261,7 +208,7 @@ def to_internal_value(self, data): except ValueError: self.fail("invalid_json") - logger.debug("data as json: %s", data) + logger.debug(f"data as json: {data}") if not isinstance(data, list): self.fail("not_a_list", input_type=type(data).__name__) @@ -2072,7 +2019,6 @@ class ImportScanSerializer(serializers.Serializer): help_text="The IP address, host name or full URL. It must be valid", ) file = serializers.FileField(allow_empty_file=True, required=False) - product_type_name = serializers.CharField(required=False) product_name = serializers.CharField(required=False) engagement_name = serializers.CharField(required=False) @@ -2134,7 +2080,6 @@ class ImportScanSerializer(serializers.Serializer): "This is an optional field which is used in deduplication and closing of old findings when set. 
" "This affects the whole engagement/product depending on your deduplication scope.", ) - group_by = serializers.ChoiceField( required=False, choices=Finding_Group.GROUP_BY_OPTIONS, @@ -2145,7 +2090,6 @@ class ImportScanSerializer(serializers.Serializer): required=False, default=True, ) - # extra fields populated in response # need to use the _id suffix as without the serializer framework gets # confused @@ -2156,7 +2100,6 @@ class ImportScanSerializer(serializers.Serializer): engagement_id = serializers.IntegerField(read_only=True) product_id = serializers.IntegerField(read_only=True) product_type_id = serializers.IntegerField(read_only=True) - statistics = ImportStatisticsSerializer(read_only=True, required=False) apply_tags_to_findings = serializers.BooleanField( help_text="If set to True, the tags will be applied to the findings", @@ -2167,135 +2110,106 @@ class ImportScanSerializer(serializers.Serializer): required=False, ) - def save(self, push_to_jira=False): - data = self.validated_data - close_old_findings = data.get("close_old_findings") - close_old_findings_product_scope = data.get( - "close_old_findings_product_scope" - ) - minimum_severity = data.get("minimum_severity") - endpoint_to_add = data.get("endpoint_to_add") - scan_date = data.get("scan_date", None) - # Will save in the provided environment or in the `Development` one if - # absent - version = data.get("version", None) - build_id = data.get("build_id", None) - branch_tag = data.get("branch_tag", None) - commit_hash = data.get("commit_hash", None) - api_scan_configuration = data.get("api_scan_configuration", None) - service = data.get("service", None) - apply_tags_to_findings = data.get("apply_tags_to_findings", False) - apply_tags_to_endpoints = data.get("apply_tags_to_endpoints", False) - source_code_management_uri = data.get( - "source_code_management_uri", None + def set_context( + self, + data: dict, + ) -> dict: + """ + Process all of the user supplied inputs to massage them into the correct + format the importer is expecting to see + """ + context = dict(data) + # update some vars + context["scan"] = data.get("file", None) + context["environment"] = Development_Environment.objects.get( + name=data.get("environment", "Development") ) - + # Set the active/verified status based upon the overrides if "active" in self.initial_data: - active = data.get("active") + context["active"] = data.get("active") else: - active = None + context["active"] = None if "verified" in self.initial_data: - verified = data.get("verified") + context["verified"] = data.get("verified") else: - verified = None - - environment_name = data.get("environment", "Development") - environment = Development_Environment.objects.get( - name=environment_name - ) - tags = data.get("tags", None) + context["verified"] = None + # Change the way that endpoints are sent to the importer + if endpoints_to_add := data.get("endpoint_to_add"): + context["endpoints_to_add"] = [endpoints_to_add] + else: + context["endpoint_to_add"] = None # Convert the tags to a list if needed. 
At this point, the # TaggitListSerializer has already removed commas supplied # by the user, so this operation will consistently return # a list to be used by the importer - if isinstance(tags, str): - tags = tags.split(", ") - lead = data.get("lead") - - scan = data.get("file", None) - endpoints_to_add = [endpoint_to_add] if endpoint_to_add else None - - group_by = data.get("group_by", None) - create_finding_groups_for_all_findings = data.get( - "create_finding_groups_for_all_findings", True - ) - - engagement_end_date = data.get("engagement_end_date", None) - ( - _, - test_title, - scan_type, - engagement_id, - engagement_name, - product_name, - product_type_name, - auto_create_context, - deduplication_on_engagement, - _do_not_reactivate, - ) = get_import_meta_data_from_dict(data) - engagement = get_or_create_engagement( - engagement_id, - engagement_name, - product_name, - product_type_name, - auto_create_context, - deduplication_on_engagement, - source_code_management_uri=source_code_management_uri, - target_end=engagement_end_date, - ) - + if tags := context.get("tags"): + if isinstance(tags, str): + context["tags"] = tags.split(", ") # have to make the scan_date_time timezone aware otherwise uploads via # the API would fail (but unit tests for api upload would pass...) - scan_date_time = ( + context["scan_date"] = ( timezone.make_aware( - datetime.combine(scan_date, datetime.min.time()) + datetime.combine(context.get("scan_date"), datetime.min.time()) ) - if scan_date + if context.get("scan_date") else None ) - importer = Importer() + # Process the auto create context inputs + self.process_auto_create_create_context(context) + + return context + + def process_auto_create_create_context( + self, + context: dict, + ) -> None: + """ + Extract all of the pertinent args used to auto create any product + types, products, or engagements. This function will also validate + those inputs for any required info that is not present. In the event + of an error, an exception will be raised and bubble up to the user + """ + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. 
Catch any exceptions + # in this case and wrap them in a DRF exception try: - ( - test, - _finding_count, - _closed_finding_count, - _test_import, - ) = importer.import_scan( - scan, - scan_type, - engagement, - lead, - environment, - active=active, - verified=verified, - tags=tags, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - scan_date=scan_date_time, - version=version, - branch_tag=branch_tag, - build_id=build_id, - commit_hash=commit_hash, - push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - close_old_findings_product_scope=close_old_findings_product_scope, - group_by=group_by, - api_scan_configuration=api_scan_configuration, - service=service, - title=test_title, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - apply_tags_to_findings=apply_tags_to_findings, - apply_tags_to_endpoints=apply_tags_to_endpoints, + auto_create.process_import_meta_data_from_dict(context) + # Attempt to create an engagement + context["engagement"] = auto_create.get_or_create_engagement(**context) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(str(e)) + + def get_importer(self) -> BaseImporter: + """ + Returns a new instance of an importer that extends + the BaseImporter class + """ + return DefaultImporter() + + def process_scan( + self, + data: dict, + context: dict + ) -> None: + """ + Process the scan with all of the supplied data fully massaged + into the format we are expecting + + Raises exceptions in the event of an error + """ + try: + context["test"], _, _, _, _, _, _ = self.get_importer().process_scan( + **context, ) - - if test: + # Update the response body with some new data + if test := context.get("test"): data["test"] = test.id data["test_id"] = test.id data["engagement_id"] = test.engagement.id data["product_id"] = test.engagement.product.id data["product_type_id"] = test.engagement.product.prod_type.id data["statistics"] = {"after": test.statistics} - # convert to exception otherwise django rest framework will swallow them as 400 error # exceptions are already logged in the importer except SyntaxError as se: @@ -2303,7 +2217,17 @@ def save(self, push_to_jira=False): except ValueError as ve: raise Exception(ve) - def validate(self, data): + def save(self, push_to_jira=False): + # Go through the validate method + data = self.validated_data + # Extract the data from the form + context = self.set_context(data) + # set the jira option again as it was overridden + context["push_to_jira"] = push_to_jira + # Import the scan with all of the supplied data + self.process_scan(data, context) + + def validate(self, data: dict) -> dict: scan_type = data.get("scan_type") file = data.get("file") if not file and requires_file(scan_type): @@ -2324,7 +2248,7 @@ def validate(self, data): raise serializers.ValidationError(msg) return data - def validate_scan_date(self, value): + def validate_scan_date(self, value: str) -> None: if value and value > timezone.localdate(): msg = "The scan_date cannot be in the future!" 
raise serializers.ValidationError(msg) @@ -2458,186 +2382,118 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer): required=False, ) - def save(self, push_to_jira=False): - logger.debug("push_to_jira: %s", push_to_jira) - data = self.validated_data - scan_type = data.get("scan_type") - endpoint_to_add = data.get("endpoint_to_add") - minimum_severity = data.get("minimum_severity") - scan_date = data.get("scan_date", None) - close_old_findings = data.get("close_old_findings") - close_old_findings_product_scope = data.get( - "close_old_findings_product_scope" - ) - apply_tags_to_findings = data.get("apply_tags_to_findings", False) - apply_tags_to_endpoints = data.get("apply_tags_to_endpoints", False) - do_not_reactivate = data.get("do_not_reactivate", False) - version = data.get("version", None) - build_id = data.get("build_id", None) - branch_tag = data.get("branch_tag", None) - commit_hash = data.get("commit_hash", None) - api_scan_configuration = data.get("api_scan_configuration", None) - service = data.get("service", None) - lead = data.get("lead", None) - tags = data.get("tags", None) - # Convert the tags to a list if needed. At this point, the - # TaggitListSerializer has already removed commas supplied - # by the user, so this operation will consistently return - # a list to be used by the importer - if isinstance(tags, str): - tags = tags.split(", ") - environment_name = data.get("environment", "Development") - environment = Development_Environment.objects.get( - name=environment_name - ) - scan = data.get("file", None) - endpoints_to_add = [endpoint_to_add] if endpoint_to_add else None - source_code_management_uri = data.get( - "source_code_management_uri", None + def set_context( + self, + data: dict, + ) -> dict: + """ + Process all of the user supplied inputs to massage them into the correct + format the importer is expecting to see + """ + context = dict(data) + # update some vars + context["scan"] = data.get("file", None) + context["environment"] = Development_Environment.objects.get( + name=data.get("environment", "Development") ) - engagement_end_date = data.get("engagement_end_date", None) - + # Set the active/verified status based upon the overrides if "active" in self.initial_data: - active = data.get("active") + context["active"] = data.get("active") else: - active = None + context["active"] = None if "verified" in self.initial_data: - verified = data.get("verified") + context["verified"] = data.get("verified") else: - verified = None - - group_by = data.get("group_by", None) - create_finding_groups_for_all_findings = data.get( - "create_finding_groups_for_all_findings", True - ) - - ( - test_id, - test_title, - scan_type, - _, - engagement_name, - product_name, - product_type_name, - auto_create_context, - deduplication_on_engagement, - do_not_reactivate, - ) = get_import_meta_data_from_dict(data) - # we passed validation, so the test is present - product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists( - None, engagement_name, product - ) - test = get_target_test_if_exists( - test_id, test_title, scan_type, engagement - ) - + context["verified"] = None + # Change the way that endpoints are sent to the importer + if endpoints_to_add := data.get("endpoint_to_add"): + context["endpoints_to_add"] = [endpoints_to_add] + else: + context["endpoint_to_add"] = None + # Convert the tags to a list if needed. 
At this point, the + # TaggitListSerializer has already removed commas supplied + # by the user, so this operation will consistently return + # a list to be used by the importer + if tags := context.get("tags"): + if isinstance(tags, str): + context["tags"] = tags.split(", ") # have to make the scan_date_time timezone aware otherwise uploads via # the API would fail (but unit tests for api upload would pass...) - scan_date_time = ( + context["scan_date"] = ( timezone.make_aware( - datetime.combine(scan_date, datetime.min.time()) + datetime.combine(context.get("scan_date"), datetime.min.time()) ) - if scan_date + if context.get("scan_date") else None ) - statistics_before, statistics_delta = None, None + return context + + def process_auto_create_create_context( + self, + auto_create_manager: AutoCreateContextManager, + context: dict, + ) -> None: + """ + Extract all of the pertinent args used to auto create any product + types, products, or engagements. This function will also validate + those inputs for any required info that is not present. In the event + of an error, an exception will be raised and bubble up to the user + """ + # Process the context to make an conversions needed. Catch any exceptions + # in this case and wrap them in a DRF exception + try: + auto_create_manager.process_import_meta_data_from_dict(context) + context["product"] = auto_create_manager.get_target_product_if_exists(**context) + context["engagement"] = auto_create_manager.get_target_engagement_if_exists(**context) + context["test"] = auto_create_manager.get_target_test_if_exists(**context) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(str(e)) + + def get_importer(self) -> BaseImporter: + """ + Returns a new instance of an importer that extends + the BaseImporter class + """ + return DefaultImporter() + + def get_reimporter(self) -> BaseImporter: + """ + Returns a new instance of a reimporter that extends + the BaseImporter class + """ + return DefaultReImporter() + + def process_scan( + self, + auto_create_manager: AutoCreateContextManager, + data: dict, + context: dict, + ) -> None: + """ + Process the scan with all of the supplied data fully massaged + into the format we are expecting + + Raises exceptions in the event of an error + """ + statistics_before, statistics_delta = None, None try: - if test: - # reimport into provided / latest test + if test := context.get("test"): statistics_before = test.statistics - reimporter = ReImporter() - ( - test, - _finding_count, - _new_finding_count, - _closed_finding_count, - _reactivated_finding_count, - _untouched_finding_count, - test_import, - ) = reimporter.reimport_scan( - scan, - scan_type, - test, - active=active, - verified=verified, - tags=tags, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - scan_date=scan_date_time, - version=version, - branch_tag=branch_tag, - build_id=build_id, - commit_hash=commit_hash, - push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - group_by=group_by, - api_scan_configuration=api_scan_configuration, - service=service, - do_not_reactivate=do_not_reactivate, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - apply_tags_to_findings=apply_tags_to_findings, - apply_tags_to_endpoints=apply_tags_to_endpoints, - ) - + context["test"], _, _, _, _, _, test_import = self.get_reimporter().process_scan(**context) if test_import: statistics_delta = test_import.statistics - elif auto_create_context: - # perform 
Import to create test - logger.debug( - "reimport for non-existing test, using import to create new test" - ) - engagement = get_or_create_engagement( - None, - engagement_name, - product_name, - product_type_name, - auto_create_context, - deduplication_on_engagement, - source_code_management_uri=source_code_management_uri, - target_end=engagement_end_date, - ) - importer = Importer() - ( - test, - _finding_count, - _closed_finding_count, - _, - ) = importer.import_scan( - scan, - scan_type, - engagement, - lead, - environment, - active=active, - verified=verified, - tags=tags, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - scan_date=scan_date_time, - version=version, - branch_tag=branch_tag, - build_id=build_id, - commit_hash=commit_hash, - push_to_jira=push_to_jira, - close_old_findings=close_old_findings, - close_old_findings_product_scope=close_old_findings_product_scope, - group_by=group_by, - api_scan_configuration=api_scan_configuration, - service=service, - title=test_title, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - apply_tags_to_findings=apply_tags_to_findings, - apply_tags_to_endpoints=apply_tags_to_endpoints, - ) - + elif context.get("auto_create_context"): + # Attempt to create an engagement + logger.debug("reimport for non-existing test, using import to create new test") + context["engagement"] = auto_create_manager.get_or_create_engagement(**context) + context["test"], _, _, _, _, _, _ = self.get_importer().process_scan(**context) else: - # should be captured by validation / permission check already - msg = "test not found" + msg = "A test could not be found!" raise NotFound(msg) - - if test: + # Update the response body with some new data + if test := context.get("test"): data["test"] = test data["test_id"] = test.id data["engagement_id"] = test.engagement.id @@ -2649,7 +2505,6 @@ def save(self, push_to_jira=False): if statistics_delta: data["statistics"]["delta"] = statistics_delta data["statistics"]["after"] = test.statistics - # convert to exception otherwise django rest framework will swallow them as 400 error # exceptions are already logged in the importer except SyntaxError as se: @@ -2657,6 +2512,19 @@ def save(self, push_to_jira=False): except ValueError as ve: raise Exception(ve) + def save(self, push_to_jira=False): + # Go through the validate method + data = self.validated_data + # Extract the data from the form + context = self.set_context(data) + # set the jira option again as it was overridden + context["push_to_jira"] = push_to_jira + # Process the auto create context inputs + auto_create_manager = AutoCreateContextManager() + self.process_auto_create_create_context(auto_create_manager, context) + # Import the scan with all of the supplied data + self.process_scan(auto_create_manager, data, context) + def validate(self, data): scan_type = data.get("scan_type") file = data.get("file") @@ -2710,27 +2578,22 @@ def validate(self, data): def save(self): data = self.validated_data file = data.get("file") - create_endpoints = data.get("create_endpoints", True) create_tags = data.get("create_tags", True) create_dojo_meta = data.get("create_dojo_meta", False) + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. 
Catch any exceptions + # in this case and wrap them in a DRF exception + try: + auto_create.process_import_meta_data_from_dict(data) + # Get an existing product + product = auto_create.get_target_product_if_exists(**data) + if not product: + product = auto_create.get_target_product_by_id_if_exists(**data) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(str(e)) - ( - _, - _, - _, - _, - _, - product_name, - _, - _, - _, - _, - ) = get_import_meta_data_from_dict(data) - product = get_target_product_if_exists(product_name) - if not product: - product_id = get_product_id_from_dict(data) - product = get_target_product_by_id_if_exists(product_id) try: endpoint_meta_import( file, diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 38abf945c7a..9b5239f0118 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -73,11 +73,7 @@ get_authorized_group_members, get_authorized_groups, ) -from dojo.importers.reimporter.utils import ( - get_target_engagement_if_exists, - get_target_product_if_exists, - get_target_test_if_exists, -) +from dojo.importers.auto_create_context import AutoCreateContextManager from dojo.jira_link.queries import ( get_authorized_jira_issues, get_authorized_jira_projects, @@ -2604,41 +2600,27 @@ class ImportScanView(mixins.CreateModelMixin, viewsets.GenericViewSet): permission_classes = (IsAuthenticated, permissions.UserHasImportPermission) def perform_create(self, serializer): - ( - _, - _, - _, - engagement_id, - engagement_name, - product_name, - _product_type_name, - _auto_create_context, - _deduplication_on_engagement, - _do_not_reactivate, - ) = serializers.get_import_meta_data_from_dict( - serializer.validated_data - ) - product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists( - engagement_id, engagement_name, product - ) + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. 
Catch any exceptions + # in this case and wrap them in a DRF exception + try: + converted_dict = auto_create.convert_querydict_to_dict(serializer.validated_data) + auto_create.process_import_meta_data_from_dict(converted_dict) + # Get an existing product + product = auto_create.get_target_product_if_exists(**converted_dict) + engagement = auto_create.get_target_engagement_if_exists(**converted_dict) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(str(e)) # when using auto_create_context, the engagement or product may not # have been created yet - jira_driver = ( - engagement if engagement else product if product else None - ) - jira_project = ( - jira_helper.get_jira_project(jira_driver) if jira_driver else None - ) - push_to_jira = serializer.validated_data.get("push_to_jira") - if get_system_setting("enable_jira") and jira_project: - push_to_jira = push_to_jira or jira_project.push_all_issues - - logger.debug( - "push_to_jira: %s", serializer.validated_data.get("push_to_jira") - ) + if get_system_setting("enable_jira"): + jira_driver = (engagement if engagement else product if product else None) + if jira_project := (jira_helper.get_jira_project(jira_driver) if jira_driver else None): + push_to_jira = push_to_jira or jira_project.push_all_issues + logger.debug(f"push_to_jira: {push_to_jira}") serializer.save(push_to_jira=push_to_jira) def get_queryset(self): @@ -2783,50 +2765,30 @@ def get_queryset(self): return get_authorized_tests(Permissions.Import_Scan_Result) def perform_create(self, serializer): - ( - test_id, - test_title, - scan_type, - _, - engagement_name, - product_name, - _product_type_name, - _auto_create_context, - _deduplication_on_engagement, - _do_not_reactivate, - ) = serializers.get_import_meta_data_from_dict( - serializer.validated_data - ) - product = get_target_product_if_exists(product_name) - engagement = get_target_engagement_if_exists( - None, engagement_name, product - ) - test = get_target_test_if_exists( - test_id, test_title, scan_type, engagement - ) + auto_create = AutoCreateContextManager() + # Process the context to make an conversions needed. 
Catch any exceptions + # in this case and wrap them in a DRF exception + try: + converted_dict = auto_create.convert_querydict_to_dict(serializer.validated_data) + auto_create.process_import_meta_data_from_dict(converted_dict) + # Get an existing product + product = auto_create.get_target_product_if_exists(**converted_dict) + engagement = auto_create.get_target_engagement_if_exists(**converted_dict) + test = auto_create.get_target_test_if_exists(**converted_dict) + except (ValueError, TypeError) as e: + # Raise an explicit drf exception here + raise ValidationError(str(e)) # when using auto_create_context, the engagement or product may not # have been created yet - jira_driver = ( - test - if test - else engagement - if engagement - else product - if product - else None - ) - jira_project = ( - jira_helper.get_jira_project(jira_driver) if jira_driver else None - ) - push_to_jira = serializer.validated_data.get("push_to_jira") - if get_system_setting("enable_jira") and jira_project: - push_to_jira = push_to_jira or jira_project.push_all_issues - - logger.debug( - "push_to_jira: %s", serializer.validated_data.get("push_to_jira") - ) + if get_system_setting("enable_jira"): + jira_driver = ( + test if test else engagement if engagement else product if product else None + ) + if jira_project := (jira_helper.get_jira_project(jira_driver) if jira_driver else None): + push_to_jira = push_to_jira or jira_project.push_all_issues + logger.debug(f"push_to_jira: {push_to_jira}") serializer.save(push_to_jira=push_to_jira) diff --git a/dojo/engagement/urls.py b/dojo/engagement/urls.py index 66099672edd..df0a7f5af2d 100644 --- a/dojo/engagement/urls.py +++ b/dojo/engagement/urls.py @@ -22,8 +22,10 @@ name='copy_engagement'), re_path(r'^engagement/(?P\d+)/add_tests$', views.add_tests, name='add_tests'), - re_path(r'^engagement/(?P\d+)/import_scan_results$', - views.ImportScanResultsView.as_view(), name='import_scan_results'), + re_path( + r'^engagement/(?P\d+)/import_scan_results$', + views.ImportScanResultsView.as_view(), + name='import_scan_results'), re_path(r'^engagement/(?P\d+)/close$', views.close_eng, name='close_engagement'), re_path(r'^engagement/(?P\d+)/reopen$', views.reopen_eng, diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 0c01cc72bbb..5ac3340fcc3 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -6,7 +6,7 @@ from functools import reduce from tempfile import NamedTemporaryFile from time import strftime -from typing import List +from typing import List, Tuple from django.conf import settings from django.contrib import messages @@ -66,11 +66,12 @@ TypedNoteForm, UploadThreatForm, ) -from dojo.importers.importer.importer import DojoDefaultImporter as Importer +from dojo.importers.default_importer import DefaultImporter from dojo.models import ( Check_List, Cred_Mapping, Development_Environment, + Dojo_User, Endpoint, Engagement, Finding, @@ -102,7 +103,6 @@ get_setting, get_system_setting, handle_uploaded_threat, - is_scan_file_too_large, redirect_to_return_url_or_else, ) @@ -706,205 +706,427 @@ def add_tests(request, eid): class ImportScanResultsView(View): - def get(self, request, eid=None, pid=None): - environment = Development_Environment.objects.filter(name='Development').first() - engagement = None - form = ImportScanForm(initial={'environment': environment}) - cred_form = CredMappingForm() - jform = None - user = request.user + def get_template(self) -> str: + """ + Returns the template that will be presented to the user + """ + return 
"dojo/import_scan_results.html" - if eid: - engagement = get_object_or_404(Engagement, id=eid) + def get_development_environment( + self, + environment_name: str = "Development", + ) -> Development_Environment | None: + """ + Get the development environment in two cases: + - GET: Environment "Development" by default + - POST: The label supplied by the user, with Development as a backup + """ + return Development_Environment.objects.filter(name=environment_name).first() + + def get_engagement_or_product( + self, + user: Dojo_User, + engagement_id: int = None, + product_id: int = None, + ) -> Tuple[Engagement, Product, Product | Engagement]: + """ + Using the path parameters, either fetch the product or engagement + """ + engagement = product = engagement_or_product = None + # Get the product if supplied + # Get the engagement if supplied + if engagement_id is not None: + engagement = get_object_or_404(Engagement, id=engagement_id) engagement_or_product = engagement - cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(engagement=engagement).order_by('cred_id') - elif pid: - product = get_object_or_404(Product, id=pid) + elif product_id is not None: + product = get_object_or_404(Product, id=product_id) engagement_or_product = product else: msg = 'Either Engagement or Product has to be provided' raise Exception(msg) - + # Ensure the supplied user has access to import to the engagement or product user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result) + return engagement, product, engagement_or_product + + def get_form( + self, + request: HttpRequest, + **kwargs: dict, + ) -> ImportScanForm: + """ + Returns the default import form for importing findings + """ + if request.method == "POST": + return ImportScanForm(request.POST, request.FILES, **kwargs) + else: + return ImportScanForm(**kwargs) + + def get_credential_form( + self, + request: HttpRequest, + engagement: Engagement, + ) -> CredMappingForm: + """ + Return a new instance of a form managing credentials. 
If an engagement + it present at this time any existing credential objects will be attempted + to be fetched to populate the form + """ + if request.method == "POST": + return CredMappingForm(request.POST) + else: + # If the engagement is not present, return an empty form + if engagement is None: + return CredMappingForm() + # Otherwise get all creds in the associated engagement + return CredMappingForm( + initial={ + "cred_user_queryset": Cred_Mapping.objects.filter( + engagement=engagement + ).order_by('cred_id'), + } + ) + + def get_jira_form( + self, + request: HttpRequest, + engagement_or_product: Engagement | Product, + ) -> Tuple[JIRAImportScanForm | None, bool]: + """ + Returns a JiraImportScanForm if jira is enabled + """ + jira_form = None + push_all_jira_issues = False + # Determine if jira issues should be pushed automatically push_all_jira_issues = jira_helper.is_push_all_issues(engagement_or_product) + # Only return the form if the jira is enabled on this engagement or product + if jira_helper.get_jira_project(engagement_or_product): + if request.method == "POST": + jira_form = JIRAImportScanForm( + request.POST, + push_all=push_all_jira_issues, + prefix='jiraform' + ) + else: + jira_form = JIRAImportScanForm( + push_all=push_all_jira_issues, + prefix='jiraform' + ) + return jira_form, push_all_jira_issues + + def get_product_tab( + self, + product: Product, + engagement: Engagement, + ) -> Tuple[Product_Tab, dict]: + """ + Determine how the product tab will be rendered, and what tab will be selected + as currently active + """ custom_breadcrumb = None - title = "Import Scan Results" if engagement: - product_tab = Product_Tab(engagement.product, title=title, tab="engagements") + product_tab = Product_Tab(engagement.product, title="Import Scan Results", tab="engagements") product_tab.setEngagement(engagement) else: custom_breadcrumb = {"", ""} - product_tab = Product_Tab(product, title=title, tab="findings") + product_tab = Product_Tab(product, title="Import Scan Results", tab="findings") + return product_tab, custom_breadcrumb - if jira_helper.get_jira_project(engagement_or_product): - jform = JIRAImportScanForm(push_all=push_all_jira_issues, prefix='jiraform') - - form.fields['endpoints'].queryset = Endpoint.objects.filter(product__id=product_tab.product.id) - form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id) - - return render(request, - 'dojo/import_scan_results.html', - {'form': form, - 'product_tab': product_tab, - 'engagement_or_product': engagement_or_product, - 'custom_breadcrumb': custom_breadcrumb, - 'title': title, - 'cred_form': cred_form, - 'jform': jform, - 'scan_types': get_scan_types_sorted(), - }) - - def post(self, request, eid=None, pid=None): - environment = Development_Environment.objects.filter(name='Development').first() # If 'Development' was removed, None is used - engagement = None - form = ImportScanForm(initial={'environment': environment}) - cred_form = CredMappingForm() - finding_count = 0 - jform = None + def handle_request( + self, + request: HttpRequest, + engagement_id: int = None, + product_id: int = None, + ) -> Tuple[HttpRequest, dict]: + """ + Process the common behaviors between request types, and then return + the request and context dict back to be rendered + """ user = request.user + # Get the development environment + environment = self.get_development_environment() + # Get the product or engagement from the path parameters + engagement, product, 
engagement_or_product = self.get_engagement_or_product( + user, + engagement_id=engagement_id, + product_id=product_id, + ) + # Get the product tab and any additional custom breadcrumbs + product_tab, custom_breadcrumb = self.get_product_tab(product, engagement) + # Get the import form with some initial data in place + form = self.get_form( + request, + environment=environment, + endpoints=Endpoint.objects.filter(product__id=product_tab.product.id), + api_scan_configuration=Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id), + ) + # Get the credential mapping form + cred_form = self.get_credential_form(request, engagement) + # Get the jira form + jira_form, push_all_jira_issues = self.get_jira_form(request, engagement_or_product) + # Return the request and the context + return request, { + "user": user, + "lead": user, + "form": form, + "environment": environment, + "product_tab": product_tab, + "product": product, + "engagement": engagement, + "engagement_or_product": engagement_or_product, + "custom_breadcrumb": custom_breadcrumb, + "title": "Import Scan Results", + "cred_form": cred_form, + "jform": jira_form, + "scan_types": get_scan_types_sorted(), + "push_all_jira_issues": push_all_jira_issues, + } + + def validate_forms( + self, + context: dict, + ) -> bool: + """ + Validates each of the forms to ensure all errors from the form + level are bubbled up to the user first before we process too much + """ + form_validation_list = [] + if context.get("form") is not None: + form_validation_list.append(context.get("form").is_valid()) + if context.get("jform") is not None: + form_validation_list.append(context.get("jform").is_valid()) + if context.get("cred_form") is not None: + form_validation_list.append(context.get("cred_form").is_valid()) + return all(form_validation_list) + + def create_engagement( + self, + context: dict, + ) -> Engagement: + """ + Create an engagement if the import was triggered from the product level, + otherwise, return the existing engagement instead + """ + # Make sure an engagement does not exist already + engagement = context.get("engagement") + if engagement is None: + engagement = Engagement.objects.create( + name="AdHoc Import - " + strftime("%a, %d %b %Y %X", timezone.now().timetuple()), + threat_model=False, + api_test=False, + pen_test=False, + check_list=False, + active=True, + target_start=timezone.now().date(), + target_end=timezone.now().date(), + product=context.get("product"), + status='In Progress', + version=context.get("version"), + branch_tag=context.get("branch_tag"), + build_id=context.get("build_id"), + commit_hash=context.get("commit_hash"), + ) + # Update the engagement in the context + context["engagement"] = engagement + # Return the engagement + return engagement + + def import_findings( + self, + context: dict, + ) -> str | None: + """ + Attempt to import with all the supplied information + """ + try: + importer_client = DefaultImporter() + context["test"], _, finding_count, closed_finding_count, _, _, _ = importer_client.process_scan( + **context, + ) + # Add a message to the view for the user to see the results + add_success_message_to_response(importer_client.construct_imported_message( + context.get("scan_type"), + Test_Import.IMPORT_TYPE, + finding_count=finding_count, + closed_finding_count=closed_finding_count, + close_old_findings=context.get("close_old_findings"), + )) + except Exception as e: + logger.exception(e) + return f"An exception error occurred during the report import: {e}" + return None 
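# Illustrative sketch (not part of the patch): how the pieces introduced by this PR
# compose when a report is imported programmatically, mirroring import_findings()
# above and ImportScanSerializer.save(). The class and method names come from the
# code added in this diff; the contents of the context dict are hypothetical
# placeholders assembled the way handle_request()/process_form() assemble them.
from dojo.importers.auto_create_context import AutoCreateContextManager
from dojo.importers.default_importer import DefaultImporter


def import_report(context: dict):
    # Resolve (or auto-create) the engagement the findings will be imported into
    auto_create = AutoCreateContextManager()
    auto_create.process_import_meta_data_from_dict(context)
    context["engagement"] = auto_create.get_or_create_engagement(**context)
    # Run the import; process_scan() returns a 7-tuple, unpacked here the same
    # way import_findings() above unpacks it
    test, _, finding_count, closed_finding_count, _, _, _ = DefaultImporter().process_scan(**context)
    return test, finding_count, closed_finding_count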
+ + def process_form( + self, + request: HttpRequest, + form: ImportScanForm, + context: dict, + ) -> str | None: + """ + Process the form and manipulate the input in any way that is appropriate + """ + # Update the running context dict with cleaned form input + context.update({ + "scan": request.FILES.get("file", None), + "scan_date": form.cleaned_data.get("scan_date"), + "minimum_severity": form.cleaned_data.get("minimum_severity"), + "active": None, + "verified": None, + "scan_type": request.POST.get("scan_type"), + "tags": form.cleaned_data.get("tags"), + "version": form.cleaned_data.get("version"), + "branch_tag": form.cleaned_data.get("branch_tag", None), + "build_id": form.cleaned_data.get("build_id", None), + "commit_hash": form.cleaned_data.get("commit_hash", None), + "api_scan_configuration": form.cleaned_data.get("api_scan_configuration", None), + "service": form.cleaned_data.get("service", None), + "close_old_findings": form.cleaned_data.get("close_old_findings", None), + "apply_tags_to_findings": form.cleaned_data.get("apply_tags_to_findings", False), + "apply_tags_to_endpoints": form.cleaned_data.get("apply_tags_to_endpoints", False), + "close_old_findings_product_scope": form.cleaned_data.get("close_old_findings_product_scope", None), + "group_by": form.cleaned_data.get("group_by", None), + "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings"), + "environment": self.get_development_environment(environment_name=form.cleaned_data.get("environment")), + }) + # Create the engagement if necessary + self.create_engagement(context) + # close_old_findings_product_scope is a modifier of close_old_findings. + # If it is selected, close_old_findings should also be selected. + if close_old_findings_product_scope := form.cleaned_data.get('close_old_findings_product_scope', None): + context["close_old_findings_product_scope"] = close_old_findings_product_scope + context["close_old_findings"] = True + # Save newly added endpoints + added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, context.get("engagement").product) + endpoints_from_form = list(form.cleaned_data['endpoints']) + context["endpoints_to_add"] = endpoints_from_form + added_endpoints + # Override the form values of active and verified + if activeChoice := form.cleaned_data.get('active', None): + if activeChoice == 'force_to_true': + context["active"] = True + elif activeChoice == 'force_to_false': + context["active"] = False + if verifiedChoice := form.cleaned_data.get('verified', None): + if verifiedChoice == 'force_to_true': + context["verified"] = True + elif verifiedChoice == 'force_to_false': + context["verified"] = False + return None + + def process_jira_form( + self, + request: HttpRequest, + form: JIRAImportScanForm, + context: dict, + ) -> str | None: + """ + Process the jira form by first making sure one was supplied + and then setting any values supplied by the user. 
An error + may be returned and will be bubbled up in the form of a message + """ + # Determine if push all issues is enabled + push_all_jira_issues = context.get("push_all_jira_issues", False) + context["push_to_jira"] = push_all_jira_issues or (form and form.cleaned_data.get("push_to_jira")) + return None + + def process_credentials_form( + self, + request: HttpRequest, + form: CredMappingForm, + context: dict, + ) -> str | None: + """ + Process the credentials form by creating + """ + if cred_user := form.cleaned_data['cred_user']: + # Select the credential mapping object from the selected list and only allow if the credential is associated with the product + cred_user = Cred_Mapping.objects.filter( + pk=cred_user.id, + engagement=context.get("engagement") + ).first() + # Create the new credential mapping object + new_cred_mapping = form.save(commit=False) + new_cred_mapping.test = context.get("test") + new_cred_mapping.cred_id = cred_user.cred_id + new_cred_mapping.save() + # update the context + context["cred_user"] = cred_user + return None + + def success_redirect( + self, + context: dict, + ) -> HttpResponseRedirect: + """ + Redirect the user to a place that indicates a successful import + """ + return HttpResponseRedirect(reverse("view_test", args=(context.get("test").id, ))) + + def failure_redirect( + self, + context: dict, + ) -> HttpResponseRedirect: + """ + Redirect the user to a place that indicates a failed import + """ + return HttpResponseRedirect(reverse( + "import_scan_results", + args=(context.get("engagement", context.get("product")).id, ), + )) + + def get( + self, + request: HttpRequest, + engagement_id: int = None, + product_id: int = None, + ) -> HttpResponse: + """ + Process GET requests for the Import View + """ + # process the request and path parameters + request, context = self.handle_request( + request, + engagement_id=engagement_id, + product_id=product_id, + ) + # Render the form + return render(request, self.get_template(), context) - if eid: - engagement = get_object_or_404(Engagement, id=eid) - engagement_or_product = engagement - cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter(engagement=engagement).order_by('cred_id') - elif pid: - product = get_object_or_404(Product, id=pid) - engagement_or_product = product - else: - msg = 'Either Engagement or Product has to be provided' - raise Exception(msg) - - user_has_permission_or_403(user, engagement_or_product, Permissions.Import_Scan_Result) - - push_all_jira_issues = jira_helper.is_push_all_issues(engagement_or_product) - form = ImportScanForm(request.POST, request.FILES) - cred_form = CredMappingForm(request.POST) - cred_form.fields["cred_user"].queryset = Cred_Mapping.objects.filter( - engagement=engagement).order_by('cred_id') - - if jira_helper.get_jira_project(engagement_or_product): - jform = JIRAImportScanForm(request.POST, push_all=push_all_jira_issues, prefix='jiraform') - logger.debug('jform valid: %s', jform.is_valid()) - logger.debug('jform errors: %s', jform.errors) - - if form.is_valid() and (jform is None or jform.is_valid()): - scan = request.FILES.get('file', None) - scan_date = form.cleaned_data['scan_date'] - minimum_severity = form.cleaned_data['minimum_severity'] - activeChoice = form.cleaned_data.get('active', None) - verifiedChoice = form.cleaned_data.get('verified', None) - scan_type = request.POST['scan_type'] - tags = form.cleaned_data['tags'] - version = form.cleaned_data['version'] - branch_tag = form.cleaned_data.get('branch_tag', None) - build_id = 
form.cleaned_data.get('build_id', None) - commit_hash = form.cleaned_data.get('commit_hash', None) - api_scan_configuration = form.cleaned_data.get('api_scan_configuration', None) - service = form.cleaned_data.get('service', None) - close_old_findings = form.cleaned_data.get('close_old_findings', None) - apply_tags_to_findings = form.cleaned_data.get('apply_tags_to_findings', False) - apply_tags_to_endpoints = form.cleaned_data.get('apply_tags_to_endpoints', False) - # close_old_findings_prodct_scope is a modifier of close_old_findings. - # If it is selected, close_old_findings should also be selected. - close_old_findings_product_scope = form.cleaned_data.get('close_old_findings_product_scope', None) - if close_old_findings_product_scope: - close_old_findings = True - # Will save in the provided environment or in the `Development` one if absent - environment_id = request.POST.get('environment', 'Development') - environment = Development_Environment.objects.get(id=environment_id) - - group_by = form.cleaned_data.get('group_by', None) - create_finding_groups_for_all_findings = form.cleaned_data['create_finding_groups_for_all_findings'] - - # TODO move to form validation? - if scan and is_scan_file_too_large(scan): - messages.add_message(request, - messages.ERROR, - f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB", - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('import_scan_results', args=(engagement,))) - - # Allows for a test to be imported with an engagement created on the fly - if engagement is None: - engagement = Engagement() - engagement.name = "AdHoc Import - " + strftime("%a, %d %b %Y %X", timezone.now().timetuple()) - engagement.threat_model = False - engagement.api_test = False - engagement.pen_test = False - engagement.check_list = False - engagement.target_start = timezone.now().date() - engagement.target_end = timezone.now().date() - engagement.product = product - engagement.active = True - engagement.status = 'In Progress' - engagement.version = version - engagement.branch_tag = branch_tag - engagement.build_id = build_id - engagement.commit_hash = commit_hash - engagement.save() - - # can't use helper as when push_all_jira_issues is True, the checkbox gets disabled and is always false - # push_to_jira = jira_helper.is_push_to_jira(new_finding, jform.cleaned_data.get('push_to_jira')) - push_to_jira = push_all_jira_issues or (jform and jform.cleaned_data.get('push_to_jira')) - error = False - - # Save newly added endpoints - added_endpoints = save_endpoints_to_add(form.endpoints_to_add_list, engagement.product) - - active = None - if activeChoice: - if activeChoice == 'force_to_true': - active = True - elif activeChoice == 'force_to_false': - active = False - verified = None - if verifiedChoice: - if verifiedChoice == 'force_to_true': - verified = True - elif verifiedChoice == 'force_to_false': - verified = False - - try: - importer = Importer() - test, finding_count, closed_finding_count, _ = importer.import_scan(scan, scan_type, engagement, user, environment, active=active, verified=verified, tags=tags, - minimum_severity=minimum_severity, endpoints_to_add=list(form.cleaned_data['endpoints']) + added_endpoints, scan_date=scan_date, - version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, push_to_jira=push_to_jira, - close_old_findings=close_old_findings, close_old_findings_product_scope=close_old_findings_product_scope, group_by=group_by, api_scan_configuration=api_scan_configuration, 
service=service, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, apply_tags_to_findings=apply_tags_to_findings, apply_tags_to_endpoints=apply_tags_to_endpoints) - - message = f'{scan_type} processed a total of {finding_count} findings' - - if close_old_findings: - message = message + ' and closed %d findings' % (closed_finding_count) - - message = message + "." - - add_success_message_to_response(message) - - except Exception as e: - logger.exception(e) - add_error_message_to_response(f'An exception error occurred during the report import:{str(e)}') - error = True - - # Save the credential to the test - if cred_form.is_valid(): - if cred_form.cleaned_data['cred_user']: - # Select the credential mapping object from the selected list and only allow if the credential is associated with the product - cred_user = Cred_Mapping.objects.filter( - pk=cred_form.cleaned_data['cred_user'].id, - engagement=eid).first() - - new_f = cred_form.save(commit=False) - new_f.test = test - new_f.cred_id = cred_user.cred_id - new_f.save() - - if not error: - return HttpResponseRedirect( - reverse('view_test', args=(test.id, ))) - - return HttpResponseRedirect(reverse('import_scan_results', args=(engagement.id, ))) + def post( + self, + request: HttpRequest, + engagement_id: int = None, + product_id: int = None, + ) -> HttpResponse: + """ + Process POST requests for the Import View + """ + # process the request and path parameters + request, context = self.handle_request( + request, + engagement_id=engagement_id, + product_id=product_id, + ) + # ensure all three forms are valid first before moving forward + if not self.validate_forms(context): + return self.failure_redirect(context) + # Process the jira form if it is present + if form_error := self.process_jira_form(request, context.get("jform"), context): + add_error_message_to_response(form_error) + return self.failure_redirect(context) + # Process the import form + if form_error := self.process_form(request, context.get("form"), context): + add_error_message_to_response(form_error) + return self.failure_redirect(context) + # Kick off the import process + if import_error := self.import_findings(context): + add_error_message_to_response(import_error) + return self.failure_redirect(context) + # Process the credential form + if form_error := self.process_credentials_form(request, context.get("cred_form"), context): + add_error_message_to_response(form_error) + return self.failure_redirect(context) + # Otherwise return the user back to the engagement (if present) or the product + return self.success_redirect(context) @user_is_authorized(Engagement, Permissions.Engagement_Edit, 'eid') @@ -1101,7 +1323,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): risk_acceptance_form = EditRiskAcceptanceForm(request.POST, request.FILES, instance=risk_acceptance) errors = errors or not risk_acceptance_form.is_valid() if not errors: - logger.debug('path: %s', risk_acceptance_form.cleaned_data['path']) + logger.debug(f"path: {risk_acceptance_form.cleaned_data['path']}") risk_acceptance_form.save() @@ -1187,8 +1409,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False): messages.add_message( request, messages.SUCCESS, - 'Finding%s added successfully.' 
% ('s' if len(findings) > 1 - else ''), + f"Finding{'s' if len(findings) > 1 else ''} added successfully.", extra_tags='alert-success') if not errors: logger.debug('redirecting to return_url') @@ -1350,13 +1571,17 @@ def engagement_ics(request, eid): eng = get_object_or_404(Engagement, id=eid) start_date = datetime.combine(eng.target_start, datetime.min.time()) end_date = datetime.combine(eng.target_end, datetime.max.time()) - uid = "dojo_eng_%d_%d" % (eng.id, eng.product.id) + uid = f"dojo_eng_{eng.id}_{eng.product.id}" cal = get_cal_event( - start_date, end_date, + start_date, + end_date, f"Engagement: {eng.name} ({eng.product.name})", - "Set aside for engagement {}, on product {}. Additional detail can be found at {}".format(eng.name, eng.product.name, - request.build_absolute_uri( - reverse("view_engagement", args=(eng.id, )))), uid) + ( + f"Set aside for engagement {eng.name}, on product {eng.product.name}. " + f"Additional detail can be found at {request.build_absolute_uri(reverse('view_engagement', args=(eng.id, )))}" + ), + uid + ) output = cal.serialize() response = HttpResponse(content=output) response['Content-Type'] = 'text/calendar' diff --git a/dojo/forms.py b/dojo/forms.py index c28b4c298b8..0bf5429f717 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -101,7 +101,13 @@ from dojo.tools.factory import get_choices_sorted, requires_file, requires_tool_type from dojo.user.queries import get_authorized_users, get_authorized_users_for_product_and_product_type from dojo.user.utils import get_configuration_permissions_fields -from dojo.utils import get_password_requirements_string, get_product, get_system_setting, is_finding_groups_enabled +from dojo.utils import ( + get_password_requirements_string, + get_product, + get_system_setting, + is_finding_groups_enabled, + is_scan_file_too_large, +) from dojo.widgets import TableCheckboxWidget logger = logging.getLogger(__name__) @@ -548,10 +554,18 @@ class ImportScanForm(forms.Form): create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True) def __init__(self, *args, **kwargs): + environment = kwargs.pop("environment", None) + endpoints = kwargs.pop("endpoints", None) + api_scan_configuration = kwargs.pop("api_scan_configuration", None) super().__init__(*args, **kwargs) self.fields['active'].initial = self.active_verified_choices[0] self.fields['verified'].initial = self.active_verified_choices[0] - + if environment: + self.fields['environment'].initial = environment + if endpoints: + self.fields['endpoints'].queryset = endpoints + if api_scan_configuration: + self.fields['api_scan_configuration'].queryset = api_scan_configuration # couldn't find a cleaner way to add empty default if 'group_by' in self.fields: choices = self.fields['group_by'].choices @@ -564,10 +578,13 @@ def clean(self): cleaned_data = super().clean() scan_type = cleaned_data.get("scan_type") file = cleaned_data.get("file") + tool_type = requires_tool_type(scan_type) if requires_file(scan_type) and not file: - msg = f'Uploading a Report File is required for {scan_type}' + msg = _(f"Uploading a Report File is required for {scan_type}") + raise forms.ValidationError(msg) + if file and is_scan_file_too_large(file): + msg = _(f"Report file is too large. 
Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB") raise forms.ValidationError(msg) - tool_type = requires_tool_type(scan_type) if tool_type: api_scan_configuration = cleaned_data.get('api_scan_configuration') if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name: @@ -649,6 +666,9 @@ class ReImportScanForm(forms.Form): create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True) def __init__(self, *args, test=None, **kwargs): + endpoints = kwargs.pop("endpoints", None) + api_scan_configuration = kwargs.pop("api_scan_configuration", None) + api_scan_configuration_queryset = kwargs.pop("api_scan_configuration_queryset", None) super().__init__(*args, **kwargs) self.fields['active'].initial = self.active_verified_choices[0] self.fields['verified'].initial = self.active_verified_choices[0] @@ -656,7 +676,12 @@ def __init__(self, *args, test=None, **kwargs): if test: self.scan_type = test.test_type.name self.fields['tags'].initial = test.tags.all() - + if endpoints: + self.fields["endpoints"].queryset = endpoints + if api_scan_configuration: + self.initial["api_scan_configuration"] = api_scan_configuration + if api_scan_configuration_queryset: + self.fields["api_scan_configuration"].queryset = api_scan_configuration_queryset # couldn't find a cleaner way to add empty default if 'group_by' in self.fields: choices = self.fields['group_by'].choices @@ -667,7 +692,10 @@ def clean(self): cleaned_data = super().clean() file = cleaned_data.get("file") if requires_file(self.scan_type) and not file: - msg = "Uploading a report file is required for re-uploading findings." + msg = _("Uploading a report file is required for re-uploading findings.") + raise forms.ValidationError(msg) + if file and is_scan_file_too_large(file): + msg = _(f"Report file is too large. 
Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB") raise forms.ValidationError(msg) tool_type = requires_tool_type(self.scan_type) if tool_type: @@ -2645,14 +2673,23 @@ def clean(self): class CredMappingForm(forms.ModelForm): - cred_user = forms.ModelChoiceField(queryset=Cred_Mapping.objects.all().select_related('cred_id'), required=False, - label='Select a Credential') + cred_user = forms.ModelChoiceField( + queryset=Cred_Mapping.objects.all().select_related('cred_id'), + required=False, + label='Select a Credential', + ) class Meta: model = Cred_Mapping fields = ['cred_user'] exclude = ['product', 'finding', 'engagement', 'test', 'url', 'is_authn_provider'] + def __init__(self, *args, **kwargs): + cred_user_queryset = kwargs.pop("cred_user_queryset", None) + super().__init__(*args, **kwargs) + if cred_user_queryset is not None: + self.fields["cred_user"].queryset = cred_user_queryset + class CredMappingFormProd(forms.ModelForm): class Meta: diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py new file mode 100644 index 00000000000..c0c70f72b98 --- /dev/null +++ b/dojo/importers/auto_create_context.py @@ -0,0 +1,353 @@ +import logging +from datetime import datetime, timedelta +from typing import Any + +from crum import get_current_user +from django.http.request import QueryDict +from django.utils import timezone + +from dojo.models import ( + Engagement, + Product, + Product_Member, + Product_Type, + Product_Type_Member, + Role, + Test, +) +from dojo.utils import get_last_object_or_none, get_object_or_none + +logger = logging.getLogger(__name__) +deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") + + +class AutoCreateContextManager: + """ + Management of safely fetching and creating resources used in the import + and reimport processes. Resources managed by this class are: + - Product Types + - Products + - Engagements + - Tests + """ + """ + =================================== + ----------- Validators ------------ + =================================== + """ + def process_object_fields( + self, + key: str, + label: str, + object_type: Any, + data: dict, + **kwargs: dict, + ) -> None: + """ + Process the object fields such as product, engagement, and + test such that passing the whole object, or just the ID + will suffice + """ + if object_id := data.get(key, None): + # Convert to just the ID if the whole object as passed + if isinstance(object_id, object_type): + object_id = object_id.id + # Convert to a string if needed + if isinstance(object_id, list) and len(object_id) > 0: + object_id = object_id[0] + # Ensure the ID is an integer, not a string + elif isinstance(object_id, str) and not object_id.isdigit(): + msg = f"{key} must be an integer" + raise ValueError(msg) + # Update the "test" entry in the dict with the ID + data[label] = object_id + + def process_object_name( + self, + key: str, + data: dict, + **kwargs: dict, + ) -> None: + """ + Process the object names by ensuring that the inputs + are a string and not a list of strings + """ + if object_name := data.get(key): + # Convert to a string if needed + if isinstance(object_name, list) and len(object_name) > 0: + data[key] = object_name[0] + + def process_import_meta_data_from_dict( + self, + data: dict, + **kwargs: dict, + ) -> None: + """ + Ensure that the inputs supplied for test and engagement can be + derive into am integer ID. 
This can happen if a full Test or + Engagement is supplied, or if the input is an integer ID to + start with + """ + # Validate the test artifact + self.process_object_fields("test", "test_id", Test, data) + # Validate the engagement artifact + self.process_object_fields("engagement", "engagement_id", Engagement, data) + # Validate the product artifact + self.process_object_fields("product", "product_id", Product, data) + # Validate the product_type_name + self.process_object_name("product_type_name", data) + # Validate the product_name + self.process_object_name("product_name", data) + # Validate the engagement_name + self.process_object_name("engagement_name", data) + # Validate the test_title + self.process_object_name("test_title", data) + + """ + =================================== + ------------ Fetchers ------------- + =================================== + """ + def get_target_product_type_if_exists( + self, + product_type_name: str = None, + **kwargs: dict, + ) -> Product_Type | None: + """ + Query for a product type that matches the name `product_type_name`. + + If a match is not found, return None + """ + # Look for an existing object + if product_type_name: + return get_object_or_none(Product_Type, name=product_type_name) + return None + + def get_target_product_if_exists( + self, + product_name: str = None, + product_type_name: str = None, + **kwargs: dict, + ) -> Product | None: + """ + Query for a product that matches the name `product_name`. Some + extra verification is also administered to ensure the + `product_type_name` matches the one on the fetched product + + If a match is not found, return None + """ + # Look for an existing object + if product_name and (product := get_object_or_none(Product, name=product_name)): + # product type name must match if provided + if product_type_name and product.prod_type.name != product_type_name: + msg = ( + "The fetched product has a conflict with the supplied product type name: " + f"existing product type name - {product.prod_type.name} vs " + f"supplied product type name - {product_type_name}" + ) + raise ValueError(msg) + # Return the product + return product + return None + + def get_target_product_by_id_if_exists( + self, + product_id: int = 0, + **kwargs: dict, + ) -> Product | None: + """ + Query for a product matching by ID + + If a match is not found, return None + """ + return get_object_or_none(Product, pk=product_id) + + def get_target_engagement_if_exists( + self, + engagement_id: int = 0, + engagement_name: str = None, + product: Product = None, + **kwargs: dict, + ) -> Engagement | None: + """ + Query for an engagement matching by ID. If a match is not found, + and a product is supplied, return the last engagement created on + the product by name + + If a match is not found, and a product is not supplied, return None + """ + if engagement := get_object_or_none(Engagement, pk=engagement_id): + logger.debug('Using existing engagement by id: %s', engagement_id) + return engagement + # if there's no product, then for sure there's no engagement either + if product is None: + return None + # engagement name is not unique unfortunately + return get_last_object_or_none(Engagement, product=product, name=engagement_name) + + def get_target_test_if_exists( + self, + test_id: int = 0, + test_title: str = None, + scan_type: str = None, + engagement: Engagement = None, + **kwargs: dict, + ) -> Test | None: + """ + Retrieves the target test to reimport. This can be as simple as looking up the test via the `test_id` parameter. 
+ If there is no `test_id` provided, we lookup the latest test inside the provided engagement that satisfies + the provided scan_type and test_title. + """ + if test := get_object_or_none(Test, pk=test_id): + logger.debug('Using existing Test by id: %s', test_id) + return test + # If the engagement is not supplied, we cannot do anything + if not engagement: + return None + # Check for a custom test title + if test_title: + return get_last_object_or_none(Test, engagement=engagement, title=test_title, scan_type=scan_type) + # Otherwise use the last test by scan type + return get_last_object_or_none(Test, engagement=engagement, scan_type=scan_type) + + """ + =================================== + ------------ Creators ------------- + =================================== + """ + def get_or_create_product_type( + self, + product_type_name: str = None, + **kwargs: dict, + ) -> Product_Type: + """ + Fetches a product type by name if one already exists. If not, + a new product type will be created with the current user being + added as product type member + """ + # Look for an existing object + if product_type := self.get_target_product_type_if_exists(product_type_name=product_type_name): + return product_type + else: + product_type, created = Product_Type.objects.get_or_create(name=product_type_name) + if created: + Product_Type_Member.objects.create( + user=get_current_user(), + product_type=product_type, + role=Role.objects.get(is_owner=True), + ) + return product_type + + def get_or_create_product( + self, + product_name: str = None, + product_type_name: str = None, + auto_create_context: bool = False, + **kwargs: dict, + ) -> Product: + """ + Fetches a product by name if it exists. When `auto_create_context` is + enabled the product will be created with the current user being added + as product member + """ + # try to find the product (within the provided product_type) + if product := self.get_target_product_if_exists(product_name, product_type_name): + return product + # not found .... create it + if not auto_create_context: + msg = "auto_create_context not True, unable to create non-existing product" + raise ValueError(msg) + # Look for a product type first + product_type = self.get_or_create_product_type(product_type_name=product_type_name) + # Create the product + product, created = Product.objects.get_or_create(name=product_name, prod_type=product_type, description=product_name) + if created: + Product_Member.objects.create( + user=get_current_user(), + product=product, + role=Role.objects.get(is_owner=True), + ) + + return product + + def get_or_create_engagement( + self, + engagement_id: int = 0, + engagement_name: str = None, + product_name: str = None, + product_type_name: str = None, + auto_create_context: bool = False, + deduplication_on_engagement: bool = False, + source_code_management_uri: str = None, + target_end: datetime = None, + **kwargs: dict, + ) -> Engagement: + """ + Fetches an engagement by name or ID if one already exists. + """ + # try to find the engagement (and product) + product = self.get_target_product_if_exists( + product_name=product_name, + product_type_name=product_type_name, + ) + engagement = self.get_target_engagement_if_exists( + engagement_id=engagement_id, + engagement_name=engagement_name, + product=product + ) + # If we have an engagement, we cna just return it + if engagement: + return engagement + # not found .... 
create it + if not auto_create_context: + msg = "auto_create_context not True, unable to create non-existing engagement" + raise ValueError(msg) + # Get a product first + product = self.get_or_create_product( + product_name=product_name, + product_type_name=product_type_name, + auto_create_context=auto_create_context, + ) + # Get the target start date in order + target_start = timezone.now().date() + if (target_end is None) or (target_start > target_end): + target_end = (timezone.now() + timedelta(days=365)).date() + # Create the engagement + return Engagement.objects.create( + engagement_type="CI/CD", + name=engagement_name, + product=product, + lead=get_current_user(), + target_start=target_start, + target_end=target_end, + status="In Progress", + deduplication_on_engagement=deduplication_on_engagement, + source_code_management_uri=source_code_management_uri, + ) + + """ + =================================== + ------------ Utilities ------------ + =================================== + """ + def convert_querydict_to_dict( + self, + query_dict_data: QueryDict, + ) -> dict: + """ + Creates a copy of a query dict, and then converts it + to a dict + """ + # First copy the query dict + copy = {} + # Iterate ovr the dict and extract the elements based + # on whether they are a single item, or a list + for key, value in query_dict_data.items(): + if value: + # Accommodate lists + if isinstance(value, list): + copy[key] = value if len(value) > 1 else value[0] + else: + copy[key] = value + # Convert to a regular dict + return copy diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py new file mode 100644 index 00000000000..491450e1dec --- /dev/null +++ b/dojo/importers/base_importer.py @@ -0,0 +1,940 @@ +import base64 +import logging +from abc import ABC, abstractmethod +from datetime import datetime +from typing import List, Tuple + +from django.conf import settings +from django.core.exceptions import MultipleObjectsReturned, ValidationError +from django.core.files.base import ContentFile +from django.core.files.uploadedfile import TemporaryUploadedFile +from django.urls import reverse +from django.utils import timezone +from django.utils.timezone import make_aware + +import dojo.finding.helper as finding_helper +from dojo.celery import app +from dojo.decorators import dojo_async_task +from dojo.endpoint.utils import endpoint_get_or_create +from dojo.importers.endpoint_manager import DefaultReImporterEndpointManager +from dojo.models import ( + # Import History States + IMPORT_CLOSED_FINDING, + IMPORT_CREATED_FINDING, + IMPORT_REACTIVATED_FINDING, + IMPORT_UNTOUCHED_FINDING, + # Finding Severities + SEVERITIES, + BurpRawRequestResponse, + Dojo_User, + Endpoint, + Endpoint_Status, + # models + Engagement, + FileUpload, + Finding, + Test, + Test_Import, + Test_Import_Finding_Action, + Test_Type, + Tool_Configuration, + Vulnerability_Id, +) +from dojo.tools.factory import get_parser +from dojo.utils import get_current_user, is_finding_groups_enabled, max_safe + +logger = logging.getLogger(__name__) + + +class Parser: + """ + This class is used as an alias to a given parser + and is purely for the sake of type hinting + """ + + def get_findings(scan_type: str) -> List[Finding]: + """ + Stub function to make the hinting happier. The actual class + is loosely obligated to have this function defined. 
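In practice, the parsers resolved by get_parser() expose get_findings(file, test), and parsers for API-driven ("dynamic") scan types additionally expose get_tests(scan_type, file); see parse_findings() further below.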
+ + TODO This should be enforced in the future, but here is not the place + TODO once this enforced, this stub class should be removed + """ + pass + + +class BaseImporter(ABC, DefaultReImporterEndpointManager): + """ + A collection of utilities used by various importers within DefectDojo. + Some of these commonalities may be fully used by children importers, + or even extended + """ + def __init__(self, *args: list, **kwargs: dict): + """ + Initializing or constructing this parent class is prohibited + and will raise a `NotImplemented` exception + """ + self.new_or_init(*args, **kwargs) + + def __new__(self, *args: list, **kwargs: dict): + """ + Initializing or constructing this parent class is prohibited + and will raise a `NotImplemented` exception + """ + instance = super().__new__(self, *args, **kwargs) + instance.new_or_init(*args, **kwargs) + return instance + + def new_or_init(self, *args: list, **kwargs: dict): + """ + Ensures that that the parent BaseImporter class is not + instantiated directly + """ + self.check_child_implementation_exception() + + def check_child_implementation_exception(self): + """ + This is a helper function for a quick check to ensure that the methods of the + BaseImporter are not being used directly + """ + if isinstance(self, BaseImporter): + msg = ( + "The BaseImporter class must not be used directly. " + "Please use a class that extends the BaseImporter class." + ) + raise NotImplementedError(msg) + + @abstractmethod + def process_scan( + self, + scan: TemporaryUploadedFile, + scan_type: str, + engagement: Engagement = None, + test: Test = None, + user: Dojo_User = None, + parsed_findings: List[Finding] = None, + **kwargs: dict, + ) -> Tuple[Test, int, int, int, int, int, Test_Import]: + """ + A helper method that executes the entire import process in a single method. + This includes parsing the file, processing the findings, and returning the + statistics from the import + """ + self.check_child_implementation_exception() + + @abstractmethod + @dojo_async_task + @app.task(ignore_result=False) + def process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> List[Finding]: + """ + Make the conversion from unsaved Findings in memory to Findings that are saved in the + database with and ID associated with them. This processor will also save any associated + objects such as endpoints, vulnerability IDs, and request/response pairs + """ + self.check_child_implementation_exception() + + @abstractmethod + def close_old_findings( + self, + test: Test, + findings: List[Finding], + user: Dojo_User, + scan_date: datetime = timezone.now(), + **kwargs: dict, + ) -> List[Finding]: + """ + Identify any findings that have been imported before, + but are no longer present in later reports so that + we can automatically close them as "implied mitigated" + + This function will vary by importer, so it is marked as + abstract with a prohibitive exception raised if the + method is attempted to to be used by the BaseImporter class + """ + self.check_child_implementation_exception() + + def get_parser( + self, + scan_type: str, + ) -> Parser: + """ + Returns the correct parser based on the the test type supplied. 
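For example, self.get_parser("ZAP Scan") would resolve to the ZAP report parser registered with the factory (the scan type name here is purely illustrative).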
If a test type + is supplied that does not have a parser created for it, an exception is raised + from the factory `get_parser` function + """ + return get_parser(scan_type) + + def process_scan_file( + self, + scan: TemporaryUploadedFile, + ) -> TemporaryUploadedFile: + """ + Make any preprocessing actions or changes on the report before submitting + to the parser to generate findings from the file + """ + return scan + + def parse_findings_static_test_type( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + test: Test = None, + **kwargs: dict, + ) -> List[Finding]: + """ + Parse the scan report submitted with the parser class and generate some findings + that are not saved to the database yet. This step is crucial in determining if + there are any errors in the parser before creating any new resources + """ + # Ensure that a test is present when calling this method as there are cases where + # the test will be created by this function in a child class + if test is None or not isinstance(test, Test): + msg = "A test must be supplied to parse the file" + raise ValidationError(msg) + try: + return parser.get_findings(scan, test) + except ValueError as e: + logger.warning(e) + raise ValidationError(e) + + def parse_dynamic_test_type_tests( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + **kwargs: dict, + ) -> List[Test]: + """ + Use the API configuration object to get the tests to be used by the parser + """ + try: + return parser.get_tests(scan_type, scan) + except ValueError as e: + logger.warning(e) + raise ValidationError(e) + + def parse_dynamic_test_type_findings_from_tests( + self, + tests: List[Test], + **kwargs: dict, + ) -> List[Finding]: + """ + currently we only support import one Test + so for parser that support multiple tests (like SARIF) + we aggregate all the findings into one uniq test + """ + parsed_findings = [] + for test_raw in tests: + parsed_findings.extend(test_raw.findings) + return parsed_findings + + def parse_findings_dynamic_test_type( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + **kwargs: dict, + ) -> List[Finding]: + """ + Use the API configuration object to get the tests to be used by the parser + to dump findings into + + This version of this function is intended to be extended by children classes + """ + tests = self.parse_dynamic_test_type_tests( + parser, + scan_type, + scan, + **kwargs, + ) + return self.parse_dynamic_test_type_findings_from_tests(tests, **kwargs) + + def parse_findings( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + test: Test = None, + **kwargs: dict, + ) -> List[Finding]: + """ + Determine how to parse the findings based on the presence of the + `get_tests` function on the parser object + """ + if hasattr(parser, 'get_tests'): + return self.parse_findings_dynamic_test_type( + parser, + scan_type, + scan, + **kwargs, + ) + else: + return self.parse_findings_static_test_type( + parser, + scan_type, + scan, + test=test, + **kwargs, + ) + + def determine_process_method( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> List[Finding]: + """ + Determines whether to process the scan iteratively, or in chunks, + based upon the ASYNC_FINDING_IMPORT setting + """ + if settings.ASYNC_FINDING_IMPORT: + return self.async_process_findings( + test, + parsed_findings, + user, + **kwargs, + ) + else: + return self.sync_process_findings( + test, + parsed_findings, + user, + **kwargs, + ) 
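    # A minimal usage sketch of the parsing helpers above, roughly as a concrete
    # importer would call them (`importer`, `scan_file`, `test` and `user` are
    # hypothetical placeholders rather than names defined in this module):
    #
    #     parser = importer.get_parser(scan_type)
    #     parsed_findings = importer.parse_findings(parser, scan_type, scan_file, test=test)
    #     new_findings = importer.determine_process_method(test, parsed_findings, user, **kwargs)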
+ + def update_test_meta( + self, + test: Test, + **kwargs: dict, + ) -> Test: + """ + Update the test with some values stored in the kwargs dict. The common + fields used today are `version`, `branch_tag`, `build_id`, and `commit_hash` + """ + # Add the extra fields to the test if they are specified here + if not (version := kwargs.get("version", "")).isspace(): + test.version = version + if not (branch_tag := kwargs.get("branch_tag", "")).isspace(): + test.branch_tag = branch_tag + if not (build_id := kwargs.get("build_id", "")).isspace(): + test.build_id = build_id + if not (commit_hash := kwargs.get("commit_hash", "")).isspace(): + test.commit_hash = commit_hash + + return test + + def update_timestamps( + self, + test: Test, + **kwargs: dict, + ) -> Test: + """ + Update the target end dates for tests as imports are occurring: + - Import + - Updates to the test target date are largely non impacting. + However, there is a possibility that the engagement is a CI/CD + engagement, so the target end should be updated + - Reimport + - Updates to the test target date are very important as we are + constantly reusing the same test over and over + - In the (likely) event the engagement is a CI/CD type, the target + end date should be updated as well + """ + # Make sure there is at least something in the scan date field + scan_date = kwargs.get("scan_date") + if scan_date is None: + scan_date = kwargs.get("now") + # Update the target end of the engagement if it is a CI/CD engagement + # If the supplied scan date is greater than the current configured + # target end date on the engagement + if test.engagement.engagement_type == 'CI/CD': + test.engagement.target_end = max_safe([scan_date.date(), test.engagement.target_end]) + # Set the target end date on the test in a similar fashion + max_test_start_date = max_safe([scan_date, test.target_end]) + # Quick check to make sure we have a datetime that is timezone aware + # so that we can suppress naive datetime warnings + if not max_test_start_date.tzinfo: + max_test_start_date = make_aware(max_test_start_date) + test.target_end = max_test_start_date + + return test + + def update_test_tags( + self, + test: Test, + tags: List[str], + ) -> None: + """ + Update the list of tags on the test if they are supplied + at import time + """ + # Make sure the list is not empty as we do not want to overwrite + # any existing tags + if tags is not None and len(tags) > 0: + test.tags = tags + # Save the test for changes to be applied + # TODO this may be a redundant save, and may be able to be pruned + test.save() + + def update_import_history( + self, + type: str, + test: Test, + new_findings: List[Finding] = [], + closed_findings: List[Finding] = [], + reactivated_findings: List[Finding] = [], + untouched_findings: List[Finding] = [], + **kwargs: dict, + ) -> Test_Import: + """ + Creates a record of the import or reimport operation that has occurred. 
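The record captures the key import settings (active, verified, minimum_severity, close_old_findings, push_to_jira, tags and any extra endpoints), the version/branch/build/commit metadata, and one Test_Import_Finding_Action per created, closed, reactivated or untouched finding.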
+ """ + # Quick fail check to determine if we even wanted this + if settings.TRACK_IMPORT_HISTORY is False: + return None + # Log the current state of what has occurred in case there could be + # deviation from what is displayed in the view + logger.debug( + f"new: {len(new_findings)} " + f"closed: {len(closed_findings)} " + f"reactivated: {len(reactivated_findings)} " + f"untouched: {len(untouched_findings)} " + ) + # Create a dictionary to stuff into the test import object + import_settings = {} + import_settings['active'] = kwargs.get("active") + import_settings['verified'] = kwargs.get("verified") + import_settings['minimum_severity'] = kwargs.get("minimum_severity") + import_settings['close_old_findings'] = kwargs.get("close_old_findings") + import_settings['push_to_jira'] = kwargs.get("push_to_jira") + import_settings['tags'] = kwargs.get("tags") + # Add the list of endpoints that were added exclusively at import time + if (endpoints_to_add := kwargs.get("endpoints_to_add")) and len(endpoints_to_add) > 0: + import_settings['endpoints'] = [str(endpoint) for endpoint in endpoints_to_add] + # Create the test import object + test_import = Test_Import.objects.create( + test=test, + import_settings=import_settings, + version=kwargs.get("version"), + branch_tag=kwargs.get("branch_tag"), + build_id=kwargs.get("build_id"), + commit_hash=kwargs.get("commit_hash"), + type=type, + ) + # Define all of the respective import finding actions for the test import object + test_import_finding_action_list = [] + for finding in closed_findings: + logger.debug(f"preparing Test_Import_Finding_Action for closed finding: {finding.id}") + test_import_finding_action_list.append(Test_Import_Finding_Action( + test_import=test_import, + finding=finding, + action=IMPORT_CLOSED_FINDING, + )) + for finding in new_findings: + logger.debug(f"preparing Test_Import_Finding_Action for created finding: {finding.id}") + test_import_finding_action_list.append(Test_Import_Finding_Action( + test_import=test_import, + finding=finding, + action=IMPORT_CREATED_FINDING, + )) + for finding in reactivated_findings: + logger.debug(f"preparing Test_Import_Finding_Action for reactivated finding: {finding.id}") + test_import_finding_action_list.append(Test_Import_Finding_Action( + test_import=test_import, + finding=finding, + action=IMPORT_REACTIVATED_FINDING, + )) + for finding in untouched_findings: + logger.debug(f"preparing Test_Import_Finding_Action for untouched finding: {finding.id}") + test_import_finding_action_list.append(Test_Import_Finding_Action( + test_import=test_import, + finding=finding, + action=IMPORT_UNTOUCHED_FINDING, + )) + # Bulk create all the defined objects + Test_Import_Finding_Action.objects.bulk_create(test_import_finding_action_list) + # Add any tags to the findings imported if necessary + if kwargs.get("apply_tags_to_findings", False) and (tags := kwargs.get("tags")): + for finding in test_import.findings_affected.all(): + for tag in tags: + finding.tags.add(tag) + # Add any tags to any endpoints of the findings imported if necessary + if kwargs.get("apply_tags_to_endpoints", False) and (tags := kwargs.get("tags")): + for finding in test_import.findings_affected.all(): + for endpoint in finding.endpoints.all(): + for tag in tags: + endpoint.tags.add(tag) + + return test_import + + def construct_imported_message( + self, + scan_type: str, + import_type: str, + finding_count: int = 0, + new_finding_count: int = 0, + closed_finding_count: int = 0, + reactivated_finding_count: int = 0, + 
untouched_finding_count: int = 0,
+        **kwargs: dict,
+    ) -> str:
+        """
+        Constructs a success message to be displayed on screen in the UI as a digest for the user.
+        This digest includes counts for the findings in the following statuses:
+        - Created: New findings that have not been created before
+        - Closed: Findings that were not detected in the report any longer, so the original was closed
+        - Reactivated: Findings that were once closed, but have reappeared in the report again
+        - Untouched: Findings that have not changed between now and the last import/reimport
+        """
+        # Only construct this message if there is any change in finding status
+        if finding_count > 0:
+            # Set the base message to indicate how many findings were parsed from the report
+            message = f"{scan_type} processed a total of {finding_count} findings"
+            if import_type == Test_Import.IMPORT_TYPE:
+                # Check for close old findings context to determine if more detail should be added
+                if kwargs.get("close_old_findings", False):
+                    message += f" and closed {closed_finding_count} findings"
+            if import_type == Test_Import.REIMPORT_TYPE:
+                # Add more details for any status changes recorded
+                if new_finding_count:
+                    message += f" created {new_finding_count} findings"
+                if closed_finding_count:
+                    message += f" closed {closed_finding_count} findings"
+                if reactivated_finding_count:
+                    message += f" reactivated {reactivated_finding_count} findings"
+                if untouched_finding_count:
+                    message += f" did not touch {untouched_finding_count} findings"
+            # Add a period at the end
+            message += "."
+        else:
+            # Set the message to convey that all findings processed are identical to the last time an import/reimport occurred
+            message = "No findings were added/updated/closed/reactivated as the findings in Defect Dojo are identical to those in the uploaded report."
+
+        return message
+
+    def chunk_objects(
+        self,
+        object_list: List[Finding | Endpoint],
+        chunk_size: int = settings.ASYNC_FINDING_IMPORT_CHUNK_SIZE,
+    ) -> List[List[Finding | Endpoint]]:
+        """
+        Split a single large list into a list of lists of size `chunk_size`.
+        For example
+        ```
+        >>> chunk_objects([A, B, C, D, E], 2)
+        >>> [[A, B], [C, D], [E]]
+        ```
+        """
+        # Break the list of parsed findings into "chunk_size" lists
+        chunk_list = [object_list[i:i + chunk_size] for i in range(0, len(object_list), chunk_size)]
+        logger.debug(f"IMPORT_SCAN: Split endpoints/findings into {len(chunk_list)} chunks of {chunk_size}")
+        return chunk_list
+
+    def chunk_endpoints_and_disperse(
+        self,
+        finding: Finding,
+        test: Test,
+        endpoints: List[Endpoint],
+        **kwargs: dict,
+    ) -> None:
+        """
+        Determines whether to asynchronously process endpoints on a finding or not. If so,
+        chunk up the endpoints to be dispersed into individual celery workers. Otherwise,
+        only use one worker
+        """
+        if settings.ASYNC_FINDING_IMPORT:
+            chunked_list = self.chunk_objects(endpoints)
+            # If there is only one chunk, then do not bother with async
+            if len(chunked_list) < 2:
+                self.add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True)
+                return []
+            # First kick off all the workers
+            for endpoints_list in chunked_list:
+                self.add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False)
+        else:
+            # Do not run this asynchronously or chunk the endpoints
+            self.add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True)
+        return None
+
+    def clean_unsaved_endpoints(
+        self,
+        endpoints: List[Endpoint]
+    ) -> None:
+        """
+        Clean endpoints that are supplied.
For any endpoints that fail this validation + process, raise a message that broken endpoints are being stored + """ + for endpoint in endpoints: + try: + endpoint.clean() + except ValidationError as e: + logger.warning(f"DefectDojo is storing broken endpoint because cleaning wasn't successful: {e}") + return None + + @dojo_async_task + @app.task() + def add_endpoints_to_unsaved_finding( + self, + finding: Finding, + test: Test, + endpoints: List[Endpoint], + **kwargs: dict, + ) -> None: + """ + Creates Endpoint objects for a single finding and creates the link via the endpoint status + """ + logger.debug(f"IMPORT_SCAN: Adding {len(endpoints)} endpoints to finding: {finding}") + self.clean_unsaved_endpoints(endpoints) + for endpoint in endpoints: + ep = None + try: + ep, _ = endpoint_get_or_create( + protocol=endpoint.protocol, + userinfo=endpoint.userinfo, + host=endpoint.host, + port=endpoint.port, + path=endpoint.path, + query=endpoint.query, + fragment=endpoint.fragment, + product=test.engagement.product) + except (MultipleObjectsReturned): + msg = ( + f"Endpoints in your database are broken. " + f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them." + ) + raise Exception(msg) + + Endpoint_Status.objects.get_or_create( + finding=finding, + endpoint=ep, + defaults={'date': finding.date}) + logger.debug(f"IMPORT_SCAN: {len(endpoints)} imported") + return None + + @dojo_async_task + @app.task() + def update_test_progress( + self, + test: Test, + **kwargs: dict, + ) -> None: + """ + This function is added to the async queue at the end of all finding import tasks + and after endpoint task, so this should only run after all the other ones are done. + It's purpose is to update the percent completion of the test to 100 percent + """ + test.percent_complete = 100 + test.save() + return None + + def get_or_create_test_type( + self, + test_type_name: str, + ) -> Test_Type: + """ + Ensures that a test type exists for a given test. This function can be called + in the following circumstances: + - Ensuring a test type exists for import + - Ensuring a test type exists for reimport with auto-create context + - Creating a new test type for dynamic test types such as generic and sarif + """ + test_type, created = Test_Type.objects.get_or_create(name=test_type_name) + if created: + logger.info(f"Created new Test_Type with name {test_type.name} because a report is being imported") + return test_type + + def add_timezone_scan_date_and_now( + self, + scan_date: datetime = None, + now: datetime = timezone.now(), + ) -> Tuple[datetime, datetime]: + """ + Add timezone information the scan date set at import time. In the event the + scan date is not supplied, fall back on the current time so that the test + can have a time for the target start and end + """ + # Add timezone information to the scan date if it is not already present + if scan_date is not None and not scan_date.tzinfo: + scan_date = timezone.make_aware(scan_date) + # Add timezone information to the current time if it is not already present + if now is None: + now = timezone.now() + elif not now.tzinfo: + now = timezone.make_aware(now) + + return scan_date, now + + def get_user_if_supplied( + self, + user: Dojo_User = None, + ) -> Dojo_User: + """ + Determines whether the user supplied at import time should + be used or not. 
If the user supplied is not actually a user,
+        the current authorized user will be fetched instead
+        """
+        if user is None:
+            return get_current_user()
+        return user
+
+    def verify_tool_configuration_from_test(
+        self,
+        api_scan_configuration: Tool_Configuration,
+        test: Test,
+    ) -> Test:
+        """
+        Verify that the Tool_Configuration supplied along with the
+        test is found on the product. If not, then raise a validation
+        error that will bubble back up to the user
+
+        If there is a case where the Tool_Configuration supplied to
+        this function does not match the one saved on the test, then
+        we will use the one supplied rather than the one on the test.
+        """
+        # Do not bother with any of the verification if a Tool_Configuration is not supplied
+        if api_scan_configuration is None:
+            # Return early as there is no value in validating further
+            return test
+        # Ensure that a test was supplied
+        elif not isinstance(test, Test):
+            msg = "A test must be supplied to verify the Tool_Configuration against"
+            raise ValidationError(msg)
+        # Validate that the test has a value
+        elif test is not None:
+            # Make sure the Tool_Configuration is connected to the product that the test is
+            if api_scan_configuration.product != test.engagement.product:
+                msg = "API Scan Configuration has to be from same product as the Test"
+                raise ValidationError(msg)
+            # If the Tool_Configuration on the test is not the same as the one supplied, then let's
+            # use the one that is supplied
+            if test.api_scan_configuration != api_scan_configuration:
+                test.api_scan_configuration = api_scan_configuration
+                test.save()
+            # Return the test here for an early exit
+            return test
+
+    def verify_tool_configuration_from_engagement(
+        self,
+        api_scan_configuration: Tool_Configuration,
+        engagement: Engagement,
+    ) -> Test | Engagement:
+        """
+        Verify that the Tool_Configuration supplied along with the
+        engagement is found on the product. If not, then raise a validation
+        error that will bubble back up to the user
+
+        If there is a case where the Tool_Configuration supplied to
+        this function does not match the one saved on the engagement, then
+        we will use the one supplied rather than the one on the engagement.
+ """ + # Do not bother with any of the verification if a Tool_Configuration is not supplied + if api_scan_configuration is None: + # Return early as there is no value in validating further + return engagement + # Ensure that an engagement was supplied + elif not isinstance(engagement, Engagement): + msg = "An engagement must be supplied to verify the Tool_Configuration against" + raise ValidationError(msg) + # Validate that the engagement has a value + elif engagement is not None and isinstance(engagement, Engagement): + # Make sure the Tool_Configuration is connected to the engagement that the test is + if api_scan_configuration.product != engagement.product: + msg = "API Scan Configuration has to be from same product as the Engagement" + raise ValidationError(msg) + # Return the test here for an early exit + return engagement + + def sanitize_severity( + self, + finding: Finding, + ) -> Finding: + """ + Sanitization on the finding severity such that only the following + severities may be set on the finding: + - Critical, High, Medium, Low, Info + There is a simple conversion process to convert any of the following + to a value of Info + - info, informational, Informational, None, none + If not, raise a ValidationError explaining as such + """ + # Checks around Informational/Info severity + starts_with_info = finding.severity.lower().startswith('info') + lower_none = finding.severity.lower() == 'none' + not_info = finding.severity != 'Info' + # Make the comparisons + if not_info and (starts_with_info or lower_none): + # Correct the severity + finding.severity = 'Info' + # Ensure the final severity is one of the supported options + if finding.severity not in SEVERITIES: + msg = ( + f"Finding severity \"{finding.severity}\" is not supported. " + f"Any of the following are supported: {SEVERITIES}." 
+ ) + raise ValidationError(msg) + # Set the numerical severity on the finding based on the cleaned severity + finding.numerical_severity = Finding.get_numerical_severity(finding.severity) + # Return the finding if all else is good + return finding + + def process_finding_groups( + self, + finding: Finding, + group_by: str, + group_names_to_findings_dict: dict, + ) -> None: + """ + Determines how to handle an incoming finding with respect to grouping + if finding groups are enabled, use the supplied grouping mechanism to + store a reference of how the finding should be grouped + """ + if is_finding_groups_enabled() and group_by: + # If finding groups are enabled, group all findings by group name + name = finding_helper.get_group_by_group_name(finding, group_by) + if name is not None: + if name in group_names_to_findings_dict: + group_names_to_findings_dict[name].append(finding) + else: + group_names_to_findings_dict[name] = [finding] + + def process_request_response_pairs( + self, + finding: Finding + ) -> None: + """ + Search the unsaved finding for the following attributes to determine + if the data can be saved to the finding + - unsaved_req_resp + - unsaved_request + - unsaved_response + Create BurpRawRequestResponse objects linked to the finding without + returning the finding afterward + """ + if len(unsaved_req_resp := getattr(finding, 'unsaved_req_resp', [])) > 0: + for req_resp in unsaved_req_resp: + burp_rr = BurpRawRequestResponse( + finding=finding, + burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")), + burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8"))) + burp_rr.clean() + burp_rr.save() + + unsaved_request = getattr(finding, "unsaved_request", None) + unsaved_response = getattr(finding, "unsaved_response", None) + if unsaved_request is not None and unsaved_response is not None: + burp_rr = BurpRawRequestResponse( + finding=finding, + burpRequestBase64=base64.b64encode(unsaved_request.encode()), + burpResponseBase64=base64.b64encode(unsaved_response.encode())) + burp_rr.clean() + burp_rr.save() + + def process_endpoints( + self, + finding: Finding, + endpoints_to_add: List[Endpoint], + ) -> None: + """ + Process any endpoints to add to the finding. Endpoints could come from two places + - Directly from the report + - Supplied by the user from the import form + These endpoints will be processed in to endpoints objects and associated with the + finding and and product + """ + # Save the unsaved endpoints + self.chunk_endpoints_and_disperse(finding, finding.test, finding.unsaved_endpoints) + # Check for any that were added in the form + if len(endpoints_to_add) > 0: + logger.debug('endpoints_to_add: %s', endpoints_to_add) + self.chunk_endpoints_and_disperse(finding, finding.test, endpoints_to_add) + + def process_vulnerability_ids( + self, + finding: Finding + ) -> Finding: + """ + Parse the `unsaved_vulnerability_ids` field from findings after they are parsed + to create `Vulnerability_Id` objects with the finding associated correctly + """ + # Synchronize the cve field with the unsaved_vulnerability_ids + # We do this to be as flexible as possible to handle the fields until + # the cve field is not needed anymore and can be removed. 
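        # A small illustrative walk-through of the synchronization below (the CVE IDs are made up):
        #   cve="CVE-2024-0001", unsaved_vulnerability_ids=["CVE-2024-0002"]  ->  ids become ["CVE-2024-0001", "CVE-2024-0002"]
        #   cve=None,            unsaved_vulnerability_ids=["CVE-2024-0002"]  ->  cve becomes "CVE-2024-0002"
        #   cve="CVE-2024-0001", unsaved_vulnerability_ids=None               ->  ids become ["CVE-2024-0001"]
        # Duplicates are then dropped, order preserved, via list(dict.fromkeys(ids)).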
+ if finding.unsaved_vulnerability_ids and finding.cve: + # Make sure the first entry of the list is the value of the cve field + finding.unsaved_vulnerability_ids.insert(0, finding.cve) + elif finding.unsaved_vulnerability_ids and not finding.cve: + # If the cve field is not set, use the first entry of the list to set it + finding.cve = finding.unsaved_vulnerability_ids[0] + elif not finding.unsaved_vulnerability_ids and finding.cve: + # If there is no list, make one with the value of the cve field + finding.unsaved_vulnerability_ids = [finding.cve] + + if finding.unsaved_vulnerability_ids: + # Remove duplicates + finding.unsaved_vulnerability_ids = list(dict.fromkeys(finding.unsaved_vulnerability_ids)) + # Add all vulnerability ids to the database + for vulnerability_id in finding.unsaved_vulnerability_ids: + Vulnerability_Id( + vulnerability_id=vulnerability_id, + finding=finding, + ).save() + + return finding + + def process_files( + self, + finding: Finding, + ) -> None: + """ + Some parsers may supply files in the form of base64 encoded blobs, + so lets save them in the form of an attached file on the finding + object + """ + if finding.unsaved_files: + for unsaved_file in finding.unsaved_files: + data = base64.b64decode(unsaved_file.get('data')) + title = unsaved_file.get('title', '') + file_upload, _ = FileUpload.objects.get_or_create(title=title) + file_upload.file.save(title, ContentFile(data)) + file_upload.save() + finding.files.add(file_upload) + + def mitigate_finding( + self, + finding: Finding, + user: Dojo_User, + scan_date: datetime, + note_message: str, + finding_groups_enabled: bool, + push_to_jira: bool, + ) -> None: + """ + Mitigates a finding, all endpoint statuses, leaves a note on the finding + with a record of what happened, and then saves the finding. 
Changes to + this finding will also be synced with some ticket tracking system as well + as groups + """ + finding.active = False + finding.is_mitigated = True + finding.mitigated = scan_date + finding.mitigated_by = user + finding.notes.create( + author=user, + entry=note_message, + ) + # Mitigate the endpoint statuses + self.mitigate_endpoint_status(finding.status_finding.all(), user, kwuser=user, sync=True) + # to avoid pushing a finding group multiple times, we push those outside of the loop + if finding_groups_enabled and finding.finding_group: + # don't try to dedupe findings that we are closing + finding.save(dedupe_option=False) + else: + finding.save(dedupe_option=False, push_to_jira=push_to_jira) diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py new file mode 100644 index 00000000000..d324f0e9af5 --- /dev/null +++ b/dojo/importers/default_importer.py @@ -0,0 +1,492 @@ +import logging +from abc import ABC +from datetime import datetime +from typing import List, Tuple + +from django.core.files.uploadedfile import TemporaryUploadedFile +from django.core.serializers import deserialize, serialize +from django.db.models.query_utils import Q +from django.utils import timezone + +import dojo.finding.helper as finding_helper +import dojo.jira_link.helper as jira_helper +import dojo.notifications.helper as notifications_helper +from dojo.importers.base_importer import BaseImporter, Parser +from dojo.models import ( + Dojo_User, + Engagement, + Finding, + Test, + Test_Import, +) +from dojo.utils import is_finding_groups_enabled + +logger = logging.getLogger(__name__) +deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") + + +class DefaultImporter(BaseImporter): + """ + The classic importer process used by DefectDojo + + This Importer is intended to be used when auditing the history + of findings at a given point in time is required + """ + def __init__(self, *args: list, **kwargs: dict): + """ + Bypass the __init__ method of the BaseImporter class + as it will raise a `NotImplemented` exception + """ + ABC.__init__(self, *args, **kwargs) + + def __new__(self, *args: list, **kwargs: dict): + """ + Bypass the __new__ method of the BaseImporter class + as it will raise a `NotImplemented` exception + """ + return ABC.__new__(self, *args, **kwargs) + + def create_test( + self, + scan_type: str, + test_type_name: str, + **kwargs: dict, + ) -> Test: + """ + Create a fresh test object to be used by the importer. 
This + new test will be attached to the supplied engagement with the + supplied user being marked as the lead of the test + """ + # Ensure the following fields were supplied in the kwargs + required_fields = ["engagement", "lead", "environment"] + if not all(field in kwargs for field in required_fields): + msg = ( + "(Importer) create_test - " + f"The following fields must be supplied: {required_fields}" + ) + raise ValueError(msg) + # Grab the fields from the kwargs + engagement = kwargs.get("engagement") + lead = kwargs.get("lead") + environment = kwargs.get("environment") + # Ensure a test type is available for use + test_type = self.get_or_create_test_type(test_type_name) + target_date = (kwargs.get("scan_date") or kwargs.get("now")) or timezone.now() + # Create the test object + return Test.objects.create( + title=kwargs.get("test_title"), + engagement=engagement, + lead=lead, + environment=environment, + test_type=test_type, + scan_type=scan_type, + target_start=target_date, + target_end=target_date, + percent_complete=100, + version=kwargs.get("version"), + branch_tag=kwargs.get("branch_tag"), + build_id=kwargs.get("build_id"), + commit_hash=kwargs.get("commit_hash"), + api_scan_configuration=kwargs.get("api_scan_configuration"), + tags=kwargs.get("tags"), + ) + + def process_scan( + self, + scan: TemporaryUploadedFile, + scan_type: str, + engagement: Engagement = None, + test: Test = None, + user: Dojo_User = None, + parsed_findings: List[Finding] = None, + **kwargs: dict, + ) -> Tuple[Test, int, int, int, int, int, Test_Import]: + """ + The full step process of taking a scan report, and converting it to + findings in the database. This entails the following actions: + - Verify the API scan configuration (if supplied) + - Parse the findings + - Process the findings + - Update the timestamps on the test + - Update/Create import history objects + - Send out notifications + - Update the test progress + """ + logger.debug(f'IMPORT_SCAN: parameters: {locals()}') + # Get a user at some point + user = self.get_user_if_supplied(user=user) + # Validate the Tool_Configuration + engagement = self.verify_tool_configuration_from_engagement( + kwargs.get("api_scan_configuration", None), + engagement + ) + # Make sure timezone is applied to dates + kwargs["scan_date"], kwargs["now"] = self.add_timezone_scan_date_and_now( + kwargs.get("scan_date"), + now=kwargs.get("now", timezone.now()) + ) + # Fetch the parser based upon the string version of the scan type + parser = self.get_parser(scan_type) + # Get the findings from the parser based on what methods the parser supplies + # This could either mean traditional file parsing, or API pull parsing + test, parsed_findings = self.parse_findings(parser, scan_type, scan, test=None, engagement=engagement, **kwargs) + # process the findings in the foreground or background + new_findings = self.determine_process_method(test, parsed_findings, user, **kwargs) + # Close any old findings in the processed list if the user specified for that + # to occur in the form that is then passed to the kwargs + closed_findings = self.close_old_findings(test, test.finding_set.values(), user, **kwargs) + # Update the timestamps of the test object by looking at the findings imported + test = self.update_timestamps(test, **kwargs) + # Update the test meta + test = self.update_test_meta(test, **kwargs) + # Save the test and engagement for changes to take effect + test.save() + test.engagement.save() + # Create a test import history object to record
the flags sent to the importer + # This operation will return None if the user does not have the import history + # feature enabled + test_import_history = self.update_import_history( + Test_Import.IMPORT_TYPE, + test, + new_findings=new_findings, + closed_findings=closed_findings, + **kwargs, + ) + # Send out som notifications to the user + logger.debug('IMPORT_SCAN: Generating notifications') + notifications_helper.notify_test_created(test) + updated_count = len(new_findings) + len(closed_findings) + notifications_helper.notify_scan_added(test, updated_count, new_findings=new_findings, findings_mitigated=closed_findings) + # Update the test progress to reflect that the import has completed + logger.debug('IMPORT_SCAN: Updating Test progress') + self.update_test_progress(test) + logger.debug('IMPORT_SCAN: Done') + return test, 0, len(new_findings), len(closed_findings), 0, 0, test_import_history + + def process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> List[Finding]: + """ + Saves findings in memory that were parsed from the scan report into the database. + This process involves first saving associated objects such as endpoints, files, + vulnerability IDs, and request response pairs. Once all that has been completed, + the finding may be appended to a new or existing group based upon user selection + at import time + """ + new_findings = [] + logger.debug('starting import of %i parsed findings.', len(parsed_findings) if parsed_findings else 0) + group_names_to_findings_dict = {} + + for unsaved_finding in parsed_findings: + # make sure the severity is something is digestible + unsaved_finding = self.sanitize_severity(unsaved_finding) + # Filter on minimum severity if applicable + if (minimum_severity := kwargs.get("minimum_severity")) and (Finding.SEVERITIES[unsaved_finding.severity] > Finding.SEVERITIES[minimum_severity]): + # finding's severity is below the configured threshold : ignoring the finding + continue + + now = kwargs.get("now") + # Some parsers provide "mitigated" field but do not set timezone (because they are probably not available in the report) + # Finding.mitigated is DateTimeField and it requires timezone + if unsaved_finding.mitigated and not unsaved_finding.mitigated.tzinfo: + unsaved_finding.mitigated = unsaved_finding.mitigated.replace(tzinfo=now.tzinfo) + # Set some explicit fields on the finding + unsaved_finding.test = test + unsaved_finding.reporter = user + unsaved_finding.last_reviewed_by = user + unsaved_finding.last_reviewed = now + logger.debug('process_parsed_findings: active from report: %s, verified from report: %s', unsaved_finding.active, unsaved_finding.verified) + # indicates an override. Otherwise, do not change the value of unsaved_finding.active + if (active := kwargs.get("active")) is not None: + unsaved_finding.active = active + # indicates an override. 
Otherwise, do not change the value of verified + if (verified := kwargs.get("verified")) is not None: + unsaved_finding.verified = verified + # scan_date was provided, override value from parser + if (scan_date := kwargs.get("scan_date")) is not None: + unsaved_finding.date = scan_date.date() + if (service := kwargs.get("service")) is not None: + unsaved_finding.service = service + unsaved_finding.save(dedupe_option=False) + finding = unsaved_finding + # Determine how the finding should be grouped + group_by = kwargs.get("group_by") + self.process_finding_groups( + finding, + group_by, + group_names_to_findings_dict, + ) + # Process any request/response pairs + self.process_request_response_pairs(finding) + # Process any endpoints on the endpoint, or added on the form + self.process_endpoints(finding, kwargs.get("endpoints_to_add", [])) + # Process any tags + if finding.unsaved_tags: + finding.tags = finding.unsaved_tags + # Process any files + self.process_files(finding) + # Process vulnerability IDs + finding = self.process_vulnerability_ids(finding) + # Categorize this finding as a new one + new_findings.append(finding) + # to avoid pushing a finding group multiple times, we push those outside of the loop + push_to_jira = kwargs.get("push_to_jira", False) + if is_finding_groups_enabled() and group_by: + finding.save() + else: + finding.save(push_to_jira=push_to_jira) + + for (group_name, findings) in group_names_to_findings_dict.items(): + finding_helper.add_findings_to_auto_group( + group_name, + findings, + **kwargs + ) + if push_to_jira: + if findings[0].finding_group is not None: + jira_helper.push_to_jira(findings[0].finding_group) + else: + jira_helper.push_to_jira(findings[0]) + + sync = kwargs.get('sync', False) + if not sync: + return [serialize('json', [finding, ]) for finding in new_findings] + return new_findings + + def close_old_findings( + self, + test: Test, + findings: List[Finding], + user: Dojo_User, + scan_date: datetime = timezone.now(), + **kwargs: dict, + ) -> List[Finding]: + """ + Closes old findings based on a hash code match at either the product + or the engagement scope. Closing an old finding entails setting the + finding to mitigated status, setting all endpoint statuses to mitigated, + as well as leaving a not on the finding indicating that it was mitigated + because the vulnerability is no longer present in the submitted scan report. + """ + # First check if close old findings is desired + if kwargs.get("close_old_findings") is False: + return [] + logger.debug("REIMPORT_SCAN: Closing findings no longer present in scan report") + # Close old active findings that are not reported by this scan. + # Refactoring this to only call test.finding_set.values() once. 
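A minimal sketch of what the hash-code bookkeeping below is aiming at, assuming `findings` is the list of dicts the caller obtains from `test.finding_set.values()`; the literal implementation follows in the patch and may differ in detail:

    # hash codes reported by the current import
    new_hash_codes = {f["hash_code"] for f in findings}
    # candidates for closure: active findings of the same test type, outside this
    # test, whose hash_code no longer appears in new_hash_codes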
+ mitigated_hash_codes = [] + new_hash_codes = [] + for finding in findings: + new_hash_codes.append(finding["hash_code"]) + if getattr(finding, "is_mitigated", None): + mitigated_hash_codes.append(finding["hash_code"]) + for hash_code in new_hash_codes: + if hash_code == finding["hash_code"]: + new_hash_codes.remove(hash_code) + # Get the initial filtered list of old findings to be closed without + # considering the scope of the product or engagement + old_findings = Finding.objects.exclude( + test=test + ).exclude( + hash_code__in=new_hash_codes + ).filter( + test__test_type=test.test_type, + active=True + ) + # Accommodate for product scope or engagement scope + if kwargs.get("close_old_findings_product_scope"): + old_findings = old_findings.filter(test__engagement__product=test.engagement.product) + else: + old_findings = old_findings.filter(test__engagement=test.engagement) + # Use the service to differentiate further + if service := kwargs.get("service"): + old_findings = old_findings.filter(service=service) + else: + old_findings = old_findings.filter(Q(service__isnull=True) | Q(service__exact='')) + # Determine if pushing to jira or if the finding groups are enabled + push_to_jira = kwargs.get("push_to_jira", False) + finding_groups_enabled = is_finding_groups_enabled() + # Update the status of the findings and any endpoints + for old_finding in old_findings: + self.mitigate_finding( + old_finding, + user, + scan_date, + ( + "This finding has been automatically closed " + "as it is not present anymore in recent scans." + ), + finding_groups_enabled, + push_to_jira, + ) + # push finding groups to jira since we only only want to push whole groups + if finding_groups_enabled and push_to_jira: + for finding_group in {finding.finding_group for finding in old_findings if finding.finding_group is not None}: + jira_helper.push_to_jira(finding_group) + + return old_findings + + def parse_findings( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + test: Test = None, + **kwargs: dict, + ) -> Tuple[Test, List[Finding]]: + """ + A stub function for making function definitions easier to follow + with correct type signatures + """ + return BaseImporter.parse_findings( + self, + parser, + scan_type, + scan, + **kwargs, + ) + + def parse_findings_static_test_type( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + test: Test = None, + **kwargs: dict, + ) -> Tuple[Test, List[Finding]]: + """ + Creates a test object as part of the import process as there is not one present + at the time of import. 
Once the test is created, proceed with the traditional + file import as usual from the base class + """ + # by default test_type == scan_type + test = self.create_test( + scan_type, + scan_type, + **kwargs, + ) + logger.debug('IMPORT_SCAN: Parse findings') + # Use the parent method for the rest of this + return test, BaseImporter.parse_findings_static_test_type( + self, + parser, + scan_type, + scan, + test=test, + **kwargs, + ) + + def parse_findings_dynamic_test_type( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + **kwargs: dict, + ) -> Tuple[Test, List[Finding]]: + """ + Uses the parser to fetch any tests that may have been created + by the API based parser, aggregates all findings from each test + into a single test, and then renames the test is applicable + """ + logger.debug('IMPORT_SCAN parser v2: Create Test and parse findings') + parsed_findings = [] + tests = self.parse_dynamic_test_type_tests( + parser, + scan_type, + scan, + **kwargs, + ) + # Make sure we have at least one test returned + if len(tests) == 0: + logger.info(f'No tests found in import for {scan_type}') + return None, parsed_findings + # for now we only consider the first test in the list and artificially aggregate all findings of all tests + # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case + # when there is more than 1 test + # + # we also aggregate the label of the Test_type to show the user the original scan_type + # only if they are different. This is to support meta format like SARIF + # so a report that have the label 'CodeScanner' will be changed to 'CodeScanner Scan (SARIF)' + test_type_name = scan_type + # Determine if we should use a custom test type name + if tests[0].type: + test_type_name = f"{tests[0].type} Scan" + if test_type_name != scan_type: + test_type_name = f"{test_type_name} ({scan_type})" + # Create a new test + test = self.create_test( + scan_type, + test_type_name, + **kwargs, + ) + # This part change the name of the Test + # we get it from the data of the parser + test_raw = tests[0] + if test_raw.name: + test.name = test_raw.name + if test_raw.description: + test.description = test_raw.description + test.save() + logger.debug('IMPORT_SCAN parser v2: Parse findings (aggregate)') + # Aggregate all the findings and return them with the newly created test + return test, self.parse_dynamic_test_type_findings_from_tests(tests) + + def sync_process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> List[Finding]: + """ + Processes findings in a synchronous manner such that all findings + will be processed in a worker/process/thread + """ + return self.process_findings( + test, + parsed_findings, + user, + sync=True, + **kwargs, + ) + + def async_process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> List[Finding]: + """ + Processes findings in chunks within N number of processes. 
The + ASYNC_FINDING_IMPORT_CHUNK_SIZE setting will determine how many + findings will be processed in a given worker/process/thread + """ + chunk_list = self.chunk_objects(parsed_findings) + results_list = [] + new_findings = [] + # First kick off all the workers + for findings_list in chunk_list: + result = self.process_findings( + test, + findings_list, + user, + sync=False, + **kwargs, + ) + # Since I don't want to wait until the task is done right now, save the id + # So I can check on the task later + results_list += [result] + # After all tasks have been started, time to pull the results + logger.info('IMPORT_SCAN: Collecting Findings') + for results in results_list: + serial_new_findings = results.get() + new_findings += [next(deserialize("json", finding)).object for finding in serial_new_findings] + logger.info('IMPORT_SCAN: All Findings Collected') + # Indicate that the test is not complete yet as endpoints will still be rolling in. + test.percent_complete = 50 + test.save() + return new_findings
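For orientation, a minimal usage sketch of the new DefaultImporter. The keyword arguments mirror the values the class reads from kwargs above (lead, environment, minimum_severity, active, verified, scan_date, close_old_findings, push_to_jira); the concrete values such as scan_file, "ZAP Scan" and request.user are illustrative assumptions rather than an excerpt from the updated views:

    importer = DefaultImporter()
    test, _, new_count, closed_count, _, _, import_history = importer.process_scan(
        scan_file,               # the uploaded report (TemporaryUploadedFile)
        "ZAP Scan",              # scan_type string that maps to a registered parser
        engagement=engagement,   # existing Engagement the new Test will be attached to
        user=request.user,
        lead=request.user,
        environment=environment,
        minimum_severity="Info",
        active=None,             # None keeps the active/verified values from the report
        verified=None,
        scan_date=None,
        close_old_findings=False,
        push_to_jira=False,
    )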
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py new file mode 100644 index 00000000000..be6455e1616 --- /dev/null +++ b/dojo/importers/default_reimporter.py @@ -0,0 +1,877 @@ +import logging +from abc import ABC +from datetime import datetime +from typing import List, Tuple + +from django.core.files.uploadedfile import TemporaryUploadedFile +from django.core.serializers import deserialize, serialize +from django.db.models.query_utils import Q +from django.utils import timezone + +import dojo.finding.helper as finding_helper +import dojo.jira_link.helper as jira_helper +import dojo.notifications.helper as notifications_helper +from dojo.importers.base_importer import BaseImporter, Parser +from dojo.models import ( + Dojo_User, + Engagement, + Finding, + Notes, + Test, + Test_Import, +) +from dojo.utils import is_finding_groups_enabled + +logger = logging.getLogger(__name__) +deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") + + +class DefaultReImporter(BaseImporter): + """ + The classic reimporter process used by DefectDojo + + This importer is intended to be used when mitigation of + vulnerabilities is the ultimate tool for getting a current + point-in-time view of security of a given product + """ + def __init__(self, *args: list, **kwargs: dict): + """ + Bypass the __init__ method of the BaseImporter class + as it will raise a `NotImplemented` exception + """ + ABC.__init__(self, *args, **kwargs) + + def __new__(self, *args: list, **kwargs: dict): + """ + Bypass the __new__ method of the BaseImporter class + as it will raise a `NotImplemented` exception + """ + return ABC.__new__(self, *args, **kwargs) + + def process_scan( + self, + scan: TemporaryUploadedFile, + scan_type: str, + engagement: Engagement = None, + test: Test = None, + user: Dojo_User = None, + parsed_findings: List[Finding] = None, + **kwargs: dict, + ) -> Tuple[Test, int, int, int, int, int, Test_Import]: + """ + The full step process of taking a scan report, and converting it to + findings in the database. This entails the following actions: + - Verify the API scan configuration (if supplied) + - Parse the findings + - Process the findings + - Update the timestamps on the test + - Update/Create import history objects + - Send out notifications + - Update the test progress + """ + logger.debug(f'REIMPORT_SCAN: parameters: {locals()}') + # Get a user at some point + user = self.get_user_if_supplied(user=user) + # Validate the Tool_Configuration + test = self.verify_tool_configuration_from_test( + kwargs.get("api_scan_configuration", None), + test + ) + # Make sure timezone is applied to dates + kwargs["scan_date"], kwargs["now"] = self.add_timezone_scan_date_and_now( + kwargs.get("scan_date"), + now=kwargs.get("now", timezone.now()) + ) + # Fetch the parser based upon the string version of the scan type + parser = self.get_parser(scan_type) + # Get the findings from the parser based on what methods the parser supplies + # This could either mean traditional file parsing, or API pull parsing + parsed_findings = self.parse_findings(parser, scan_type, scan, test=test, engagement=engagement, **kwargs) + # process the findings in the foreground or background + ( + new_findings, + reactivated_findings, + findings_to_mitigate, + untouched_findings, + ) = self.determine_process_method(test, parsed_findings, user, **kwargs) + # Close any old findings in the processed list if the user specified for that + # to occur in the form that is then passed to the kwargs + closed_findings = self.close_old_findings(test, findings_to_mitigate, user, **kwargs) + # Update the timestamps of the test object by looking at the findings imported + logger.debug("REIMPORT_SCAN: Updating test/engagement timestamps") + test = self.update_timestamps(test, **kwargs) + # Update the test meta + test = self.update_test_meta(test, **kwargs) + # Save the test and engagement for changes to take effect + test.save() + test.engagement.save() + logger.debug("REIMPORT_SCAN: Updating test tags") + self.update_test_tags(test, kwargs.get("tags", [])) + # Create a test import history object to record the flags sent to the importer + # This operation will return None if the user does not have the import history + # feature enabled + test_import_history = self.update_import_history( + Test_Import.REIMPORT_TYPE, + test, + new_findings=new_findings, + closed_findings=closed_findings, + reactivated_findings=reactivated_findings, + untouched_findings=untouched_findings, + **kwargs, + ) + # Send out some notifications to the user + logger.debug('REIMPORT_SCAN: Generating notifications') + updated_count = ( + len(closed_findings) + len(reactivated_findings) + len(new_findings) + ) + notifications_helper.notify_scan_added( + test, + updated_count, + new_findings=new_findings, + findings_mitigated=closed_findings, + findings_reactivated=reactivated_findings, + findings_untouched=untouched_findings, + ) + # Update the test progress to reflect that the import has completed + logger.debug('REIMPORT_SCAN: Updating Test progress') + self.update_test_progress(test) + logger.debug('REIMPORT_SCAN: Done') + return ( + test, + updated_count, + len(new_findings), + len(closed_findings), + len(reactivated_findings), + len(untouched_findings), + test_import_history, + ) + + def process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]: + """ + Saves findings in memory that were parsed from the scan report into the database.
+ This process involves first saving associated objects such as endpoints, files, + vulnerability IDs, and request response pairs. Once all that has been completed, + the finding may be appended to a new or existing group based upon user selection + at import time + """ + + original_items = list(test.finding_set.all()) + deduplication_algorithm = test.deduplication_algorithm + group_names_to_findings_dict = {} + new_items = [] + reactivated_items = [] + unchanged_items = [] + + group_names_to_findings_dict = {} + logger.debug(f"starting reimport of {len(parsed_findings) if parsed_findings else 0} items.") + logger.debug("STEP 1: looping over findings from the reimported report and trying to match them to existing findings") + deduplicationLogger.debug(f"Algorithm used for matching new findings to existing findings: {deduplication_algorithm}") + + for unsaved_finding in parsed_findings: + # make sure the severity is something is digestible + unsaved_finding = self.sanitize_severity(unsaved_finding) + # Filter on minimum severity if applicable + if (minimum_severity := kwargs.get("minimum_severity")) and (Finding.SEVERITIES[unsaved_finding.severity] > Finding.SEVERITIES[minimum_severity]): + # finding's severity is below the configured threshold : ignoring the finding + continue + + now = kwargs.get("now") + group_by = kwargs.get("group_by") + push_to_jira = kwargs.get("push_to_jira", False) + # Some parsers provide "mitigated" field but do not set timezone (because they are probably not available in the report) + # Finding.mitigated is DateTimeField and it requires timezone + if unsaved_finding.mitigated and not unsaved_finding.mitigated.tzinfo: + unsaved_finding.mitigated = unsaved_finding.mitigated.replace(tzinfo=now.tzinfo) + # Override the test if needed + if not hasattr(unsaved_finding, "test"): + unsaved_finding.test = test + # Set the service supplied at import time + if service := kwargs.get("service"): + unsaved_finding.service = service + # Clean any endpoints that are on the finding + self.clean_unsaved_endpoints(unsaved_finding.unsaved_endpoints) + # Calculate the hash code to be used to identify duplicates + unsaved_finding.hash_code = unsaved_finding.compute_hash_code() + deduplicationLogger.debug(f"unsaved finding's hash_code: {unsaved_finding.hash_code}") + # Match any findings to this new one coming in + matched_findings = self.match_new_finding_to_existing_finding(unsaved_finding, test, deduplication_algorithm) + deduplicationLogger.debug(f"found {len(matched_findings)} findings matching with current new finding") + # Determine how to proceed based on whether matches were found or not + if matched_findings: + existing_finding = matched_findings[0] + finding, force_continue = self.process_matched_finding( + unsaved_finding, + existing_finding, + user, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + # Determine if we should skip the rest of the loop + if force_continue: + continue + # Update endpoints on the existing finding with those on the new finding + if finding.dynamic_finding: + logger.debug( + "Re-import found an existing dynamic finding for this new " + "finding. 
Checking the status of endpoints" + ) + self.update_endpoint_status(existing_finding, unsaved_finding, user) + else: + finding = self.process_finding_that_was_not_matched( + unsaved_finding, + user, + group_names_to_findings_dict, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + # This condition __appears__ to always be true, but am afraid to remove it + if finding: + # Process the rest of the items on the finding + finding = self.finding_post_processing( + finding, + unsaved_finding, + test, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + # finding = new finding or existing finding still in the upload report + # to avoid pushing a finding group multiple times, we push those outside of the loop + if is_finding_groups_enabled() and group_by: + finding.save() + else: + finding.save(push_to_jira=push_to_jira) + + to_mitigate = (set(original_items) - set(reactivated_items) - set(unchanged_items)) + # due to #3958 we can have duplicates inside the same report + # this could mean that a new finding is created and right after + # that it is detected as the 'matched existing finding' for a + # following finding in the same report + # this means untouched can have this finding inside it, + # while it is in fact a new finding. So we subtract new_items + untouched = set(unchanged_items) - set(to_mitigate) - set(new_items) + # Process groups + self.process_groups_for_all_findings( + group_names_to_findings_dict, + reactivated_items, + unchanged_items, + **kwargs, + ) + # Process the results and return them back + return self.process_results( + new_items, + reactivated_items, + to_mitigate, + untouched, + **kwargs, + ) + + def close_old_findings( + self, + test: Test, + findings: List[Finding], + user: Dojo_User, + scan_date: datetime = timezone.now(), + **kwargs: dict, + ) -> List[Finding]: + """ + Updates the status of findings that were detected as "old" by the reimport + process findings methods + """ + # First check if close old findings is desired + if kwargs.get("close_old_findings") is False: + return [] + logger.debug("REIMPORT_SCAN: Closing findings no longer present in scan report") + # Determine if pushing to jira or if the finding groups are enabled + push_to_jira = kwargs.get("push_to_jira", False) + finding_groups_enabled = is_finding_groups_enabled() + mitigated_findings = [] + for finding in findings: + if not finding.mitigated or not finding.is_mitigated: + logger.debug("mitigating finding: %i:%s", finding.id, finding) + self.mitigate_finding( + finding, + user, + scan_date, + f"Mitigated by {test.test_type} re-upload.", + finding_groups_enabled, + push_to_jira, + ) + mitigated_findings.append(finding) + # push finding groups to jira since we only only want to push whole groups + if finding_groups_enabled and push_to_jira: + for finding_group in {finding.finding_group for finding in findings if finding.finding_group is not None}: + jira_helper.push_to_jira(finding_group) + + return mitigated_findings + + def parse_findings_static_test_type( + self, + parser: Parser, + scan_type: str, + scan: TemporaryUploadedFile, + test: Test = None, + **kwargs: dict, + ) -> List[Finding]: + """ + Parses the findings from file and assigns them to the test + that was supplied + """ + logger.debug("REIMPORT_SCAN: Parse findings") + # Use the parent method for the rest of this + return BaseImporter.parse_findings_static_test_type( + self, + parser, + scan_type, + scan, + test=test, + **kwargs, + ) + + def parse_findings_dynamic_test_type( + self, + parser: 
Parser, + scan_type: str, + scan: TemporaryUploadedFile, + **kwargs: dict, + ) -> List[Finding]: + """ + Uses the parser to fetch any tests that may have been created + by the API based parser, aggregates all findings from each test + into a single test, and then renames the test if applicable + """ + logger.debug("REIMPORT_SCAN parser v2: Create parse findings") + return BaseImporter.parse_findings_dynamic_test_type( + self, + parser, + scan_type, + scan, + **kwargs, + ) + + def sync_process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]: + """ + Processes findings in a synchronous manner such that all findings + will be processed in a worker/process/thread + """ + return self.process_findings( + test, + parsed_findings, + user, + sync=True, + **kwargs, + ) + + def async_process_findings( + self, + test: Test, + parsed_findings: List[Finding], + user: Dojo_User, + **kwargs: dict, + ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]: + """ + Processes findings in chunks within N number of processes. The + ASYNC_FINDING_IMPORT_CHUNK_SIZE setting will determine how many + findings will be processed in a given worker/process/thread + """ + chunk_list = self.chunk_objects(parsed_findings) + new_findings = [] + reactivated_findings = [] + findings_to_mitigate = [] + untouched_findings = [] + results_list = [] + # First kick off all the workers + for findings_list in chunk_list: + result = self.process_findings( + test, + findings_list, + user, + sync=False, + **kwargs, + ) + # Since I don't want to wait until the task is done right now, save the id + # So I can check on the task later + results_list += [result] + # After all tasks have been started, time to pull the results + logger.debug("REIMPORT_SCAN: Collecting Findings") + for results in results_list: + ( + serial_new_findings, + serial_reactivated_findings, + serial_findings_to_mitigate, + serial_untouched_findings, + ) = results.get() + new_findings += [ + next(deserialize("json", finding)).object + for finding in serial_new_findings + ] + reactivated_findings += [ + next(deserialize("json", finding)).object + for finding in serial_reactivated_findings + ] + findings_to_mitigate += [ + next(deserialize("json", finding)).object + for finding in serial_findings_to_mitigate + ] + untouched_findings += [ + next(deserialize("json", finding)).object + for finding in serial_untouched_findings + ] + logger.debug("REIMPORT_SCAN: All Findings Collected") + # Indicate that the test is not complete yet as endpoints will still be rolling in. + test.percent_complete = 50 + test.save() + self.update_test_progress(test, sync=False) + + return new_findings, reactivated_findings, findings_to_mitigate, untouched_findings + + def match_new_finding_to_existing_finding( + self, + unsaved_finding: Finding, + test: Test, + deduplication_algorithm: str, + ) -> List[Finding]: + """ + Matches a single new finding to N existing findings and then returns those matches + """ + # This code should match the logic used for deduplication out of the re-import feature.
+ # See utils.py deduplicate_* functions + deduplicationLogger.debug('return findings bases on algorithm: %s', deduplication_algorithm) + if deduplication_algorithm == 'hash_code': + return Finding.objects.filter( + test=test, + hash_code=unsaved_finding.hash_code + ).exclude(hash_code=None).order_by('id') + elif deduplication_algorithm == 'unique_id_from_tool': + return Finding.objects.filter( + test=test, + unique_id_from_tool=unsaved_finding.unique_id_from_tool + ).exclude(unique_id_from_tool=None).order_by('id') + elif deduplication_algorithm == 'unique_id_from_tool_or_hash_code': + query = Finding.objects.filter( + Q(test=test), + (Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code)) + | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool)) + ).order_by('id') + deduplicationLogger.debug(query.query) + return query + elif deduplication_algorithm == 'legacy': + # This is the legacy reimport behavior. Although it's pretty flawed and doesn't match the legacy algorithm for deduplication, + # this is left as is for simplicity. + # Re-writing the legacy deduplication here would be complicated and counter-productive. + # If you have use cases going through this section, you're advised to create a deduplication configuration for your parser + logger.debug("Legacy reimport. In case of issue, you're advised to create a deduplication configuration in order not to go through this section") + return Finding.objects.filter( + title=unsaved_finding.title, + test=test, + severity=unsaved_finding.severity, + numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by('id') + else: + logger.error(f"Internal error: unexpected deduplication_algorithm: \"{deduplication_algorithm}\"") + return None + + def process_matched_finding( + self, + unsaved_finding: Finding, + existing_finding: Finding, + user: Dojo_User, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> Tuple[Finding, bool]: + """ + Determine how to handle the an existing finding based on the status + that is possesses at the time of reimport + """ + if existing_finding.false_p or existing_finding.out_of_scope or existing_finding.risk_accepted: + return self.process_matched_special_status_finding( + unsaved_finding, + existing_finding, + user, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + elif existing_finding.is_mitigated: + return self.process_matched_mitigated_finding( + unsaved_finding, + existing_finding, + user, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + else: + return self.process_matched_active_finding( + unsaved_finding, + existing_finding, + user, + new_items, + reactivated_items, + unchanged_items, + **kwargs + ) + + def process_matched_special_status_finding( + self, + unsaved_finding: Finding, + existing_finding: Finding, + user: Dojo_User, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> Tuple[Finding, bool]: + """ + Determine if there is parity between statuses of the new and existing finding. 
+ If so, do not touch either finding, and move on to the next unsaved finding + """ + logger.debug( + f"Skipping existing finding (it is marked as false positive: {existing_finding.false_p} " + f"and/or out of scope: {existing_finding.out_of_scope} or is a risk accepted: " + f"{existing_finding.risk_accepted}) - {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + # If all statuses are the same between findings, we can safely move on to the next + # finding in the report. Return True here to force a continue in the loop + if ( + existing_finding.false_p == unsaved_finding.false_p + and existing_finding.out_of_scope == unsaved_finding.out_of_scope + and existing_finding.risk_accepted == unsaved_finding.risk_accepted + ): + unchanged_items.append(existing_finding) + return existing_finding, True + # The finding was not an exact match, so we need to add more details about from the + # new finding to the existing. Return False here to make process further + return existing_finding, False + + def process_matched_mitigated_finding( + self, + unsaved_finding: Finding, + existing_finding: Finding, + user: Dojo_User, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> Tuple[Finding, bool]: + """ + Determine how mitigated the existing and new findings really are. We need + to cover circumstances where mitigation timestamps are different, and + decide which one to honor + """ + # if the reimported item has a mitigation time, we can compare + scan_type = kwargs.get("scan_type") + verified = kwargs.get("verified") + if unsaved_finding.is_mitigated: + # The new finding is already mitigated, so nothing to change on the + # the existing finding + unchanged_items.append(existing_finding) + # Look closer at the mitigation timestamp + if unsaved_finding.mitigated: + logger.debug(f"item mitigated time: {unsaved_finding.mitigated.timestamp()}") + logger.debug(f"finding mitigated time: {existing_finding.mitigated.timestamp()}") + # Determine if the mitigation timestamp is the same between the new finding + # and the existing finding. If they are, we do not need any further processing + if unsaved_finding.mitigated.timestamp() == existing_finding.mitigated.timestamp(): + logger.debug( + "New imported finding and already existing finding have the same mitigation " + "date, will skip as they are the same." 
+ ) + # Return True here to force the loop to continue + return existing_finding, True + else: + logger.debug( + "New imported finding and already existing finding are both mitigated but " + "have different dates, not taking action" + ) + # Return True here to force the loop to continue + return existing_finding, True + else: + # even if there is no mitigation time, skip it, because both the current finding and + # the reimported finding are is_mitigated + # Return True here to force the loop to continue + return existing_finding, True + else: + if kwargs.get("do_not_reactivate"): + logger.debug( + "Skipping reactivating by user's choice do_not_reactivate: " + f" - {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + # Search for an existing note that this finding has been skipped for reactivation + # before this current time + existing_note = existing_finding.notes.filter( + entry=f"Finding has skipped reactivation from {scan_type} re-upload with user decision do_not_reactivate.", + author=user, + ) + # If a note has not been left before, we can skip this finding + if len(existing_note) == 0: + note = Notes( + entry=f"Finding has skipped reactivation from {scan_type} re-upload with user decision do_not_reactivate.", + author=user, + ) + note.save() + existing_finding.notes.add(note) + existing_finding.save(dedupe_option=False) + # Return True here to force the loop to continue + return existing_finding, True + else: + logger.debug( + f"Reactivating: - {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + existing_finding.mitigated = None + existing_finding.is_mitigated = False + existing_finding.mitigated_by = None + existing_finding.active = True + if verified is not None: + existing_finding.verified = verified + + component_name = getattr(unsaved_finding, "component_name", None) + component_version = getattr(unsaved_finding, "component_version", None) + existing_finding.component_name = existing_finding.component_name or component_name + existing_finding.component_version = existing_finding.component_version or component_version + existing_finding.save(dedupe_option=False) + # don't dedupe before endpoints are added + existing_finding.save(dedupe_option=False) + note = Notes(entry=f"Re-activated by {scan_type} re-upload.", author=user) + note.save() + endpoint_statuses = existing_finding.status_finding.exclude( + Q(false_positive=True) + | Q(out_of_scope=True) + | Q(risk_accepted=True) + ) + self.chunk_endpoints_and_reactivate(endpoint_statuses) + existing_finding.notes.add(note) + reactivated_items.append(existing_finding) + # The new finding is active while the existing on is mitigated. 
The existing finding needs to + # be updated in some way + # Return False here to make sure further processing happens + return existing_finding, False + + def process_matched_active_finding( + self, + unsaved_finding: Finding, + existing_finding: Finding, + user: Dojo_User, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> Tuple[Finding, bool]: + """ + The existing finding must be active here, so we need to compare it + closely with the new finding coming in and determine how to proceed + """ + # if finding associated to new item is none of risk accepted, mitigated, false positive or out of scope + # existing findings may be from before we had component_name/version fields + logger.debug( + f"Updating existing finding: {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + # First check that the existing finding is definitely not mitigated + if not (existing_finding.mitigated and existing_finding.is_mitigated): + verified = kwargs.get("verified") + logger.debug("Reimported item matches a finding that is currently open.") + if unsaved_finding.is_mitigated: + logger.debug("Reimported mitigated item matches a finding that is currently open, closing.") + # TODO: Implement a date comparison for opened defectdojo findings before closing them by reimporting, + # as they could be force closed by the scanner but a DD user forces it open ? + logger.debug( + f"Closing: {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + existing_finding.mitigated = unsaved_finding.mitigated + existing_finding.is_mitigated = True + existing_finding.mitigated_by = unsaved_finding.mitigated_by + existing_finding.active = False + if verified is not None: + existing_finding.verified = verified + elif unsaved_finding.risk_accepted or unsaved_finding.false_p or unsaved_finding.out_of_scope: + logger.debug('Reimported mitigated item matches a finding that is currently open, closing.') + logger.debug( + f"Closing: {existing_finding.id}: {existing_finding.title} " + f"({existing_finding.component_name} - {existing_finding.component_version})" + ) + existing_finding.risk_accepted = unsaved_finding.risk_accepted + existing_finding.false_p = unsaved_finding.false_p + existing_finding.out_of_scope = unsaved_finding.out_of_scope + existing_finding.active = False + if verified is not None: + existing_finding.verified = verified + else: + # if finding is the same but list of affected was changed, finding is marked as unchanged. 
This is a known issue + unchanged_items.append(existing_finding) + # Set the component name and version on the existing finding if it is present + # on the old finding, but not present on the existing finding (do not override) + component_name = getattr(unsaved_finding, "component_name", None) + component_version = getattr(unsaved_finding, "component_version", None) + if (component_name is not None and not existing_finding.component_name) or ( + component_version is not None and not existing_finding.component_version + ): + existing_finding.component_name = existing_finding.component_name or component_name + existing_finding.component_version = existing_finding.component_version or component_version + existing_finding.save(dedupe_option=False) + # Return False here to make sure further processing happens + return existing_finding, False + + def process_finding_that_was_not_matched( + self, + unsaved_finding: Finding, + user: Dojo_User, + group_names_to_findings_dict: dict, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> Finding: + """ + Create a new finding from the one parsed from the report + """ + # Set some explicit settings + unsaved_finding.reporter = user + unsaved_finding.last_reviewed = timezone.now() + unsaved_finding.last_reviewed_by = user + # indicates an override. Otherwise, do not change the value of unsaved_finding.active + if (active := kwargs.get("active")) is not None: + unsaved_finding.active = active + # indicates an override. Otherwise, do not change the value of verified + if (verified := kwargs.get("verified")) is not None: + unsaved_finding.verified = verified + # scan_date was provided, override value from parser + if (scan_date := kwargs.get("scan_date")) is not None: + unsaved_finding.date = scan_date.date() + # Save it. Don't dedupe before endpoints are added. 
+ unsaved_finding.save(dedupe_option=False) + finding = unsaved_finding + logger.debug( + "Reimport created new finding as no existing finding match: " + f"{finding.id}: {finding.title} " + f"({finding.component_name} - {finding.component_version})" + ) + # Manage the finding grouping selection + self.process_finding_groups( + unsaved_finding, + kwargs.get("group_by"), + group_names_to_findings_dict, + ) + # Add the new finding to the list + new_items.append(unsaved_finding) + # Process any request/response pairs + self.process_request_response_pairs(unsaved_finding) + return unsaved_finding + + def finding_post_processing( + self, + finding: Finding, + finding_from_report: Finding, + test: Test, + new_items: List[Finding], + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> None: + """ + Save all associated objects to the finding after it has been saved + for the purpose of foreign key restrictions + """ + self.chunk_endpoints_and_disperse(finding, test, finding_from_report.unsaved_endpoints) + if endpoints_to_add := kwargs.get("endpoints_to_add"): + self.chunk_endpoints_and_disperse(finding, test, endpoints_to_add) + # Update finding tags + if finding_from_report.unsaved_tags: + finding.tags = finding_from_report.unsaved_tags + # Process any files + if finding_from_report.unsaved_files: + finding.unsaved_files = finding_from_report.unsaved_files + self.process_files(finding) + # Process vulnerability IDs + finding = self.process_vulnerability_ids(finding) + + return finding + + def process_groups_for_all_findings( + self, + group_names_to_findings_dict: dict, + reactivated_items: List[Finding], + unchanged_items: List[Finding], + **kwargs: dict, + ) -> None: + """ + Add findings to a group that may or may not exist, based upon the users + selection at import time + """ + push_to_jira = kwargs.get("push_to_jira", False) + for (group_name, findings) in group_names_to_findings_dict.items(): + finding_helper.add_findings_to_auto_group( + group_name, + findings, + **kwargs + ) + if push_to_jira: + if findings[0].finding_group is not None: + jira_helper.push_to_jira(findings[0].finding_group) + else: + jira_helper.push_to_jira(findings[0]) + + if is_finding_groups_enabled() and push_to_jira: + for finding_group in { + finding.finding_group + for finding in reactivated_items + unchanged_items + if finding.finding_group is not None and not finding.is_mitigated + }: + jira_helper.push_to_jira(finding_group) + + def process_results( + self, + new_items: List[Finding], + reactivated_items: List[Finding], + to_mitigate: List[Finding], + untouched: List[Finding], + **kwargs: dict, + ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]: + """ + Determine how to to return the results based on whether the process was + ran asynchronous or not + """ + if not kwargs.get("sync", False): + serialized_new_items = [ + serialize("json", [finding]) for finding in new_items + ] + serialized_reactivated_items = [ + serialize("json", [finding]) for finding in reactivated_items + ] + serialized_to_mitigate = [ + serialize("json", [finding]) for finding in to_mitigate + ] + serialized_untouched = [ + serialize("json", [finding]) for finding in untouched + ] + return ( + serialized_new_items, + serialized_reactivated_items, + serialized_to_mitigate, + serialized_untouched, + ) + else: + return new_items, reactivated_items, to_mitigate, untouched diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py new file mode 100644 index 
00000000000..0c6ff8a0838 --- /dev/null +++ b/dojo/importers/endpoint_manager.py @@ -0,0 +1,144 @@ +import logging +from typing import List + +from django.conf import settings +from django.utils import timezone + +from dojo.celery import app +from dojo.decorators import dojo_async_task +from dojo.models import ( + Dojo_User, + Endpoint_Status, + Finding, +) + +logger = logging.getLogger(__name__) + + +class DefaultReImporterEndpointManager: + @dojo_async_task + @app.task() + def mitigate_endpoint_status( + self, + endpoint_status_list: List[Endpoint_Status], + user: Dojo_User, + **kwargs: dict, + ) -> None: + """ + Mitigates all endpoint status objects that are supplied + """ + now = timezone.now() + for endpoint_status in endpoint_status_list: + # Only mitigate endpoints that are actually active + if endpoint_status.mitigated is False: + endpoint_status.mitigated_time = now + endpoint_status.last_modified = now + endpoint_status.mitigated_by = user + endpoint_status.mitigated = True + endpoint_status.save() + return None + + @dojo_async_task + @app.task() + def reactivate_endpoint_status( + self, + endpoint_status_list: List[Endpoint_Status], + **kwargs: dict, + ) -> None: + """ + Reactivate all endpoint status objects that are supplied + """ + for endpoint_status in endpoint_status_list: + # Only reactivate endpoints that are actually mitigated + if endpoint_status.mitigated: + logger.debug("Re-import: reactivating endpoint %s that is present in this scan", str(endpoint_status.endpoint)) + endpoint_status.mitigated_by = None + endpoint_status.mitigated_time = None + endpoint_status.mitigated = False + endpoint_status.last_modified = timezone.now() + endpoint_status.save() + return None + + def chunk_endpoints_and_reactivate( + self, + endpoint_status_list: List[Endpoint_Status], + **kwargs: dict, + ) -> None: + """ + Reactivates all endpoint status objects. Whether this function will asynchronous or not is dependent + on the ASYNC_FINDING_IMPORT setting. If it is set to true, endpoint statuses will be chunked, + and dispersed over celery workers. + """ + # Determine if this can be run async + if settings.ASYNC_FINDING_IMPORT: + chunked_list = self.chunk_objects(endpoint_status_list) + # If there is only one chunk, then do not bother with async + if len(chunked_list) < 2: + self.reactivate_endpoint_status(endpoint_status_list, sync=True) + logger.debug(f"Split endpoints into {len(chunked_list)} chunks of {len(chunked_list[0])}") + # First kick off all the workers + for endpoint_status_list in chunked_list: + self.reactivate_endpoint_status(endpoint_status_list, sync=False) + else: + self.reactivate_endpoint_status(endpoint_status_list, sync=True) + return None + + def chunk_endpoints_and_mitigate( + self, + endpoint_status_list: List[Endpoint_Status], + user: Dojo_User, + **kwargs: dict, + ) -> None: + """ + Mitigates all endpoint status objects. Whether this function will asynchronous or not is dependent + on the ASYNC_FINDING_IMPORT setting. If it is set to true, endpoint statuses will be chunked, + and dispersed over celery workers. 
+ """ + # Determine if this can be run async + if settings.ASYNC_FINDING_IMPORT: + chunked_list = self.chunk_objects(endpoint_status_list) + # If there is only one chunk, then do not bother with async + if len(chunked_list) < 2: + self.mitigate_endpoint_status(endpoint_status_list, user, sync=True) + logger.debug(f"Split endpoints into {len(chunked_list)} chunks of {len(chunked_list[0])}") + # First kick off all the workers + for endpoint_status_list in chunked_list: + self.mitigate_endpoint_status(endpoint_status_list, user, sync=False) + else: + self.mitigate_endpoint_status(endpoint_status_list, user, sync=True) + return None + + def update_endpoint_status( + self, + existing_finding: Finding, + new_finding: Finding, + user: Dojo_User, + **kwargs: dict, + ) -> None: + """ + Update the list of endpoints from the new finding with the list that is in the old finding + """ + # New endpoints are already added in serializers.py / views.py (see comment "# for existing findings: make sure endpoints are present or created") + # So we only need to mitigate endpoints that are no longer present + # using `.all()` will mark as mitigated also `endpoint_status` with flags `false_positive`, `out_of_scope` and `risk_accepted`. This is a known issue. This is not a bug. This is a future. + existing_finding_endpoint_status_list = existing_finding.status_finding.all() + new_finding_endpoints_list = new_finding.unsaved_endpoints + if new_finding.is_mitigated: + # New finding is mitigated, so mitigate all old endpoints + endpoint_status_to_mitigate = existing_finding_endpoint_status_list + else: + # Mitigate any endpoints in the old finding not found in the new finding + endpoint_status_to_mitigate = list( + filter( + lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint not in new_finding_endpoints_list, + existing_finding_endpoint_status_list) + ) + # Re-activate any endpoints in the old finding that are in the new finding + endpoint_status_to_reactivate = list( + filter( + lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint in new_finding_endpoints_list, + existing_finding_endpoint_status_list) + ) + self.chunk_endpoints_and_reactivate(endpoint_status_to_reactivate) + self.chunk_endpoints_and_mitigate(endpoint_status_to_mitigate, user) + return None diff --git a/dojo/importers/importer/importer.py b/dojo/importers/importer/importer.py deleted file mode 100644 index 19c5acac1aa..00000000000 --- a/dojo/importers/importer/importer.py +++ /dev/null @@ -1,409 +0,0 @@ -import base64 -import logging - -from django.conf import settings -from django.core import serializers -from django.core.exceptions import ValidationError -from django.core.files.base import ContentFile -from django.db.models.query_utils import Q -from django.utils import timezone - -import dojo.finding.helper as finding_helper -import dojo.jira_link.helper as jira_helper -import dojo.notifications.helper as notifications_helper -from dojo.celery import app -from dojo.decorators import dojo_async_task -from dojo.importers import utils as importer_utils -from dojo.models import BurpRawRequestResponse, FileUpload, Finding, Test, Test_Import, Test_Type -from dojo.tools.factory import get_parser -from dojo.utils import get_current_user, is_finding_groups_enabled - -logger = logging.getLogger(__name__) -deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") - - -class DojoDefaultImporter: - - def create_test(self, scan_type, test_type_name, engagement, lead, 
environment, tags=None, - scan_date=None, version=None, branch_tag=None, build_id=None, commit_hash=None, now=timezone.now(), - api_scan_configuration=None, title=None): - - test_type, created = Test_Type.objects.get_or_create( - name=test_type_name) - - if created: - logger.info('Created new Test_Type with name %s because a report is being imported', test_type.name) - - if scan_date and not scan_date.tzinfo: - scan_date = timezone.make_aware(scan_date) - - if now and not now.tzinfo: - now = timezone.make_aware(now) - - test = Test( - title=title, - engagement=engagement, - lead=lead, - test_type=test_type, - scan_type=scan_type, - target_start=scan_date or now, - target_end=scan_date or now, - environment=environment, - percent_complete=100, - version=version, - branch_tag=branch_tag, - build_id=build_id, - commit_hash=commit_hash, - api_scan_configuration=api_scan_configuration, - tags=tags, - ) - - test.full_clean() - test.save() - return test - - @dojo_async_task - @app.task(ignore_result=False) - def process_parsed_findings(self, test, parsed_findings, scan_type, user, active=None, verified=None, minimum_severity=None, - endpoints_to_add=None, push_to_jira=None, group_by=None, now=timezone.now(), service=None, scan_date=None, - create_finding_groups_for_all_findings=True, **kwargs): - logger.debug('endpoints_to_add: %s', endpoints_to_add) - new_findings = [] - items = parsed_findings - logger.debug('starting import of %i items.', len(items) if items else 0) - group_names_to_findings_dict = {} - - for item in items: - # FIXME hack to remove when all parsers have unit tests for this attribute - # Importing the cvss module via: - # `from cvss import CVSS3` - # _and_ given a CVSS vector string such as: - # cvss_vector_str = 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N', - # the following severity calculation returns the - # string values of, "None" instead of the expected string values - # of "Info": - # ``` - # cvss_obj = CVSS3(cvss_vector_str) - # severities = cvss_obj.severities() - # print(severities) - # ('None', 'None', 'None') - # print(severities[0]) - # 'None' - # print(type(severities[0])) - # - # ``` - if (item.severity.lower().startswith('info') or item.severity.lower() == 'none') and item.severity != 'Info': - item.severity = 'Info' - - item.numerical_severity = Finding.get_numerical_severity(item.severity) - - if minimum_severity and (Finding.SEVERITIES[item.severity] - > Finding.SEVERITIES[minimum_severity]): - # finding's severity is below the configured threshold : ignoring the finding - continue - - # Some parsers provide "mitigated" field but do not set timezone (because they are probably not available in the report) - # Finding.mitigated is DateTimeField and it requires timezone - if item.mitigated and not item.mitigated.tzinfo: - item.mitigated = item.mitigated.replace(tzinfo=now.tzinfo) - - item.test = test - item.reporter = user if user else get_current_user - item.last_reviewed = now - item.last_reviewed_by = user if user else get_current_user - - logger.debug('process_parsed_findings: active from report: %s, verified from report: %s', item.active, item.verified) - if active is not None: - # indicates an override. Otherwise, do not change the value of item.active - item.active = active - - if verified is not None: - # indicates an override. 
Otherwise, do not change the value of verified - item.verified = verified - - # if scan_date was provided, override value from parser - if scan_date: - item.date = scan_date.date() - - if service: - item.service = service - - item.save(dedupe_option=False) - - if is_finding_groups_enabled() and group_by: - # If finding groups are enabled, group all findings by group name - name = finding_helper.get_group_by_group_name(item, group_by) - if name is not None: - if name in group_names_to_findings_dict: - group_names_to_findings_dict[name].append(item) - else: - group_names_to_findings_dict[name] = [item] - - if (hasattr(item, 'unsaved_req_resp') - and len(item.unsaved_req_resp) > 0): - for req_resp in item.unsaved_req_resp: - burp_rr = BurpRawRequestResponse( - finding=item, - burpRequestBase64=base64.b64encode(req_resp["req"].encode("utf-8")), - burpResponseBase64=base64.b64encode(req_resp["resp"].encode("utf-8"))) - burp_rr.clean() - burp_rr.save() - - if (item.unsaved_request is not None - and item.unsaved_response is not None): - burp_rr = BurpRawRequestResponse( - finding=item, - burpRequestBase64=base64.b64encode(item.unsaved_request.encode()), - burpResponseBase64=base64.b64encode(item.unsaved_response.encode())) - burp_rr.clean() - burp_rr.save() - - importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints) - if endpoints_to_add: - importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add) - - if item.unsaved_tags: - item.tags = item.unsaved_tags - - if item.unsaved_files: - for unsaved_file in item.unsaved_files: - data = base64.b64decode(unsaved_file.get('data')) - title = unsaved_file.get('title', '') - file_upload, _file_upload_created = FileUpload.objects.get_or_create( - title=title, - ) - file_upload.file.save(title, ContentFile(data)) - file_upload.save() - item.files.add(file_upload) - - importer_utils.handle_vulnerability_ids(item) - - new_findings.append(item) - # to avoid pushing a finding group multiple times, we push those outside of the loop - if is_finding_groups_enabled() and group_by: - item.save() - else: - item.save(push_to_jira=push_to_jira) - - for (group_name, findings) in group_names_to_findings_dict.items(): - finding_helper.add_findings_to_auto_group(group_name, findings, group_by, create_finding_groups_for_all_findings, **kwargs) - if push_to_jira: - if findings[0].finding_group is not None: - jira_helper.push_to_jira(findings[0].finding_group) - else: - jira_helper.push_to_jira(findings[0]) - - sync = kwargs.get('sync', False) - if not sync: - return [serializers.serialize('json', [finding, ]) for finding in new_findings] - return new_findings - - def close_old_findings(self, test, scan_date_time, user, push_to_jira=None, service=None, close_old_findings_product_scope=False): - # Close old active findings that are not reported by this scan. - # Refactoring this to only call test.finding_set.values() once. 
- findings = test.finding_set.values() - mitigated_hash_codes = [] - new_hash_codes = [] - for finding in findings: - new_hash_codes.append(finding["hash_code"]) - if finding["is_mitigated"]: - mitigated_hash_codes.append(finding["hash_code"]) - for hash_code in new_hash_codes: - if hash_code == finding["hash_code"]: - new_hash_codes.remove(hash_code) - if close_old_findings_product_scope: - # Close old findings of the same test type in the same product - old_findings = Finding.objects.exclude(test=test) \ - .exclude(hash_code__in=new_hash_codes) \ - .filter(test__engagement__product=test.engagement.product, - test__test_type=test.test_type, - active=True) - else: - # Close old findings of the same test type in the same engagement - old_findings = Finding.objects.exclude(test=test) \ - .exclude(hash_code__in=new_hash_codes) \ - .filter(test__engagement=test.engagement, - test__test_type=test.test_type, - active=True) - - if service: - old_findings = old_findings.filter(service=service) - else: - old_findings = old_findings.filter(Q(service__isnull=True) | Q(service__exact='')) - - for old_finding in old_findings: - old_finding.active = False - old_finding.is_mitigated = True - old_finding.mitigated = scan_date_time - old_finding.notes.create(author=user, - entry="This finding has been automatically closed" - " as it is not present anymore in recent scans.") - endpoint_status = old_finding.status_finding.all() - for status in endpoint_status: - status.mitigated_by = user - status.mitigated_time = timezone.now() - status.mitigated = True - status.last_modified = timezone.now() - status.save() - - old_finding.tags.add('stale') - - # to avoid pushing a finding group multiple times, we push those outside of the loop - if is_finding_groups_enabled() and old_finding.finding_group: - # don't try to dedupe findings that we are closing - old_finding.save(dedupe_option=False) - else: - old_finding.save(dedupe_option=False, push_to_jira=push_to_jira) - - if is_finding_groups_enabled() and push_to_jira: - for finding_group in set(finding.finding_group for finding in old_findings if finding.finding_group is not None): # noqa: C401 - jira_helper.push_to_jira(finding_group) - - return old_findings - - def import_scan(self, scan, scan_type, engagement, lead, environment, active=None, verified=None, tags=None, minimum_severity=None, - user=None, endpoints_to_add=None, scan_date=None, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=False, close_old_findings_product_scope=False, - group_by=None, api_scan_configuration=None, service=None, title=None, create_finding_groups_for_all_findings=True, - apply_tags_to_findings=False, apply_tags_to_endpoints=False): - - logger.debug(f'IMPORT_SCAN: parameters: {locals()}') - - user = user or get_current_user() - - now = timezone.now() - - if api_scan_configuration and api_scan_configuration.product != engagement.product: - msg = 'API Scan Configuration has to be from same product as the Engagement' - raise ValidationError(msg) - - # check if the parser that handle the scan_type manage tests - # if yes, we parse the data first - # after that we customize the Test_Type to reflect the data - # This allow us to support some meta-formats like SARIF or the generic format - parser = get_parser(scan_type) - if hasattr(parser, 'get_tests'): - logger.debug('IMPORT_SCAN parser v2: Create Test and parse findings') - try: - tests = parser.get_tests(scan_type, scan) - except ValueError as e: - logger.warning(e) - raise 
ValidationError(e) - # for now we only consider the first test in the list and artificially aggregate all findings of all tests - # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case - # when there is more than 1 test - # - # we also aggregate the label of the Test_type to show the user the original scan_type - # only if they are different. This is to support meta format like SARIF - # so a report that have the label 'CodeScanner' will be changed to 'CodeScanner Scan (SARIF)' - test_type_name = scan_type - if len(tests) > 0: - if tests[0].type: - test_type_name = tests[0].type + " Scan" - if test_type_name != scan_type: - test_type_name = f"{test_type_name} ({scan_type})" - - test = self.create_test(scan_type, test_type_name, engagement, lead, environment, scan_date=scan_date, tags=tags, - version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, now=now, - api_scan_configuration=api_scan_configuration, title=title) - # This part change the name of the Test - # we get it from the data of the parser - test_raw = tests[0] - if test_raw.name: - test.name = test_raw.name - if test_raw.description: - test.description = test_raw.description - test.save() - - logger.debug('IMPORT_SCAN parser v2: Parse findings (aggregate)') - # currently we only support import one Test - # so for parser that support multiple tests (like SARIF) - # we aggregate all the findings into one uniq test - parsed_findings = [] - for test_raw in tests: - parsed_findings.extend(test_raw.findings) - else: - logger.info(f'No tests found in import for {scan_type}') - else: - logger.debug('IMPORT_SCAN: Create Test') - # by default test_type == scan_type - test = self.create_test(scan_type, scan_type, engagement, lead, environment, scan_date=scan_date, tags=tags, - version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, now=now, - api_scan_configuration=api_scan_configuration, title=title) - - logger.debug('IMPORT_SCAN: Parse findings') - parser = get_parser(scan_type) - try: - parsed_findings = parser.get_findings(scan, test) - except ValueError as e: - logger.warning(e) - raise ValidationError(e) - - logger.debug('IMPORT_SCAN: Processing findings') - new_findings = [] - if settings.ASYNC_FINDING_IMPORT: - chunk_list = importer_utils.chunk_list(parsed_findings) - results_list = [] - # First kick off all the workers - for findings_list in chunk_list: - result = self.process_parsed_findings(test, findings_list, scan_type, user, active=active, - verified=verified, minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, push_to_jira=push_to_jira, - group_by=group_by, now=now, service=service, scan_date=scan_date, sync=False, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings) - # Since I dont want to wait until the task is done right now, save the id - # So I can check on the task later - results_list += [result] - # After all tasks have been started, time to pull the results - logger.info('IMPORT_SCAN: Collecting Findings') - for results in results_list: - serial_new_findings = results.get() - new_findings += [next(serializers.deserialize("json", finding)).object for finding in serial_new_findings] - logger.info('IMPORT_SCAN: All Findings Collected') - # Indicate that the test is not complete yet as endpoints will still be rolling in. 
- test.percent_complete = 50 - test.save() - else: - new_findings = self.process_parsed_findings(test, parsed_findings, scan_type, user, active=active, - verified=verified, minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, push_to_jira=push_to_jira, - group_by=group_by, now=now, service=service, scan_date=scan_date, sync=True, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings) - - closed_findings = [] - if close_old_findings: - logger.debug('IMPORT_SCAN: Closing findings no longer present in scan report') - closed_findings = self.close_old_findings(test, scan_date, user=user, push_to_jira=push_to_jira, service=service, - close_old_findings_product_scope=close_old_findings_product_scope) - - logger.debug('IMPORT_SCAN: Updating test/engagement timestamps') - importer_utils.update_timestamps(test, version, branch_tag, build_id, commit_hash, now, scan_date) - - test_import = None - if settings.TRACK_IMPORT_HISTORY: - logger.debug('IMPORT_SCAN: Updating Import History') - test_import = importer_utils.update_import_history(Test_Import.IMPORT_TYPE, active, verified, tags, minimum_severity, - endpoints_to_add, version, branch_tag, build_id, commit_hash, - push_to_jira, close_old_findings, test, new_findings, closed_findings) - if apply_tags_to_findings and tags: - for finding in test_import.findings_affected.all(): - for tag in tags: - finding.tags.add(tag) - - if apply_tags_to_endpoints and tags: - for finding in test_import.findings_affected.all(): - for endpoint in finding.endpoints.all(): - for tag in tags: - endpoint.tags.add(tag) - - logger.debug('IMPORT_SCAN: Generating notifications') - notifications_helper.notify_test_created(test) - updated_count = len(new_findings) + len(closed_findings) - notifications_helper.notify_scan_added(test, updated_count, new_findings=new_findings, findings_mitigated=closed_findings) - - logger.debug('IMPORT_SCAN: Updating Test progress') - importer_utils.update_test_progress(test) - - logger.debug('IMPORT_SCAN: Done') - - return test, len(new_findings), len(closed_findings), test_import diff --git a/dojo/importers/reimporter/reimporter.py b/dojo/importers/reimporter/reimporter.py deleted file mode 100644 index 9a3ebda8280..00000000000 --- a/dojo/importers/reimporter/reimporter.py +++ /dev/null @@ -1,779 +0,0 @@ -import base64 -import logging - -from django.conf import settings -from django.core import serializers -from django.core.exceptions import ValidationError -from django.core.files.base import ContentFile -from django.db.models import Q -from django.utils import timezone - -import dojo.finding.helper as finding_helper -import dojo.jira_link.helper as jira_helper -import dojo.notifications.helper as notifications_helper -from dojo.celery import app -from dojo.decorators import dojo_async_task -from dojo.importers import utils as importer_utils -from dojo.importers.reimporter import utils as reimporter_utils -from dojo.models import BurpRawRequestResponse, FileUpload, Finding, Notes, Test_Import -from dojo.tools.factory import get_parser -from dojo.utils import get_current_user, is_finding_groups_enabled - -logger = logging.getLogger(__name__) -deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") - - -class DojoDefaultReImporter: - @dojo_async_task - @app.task(ignore_result=False) - def process_parsed_findings( - self, - test, - parsed_findings, - scan_type, - user, - active=None, - verified=None, - minimum_severity=None, - endpoints_to_add=None, - push_to_jira=None, - 
group_by=None, - now=timezone.now(), - service=None, - scan_date=None, - do_not_reactivate=False, - create_finding_groups_for_all_findings=True, - **kwargs, - ): - - items = parsed_findings - original_items = list(test.finding_set.all()) - new_items = [] - finding_count = 0 - finding_added_count = 0 - reactivated_count = 0 - reactivated_items = [] - unchanged_count = 0 - unchanged_items = [] - - logger.debug("starting reimport of %i items.", len(items) if items else 0) - deduplication_algorithm = test.deduplication_algorithm - - i = 0 - group_names_to_findings_dict = {} - logger.debug( - "STEP 1: looping over findings from the reimported report and trying to match them to existing findings" - ) - deduplicationLogger.debug( - "Algorithm used for matching new findings to existing findings: %s", - deduplication_algorithm, - ) - for item in items: - # FIXME hack to remove when all parsers have unit tests for this attribute - if item.severity.lower().startswith("info") and item.severity != "Info": - item.severity = "Info" - - item.numerical_severity = Finding.get_numerical_severity(item.severity) - - if minimum_severity and ( - Finding.SEVERITIES[item.severity] > Finding.SEVERITIES[minimum_severity] - ): - # finding's severity is below the configured threshold : ignoring the finding - continue - - # existing findings may be from before we had component_name/version fields - component_name = ( - item.component_name if hasattr(item, "component_name") else None - ) - component_version = ( - item.component_version if hasattr(item, "component_version") else None - ) - - # Some parsers provide "mitigated" field but do not set timezone (because it is probably not available in the report) - # Finding.mitigated is DateTimeField and it requires timezone - if item.mitigated and not item.mitigated.tzinfo: - item.mitigated = item.mitigated.replace(tzinfo=now.tzinfo) - - if not hasattr(item, "test"): - item.test = test - - if service: - item.service = service - - if item.dynamic_finding: - for e in item.unsaved_endpoints: - try: - e.clean() - except ValidationError as err: - logger.warning( - "DefectDojo is storing broken endpoint because cleaning wasn't successful: " - f"{err}" - ) - - item.hash_code = item.compute_hash_code() - deduplicationLogger.debug("item's hash_code: %s", item.hash_code) - - findings = reimporter_utils.match_new_finding_to_existing_finding( - item, test, deduplication_algorithm - ) - deduplicationLogger.debug( - "found %i findings matching with current new finding", len(findings) - ) - - if findings: - # existing finding found - finding = findings[0] - if finding.false_p or finding.out_of_scope or finding.risk_accepted: - logger.debug( - "%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s", - i, - finding.false_p, - finding.out_of_scope, - finding.risk_accepted, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - if ( - finding.false_p == item.false_p - and finding.out_of_scope == item.out_of_scope - and finding.risk_accepted == item.risk_accepted - ): - unchanged_items.append(finding) - unchanged_count += 1 - continue - elif finding.is_mitigated: - # if the reimported item has a mitigation time, we can compare - if item.is_mitigated: - unchanged_items.append(finding) - unchanged_count += 1 - if item.mitigated: - logger.debug( - "item mitigated time: " - + str(item.mitigated.timestamp()) - ) - logger.debug( - "finding mitigated time: " - + str(finding.mitigated.timestamp()) - 
) - if ( - item.mitigated.timestamp() - == finding.mitigated.timestamp() - ): - logger.debug( - "New imported finding and already existing finding have the same mitigation date, will skip as they are the same." - ) - continue - if ( - item.mitigated.timestamp() - != finding.mitigated.timestamp() - ): - logger.debug( - "New imported finding and already existing finding are both mitigated but have different dates, not taking action" - ) - # TODO: implement proper date-aware reimporting mechanism, if an imported finding is closed more recently than the defectdojo finding, then there might be details in the scanner that should be added - continue - else: - # even if there is no mitigation time, skip it, because both the current finding and the reimported finding are is_mitigated - continue - else: - if not do_not_reactivate: - logger.debug( - "%i: reactivating: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - finding.mitigated = None - finding.is_mitigated = False - finding.mitigated_by = None - finding.active = True - if verified is not None: - finding.verified = verified - if do_not_reactivate: - logger.debug( - "%i: skipping reactivating by user's choice do_not_reactivate: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - existing_note = finding.notes.filter( - entry=f"Finding has skipped reactivation from {scan_type} re-upload with user decision do_not_reactivate.", - author=user, - ) - if len(existing_note) == 0: - note = Notes( - entry=f"Finding has skipped reactivation from {scan_type} re-upload with user decision do_not_reactivate.", - author=user, - ) - note.save() - finding.notes.add(note) - finding.save(dedupe_option=False) - continue - # existing findings may be from before we had component_name/version fields - finding.component_name = ( - finding.component_name - if finding.component_name - else component_name - ) - finding.component_version = ( - finding.component_version - if finding.component_version - else component_version - ) - - # don't dedupe before endpoints are added - finding.save(dedupe_option=False) - note = Notes( - entry=f"Re-activated by {scan_type} re-upload.", author=user - ) - note.save() - - endpoint_statuses = finding.status_finding.exclude( - Q(false_positive=True) - | Q(out_of_scope=True) - | Q(risk_accepted=True) - ) - reimporter_utils.chunk_endpoints_and_reactivate(endpoint_statuses) - - finding.notes.add(note) - reactivated_items.append(finding) - reactivated_count += 1 - else: - # if finding associated to new item is none of risk accepted, mitigated, false positive or out of scope - # existing findings may be from before we had component_name/version fields - logger.debug( - "%i: updating existing finding: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - if not (finding.mitigated and finding.is_mitigated): - logger.debug( - "Reimported item matches a finding that is currently open." - ) - if item.is_mitigated: - logger.debug( - "Reimported mitigated item matches a finding that is currently open, closing." - ) - # TODO: Implement a date comparison for opened defectdojo findings before closing them by reimporting, as they could be force closed by the scanner but a DD user forces it open ? 
- logger.debug( - "%i: closing: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - finding.mitigated = item.mitigated - finding.is_mitigated = True - finding.mitigated_by = item.mitigated_by - finding.active = False - if verified is not None: - finding.verified = verified - elif item.risk_accepted or item.false_p or item.out_of_scope: - logger.debug('Reimported mitigated item matches a finding that is currently open, closing.') - logger.debug('%i: closing: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version) - finding.risk_accepted = item.risk_accepted - finding.false_p = item.false_p - finding.out_of_scope = item.out_of_scope - finding.active = False - if verified is not None: - finding.verified = verified - else: - # if finding is the same but list of affected was changed, finding is marked as unchanged. This is a known issue - unchanged_items.append(finding) - unchanged_count += 1 - - if (component_name is not None and not finding.component_name) or ( - component_version is not None and not finding.component_version - ): - finding.component_name = ( - finding.component_name - if finding.component_name - else component_name - ) - finding.component_version = ( - finding.component_version - if finding.component_version - else component_version - ) - finding.save(dedupe_option=False) - - if finding.dynamic_finding: - logger.debug( - "Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints" - ) - reimporter_utils.update_endpoint_status(finding, item, user) - else: - # no existing finding found - item.reporter = user - item.last_reviewed = timezone.now() - item.last_reviewed_by = user - - if active is not None: - # indicates an override. Otherwise, do not change the value of item.active - item.active = active - - if verified is not None: - # indicates an override. Otherwise, do not change the value of verified - item.verified = verified - - # if scan_date was provided, override value from parser - if scan_date: - item.date = scan_date.date() - - # Save it. Don't dedupe before endpoints are added. 
- item.save(dedupe_option=False) - logger.debug( - "%i: reimport created new finding as no existing finding match: %i:%s:%s:%s", - i, - item.id, - item, - item.component_name, - item.component_version, - ) - - # only new items get auto grouped to avoid confusion around already existing items that are already grouped - if is_finding_groups_enabled() and group_by: - # If finding groups are enabled, group all findings by group name - name = finding_helper.get_group_by_group_name(item, group_by) - if name is not None: - if name in group_names_to_findings_dict: - group_names_to_findings_dict[name].append(item) - else: - group_names_to_findings_dict[name] = [item] - - finding_added_count += 1 - new_items.append(item) - finding = item - - if hasattr(item, "unsaved_req_resp"): - for req_resp in item.unsaved_req_resp: - burp_rr = BurpRawRequestResponse( - finding=finding, - burpRequestBase64=base64.b64encode( - req_resp["req"].encode("utf-8") - ), - burpResponseBase64=base64.b64encode( - req_resp["resp"].encode("utf-8") - ), - ) - burp_rr.clean() - burp_rr.save() - - if item.unsaved_request and item.unsaved_response: - burp_rr = BurpRawRequestResponse( - finding=finding, - burpRequestBase64=base64.b64encode( - item.unsaved_request.encode() - ), - burpResponseBase64=base64.b64encode( - item.unsaved_response.encode() - ), - ) - burp_rr.clean() - burp_rr.save() - - # for existing findings: make sure endpoints are present or created - if finding: - finding_count += 1 - importer_utils.chunk_endpoints_and_disperse( - finding, test, item.unsaved_endpoints - ) - if endpoints_to_add: - importer_utils.chunk_endpoints_and_disperse( - finding, test, endpoints_to_add - ) - - if item.unsaved_tags: - finding.tags = item.unsaved_tags - - if item.unsaved_files: - for unsaved_file in item.unsaved_files: - data = base64.b64decode(unsaved_file.get("data")) - title = unsaved_file.get("title", "") - ( - file_upload, - _file_upload_created, - ) = FileUpload.objects.get_or_create( - title=title, - ) - file_upload.file.save(title, ContentFile(data)) - file_upload.save() - finding.files.add(file_upload) - - if finding.unsaved_vulnerability_ids: - importer_utils.handle_vulnerability_ids(finding) - - # existing findings may be from before we had component_name/version fields - finding.component_name = ( - finding.component_name if finding.component_name else component_name - ) - finding.component_version = ( - finding.component_version - if finding.component_version - else component_version - ) - - # finding = new finding or existing finding still in the upload report - # to avoid pushing a finding group multiple times, we push those outside of the loop - if is_finding_groups_enabled() and group_by: - finding.save() - else: - finding.save(push_to_jira=push_to_jira) - - to_mitigate = ( - set(original_items) - set(reactivated_items) - set(unchanged_items) - ) - # due to #3958 we can have duplicates inside the same report - # this could mean that a new finding is created and right after - # that it is detected as the 'matched existing finding' for a - # following finding in the same report - # this means untouched can have this finding inside it, - # while it is in fact a new finding. 
So we substract new_items - untouched = set(unchanged_items) - set(to_mitigate) - set(new_items) - - for (group_name, findings) in group_names_to_findings_dict.items(): - finding_helper.add_findings_to_auto_group(group_name, findings, group_by, create_finding_groups_for_all_findings, **kwargs) - if push_to_jira: - if findings[0].finding_group is not None: - jira_helper.push_to_jira(findings[0].finding_group) - else: - jira_helper.push_to_jira(findings[0]) - - if is_finding_groups_enabled() and push_to_jira: - for finding_group in set( # noqa: C401 - finding.finding_group - for finding in reactivated_items + unchanged_items - if finding.finding_group is not None and not finding.is_mitigated - ): - jira_helper.push_to_jira(finding_group) - - sync = kwargs.get("sync", False) - if not sync: - serialized_new_items = [ - serializers.serialize( - "json", - [ - finding, - ], - ) - for finding in new_items - ] - serialized_reactivated_items = [ - serializers.serialize( - "json", - [ - finding, - ], - ) - for finding in reactivated_items - ] - serialized_to_mitigate = [ - serializers.serialize( - "json", - [ - finding, - ], - ) - for finding in to_mitigate - ] - serialized_untouched = [ - serializers.serialize( - "json", - [ - finding, - ], - ) - for finding in untouched - ] - return ( - serialized_new_items, - serialized_reactivated_items, - serialized_to_mitigate, - serialized_untouched, - ) - - return new_items, reactivated_items, to_mitigate, untouched - - def close_old_findings( - self, test, to_mitigate, scan_date_time, user, push_to_jira=None - ): - logger.debug("IMPORT_SCAN: Closing findings no longer present in scan report") - mitigated_findings = [] - for finding in to_mitigate: - if not finding.mitigated or not finding.is_mitigated: - logger.debug("mitigating finding: %i:%s", finding.id, finding) - finding.mitigated = scan_date_time - finding.is_mitigated = True - finding.mitigated_by = user - finding.active = False - - endpoint_status = finding.status_finding.all() - reimporter_utils.mitigate_endpoint_status( - endpoint_status, user, kwuser=user, sync=True - ) - - # to avoid pushing a finding group multiple times, we push those outside of the loop - if is_finding_groups_enabled() and finding.finding_group: - # don't try to dedupe findings that we are closing - finding.save(dedupe_option=False) - else: - finding.save(push_to_jira=push_to_jira, dedupe_option=False) - - note = Notes( - entry=f"Mitigated by {test.test_type} re-upload.", author=user - ) - note.save() - finding.notes.add(note) - mitigated_findings.append(finding) - - if is_finding_groups_enabled() and push_to_jira: - for finding_group in set( # noqa: C401 - finding.finding_group - for finding in to_mitigate - if finding.finding_group is not None - ): - jira_helper.push_to_jira(finding_group) - - return mitigated_findings - - def reimport_scan( - self, - scan, - scan_type, - test, - active=None, - verified=None, - tags=None, - minimum_severity=None, - user=None, - endpoints_to_add=None, - scan_date=None, - version=None, - branch_tag=None, - build_id=None, - commit_hash=None, - push_to_jira=None, - close_old_findings=True, - group_by=None, - api_scan_configuration=None, - service=None, - do_not_reactivate=False, - create_finding_groups_for_all_findings=True, - apply_tags_to_findings=False, - apply_tags_to_endpoints=False, - ): - - logger.debug(f"REIMPORT_SCAN: parameters: {locals()}") - - user = user or get_current_user() - - now = timezone.now() - - if api_scan_configuration: - if api_scan_configuration.product != 
test.engagement.product: - msg = "API Scan Configuration has to be from same product as the Test" - raise ValidationError(msg) - if test.api_scan_configuration != api_scan_configuration: - test.api_scan_configuration = api_scan_configuration - test.save() - - # check if the parser that handle the scan_type manage tests - parser = get_parser(scan_type) - if hasattr(parser, "get_tests"): - logger.debug("REIMPORT_SCAN parser v2: Create parse findings") - try: - tests = parser.get_tests(scan_type, scan) - except ValueError as e: - logger.warning(e) - raise ValidationError(e) - # for now we only consider the first test in the list and artificially aggregate all findings of all tests - # this is the same as the old behavior as current import/reimporter implementation doesn't handle the case - # when there is more than 1 test - parsed_findings = [] - for test_raw in tests: - parsed_findings.extend(test_raw.findings) - else: - logger.debug("REIMPORT_SCAN: Parse findings") - try: - parsed_findings = parser.get_findings(scan, test) - except ValueError as e: - logger.warning(e) - raise ValidationError(e) - - logger.debug("REIMPORT_SCAN: Processing findings") - new_findings = [] - reactivated_findings = [] - findings_to_mitigate = [] - untouched_findings = [] - if settings.ASYNC_FINDING_IMPORT: - chunk_list = importer_utils.chunk_list(parsed_findings) - results_list = [] - # First kick off all the workers - for findings_list in chunk_list: - result = self.process_parsed_findings( - test, - findings_list, - scan_type, - user, - active=active, - verified=verified, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - push_to_jira=push_to_jira, - group_by=group_by, - now=now, - service=service, - scan_date=scan_date, - sync=False, - do_not_reactivate=do_not_reactivate, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - ) - - # Since I dont want to wait until the task is done right now, save the id - # So I can check on the task later - results_list += [result] - # After all tasks have been started, time to pull the results - logger.debug("REIMPORT_SCAN: Collecting Findings") - for results in results_list: - ( - serial_new_findings, - serial_reactivated_findings, - serial_findings_to_mitigate, - serial_untouched_findings, - ) = results.get() - new_findings += [ - next(serializers.deserialize("json", finding)).object - for finding in serial_new_findings - ] - reactivated_findings += [ - next(serializers.deserialize("json", finding)).object - for finding in serial_reactivated_findings - ] - findings_to_mitigate += [ - next(serializers.deserialize("json", finding)).object - for finding in serial_findings_to_mitigate - ] - untouched_findings += [ - next(serializers.deserialize("json", finding)).object - for finding in serial_untouched_findings - ] - logger.debug("REIMPORT_SCAN: All Findings Collected") - # Indicate that the test is not complete yet as endpoints will still be rolling in. 
- test.percent_complete = 50 - test.save() - importer_utils.update_test_progress(test) - else: - ( - new_findings, - reactivated_findings, - findings_to_mitigate, - untouched_findings, - ) = self.process_parsed_findings( - test, - parsed_findings, - scan_type, - user, - active=active, - verified=verified, - minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, - push_to_jira=push_to_jira, - group_by=group_by, - now=now, - service=service, - scan_date=scan_date, - sync=True, - do_not_reactivate=do_not_reactivate, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - ) - - closed_findings = [] - if close_old_findings: - logger.debug( - "REIMPORT_SCAN: Closing findings no longer present in scan report" - ) - closed_findings = self.close_old_findings( - test, - findings_to_mitigate, - scan_date, - user=user, - push_to_jira=push_to_jira, - ) - - logger.debug("REIMPORT_SCAN: Updating test/engagement timestamps") - importer_utils.update_timestamps( - test, version, branch_tag, build_id, commit_hash, now, scan_date - ) - - logger.debug("REIMPORT_SCAN: Updating test tags") - importer_utils.update_tags(test, tags) - - test_import = None - if settings.TRACK_IMPORT_HISTORY: - logger.debug("REIMPORT_SCAN: Updating Import History") - test_import = importer_utils.update_import_history( - Test_Import.REIMPORT_TYPE, - active, - verified, - tags, - minimum_severity, - endpoints_to_add, - version, - branch_tag, - build_id, - commit_hash, - push_to_jira, - close_old_findings, - test, - new_findings, - closed_findings, - reactivated_findings, - untouched_findings, - ) - - if apply_tags_to_findings and tags: - for finding in test_import.findings_affected.all(): - for tag in tags: - finding.tags.add(tag) - - if apply_tags_to_endpoints and tags: - for finding in test_import.findings_affected.all(): - for endpoint in finding.endpoints.all(): - for tag in tags: - endpoint.tags.add(tag) - - logger.debug("REIMPORT_SCAN: Generating notifications") - - updated_count = ( - len(closed_findings) + len(reactivated_findings) + len(new_findings) - ) - notifications_helper.notify_scan_added( - test, - updated_count, - new_findings=new_findings, - findings_mitigated=closed_findings, - findings_reactivated=reactivated_findings, - findings_untouched=untouched_findings, - ) - - logger.debug("REIMPORT_SCAN: Done") - - return ( - test, - updated_count, - len(new_findings), - len(closed_findings), - len(reactivated_findings), - len(untouched_findings), - test_import, - ) diff --git a/dojo/importers/reimporter/utils.py b/dojo/importers/reimporter/utils.py deleted file mode 100644 index 4ec9ce1752e..00000000000 --- a/dojo/importers/reimporter/utils.py +++ /dev/null @@ -1,263 +0,0 @@ -import logging -from datetime import timedelta - -from crum import get_current_user -from django.conf import settings -from django.utils import timezone - -from dojo.celery import app -from dojo.decorators import dojo_async_task -from dojo.importers import utils as importer_utils -from dojo.models import Engagement, Finding, Product, Product_Member, Product_Type, Product_Type_Member, Q, Role, Test -from dojo.utils import get_last_object_or_none, get_object_or_none - -logger = logging.getLogger(__name__) -deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") - -""" -Common code for reimporting from APIV2 or from the GUI -""" - - -def match_new_finding_to_existing_finding(new_finding, test, deduplication_algorithm): - # This code should match the logic used for deduplication out of the 
re-import feature. - # See utils.py deduplicate_* functions - deduplicationLogger.debug('return findings bases on algorithm: %s', deduplication_algorithm) - if deduplication_algorithm == 'hash_code': - return Finding.objects.filter( - test=test, - hash_code=new_finding.hash_code).exclude( - hash_code=None).order_by('id') - elif deduplication_algorithm == 'unique_id_from_tool': - return Finding.objects.filter( - test=test, - unique_id_from_tool=new_finding.unique_id_from_tool).exclude( - unique_id_from_tool=None).order_by('id') - elif deduplication_algorithm == 'unique_id_from_tool_or_hash_code': - query = Finding.objects.filter( - Q(test=test), - (Q(hash_code__isnull=False) & Q(hash_code=new_finding.hash_code)) - | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=new_finding.unique_id_from_tool))).order_by('id') - deduplicationLogger.debug(query.query) - return query - elif deduplication_algorithm == 'legacy': - # This is the legacy reimport behavior. Although it's pretty flawed and doesn't match the legacy algorithm for deduplication, - # this is left as is for simplicity. - # Re-writing the legacy deduplication here would be complicated and counter-productive. - # If you have use cases going through this section, you're advised to create a deduplication configuration for your parser - logger.debug("Legacy reimport. In case of issue, you're advised to create a deduplication configuration in order not to go through this section") - return Finding.objects.filter( - title=new_finding.title, - test=test, - severity=new_finding.severity, - numerical_severity=Finding.get_numerical_severity(new_finding.severity)).order_by('id') - else: - logger.error("Internal error: unexpected deduplication_algorithm: '%s' ", deduplication_algorithm) - return None - - -def update_endpoint_status(existing_finding, new_finding, user): - # New endpoints are already added in serializers.py / views.py (see comment "# for existing findings: make sure endpoints are present or created") - # So we only need to mitigate endpoints that are no longer present - # using `.all()` will mark as mitigated also `endpoint_status` with flags `false_positive`, `out_of_scope` and `risk_accepted`. This is a known issue. This is not a bug. This is a future. 
- existing_finding_endpoint_status_list = existing_finding.status_finding.all() - new_finding_endpoints_list = new_finding.unsaved_endpoints - if new_finding.is_mitigated: - # New finding is mitigated, so mitigate all old endpoints - endpoint_status_to_mitigate = existing_finding_endpoint_status_list - else: - # Mitigate any endpoints in the old finding not found in the new finding - endpoint_status_to_mitigate = list( - filter( - lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint not in new_finding_endpoints_list, - existing_finding_endpoint_status_list) - ) - # Re-activate any endpoints in the old finding that are in the new finding - endpoint_status_to_reactivate = list( - filter( - lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint in new_finding_endpoints_list, - existing_finding_endpoint_status_list) - ) - chunk_endpoints_and_reactivate(endpoint_status_to_reactivate) - - # Determine if this can be run async - if settings.ASYNC_FINDING_IMPORT: - chunk_list = importer_utils.chunk_list(endpoint_status_to_mitigate) - # If there is only one chunk, then do not bother with async - if len(chunk_list) < 2: - mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True) - return - # First kick off all the workers - for endpoint_status_list in chunk_list: - mitigate_endpoint_status(endpoint_status_list, user, kwuser=user, sync=False) - else: - mitigate_endpoint_status(endpoint_status_to_mitigate, user, kwuser=user, sync=True) - - -@dojo_async_task -@app.task() -def mitigate_endpoint_status(endpoint_status_list, user, **kwargs): - """ Only mitigate endpoints that are actually active """ - for endpoint_status in endpoint_status_list: - # Only mitigate endpoints that are actually active - if not endpoint_status.mitigated: - logger.debug("Re-import: mitigating endpoint %s that is no longer present", str(endpoint_status.endpoint)) - endpoint_status.mitigated_by = user - endpoint_status.mitigated_time = timezone.now() - endpoint_status.mitigated = True - endpoint_status.last_modified = timezone.now() - endpoint_status.save() - - -def chunk_endpoints_and_reactivate(endpoint_statuses, **kwargs): - # Determine if this can be run async - if settings.ASYNC_FINDING_IMPORT: - chunk_list = importer_utils.chunk_list(endpoint_statuses) - # If there is only one chunk, then do not bother with async - if len(chunk_list) < 2: - reactivate_endpoint_status(endpoint_statuses, sync=True) - logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0])) - # First kick off all the workers - for endpoint_status_list in chunk_list: - reactivate_endpoint_status(endpoint_status_list, sync=False) - else: - reactivate_endpoint_status(endpoint_statuses, sync=True) - - -@dojo_async_task -@app.task() -def reactivate_endpoint_status(endpoint_status_list, **kwargs): - for endpoint_status in endpoint_status_list: - # Only reactivate endpoints that are actually mitigated - if endpoint_status.mitigated: - logger.debug("Re-import: reactivating endpoint %s that is present in this scan", str(endpoint_status.endpoint)) - endpoint_status.mitigated_by = None - endpoint_status.mitigated_time = None - endpoint_status.mitigated = False - endpoint_status.last_modified = timezone.now() - endpoint_status.save() - - -def get_target_product_if_exists(product_name=None, product_type_name=None): - if product_name: - product = get_object_or_none(Product, name=product_name) - if product: - # product type name must match 
if provided - if product_type_name: - if product.prod_type.name == product_type_name: - return product - else: - return product - - return None - - -def get_target_product_type_if_exists(product_type_name=None): - if product_type_name: - return get_object_or_none(Product_Type, name=product_type_name) - else: - return None - - -def get_target_product_by_id_if_exists(product_id=None): - product = None - if product_id: - product = get_object_or_none(Product, pk=product_id) - logger.debug('Using existing product by id: %s', product_id) - return product - - -def get_target_engagement_if_exists(engagement_id=None, engagement_name=None, product=None): - if engagement_id: - engagement = get_object_or_none(Engagement, pk=engagement_id) - logger.debug('Using existing engagement by id: %s', engagement_id) - return engagement - - if not product: - # if there's no product, then for sure there's no engagement either - return None - - # engagement name is not unique unfortunately - engagement = get_last_object_or_none(Engagement, product=product, name=engagement_name) - return engagement - - -def get_target_test_if_exists(test_id=None, test_title=None, scan_type=None, engagement=None): - """ - Retrieves the target test to reimport. This can be as simple as looking up the test via the `test_id` parameter. - If there is no `test_id` provided, we lookup the latest test inside the provided engagement that satisfies - the provided scan_type and test_title. - """ - if test_id: - test = get_object_or_none(Test, pk=test_id) - logger.debug('Using existing Test by id: %s', test_id) - return test - - if not engagement: - return None - - if test_title: - return get_last_object_or_none(Test, engagement=engagement, title=test_title, scan_type=scan_type) - - return get_last_object_or_none(Test, engagement=engagement, scan_type=scan_type) - - -def get_or_create_product(product_name=None, product_type_name=None, auto_create_context=None): - # try to find the product (withing the provided product_type) - product = get_target_product_if_exists(product_name, product_type_name) - if product: - return product - - # not found .... create it - if not auto_create_context: - msg = 'auto_create_context not True, unable to create non-existing product' - raise ValueError(msg) - else: - product_type, created = Product_Type.objects.get_or_create(name=product_type_name) - if created: - member = Product_Type_Member() - member.user = get_current_user() - member.product_type = product_type - member.role = Role.objects.get(is_owner=True) - member.save() - - product, created = Product.objects.get_or_create(name=product_name, prod_type=product_type, description=product_name) - if created: - member = Product_Member() - member.user = get_current_user() - member.product = product - member.role = Role.objects.get(is_owner=True) - member.save() - - return product - - -def get_or_create_engagement(engagement_id=None, engagement_name=None, product_name=None, product_type_name=None, auto_create_context=None, - deduplication_on_engagement=False, source_code_management_uri=None, target_end=None): - # try to find the engagement (and product) - product = get_target_product_if_exists(product_name, product_type_name) - engagement = get_target_engagement_if_exists(engagement_id, engagement_name, product) - if engagement: - return engagement - - # not found .... 
create it - if not auto_create_context: - msg = 'auto_create_context not True, unable to create non-existing engagement' - raise ValueError(msg) - else: - product = get_or_create_product(product_name, product_type_name, auto_create_context) - - if not product: - msg = 'no product, unable to create engagement' - raise ValueError(msg) - - target_start = timezone.now().date() - if (target_end is None) or (target_start > target_end): - target_end = (timezone.now() + timedelta(days=365)).date() - - engagement = Engagement.objects.create(engagement_type="CI/CD", name=engagement_name, product=product, lead=get_current_user(), - target_start=target_start, target_end=target_end, status="In Progress", - deduplication_on_engagement=deduplication_on_engagement, - source_code_management_uri=source_code_management_uri) - - return engagement diff --git a/dojo/importers/utils.py b/dojo/importers/utils.py deleted file mode 100644 index 255e2ffa2e6..00000000000 --- a/dojo/importers/utils.py +++ /dev/null @@ -1,209 +0,0 @@ -import logging - -from django.conf import settings -from django.core.exceptions import MultipleObjectsReturned, ValidationError -from django.urls import reverse -from django.utils.timezone import make_aware - -from dojo.celery import app -from dojo.decorators import dojo_async_task -from dojo.endpoint.utils import endpoint_get_or_create -from dojo.models import ( - IMPORT_CLOSED_FINDING, - IMPORT_CREATED_FINDING, - IMPORT_REACTIVATED_FINDING, - IMPORT_UNTOUCHED_FINDING, - Endpoint_Status, - Test_Import, - Test_Import_Finding_Action, - Vulnerability_Id, -) -from dojo.utils import max_safe - -logger = logging.getLogger(__name__) - - -def update_timestamps(test, version, branch_tag, build_id, commit_hash, now, scan_date): - if not scan_date: - scan_date = now - - if test.engagement.engagement_type == 'CI/CD': - test.engagement.target_end = max_safe([scan_date.date(), test.engagement.target_end]) - - max_test_start_date = max_safe([scan_date, test.target_end]) - if not max_test_start_date.tzinfo: - max_test_start_date = make_aware(max_test_start_date) - test.target_end = max_test_start_date - - if version: - test.version = version - - if branch_tag: - test.branch_tag = branch_tag - - if build_id: - test.build_id = build_id - - if commit_hash: - test.commit_hash = commit_hash - - test.save() - test.engagement.save() - - -def update_tags(test, tags): - if tags: - test.tags = tags - - test.save() - - -def update_import_history(type, active, verified, tags, minimum_severity, endpoints_to_add, version, branch_tag, - build_id, commit_hash, push_to_jira, close_old_findings, test, - new_findings=[], closed_findings=[], reactivated_findings=[], untouched_findings=[]): - logger.debug("new: %d closed: %d reactivated: %d untouched: %d", len(new_findings), len(closed_findings), len(reactivated_findings), len(untouched_findings)) - # json field - import_settings = {} - import_settings['active'] = active - import_settings['verified'] = verified - import_settings['minimum_severity'] = minimum_severity - import_settings['close_old_findings'] = close_old_findings - import_settings['push_to_jira'] = push_to_jira - import_settings['tags'] = tags - - if endpoints_to_add: - import_settings['endpoints'] = [str(endpoint) for endpoint in endpoints_to_add] - - test_import = Test_Import(test=test, import_settings=import_settings, version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, type=type) - test_import.save() - - test_import_finding_action_list = [] - for finding in closed_findings: - 
logger.debug('preparing Test_Import_Finding_Action for closed finding: %i', finding.id) - test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_CLOSED_FINDING)) - for finding in new_findings: - logger.debug('preparing Test_Import_Finding_Action for created finding: %i', finding.id) - test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_CREATED_FINDING)) - for finding in reactivated_findings: - logger.debug('preparing Test_Import_Finding_Action for reactivated finding: %i', finding.id) - test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_REACTIVATED_FINDING)) - for finding in untouched_findings: - logger.debug('preparing Test_Import_Finding_Action for untouched finding: %i', finding.id) - test_import_finding_action_list.append(Test_Import_Finding_Action(test_import=test_import, finding=finding, action=IMPORT_UNTOUCHED_FINDING)) - - Test_Import_Finding_Action.objects.bulk_create(test_import_finding_action_list) - - return test_import - - -def construct_imported_message(scan_type, finding_count=0, new_finding_count=0, closed_finding_count=0, reactivated_finding_count=0, untouched_finding_count=0): - if finding_count: - message = f'{scan_type} processed a total of {finding_count} findings' - - if new_finding_count: - message = message + ' created %d findings' % (new_finding_count) - if closed_finding_count: - message = message + ' closed %d findings' % (closed_finding_count) - if reactivated_finding_count: - message = message + ' reactivated %d findings' % (reactivated_finding_count) - if untouched_finding_count: - message = message + ' did not touch %d findings' % (untouched_finding_count) - - message = message + "." - else: - message = 'No findings were added/updated/closed/reactivated as the findings in Defect Dojo are identical to those in the uploaded report.' - - return message - - -def chunk_list(list): - chunk_size = settings.ASYNC_FINDING_IMPORT_CHUNK_SIZE - # Break the list of parsed findings into "chunk_size" lists - chunk_list = [list[i:i + chunk_size] for i in range(0, len(list), chunk_size)] - logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_size)) - return chunk_list - - -def chunk_endpoints_and_disperse(finding, test, endpoints, **kwargs): - if settings.ASYNC_FINDING_IMPORT: - chunked_list = chunk_list(endpoints) - # If there is only one chunk, then do not bother with async - if len(chunked_list) < 2: - add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True) - return [] - # First kick off all the workers - for endpoints_list in chunked_list: - add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False) - else: - add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True) - - -# Since adding a model to a ManyToMany relationship does not require an additional -# save, there is no need to keep track of when the task finishes. 
-@dojo_async_task
-@app.task()
-def add_endpoints_to_unsaved_finding(finding, test, endpoints, **kwargs):
-    logger.debug('IMPORT_SCAN: Adding ' + str(len(endpoints)) + ' endpoints to finding:' + str(finding))
-    for endpoint in endpoints:
-        try:
-            endpoint.clean()
-        except ValidationError as e:
-            logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: "
-                           f"{e}")
-        ep = None
-        try:
-            ep, _created = endpoint_get_or_create(
-                protocol=endpoint.protocol,
-                userinfo=endpoint.userinfo,
-                host=endpoint.host,
-                port=endpoint.port,
-                path=endpoint.path,
-                query=endpoint.query,
-                fragment=endpoint.fragment,
-                product=test.engagement.product)
-        except (MultipleObjectsReturned):
-            msg = "Endpoints in your database are broken. Please access {} and migrate them to new format or " \
-                  "remove them.".format(reverse('endpoint_migrate'))
-            raise Exception(msg)
-
-        _eps, _created = Endpoint_Status.objects.get_or_create(
-            finding=finding,
-            endpoint=ep,
-            defaults={'date': finding.date})
-
-    logger.debug('IMPORT_SCAN: ' + str(len(endpoints)) + ' imported')
-
-
-# This function is added to the async queue at the end of all finding import tasks
-# and after endpoint task, so this should only run after all the other ones are done
-@dojo_async_task
-@app.task()
-def update_test_progress(test, **kwargs):
-    test.percent_complete = 100
-    test.save()
-
-
-def handle_vulnerability_ids(finding):
-    # Synchronize the cve field with the unsaved_vulnerability_ids
-    # We do this to be as flexible as possible to handle the fields until
-    # the cve field is not needed anymore and can be removed.
-    if finding.unsaved_vulnerability_ids and finding.cve:
-        # Make sure the first entry of the list is the value of the cve field
-        finding.unsaved_vulnerability_ids.insert(0, finding.cve)
-    elif finding.unsaved_vulnerability_ids and not finding.cve:
-        # If the cve field is not set, use the first entry of the list to set it
-        finding.cve = finding.unsaved_vulnerability_ids[0]
-    elif not finding.unsaved_vulnerability_ids and finding.cve:
-        # If there is no list, make one with the value of the cve field
-        finding.unsaved_vulnerability_ids = [finding.cve]
-
-    if finding.unsaved_vulnerability_ids:
-        # Remove duplicates
-        finding.unsaved_vulnerability_ids = list(dict.fromkeys(finding.unsaved_vulnerability_ids))
-
-    # Add all vulnerability ids to the database
-    for vulnerability_id in finding.unsaved_vulnerability_ids:
-        Vulnerability_Id(
-            vulnerability_id=vulnerability_id,
-            finding=finding,
-        ).save()
diff --git a/dojo/product/urls.py b/dojo/product/urls.py
index 5daa8a2ce45..f2e05a613f6 100644
--- a/dojo/product/urls.py
+++ b/dojo/product/urls.py
@@ -12,8 +12,10 @@
         name='view_product_components'),
     re_path(r'^product/(?P<pid>\d+)/engagements$', views.view_engagements,
         name='view_engagements'),
-    re_path(r'^product/(?P<pid>\d+)/import_scan_results$',
-        dojo_engagement_views.ImportScanResultsView.as_view(), name='import_scan_results_prod'),
+    re_path(
+        r'^product/(?P<pid>\d+)/import_scan_results$',
+        dojo_engagement_views.ImportScanResultsView.as_view(),
+        name='import_scan_results_prod'),
     re_path(r'^product/(?P<pid>\d+)/metrics$', views.view_product_metrics,
         name='view_product_metrics'),
     re_path(r'^product/(?P<pid>\d+)/async_burndown_metrics$', views.async_burndown_metrics,
diff --git a/dojo/test/urls.py b/dojo/test/urls.py
index 7791e1319c3..c77aca76900 100644
--- a/dojo/test/urls.py
+++ b/dojo/test/urls.py
@@ -25,5 +25,8 @@
     re_path(r'^test/(?P<tid>\d+)/add_findings/(?P<fid>\d+)$', views.add_temp_finding,
        name='add_temp_finding'),
     re_path(r'^test/(?P<tid>\d+)/search$', views.search, name='search'),
-    re_path(r'^test/(?P<tid>\d+)/re_import_scan_results', views.re_import_scan_results, name='re_import_scan_results'),
+    re_path(
+        r'^test/(?P<tid>\d+)/re_import_scan_results',
+        views.ReImportScanResultsView.as_view(),
+        name='re_import_scan_results'),
 ]
diff --git a/dojo/test/views.py b/dojo/test/views.py
index d23c4f2164a..7eee27829e6 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -4,8 +4,8 @@
 import operator
 from datetime import datetime
 from functools import reduce
+from typing import Tuple
 
-from django.conf import settings
 from django.contrib import messages
 from django.contrib.admin.utils import NestedObjects
 from django.core.exceptions import ValidationError
@@ -41,8 +41,7 @@
     TestForm,
     TypedNoteForm,
 )
-from dojo.importers.reimporter.reimporter import DojoDefaultReImporter as ReImporter
-from dojo.importers.utils import construct_imported_message
+from dojo.importers.default_reimporter import DefaultReImporter
 from dojo.models import (
     IMPORT_UNTOUCHED_FINDING,
     BurpRawRequestResponse,
@@ -76,7 +75,6 @@
     get_setting,
     get_system_setting,
     get_words_for_field,
-    is_scan_file_too_large,
     process_notifications,
     redirect_to_return_url_or_else,
 )
@@ -337,9 +335,9 @@ def delete_test(request, tid):
                          message, extra_tags='alert-success')
     create_notification(event='other',
-                        title=_('Deletion of %(title)s') % {"title": test.title},
+                        title=_(f"Deletion of {test.title}"),
                         product=product,
-                        description=_('The test "%(title)s" was deleted by %(user)s') % {"title": test.title, "user": request.user},
+                        description=_(f'The test "{test.title}" was deleted by {request.user}'),
                         url=request.build_absolute_uri(reverse('view_engagement', args=(eng.id, ))),
                         recipients=[test.engagement.lead],
                         icon="exclamation-triangle")
@@ -440,19 +438,17 @@ def test_ics(request, tid):
     test = get_object_or_404(Test, id=tid)
     start_date = datetime.combine(test.target_start, datetime.min.time())
     end_date = datetime.combine(test.target_end, datetime.max.time())
-    uid = "dojo_test_%d_%d_%d" % (test.id, test.engagement.id, test.engagement.product.id)
-    cal = get_cal_event(start_date,
-                        end_date,
-                        _("Test: %(test_type_name)s (%(product_name)s)") % {
-                            'test_type_name': test.test_type.name,
-                            'product_name': test.engagement.product.name
-                        },
-                        _("Set aside for test %(test_type_name)s, on product %(product_name)s. Additional detail can be found at %(detail_url)s") % {
-                            'test_type_name': test.test_type.name,
-                            'product_name': test.engagement.product.name,
-                            'detail_url': request.build_absolute_uri(reverse("view_test", args=(test.id,)))
-                        },
-                        uid)
+    uid = f"dojo_test_{test.id}_{test.engagement.id}_{test.engagement.product.id}"
+    cal = get_cal_event(
+        start_date,
+        end_date,
+        _(f"Test: {test.test_type.name} ({test.engagement.product.name})"),
+        _(
+            f"Set aside for test {test.test_type.name}, on product {test.engagement.product.name}. 
" + f"Additional detail can be found at {request.build_absolute_uri(reverse('view_test', args=(test.id,)))}" + ), + uid + ) output = cal.serialize() response = HttpResponse(content=output) response['Content-Type'] = 'text/calendar' @@ -631,11 +627,9 @@ def process_forms(self, request: HttpRequest, test: Test, context: dict): # Create a notification create_notification( event='other', - title=_('Addition of %(title)s') % {'title': finding.title}, + title=_(f'Addition of {finding.title}'), finding=finding, - description=_('Finding "%(title)s" was added by %(user)s') % { - 'title': finding.title, 'user': request.user - }, + description=_(f'Finding "{finding.title}" was added by {request.user}'), url=reverse("view_finding", args=(finding.id,)), icon="exclamation-triangle") # Add a success message @@ -694,7 +688,7 @@ def add_temp_finding(request, tid, fid): form = AddFindingForm(request.POST, req_resp=None, product=test.engagement.product) if jira_helper.get_jira_project(test): jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform', jira_project=jira_helper.get_jira_project(test), finding_form=form) - logger.debug('jform valid: %s', jform.is_valid()) + logger.debug(f'jform valid: {jform.is_valid()}') if (form['active'].value() is False or form['false_p'].value()) and form['duplicate'].value() is False: closing_disabled = Note_Type.objects.filter(is_mandatory=True, is_active=True).count() @@ -779,12 +773,6 @@ def add_temp_finding(request, tid, fid): if jira_helper.get_jira_project(test): jform = JIRAFindingForm(push_all=jira_helper.is_push_all_issues(test), prefix='jiraform', jira_project=jira_helper.get_jira_project(test), finding_form=form) - # logger.debug('form valid: %s', form.is_valid()) - # logger.debug('jform valid: %s', jform.is_valid()) - # logger.debug('form errors: %s', form.errors) - # logger.debug('jform errors: %s', jform.errors) - # logger.debug('jform errors: %s', vars(jform)) - product_tab = Product_Tab(test.engagement.product, title=_("Add Finding"), tab="engagements") product_tab.setEngagement(test.engagement) return render(request, 'dojo/add_findings.html', @@ -818,121 +806,281 @@ def search(request, tid): }) -@user_is_authorized(Test, Permissions.Import_Scan_Result, 'tid') -def re_import_scan_results(request, tid): - additional_message = _("When re-uploading a scan, any findings not found in original scan will be updated as " - "mitigated. 
The process attempts to identify the differences, however manual verification " - "is highly recommended.") - test = get_object_or_404(Test, id=tid) - # by default we keep a trace of the scan_type used to create the test - # if it's not here, we use the "name" of the test type - # this feature exists to provide custom label for tests for some parsers - if test.scan_type: - scan_type = test.scan_type - else: - scan_type = test.test_type.name - engagement = test.engagement - form = ReImportScanForm(test=test) - jform = None - jira_project = jira_helper.get_jira_project(test) - push_all_jira_issues = jira_helper.is_push_all_issues(test) - - # Decide if we need to present the Push to JIRA form - if get_system_setting('enable_jira') and jira_project: - jform = JIRAImportScanForm(push_all=push_all_jira_issues, prefix='jiraform') - - if request.method == "POST": - form = ReImportScanForm(request.POST, request.FILES, test=test) - if jira_project: - jform = JIRAImportScanForm(request.POST, push_all=push_all_jira_issues, prefix='jiraform') - if form.is_valid() and (jform is None or jform.is_valid()): - scan_date = form.cleaned_data['scan_date'] - - minimum_severity = form.cleaned_data['minimum_severity'] - scan = request.FILES.get('file', None) - activeChoice = form.cleaned_data.get('active', None) - verifiedChoice = form.cleaned_data.get('verified', None) - do_not_reactivate = form.cleaned_data['do_not_reactivate'] - tags = form.cleaned_data['tags'] - version = form.cleaned_data.get('version', None) - branch_tag = form.cleaned_data.get('branch_tag', None) - build_id = form.cleaned_data.get('build_id', None) - commit_hash = form.cleaned_data.get('commit_hash', None) - api_scan_configuration = form.cleaned_data.get('api_scan_configuration', None) - service = form.cleaned_data.get('service', None) - - endpoints_to_add = None # not available on reimport UI - - close_old_findings = form.cleaned_data.get('close_old_findings', True) - - group_by = form.cleaned_data.get('group_by', None) - create_finding_groups_for_all_findings = form.cleaned_data.get('create_finding_groups_for_all_findings') - apply_tags_to_findings = form.cleaned_data.get('apply_tags_to_findings', False) - apply_tags_to_endpoints = form.cleaned_data.get('apply_tags_to_endpoints', False) - - active = None - if activeChoice: - if activeChoice == 'force_to_true': - active = True - elif activeChoice == 'force_to_false': - active = False - verified = None - if verifiedChoice: - if verifiedChoice == 'force_to_true': - verified = True - elif verifiedChoice == 'force_to_false': - verified = False - - # Tags are replaced, same behaviour as with django-tagging - test.tags = tags - test.version = version - if scan and is_scan_file_too_large(scan): - messages.add_message(request, - messages.ERROR, - _("Report file is too large. 
Maximum supported size is %(size)d MB") % {'size': settings.SCAN_FILE_MAX_SIZE}, - extra_tags='alert-danger') - return HttpResponseRedirect(reverse('re_import_scan_results', args=(test.id,))) - - push_to_jira = push_all_jira_issues or (jform and jform.cleaned_data.get('push_to_jira')) - error = False - finding_count, new_finding_count, closed_finding_count, reactivated_finding_count, untouched_finding_count = 0, 0, 0, 0, 0 - reimporter = ReImporter() - try: - test, finding_count, new_finding_count, closed_finding_count, reactivated_finding_count, untouched_finding_count, _test_import = \ - reimporter.reimport_scan(scan, scan_type, test, active=active, verified=verified, - tags=tags, minimum_severity=minimum_severity, - endpoints_to_add=endpoints_to_add, scan_date=scan_date, - version=version, branch_tag=branch_tag, build_id=build_id, - commit_hash=commit_hash, push_to_jira=push_to_jira, - close_old_findings=close_old_findings, group_by=group_by, - api_scan_configuration=api_scan_configuration, service=service, do_not_reactivate=do_not_reactivate, - create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, - apply_tags_to_findings=apply_tags_to_findings, apply_tags_to_endpoints=apply_tags_to_endpoints) - except Exception as e: - logger.exception(e) - add_error_message_to_response(f'An exception error occurred during the report import:{str(e)}') - error = True - - if not error: - message = construct_imported_message(scan_type, finding_count, new_finding_count=new_finding_count, - closed_finding_count=closed_finding_count, - reactivated_finding_count=reactivated_finding_count, - untouched_finding_count=untouched_finding_count) - add_success_message_to_response(message) +class ReImportScanResultsView(View): + def get_template(self) -> str: + """ + Returns the template that will be presented to the user + """ + return "dojo/import_scan_results.html" + + def get_form( + self, + request: HttpRequest, + test: Test, + **kwargs: dict, + ) -> ReImportScanForm: + """ + Returns the default import form for importing findings + """ + if request.method == "POST": + return ReImportScanForm(request.POST, request.FILES, test=test, **kwargs) + else: + return ReImportScanForm(test=test, **kwargs) + + def get_jira_form( + self, + request: HttpRequest, + test: Test, + ) -> Tuple[JIRAImportScanForm | None, bool]: + """ + Returns a JiraImportScanForm if jira is enabled + """ + jira_form = None + push_all_jira_issues = False + # Decide if we need to present the Push to JIRA form + if get_system_setting('enable_jira'): + # Determine if jira issues should be pushed automatically + push_all_jira_issues = jira_helper.is_push_all_issues(test) + # Only return the form if the jira is enabled on this engagement or product + if jira_helper.get_jira_project(test): + if request.method == "POST": + jira_form = JIRAImportScanForm( + request.POST, + push_all=push_all_jira_issues, + prefix='jiraform' + ) + else: + jira_form = JIRAImportScanForm( + push_all=push_all_jira_issues, + prefix='jiraform' + ) + return jira_form, push_all_jira_issues + + def handle_request( + self, + request: HttpRequest, + test_id: int, + ) -> Tuple[HttpRequest, dict]: + """ + Process the common behaviors between request types, and then return + the request and context dict back to be rendered + """ + # Get the test object + test = get_object_or_404(Test, id=test_id) + # Ensure the supplied user has access to import to the engagement or product + user_has_permission_or_403(request.user, test, Permissions.Import_Scan_Result) + # by 
default we keep a trace of the scan_type used to create the test + # if it's not here, we use the "name" of the test type + # this feature exists to provide custom label for tests for some parsers + if test.scan_type: + scan_type = test.scan_type + else: + scan_type = test.test_type.name + # Set the product tab + product_tab = Product_Tab(test.engagement.product, title=_(f"Re-upload a {scan_type}"), tab="engagements") + product_tab.setEngagement(test.engagement) + # Get the import form with some initial data in place + form = self.get_form( + request, + test, + endpoints=Endpoint.objects.filter(product__id=product_tab.product.id), + api_scan_configuration=test.api_scan_configuration, + api_scan_configuration_queryset=Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id), + ) + # Get the jira form + jira_form, push_all_jira_issues = self.get_jira_form(request, test) + # Return the request and the context + return request, { + "test": test, + "form": form, + "product_tab": product_tab, + "eid": test.engagement.id, + "jform": jira_form, + "scan_type": scan_type, + "scan_types": get_scan_types_sorted(), + "push_all_jira_issues": push_all_jira_issues, + "additional_message": ( + "When re-uploading a scan, any findings not found in original scan will be updated as " + "mitigated. The process attempts to identify the differences, however manual verification " + "is highly recommended." + ), + } - return HttpResponseRedirect(reverse('view_test', args=(test.id,))) + def validate_forms( + self, + context: dict, + ) -> bool: + """ + Validates each of the forms to ensure all errors from the form + level are bubbled up to the user first before we process too much + """ + form_validation_list = [] + if context.get("form") is not None: + form_validation_list.append(context.get("form").is_valid()) + if context.get("jform") is not None: + form_validation_list.append(context.get("jform").is_valid()) + return all(form_validation_list) + + def process_form( + self, + request: HttpRequest, + form: ReImportScanForm, + context: dict, + ) -> str | None: + """ + Process the form and manipulate the input in any way that is appropriate + """ + # Update the running context dict with cleaned form input + context.update({ + "scan": request.FILES.get("file", None), + "scan_date": form.cleaned_data.get("scan_date"), + "minimum_severity": form.cleaned_data.get("minimum_severity"), + "do_not_reactivate": form.cleaned_data.get("do_not_reactivate"), + "tags": form.cleaned_data.get("tags"), + "version": form.cleaned_data.get("version"), + "branch_tag": form.cleaned_data.get("branch_tag", None), + "build_id": form.cleaned_data.get("build_id", None), + "commit_hash": form.cleaned_data.get("commit_hash", None), + "api_scan_configuration": form.cleaned_data.get("api_scan_configuration", None), + "service": form.cleaned_data.get("service", None), + "apply_tags_to_findings": form.cleaned_data.get("apply_tags_to_findings", False), + "apply_tags_to_endpoints": form.cleaned_data.get("apply_tags_to_endpoints", False), + "group_by": form.cleaned_data.get("group_by", None), + "close_old_findings": form.cleaned_data.get("close_old_findings", None), + "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings"), + }) + # Override the form values of active and verified + if activeChoice := form.cleaned_data.get('active', None): + if activeChoice == 'force_to_true': + context["active"] = True + elif activeChoice == 'force_to_false': + context["active"] = False + if 
verifiedChoice := form.cleaned_data.get('verified', None): + if verifiedChoice == 'force_to_true': + context["verified"] = True + elif verifiedChoice == 'force_to_false': + context["verified"] = False + # Override the tags and version + context.get("test").tags = context.get("tags") + context.get("test").version = context.get("version") + return None - product_tab = Product_Tab(engagement.product, title=_("Re-upload a %(scan_type)s") % {"scan_type": scan_type}, tab="engagements") - product_tab.setEngagement(engagement) - form.fields['endpoints'].queryset = Endpoint.objects.filter(product__id=product_tab.product.id) - form.initial['api_scan_configuration'] = test.api_scan_configuration - form.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product__id=product_tab.product.id) - return render(request, - 'dojo/import_scan_results.html', - {'form': form, - 'product_tab': product_tab, - 'eid': engagement.id, - 'additional_message': additional_message, - 'jform': jform, - 'scan_types': get_scan_types_sorted(), - }) + def process_jira_form( + self, + request: HttpRequest, + form: JIRAImportScanForm, + context: dict, + ) -> str | None: + """ + Process the jira form by first making sure one was supplied + and then setting any values supplied by the user. An error + may be returned and will be bubbled up in the form of a message + """ + # Determine if push all issues is enabled + push_all_jira_issues = context.get("push_all_jira_issues", False) + context["push_to_jira"] = push_all_jira_issues or (form and form.cleaned_data.get("push_to_jira")) + return None + + def reimport_findings( + self, + context: dict, + ) -> str | None: + """ + Attempt to import with all the supplied information + """ + try: + importer_client = DefaultReImporter() + ( + context["test"], + finding_count, + new_finding_count, + closed_finding_count, + reactivated_finding_count, + untouched_finding_count, + _, + ) = importer_client.process_scan( + **context, + ) + # Add a message to the view for the user to see the results + add_success_message_to_response(importer_client.construct_imported_message( + context.get("scan_type"), + Test_Import.REIMPORT_TYPE, + finding_count=finding_count, + new_finding_count=new_finding_count, + closed_finding_count=closed_finding_count, + reactivated_finding_count=reactivated_finding_count, + untouched_finding_count=untouched_finding_count, + close_old_findings=context.get("close_old_findings"), + )) + except Exception as e: + logger.exception(e) + return f"An exception error occurred during the report import: {e}" + return None + + def success_redirect( + self, + context: dict, + ) -> HttpResponseRedirect: + """ + Redirect the user to a place that indicates a successful import + """ + return HttpResponseRedirect(reverse("view_test", args=(context.get("test").id, ))) + + def failure_redirect( + self, + context: dict, + ) -> HttpResponseRedirect: + """ + Redirect the user to a place that indicates a failed import + """ + return HttpResponseRedirect(reverse( + "re_import_scan_results", + args=(context.get("test").id, ), + )) + + def get( + self, + request: HttpRequest, + test_id: int, + ) -> HttpResponse: + """ + Process GET requests for the ReImport View + """ + # process the request and path parameters + request, context = self.handle_request( + request, + test_id=test_id, + ) + # Render the form + return render(request, self.get_template(), context) + + def post( + self, + request: HttpRequest, + test_id: int, + ) -> HttpResponse: + """ + Process POST 
requests for the ReImport View + """ + # process the request and path parameters + request, context = self.handle_request( + request, + test_id=test_id, + ) + # ensure all three forms are valid first before moving forward + if not self.validate_forms(context): + return self.failure_redirect(context) + # Process the jira form if it is present + if form_error := self.process_jira_form(request, context.get("jform"), context): + add_error_message_to_response(form_error) + return self.failure_redirect(context) + # Process the import form + if form_error := self.process_form(request, context.get("form"), context): + add_error_message_to_response(form_error) + return self.failure_redirect(context) + # Kick off the import process + if import_error := self.reimport_findings(context): + add_error_message_to_response(import_error) + return self.failure_redirect(context) + # Otherwise return the user back to the engagement (if present) or the product + return self.success_redirect(context) diff --git a/unittests/test_importers_closeold.py b/unittests/test_importers_closeold.py index 1b16cde9ba8..fb9d46ce5d6 100644 --- a/unittests/test_importers_closeold.py +++ b/unittests/test_importers_closeold.py @@ -2,7 +2,7 @@ from django.utils import timezone -from dojo.importers.importer.importer import DojoDefaultImporter as Importer +from dojo.importers.default_importer import DefaultImporter from dojo.models import Development_Environment, Engagement, Product, Product_Type, User from .dojo_test_case import DojoTestCase, get_unit_tests_path @@ -12,66 +12,62 @@ class TestDojoCloseOld(DojoTestCase): def test_close_old_same_engagement(self): - scan = get_unit_tests_path() + "/scans/acunetix/many_findings.xml" + importer = DefaultImporter() scan_type = "Acunetix Scan" - user, _ = User.objects.get_or_create(username="admin") - _user_reporter, _ = User.objects.get_or_create(username="user_reporter") - product_type, _ = Product_Type.objects.get_or_create(name="closeold") + environment, _ = Development_Environment.objects.get_or_create(name="Development") product, _ = Product.objects.get_or_create( name="TestDojoCloseOldImporter1", prod_type=product_type, ) - engagement, _ = Engagement.objects.get_or_create( name="Close Old Same Engagement", product=product, target_start=timezone.now(), target_end=timezone.now(), ) - importer = Importer() - scan_date = None - environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "active": True, + "verified": False, + } # Import first test - _test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=False, group_by=None, api_scan_configuration=None) - - self.assertEqual(4, len_new_findings) - self.assertEqual(0, len_closed_findings) + with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + many_findings_scan, scan_type, engagement, close_old_findings=False, **import_options, + ) + self.assertEqual(4, len_new_findings) + self.assertEqual(0, len_closed_findings) # Import same test, should close no findings - _test, len_new_findings, 
len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=True, group_by=None, api_scan_configuration=None) - self.assertEqual(4, len_new_findings) - self.assertEqual(0, len_closed_findings) + with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + many_findings_scan, scan_type, engagement, close_old_findings=True, **import_options, + ) + self.assertEqual(4, len_new_findings) + self.assertEqual(0, len_closed_findings) # Import test with only one finding. Remaining findings should close - with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml") as scan: - _test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=True, group_by=None, api_scan_configuration=None) + with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+") as single_finding_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + single_finding_scan, scan_type, engagement, close_old_findings=True, **import_options, + ) self.assertEqual(1, len_new_findings) # Dedupe is off and close old findings does not close old findings if they are the same finding. 
- # If this behaviour changes, or dedupe is on, the number of closed findings will be 4 + # If this behavior changes, or dedupe is on, the number of closed findings will be 4 self.assertEqual(8, len_closed_findings) def test_close_old_same_product_scan(self): - scan = get_unit_tests_path() + "/scans/acunetix/many_findings.xml" + importer = DefaultImporter() scan_type = "Acunetix Scan" - user, _ = User.objects.get_or_create(username="admin") - _user_reporter, _ = User.objects.get_or_create(username="user_reporter") - product_type, _ = Product_Type.objects.get_or_create(name="test2") product, _ = Product.objects.get_or_create( name="TestDojoCloseOldImporter2", prod_type=product_type, ) - engagement1, _ = Engagement.objects.get_or_create( name="Close Old Same Product 1", product=product, @@ -90,31 +86,36 @@ def test_close_old_same_product_scan(self): target_start=timezone.now(), target_end=timezone.now(), ) - importer = Importer() - scan_date = None environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "active": True, + "verified": False, + "close_old_findings_product_scope": True, + } # Import first test - _test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement1, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=False, close_old_findings_product_scope=True, group_by=None, api_scan_configuration=None) - - self.assertEqual(4, len_new_findings) - self.assertEqual(0, len_closed_findings) + with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + many_findings_scan, scan_type, engagement1, close_old_findings=False, **import_options, + ) + self.assertEqual(4, len_new_findings) + self.assertEqual(0, len_closed_findings) # Import same test, should close no findings - _test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement2, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=True, close_old_findings_product_scope=True, group_by=None, api_scan_configuration=None) - self.assertEqual(4, len_new_findings) - self.assertEqual(0, len_closed_findings) + with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + many_findings_scan, scan_type, engagement2, close_old_findings=True, **import_options, + ) + self.assertEqual(4, len_new_findings) + self.assertEqual(0, len_closed_findings) # Import test with only one finding. 
Remaining findings should close - with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml") as scan: - _test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement3, lead=None, environment=environment, - active=True, verified=False, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=True, close_old_findings_product_scope=True, group_by=None, api_scan_configuration=None) + with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+") as single_finding_scan: + _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + single_finding_scan, scan_type, engagement3, close_old_findings=True, **import_options, + ) self.assertEqual(1, len_new_findings) # Dedupe is off, and close old findings does not close old findings if they are the same finding. - # If this behaviour changes, or dedupe is on, the number of closed findings will be 4 + # If this behavior changes, or dedupe is on, the number of closed findings will be 4 self.assertEqual(8, len_closed_findings) diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py index 5a41fc927d9..40caea86f9e 100644 --- a/unittests/test_importers_importer.py +++ b/unittests/test_importers_importer.py @@ -6,10 +6,8 @@ from rest_framework.authtoken.models import Token from rest_framework.test import APIClient -from dojo.importers.importer.importer import DojoDefaultImporter as Importer -from dojo.importers.utils import handle_vulnerability_ids +from dojo.importers.default_importer import DefaultImporter from dojo.models import Development_Environment, Engagement, Finding, Product, Product_Type, Test, User -from dojo.tools.factory import get_parser from dojo.tools.gitlab_sast.parser import GitlabSastParser from dojo.tools.sarif.parser import SarifParser from dojo.utils import get_object_or_none @@ -41,118 +39,111 @@ class TestDojoDefaultImporter(DojoTestCase): def test_parse_findings(self): - scan_type = "Acunetix Scan" with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml") as scan: - + importer = DefaultImporter() + scan_type = "Acunetix Scan" user, _created = User.objects.get_or_create(username="admin") - product_type, _created = Product_Type.objects.get_or_create(name="test") product, _created = Product.objects.get_or_create( name="TestDojoDefaultImporter", prod_type=product_type, ) - - engagement_name = "Test Create Engagement" engagement, _created = Engagement.objects.get_or_create( - name=engagement_name, + name="Test Create Engagement", product=product, target_start=timezone.now(), target_end=timezone.now(), ) lead, _ = User.objects.get_or_create(username="admin") environment, _ = Development_Environment.objects.get_or_create(name="Development") - - # boot - importer = Importer() - + import_options = { + "user": user, + "lead": lead, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "sync": True, + } # create the test - # by defaut test_type == scan_type - test = importer.create_test(scan_type, scan_type, engagement, lead, environment) - + # by default test_type == scan_type + test = importer.create_test(scan_type, scan_type, engagement=engagement, **import_options) # parse the findings - parser = get_parser(scan_type) + parser = importer.get_parser(scan_type) parsed_findings = parser.get_findings(scan, test) - # 
process - minimum_severity = "Info" - active = True - verified = True - scan_date = None - new_findings = importer.process_parsed_findings( + new_findings = importer.process_findings( test, parsed_findings, - scan_type, - user, - active, - verified, - minimum_severity=minimum_severity, - scan_date=scan_date, - sync=True + **import_options, ) - for finding in new_findings: self.assertIn(finding.numerical_severity, ["S0", "S1", "S2", "S3", "S4"]) def test_import_scan(self): with open(get_unit_tests_path() + "/scans/sarif/spotbugs.sarif") as scan: + importer = DefaultImporter() scan_type = SarifParser().get_scan_types()[0] # SARIF format implement the new method - user, _ = User.objects.get_or_create(username="admin") - _user_reporter, _ = User.objects.get_or_create(username="user_reporter") - product_type, _ = Product_Type.objects.get_or_create(name="test2") product, _ = Product.objects.get_or_create( name="TestDojoDefaultImporter2", prod_type=product_type, ) - engagement, _ = Engagement.objects.get_or_create( name="Test Create Engagement2", product=product, target_start=timezone.now(), target_end=timezone.now(), ) - importer = Importer() - scan_date = None environment, _ = Development_Environment.objects.get_or_create(name="Development") - test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement, lead=None, environment=environment, - active=True, verified=True, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=False, group_by=None, api_scan_configuration=None) - + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + } + test, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + scan, scan_type, engagement, close_old_findings=False, **import_options, + ) self.assertEqual(f"SpotBugs Scan ({scan_type})", test.test_type.name) self.assertEqual(56, len_new_findings) self.assertEqual(0, len_closed_findings) def test_import_scan_without_test_scan_type(self): - # GitLabSastParser implements get_tests but report has no scanner name with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json") as scan: + importer = DefaultImporter() + # GitLabSastParser implements get_tests but report has no scanner name scan_type = GitlabSastParser().get_scan_types()[0] - user, _ = User.objects.get_or_create(username="admin") - _user_reporter, _ = User.objects.get_or_create(username="user_reporter") - product_type, _ = Product_Type.objects.get_or_create(name="test2") product, _ = Product.objects.get_or_create( name="TestDojoDefaultImporter2", prod_type=product_type, ) - engagement, _ = Engagement.objects.get_or_create( name="Test Create Engagement2", product=product, target_start=timezone.now(), target_end=timezone.now(), ) - - importer = Importer() - scan_date = None environment, _ = Development_Environment.objects.get_or_create(name="Development") - test, len_new_findings, len_closed_findings, _ = importer.import_scan(scan, scan_type, engagement, lead=None, environment=environment, - active=True, verified=True, tags=None, minimum_severity=None, - user=user, endpoints_to_add=None, scan_date=scan_date, version=None, branch_tag=None, build_id=None, - commit_hash=None, push_to_jira=None, close_old_findings=False, group_by=None, api_scan_configuration=None) - + 
import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + } + test, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan( + scan, scan_type, engagement, close_old_findings=False, **import_options, + ) self.assertEqual("GitLab SAST Report", test.test_type.name) self.assertEqual(1, len_new_findings) self.assertEqual(0, len_closed_findings) @@ -185,18 +176,15 @@ def create_default_data(self): # engagement name is not unique by itself and not unique inside a product self.engagement_last = self.create_engagement(ENGAGEMENT_NAME_DEFAULT, product=self.product) - @patch('dojo.jira_link.helper.get_jira_project') - def test_import_by_engagement_id(self, mock): + def test_import_by_engagement_id(self): with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=0): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=self.engagement.id, test_title=TEST_TITLE_DEFAULT) test_id = import0['test'] self.assertEqual(get_object_or_none(Test, id=test_id).title, TEST_TITLE_DEFAULT) self.assertEqual(import0['engagement_id'], self.engagement.id) self.assertEqual(import0['product_id'], self.engagement.product.id) - mock.assert_called_with(self.engagement) - @patch('dojo.jira_link.helper.get_jira_project') - def test_import_by_product_name_exists_engagement_name_exists(self, mock): + def test_import_by_product_name_exists_engagement_name_exists(self): with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=0): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT) @@ -204,16 +192,13 @@ def test_import_by_product_name_exists_engagement_name_exists(self, mock): self.assertEqual(Test.objects.get(id=test_id).engagement, self.engagement_last) self.assertEqual(import0['engagement_id'], self.engagement_last.id) self.assertEqual(import0['product_id'], self.engagement_last.product.id) - mock.assert_called_with(self.engagement_last) def test_import_by_product_name_exists_engagement_name_not_exists(self): with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0): self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400) - @patch('dojo.jira_link.helper.get_jira_project') - def test_import_by_product_name_exists_engagement_name_not_exists_auto_create(self, mock): - mock.return_value = None + def test_import_by_product_name_exists_engagement_name_not_exists_auto_create(self): with assertImportModelsCreated(self, tests=1, engagements=1, products=0, product_types=0, endpoints=0): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True) @@ -221,9 +206,6 @@ def test_import_by_product_name_exists_engagement_name_not_exists_auto_create(se self.assertEqual(get_object_or_none(Test, id=test_id).title, None) self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW) self.assertEqual(import0['product_id'], self.engagement.product.id) - # the new 
engagement should inherit the jira settings from the product - # the jira settings are retrieved before an engagement is auto created - mock.assert_called_with(self.product) def test_import_by_product_name_not_exists_engagement_name(self): with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0): @@ -289,7 +271,7 @@ def test_import_with_invalid_parameters(self): with self.subTest('invalid product type'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name='valentijn', product_name='67283', engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, ["Product Type 'valentijn' doesn't exist"]) + self.assertEqual(import0, ['Product Type "valentijn" does not exist']) with self.subTest('invalid product'): # random product type to avoid collision with other tests @@ -297,12 +279,16 @@ def test_import_with_invalid_parameters(self): Product_Type.objects.create(name=another_product_type_name) import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, [f"Product '{PRODUCT_NAME_DEFAULT}' doesn't exist in Product_Type '{another_product_type_name}'"]) + self.assertEqual(import0, [( + "The fetched product has a conflict with the supplied product type name: " + f"existing product type name - {PRODUCT_TYPE_NAME_DEFAULT} vs " + f"supplied product type name - {another_product_type_name}" + )]) with self.subTest('invalid engagement'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=1254235, expected_http_status_code=400) - self.assertEqual(import0, ["Engagement '1254235' doesn't exist"]) + self.assertEqual(import0, ['Engagement "1254235" does not exist']) with self.subTest('invalid engagement, but exists in another product'): # random product to avoid collision with other tests @@ -310,7 +296,7 @@ def test_import_with_invalid_parameters(self): self.product = self.create_product(another_product_name) import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_name=another_product_name, expected_http_status_code=400) - self.assertEqual(import0, [f"Engagement 'Engagement 1' doesn't exist in Product '{another_product_name}'"]) + self.assertEqual(import0, [f'Engagement "Engagement 1" does not exist in Product "{another_product_name}"']) with self.subTest('invalid engagement not id'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, @@ -320,7 +306,7 @@ def test_import_with_invalid_parameters(self): with self.subTest('autocreate product but no product type name'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400) - self.assertEqual(import0, [f"Product '{PRODUCT_NAME_NEW}' doesn't exist and no product_type_name provided to create the new product in"]) + self.assertEqual(import0, [f'Product "{PRODUCT_NAME_NEW}" does not exist and no product_type_name provided to create the new product in']) with self.subTest('autocreate engagement but no 
product_name'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=None, @@ -382,23 +368,20 @@ def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_ex self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT, expected_http_status_code=400) - @patch('dojo.jira_link.helper.get_jira_project') - def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_exists_auto_create(self, mock): + def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_exists_auto_create(self): with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=1): import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title=TEST_TITLE_DEFAULT, auto_create_context=True) test_id = import0['test'] self.assertEqual(get_object_or_none(Test, id=test_id).title, TEST_TITLE_DEFAULT) self.assertEqual(import0['engagement_id'], self.engagement.id) - mock.assert_called_with(self.engagement) def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists(self): with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0): self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title='bogus title', expected_http_status_code=400) - @patch('dojo.jira_link.helper.get_jira_project') - def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists_auto_create(self, mock): + def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_exsists_test_title_not_exists_auto_create(self): with assertImportModelsCreated(self, tests=1, engagements=0, products=0, product_types=0, endpoints=1): import0 = self.reimport_scan_with_params(None, ACUNETIX_AUDIT_ONE_VULN_FILENAME, scan_type='Acunetix Scan', product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_DEFAULT, test_title='bogus title', auto_create_context=True) @@ -406,9 +389,6 @@ def test_reimport_by_product_name_exists_engagement_name_exists_scan_type_not_ex self.assertEqual(get_object_or_none(Test, id=test_id).scan_type, 'Acunetix Scan') self.assertEqual(get_object_or_none(Test, id=test_id).title, 'bogus title') self.assertEqual(import0['engagement_id'], self.engagement.id) - # the new test should inherit the jira settings from the engagement - # the jira settings are retrieved before an test is auto created - mock.assert_called_with(self.engagement) def test_reimport_by_product_name_exists_engagement_name_exists_test_title_exists(self): with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0): @@ -422,8 +402,7 @@ def test_reimport_by_product_name_exists_engagement_name_not_exists(self): self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, expected_http_status_code=400) - @patch('dojo.jira_link.helper.get_jira_project') 
- def test_reimport_by_product_name_exists_engagement_name_not_exists_auto_create(self, mock): + def test_reimport_by_product_name_exists_engagement_name_not_exists_auto_create(self): with assertImportModelsCreated(self, tests=1, engagements=1, products=0, product_types=0, endpoints=0): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_DEFAULT, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True) @@ -432,9 +411,6 @@ def test_reimport_by_product_name_exists_engagement_name_not_exists_auto_create( self.assertEqual(get_object_or_none(Engagement, id=import0['engagement_id']).name, ENGAGEMENT_NAME_NEW) self.assertEqual(import0['product_id'], self.engagement.product.id) self.assertEqual(import0['product_type_id'], self.engagement.product.prod_type.id) - # the new engagement should inherit the jira settings from the product - # the jira settings are retrieved before an engagement is auto created - mock.assert_called_with(self.product) def test_reimport_by_product_name_not_exists_engagement_name(self): with assertImportModelsCreated(self, tests=0, engagements=0, products=0, product_types=0, endpoints=0): @@ -491,12 +467,12 @@ def test_reimport_with_invalid_parameters(self): with self.subTest('invalid product type'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name='valentijn', product_name='67283', engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, ["Product Type 'valentijn' doesn't exist"]) + self.assertEqual(import0, ['Product Type "valentijn" does not exist']) with self.subTest('invalid product'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_name='67283', engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, ["Product '67283' doesn't exist"]) + self.assertEqual(import0, ['Product "67283" does not exist']) with self.subTest('valid product, but other product type'): # random product type to avoid collision with other tests @@ -505,7 +481,11 @@ def test_reimport_with_invalid_parameters(self): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, [f"Product '{PRODUCT_NAME_DEFAULT}' doesn't exist in Product_Type '{another_product_type_name}'"]) + self.assertEqual(import0, [( + "The fetched product has a conflict with the supplied product type name: " + f"existing product type name - {PRODUCT_TYPE_NAME_DEFAULT} vs " + f"supplied product type name - {another_product_type_name}" + )]) with self.subTest('invalid engagement'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, @@ -518,7 +498,7 @@ def test_reimport_with_invalid_parameters(self): self.product = self.create_product(another_product_name) import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement_name=ENGAGEMENT_NAME_DEFAULT, product_name=another_product_name, expected_http_status_code=400) - self.assertEqual(import0, [f"Engagement 'Engagement 1' doesn't exist in Product '{another_product_name}'"]) + self.assertEqual(import0, [f'Engagement 
"Engagement 1" does not exist in Product "{another_product_name}"']) with self.subTest('invalid engagement not id'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, @@ -528,7 +508,7 @@ def test_reimport_with_invalid_parameters(self): with self.subTest('autocreate product but no product type name'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, product_name=PRODUCT_NAME_NEW, engagement=None, engagement_name=ENGAGEMENT_NAME_NEW, auto_create_context=True, expected_http_status_code=400) - self.assertEqual(import0, [f"Product '{PRODUCT_NAME_NEW}' doesn't exist and no product_type_name provided to create the new product in"]) + self.assertEqual(import0, [f'Product "{PRODUCT_NAME_NEW}" does not exist and no product_type_name provided to create the new product in']) with self.subTest('autocreate engagement but no product_name'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, @@ -537,13 +517,13 @@ def test_reimport_with_invalid_parameters(self): class TestImporterUtils(DojoAPITestCase): - @patch('dojo.importers.utils.Vulnerability_Id', autospec=True) + @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) def test_handle_vulnerability_ids_references_and_cve(self, mock): finding = Finding() finding.cve = 'CVE' finding.unsaved_vulnerability_ids = ['REF-1', 'REF-2'] - handle_vulnerability_ids(finding) + DefaultImporter().process_vulnerability_ids(finding) vulnerability_ids = ['CVE', 'REF-1', 'REF-2'] @@ -558,12 +538,12 @@ def test_handle_vulnerability_ids_references_and_cve(self, mock): self.assertEqual('CVE', mock.mock_calls[4].kwargs['finding'].cve) self.assertEqual(vulnerability_ids, mock.mock_calls[2].kwargs['finding'].unsaved_vulnerability_ids) - @patch('dojo.importers.utils.Vulnerability_Id', autospec=True) + @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) def test_handle_no_vulnerability_ids_references_and_cve(self, mock): finding = Finding() finding.cve = 'CVE' - handle_vulnerability_ids(finding) + DefaultImporter().process_vulnerability_ids(finding) vulnerability_ids = ['CVE'] @@ -572,12 +552,12 @@ def test_handle_no_vulnerability_ids_references_and_cve(self, mock): self.assertEqual('CVE', mock.mock_calls[0].kwargs['finding'].cve) self.assertEqual(vulnerability_ids, mock.mock_calls[0].kwargs['finding'].unsaved_vulnerability_ids) - @patch('dojo.importers.utils.Vulnerability_Id', autospec=True) + @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) def test_handle_vulnerability_ids_references_and_no_cve(self, mock): finding = Finding() finding.unsaved_vulnerability_ids = ['REF-1', 'REF-2'] - handle_vulnerability_ids(finding) + DefaultImporter().process_vulnerability_ids(finding) vulnerability_ids = ['REF-1', 'REF-2'] @@ -589,10 +569,10 @@ def test_handle_vulnerability_ids_references_and_no_cve(self, mock): self.assertEqual('REF-1', mock.mock_calls[2].kwargs['finding'].cve) self.assertEqual(vulnerability_ids, mock.mock_calls[2].kwargs['finding'].unsaved_vulnerability_ids) - @patch('dojo.importers.utils.Vulnerability_Id', autospec=True) + @patch('dojo.importers.base_importer.Vulnerability_Id', autospec=True) def test_no_handle_vulnerability_ids_references_and_no_cve(self, mock): finding = Finding() - handle_vulnerability_ids(finding) + DefaultImporter().process_vulnerability_ids(finding) mock.assert_not_called() diff --git a/unittests/test_rest_framework.py 
b/unittests/test_rest_framework.py index 5e2adac7a0e..0427ceb1d90 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -143,7 +143,7 @@ TYPE_ARRAY = "array" #: TYPE_FILE = "file" #: -IMPORTER_MOCK_RETURN_VALUE = None, 0, 0, None +IMPORTER_MOCK_RETURN_VALUE = None, 0, 0, 0, 0, 0, MagicMock() REIMPORTER_MOCK_RETURN_VALUE = None, 0, 0, 0, 0, 0, MagicMock() @@ -1804,8 +1804,8 @@ def __init__(self, *args, **kwargs): def __del__(self: object): self.payload['file'].close() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -1834,8 +1834,8 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -1865,8 +1865,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -1897,8 +1897,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product( importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_global_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -1928,8 +1928,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_ importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + 
@patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): """ @@ -1967,8 +1967,8 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = True @@ -1998,8 +1998,8 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_global_permission') def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = True @@ -2042,8 +2042,8 @@ def setUp(self): # Specific tests for reimport - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') def test_reimport_zap_xml(self, importer_mock, reimporter_mock): importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE @@ -2066,8 +2066,8 @@ def test_reimport_zap_xml(self, importer_mock, reimporter_mock): importer_mock.assert_not_called() reimporter_mock.assert_called_once() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2096,8 +2096,8 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_authorized_product_name_engagement_name_scan_type_title_auto_create(self, mock, importer_mock, reimporter_mock): mock.return_value = True @@ 
-2126,8 +2126,8 @@ def test_create_authorized_product_name_engagement_name_scan_type_title_auto_cre importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): """ @@ -2165,8 +2165,8 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = True @@ -2197,8 +2197,8 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_global_permission') def test_create_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = True @@ -2228,8 +2228,8 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type importer_mock.assert_called_once() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2256,8 +2256,8 @@ def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_moc # copied tests from import, unsure how to use inheritance/mixins with test_ methods - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_engagement(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2287,8 +2287,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme importer_mock.assert_not_called() 
reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_product(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2319,8 +2319,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product( importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_global_permission') def test_create_not_authorized_product_name_engagement_name_auto_create_product_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2350,8 +2350,8 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_ importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock, importer_mock, reimporter_mock): mock.return_value = False @@ -2378,8 +2378,8 @@ def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock importer_mock.assert_not_called() reimporter_mock.assert_not_called() - @patch('dojo.importers.reimporter.reimporter.DojoDefaultReImporter.reimport_scan') - @patch('dojo.importers.importer.importer.DojoDefaultImporter.import_scan') + @patch('dojo.importers.default_reimporter.DefaultReImporter.process_scan') + @patch('dojo.importers.default_importer.DefaultImporter.process_scan') @patch('dojo.api_v2.permissions.user_has_permission') def test_create_not_authorized_product_name_engagement_name_scan_type_title(self, mock, importer_mock, reimporter_mock): mock.return_value = False