diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py index 54d55a76d09..4e2791aec50 100644 --- a/dojo/api_v2/mixins.py +++ b/dojo/api_v2/mixins.py @@ -47,11 +47,11 @@ def flatten(elem): return self.get_paginated_response(serializer.data) -class QuestionSubClassFieldsMixin(object): +class QuestionSubClassFieldsMixin: def get_queryset(self): return Question.objects.select_subclasses() -class AnswerSubClassFieldsMixin(object): +class AnswerSubClassFieldsMixin: def get_queryset(self): return Answer.objects.select_subclasses() diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py index 4c0e8c6e2e3..b9c2ec433bb 100644 --- a/dojo/api_v2/permissions.py +++ b/dojo/api_v2/permissions.py @@ -40,9 +40,7 @@ def check_post_permission(request, post_model, post_pk, post_permission): if request.method == "POST": if request.data.get(post_pk) is None: raise ParseError( - "Unable to check for permissions: Attribute '{}' is required".format( - post_pk - ) + f"Unable to check for permissions: Attribute '{post_pk}' is required" ) object = get_object_or_404(post_model, pk=request.data.get(post_pk)) return user_has_permission(request.user, object, post_permission) @@ -965,8 +963,7 @@ def raise_no_auto_create_import_validation_error( if product_name and not product: if product_type_name: raise serializers.ValidationError( - "Product '%s' doesn't exist in Product_Type '%s'" - % (product_name, product_type_name) + f"Product '{product_name}' doesn't exist in Product_Type '{product_type_name}'" ) else: raise serializers.ValidationError( @@ -975,21 +972,18 @@ def raise_no_auto_create_import_validation_error( if engagement_name and not engagement: raise serializers.ValidationError( - "Engagement '%s' doesn't exist in Product '%s'" - % (engagement_name, product_name) + f"Engagement '{engagement_name}' doesn't exist in Product '{product_name}'" ) # these are only set for reimport if test_title: raise serializers.ValidationError( - "Test '%s' with scan_type '%s' doesn't exist in 
Engagement '%s'" - % (test_title, scan_type, engagement_name) + f"Test '{test_title}' with scan_type '{scan_type}' doesn't exist in Engagement '{engagement_name}'" ) if scan_type: raise serializers.ValidationError( - "Test with scan_type '%s' doesn't exist in Engagement '%s'" - % (scan_type, engagement_name) + f"Test with scan_type '{scan_type}' doesn't exist in Engagement '{engagement_name}'" ) raise ValidationError(error_message) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 69a684d1cfd..a39397b8de7 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -245,7 +245,7 @@ def __init__(self, **kwargs): kwargs["style"] = {"base_template": "textarea.html"} kwargs["style"].update(style) - super(TagListSerializerField, self).__init__(**kwargs) + super().__init__(**kwargs) self.pretty_print = pretty_print @@ -300,14 +300,14 @@ class TaggitSerializer(serializers.Serializer): def create(self, validated_data): to_be_tagged, validated_data = self._pop_tags(validated_data) - tag_object = super(TaggitSerializer, self).create(validated_data) + tag_object = super().create(validated_data) return self._save_tags(tag_object, to_be_tagged) def update(self, instance, validated_data): to_be_tagged, validated_data = self._pop_tags(validated_data) - tag_object = super(TaggitSerializer, self).update( + tag_object = super().update( instance, validated_data ) @@ -389,7 +389,7 @@ def __init__(self, **kwargs): if isinstance(data, list): kwargs["many"] = True - super(RequestResponseSerializerField, self).__init__(**kwargs) + super().__init__(**kwargs) self.pretty_print = pretty_print @@ -1464,10 +1464,7 @@ def to_representation(self, data): new_files.append( { "id": file.id, - "file": "{site_url}/{file_access_url}".format( - site_url=settings.SITE_URL, - file_access_url=file.get_accessible_url(test, test.id), - ), + "file": f"{settings.SITE_URL}/{file.get_accessible_url(test, test.id)}", "title": file.title, } ) @@ -2306,13 +2303,11 @@ def 
validate(self, data): file = data.get("file") if not file and requires_file(scan_type): raise serializers.ValidationError( - "Uploading a Report File is required for {}".format(scan_type) + f"Uploading a Report File is required for {scan_type}" ) if file and is_scan_file_too_large(file): raise serializers.ValidationError( - "Report file is too large. Maximum supported size is {} MB".format( - settings.SCAN_FILE_MAX_SIZE - ) + f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB" ) tool_type = requires_tool_type(scan_type) if tool_type: @@ -2665,13 +2660,11 @@ def validate(self, data): file = data.get("file") if not file and requires_file(scan_type): raise serializers.ValidationError( - "Uploading a Report File is required for {}".format(scan_type) + f"Uploading a Report File is required for {scan_type}" ) if file and is_scan_file_too_large(file): raise serializers.ValidationError( - "Report file is too large. Maximum supported size is {} MB".format( - settings.SCAN_FILE_MAX_SIZE - ) + f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB" ) tool_type = requires_tool_type(scan_type) if tool_type: @@ -2712,9 +2705,7 @@ def validate(self, data): file = data.get("file") if file and is_scan_file_too_large(file): raise serializers.ValidationError( - "Report file is too large. Maximum supported size is {} MB".format( - settings.SCAN_FILE_MAX_SIZE - ) + f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB" ) return data @@ -2818,9 +2809,7 @@ def save(self): def validate(self, data): if is_scan_file_too_large(data["file"]): raise serializers.ValidationError( - "File is too large. Maximum supported size is {} MB".format( - settings.SCAN_FILE_MAX_SIZE - ) + f"File is too large. 
Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB" ) return data diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 2149a86a356..ff73d2f01f7 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -1214,9 +1214,7 @@ def remove_tags(self, request, pk=None): if tag not in all_tags: return Response( { - "error": "'{}' is not a valid tag in list".format( - tag - ) + "error": f"'{tag}' is not a valid tag in list" }, status=status.HTTP_400_BAD_REQUEST, ) @@ -2877,7 +2875,7 @@ def report_generate(request, obj, options): include_executive_summary = False include_table_of_contents = False - report_info = "Generated By %s on %s" % ( + report_info = "Generated By {} on {}".format( user.get_full_name(), (timezone.now().strftime("%m/%d/%Y %I:%M%p %Z")), ) diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py index 69f3884a4ce..1cbef9ecd6e 100644 --- a/dojo/authorization/authorization.py +++ b/dojo/authorization/authorization.py @@ -259,7 +259,7 @@ def user_has_global_permission_or_403(user, permission): def get_roles_for_permission(permission): if not Permissions.has_value(permission): raise PermissionDoesNotExistError( - "Permission {} does not exist".format(permission) + f"Permission {permission} does not exist" ) roles_for_permissions = set() roles = get_roles_with_permissions() @@ -274,7 +274,7 @@ def role_has_permission(role, permission): if role is None: return False if not Roles.has_value(role): - raise RoleDoesNotExistError("Role {} does not exist".format(role)) + raise RoleDoesNotExistError(f"Role {role} does not exist") roles = get_roles_with_permissions() permissions = roles.get(role) if not permissions: @@ -286,7 +286,7 @@ def role_has_global_permission(role, permission): if role is None: return False if not Roles.has_value(role): - raise RoleDoesNotExistError("Role {} does not exist".format(role)) + raise RoleDoesNotExistError(f"Role {role} does not exist") roles = get_global_roles_with_permissions() 
permissions = roles.get(role) if permissions and permission in permissions: diff --git a/dojo/celery.py b/dojo/celery.py index f2d73f03868..1fbf6e73fcb 100644 --- a/dojo/celery.py +++ b/dojo/celery.py @@ -20,7 +20,7 @@ @app.task(bind=True) def debug_task(self): - print(('Request: {0!r}'.format(self.request))) + print(f'Request: {self.request!r}') @setup_logging.connect diff --git a/dojo/components/sql_group_concat.py b/dojo/components/sql_group_concat.py index 5aa8f10d645..ba23b24e385 100644 --- a/dojo/components/sql_group_concat.py +++ b/dojo/components/sql_group_concat.py @@ -9,7 +9,7 @@ def __init__( self, expression, separator, distinct=False, ordering=None, **extra ): self.separator = separator - super(Sql_GroupConcat, self).__init__( + super().__init__( expression, distinct="DISTINCT " if distinct else "", ordering=" ORDER BY %s" % ordering if ordering is not None else "", diff --git a/dojo/development_environment/views.py b/dojo/development_environment/views.py index d6d4c167b17..8de454f1a8b 100644 --- a/dojo/development_environment/views.py +++ b/dojo/development_environment/views.py @@ -85,7 +85,7 @@ def edit_dev_env(request, deid): except RestrictedError as err: messages.add_message(request, messages.WARNING, - 'Environment cannot be deleted: {}'.format(err), + f'Environment cannot be deleted: {err}', extra_tags='alert-warning') return HttpResponseRedirect(reverse('dev_env')) diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py index f52a46311c0..4c404a32755 100644 --- a/dojo/endpoint/utils.py +++ b/dojo/endpoint/utils.py @@ -92,7 +92,7 @@ def clean_hosts_run(apps, change): def err_log(message, html_log, endpoint_html_log, endpoint): error_suffix = 'It is not possible to migrate it. Delete or edit this endpoint.' html_log.append({**endpoint_html_log, **{'message': message}}) - logger.error('Endpoint (id={}) {}. {}'.format(endpoint.pk, message, error_suffix)) + logger.error(f'Endpoint (id={endpoint.pk}) {message}. 
{error_suffix}') broken_endpoints.add(endpoint.pk) html_log = [] broken_endpoints = set() @@ -120,8 +120,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if parts.protocol: if endpoint.protocol and (endpoint.protocol != parts.protocol): - message = 'has defined protocol ({}) and it is not the same as protocol in host ' \ - '({})'.format(endpoint.protocol, parts.protocol) + message = f'has defined protocol ({endpoint.protocol}) and it is not the same as protocol in host ' \ + f'({parts.protocol})' err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -135,26 +135,26 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if change: endpoint.host = parts.host else: - message = '"{}" use invalid format of host'.format(endpoint.host) + message = f'"{endpoint.host}" use invalid format of host' err_log(message, html_log, endpoint_html_log, endpoint) if parts.port: try: if (endpoint.port is not None) and (int(endpoint.port) != parts.port): - message = 'has defined port number ({}) and it is not the same as port number in ' \ - 'host ({})'.format(endpoint.port, parts.port) + message = f'has defined port number ({endpoint.port}) and it is not the same as port number in ' \ + f'host ({parts.port})' err_log(message, html_log, endpoint_html_log, endpoint) else: if change: endpoint.port = parts.port except ValueError: - message = 'uses non-numeric port: {}'.format(endpoint.port) + message = f'uses non-numeric port: {endpoint.port}' err_log(message, html_log, endpoint_html_log, endpoint) if parts.path: if endpoint.path and (endpoint.path != parts.path): - message = 'has defined path ({}) and it is not the same as path in host ' \ - '({})'.format(endpoint.path, parts.path) + message = f'has defined path ({endpoint.path}) and it is not the same as path in host ' \ + f'({parts.path})' err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -162,8 +162,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): 
if parts.query: if endpoint.query and (endpoint.query != parts.query): - message = 'has defined query ({}) and it is not the same as query in host ' \ - '({})'.format(endpoint.query, parts.query) + message = f'has defined query ({endpoint.query}) and it is not the same as query in host ' \ + f'({parts.query})' err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -171,8 +171,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): if parts.fragment: if endpoint.fragment and (endpoint.fragment != parts.fragment): - message = 'has defined fragment ({}) and it is not the same as fragment in host ' \ - '({})'.format(endpoint.fragment, parts.fragment) + message = f'has defined fragment ({endpoint.fragment}) and it is not the same as fragment in host ' \ + f'({parts.fragment})' err_log(message, html_log, endpoint_html_log, endpoint) else: if change: @@ -182,7 +182,7 @@ def err_log(message, html_log, endpoint_html_log, endpoint): endpoint.save() except ValidationError: - message = '"{}" uses invalid format of host'.format(endpoint.host) + message = f'"{endpoint.host}" uses invalid format of host' err_log(message, html_log, endpoint_html_log, endpoint) try: @@ -197,8 +197,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): err_log('Missing product', html_log, endpoint_html_log, endpoint) if broken_endpoints: - logger.error('It is not possible to migrate database because there is/are {} broken endpoint(s). ' - 'Please check logs.'.format(len(broken_endpoints))) + logger.error(f'It is not possible to migrate database because there is/are {len(broken_endpoints)} broken endpoint(s). 
' + 'Please check logs.') else: logger.info('There is not broken endpoint.') @@ -223,8 +223,8 @@ def err_log(message, html_log, endpoint_html_log, endpoint): to_be_deleted.update(ep_ids[1:]) if change: message = "Merging Endpoints {} into '{}'".format( - ["{} (id={})".format(str(x), x.pk) for x in ep[1:]], - "{} (id={})".format(str(ep[0]), ep[0].pk)) + [f"{str(x)} (id={x.pk})" for x in ep[1:]], + f"{str(ep[0])} (id={ep[0].pk})") html_log.append(message) logger.info(message) Endpoint_Status_model.objects\ @@ -240,18 +240,18 @@ def err_log(message, html_log, endpoint_html_log, endpoint): .filter(finding=eps['finding'])\ .order_by('-last_modified') message = "Endpoint Statuses {} will be replaced by '{}'".format( - ["last_modified: {} (id={})".format(x.last_modified, x.pk) for x in esm[1:]], - "last_modified: {} (id={})".format(esm[0].last_modified, esm[0].pk)) + [f"last_modified: {x.last_modified} (id={x.pk})" for x in esm[1:]], + f"last_modified: {esm[0].last_modified} (id={esm[0].pk})") html_log.append(message) logger.info(message) esm.exclude(id=esm[0].pk).delete() if to_be_deleted: if change: - message = "Removing endpoints: {}".format(list(to_be_deleted)) + message = f"Removing endpoints: {list(to_be_deleted)}" Endpoint_model.objects.filter(id__in=to_be_deleted).delete() else: - message = "Redundant endpoints: {}, migration is required.".format(list(to_be_deleted)) + message = f"Redundant endpoints: {list(to_be_deleted)}, migration is required." 
html_log.append(message) logger.info(message) @@ -283,7 +283,7 @@ def validate_endpoints_to_add(endpoints_to_add): except ValidationError as ves: for ve in ves: errors.append( - ValidationError("Invalid endpoint {}: {}".format(endpoint, ve)) + ValidationError(f"Invalid endpoint {endpoint}: {ve}") ) return endpoint_list, errors diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py index d35a1390988..dbe0956d104 100644 --- a/dojo/endpoint/views.py +++ b/dojo/endpoint/views.py @@ -216,7 +216,7 @@ def delete_endpoint(request, eid): create_notification(event='other', title='Deletion of %s' % endpoint, product=product, - description='The endpoint "%s" was deleted by %s' % (endpoint, request.user), + description=f'The endpoint "{endpoint}" was deleted by {request.user}', url=reverse('endpoint'), icon="exclamation-triangle") return HttpResponseRedirect(reverse('view_product', args=(product.id,))) @@ -372,12 +372,12 @@ def endpoint_bulk_update_all(request, pid=None): calculate_grade(prod) if skipped_endpoint_count > 0: - add_error_message_to_response('Skipped deletion of {} endpoints because you are not authorized.'.format(skipped_endpoint_count)) + add_error_message_to_response(f'Skipped deletion of {skipped_endpoint_count} endpoints because you are not authorized.') if deleted_endpoint_count > 0: messages.add_message(request, messages.SUCCESS, - 'Bulk delete of {} endpoints was successful.'.format(deleted_endpoint_count), + f'Bulk delete of {deleted_endpoint_count} endpoints was successful.', extra_tags='alert-success') else: if endpoints_to_update: @@ -392,7 +392,7 @@ def endpoint_bulk_update_all(request, pid=None): updated_endpoint_count = endpoints.count() if skipped_endpoint_count > 0: - add_error_message_to_response('Skipped mitigation of {} endpoints because you are not authorized.'.format(skipped_endpoint_count)) + add_error_message_to_response(f'Skipped mitigation of {skipped_endpoint_count} endpoints because you are not authorized.') eps_count = 
Endpoint_Status.objects.filter(endpoint__in=endpoints).update( mitigated=True, @@ -404,8 +404,7 @@ def endpoint_bulk_update_all(request, pid=None): if updated_endpoint_count > 0: messages.add_message(request, messages.SUCCESS, - 'Bulk mitigation of {} endpoints ({} endpoint statuses) was successful.'.format( - updated_endpoint_count, eps_count), + f'Bulk mitigation of {updated_endpoint_count} endpoints ({eps_count} endpoint statuses) was successful.', extra_tags='alert-success') else: messages.add_message(request, @@ -488,7 +487,7 @@ def import_endpoint_meta(request, pid): messages.add_message( request, messages.ERROR, - "Report file is too large. Maximum supported size is {} MB".format(settings.SCAN_FILE_MAX_SIZE), + f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB", extra_tags='alert-danger') create_endpoints = form.cleaned_data['create_endpoints'] diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 230c18cc3a0..45bb5009c80 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -310,7 +310,7 @@ def delete_engagement(request, eid): create_notification(event='other', title='Deletion of %s' % engagement.name, product=product, - description='The engagement "%s" was deleted by %s' % (engagement.name, request.user), + description=f'The engagement "{engagement.name}" was deleted by {request.user}', url=request.build_absolute_uri(reverse('view_engagements', args=(product.id, ))), recipients=[engagement.lead], icon="exclamation-triangle") @@ -352,7 +352,7 @@ def copy_engagement(request, eid): extra_tags='alert-success') create_notification(event='other', title='Copying of %s' % engagement.name, - description='The engagement "%s" was copied by %s' % (engagement.name, request.user), + description=f'The engagement "{engagement.name}" was copied by {request.user}', product=product, url=request.build_absolute_uri(reverse('view_engagement', args=(engagement_copy.id, ))), recipients=[engagement.lead], @@ 
-504,7 +504,7 @@ def post(self, request, eid, *args, **kwargs): form = TypedNoteForm(available_note_types=available_note_types) else: form = NoteForm() - title = "Engagement: %s on %s" % (eng.name, eng.product.name) + title = f"Engagement: {eng.name} on {eng.product.name}" messages.add_message(request, messages.SUCCESS, 'Note added successfully.', @@ -755,7 +755,7 @@ def post(self, request, eid=None, pid=None): if scan and is_scan_file_too_large(scan): messages.add_message(request, messages.ERROR, - "Report file is too large. Maximum supported size is {} MB".format(settings.SCAN_FILE_MAX_SIZE), + f"Report file is too large. Maximum supported size is {settings.SCAN_FILE_MAX_SIZE} MB", extra_tags='alert-danger') return HttpResponseRedirect(reverse('import_scan_results', args=(engagement,))) @@ -1288,11 +1288,10 @@ def engagement_ics(request, eid): uid = "dojo_eng_%d_%d" % (eng.id, eng.product.id) cal = get_cal_event( start_date, end_date, - "Engagement: %s (%s)" % (eng.name, eng.product.name), - "Set aside for engagement %s, on product %s. Additional detail can be found at %s" - % (eng.name, eng.product.name, + f"Engagement: {eng.name} ({eng.product.name})", + "Set aside for engagement {}, on product {}. 
Additional detail can be found at {}".format(eng.name, eng.product.name, request.build_absolute_uri( - (reverse("view_engagement", args=(eng.id, ))))), uid) + reverse("view_engagement", args=(eng.id, )))), uid) output = cal.serialize() response = HttpResponse(content=output) response['Content-Type'] = 'text/calendar' diff --git a/dojo/filters.py b/dojo/filters.py index 4f1f3c539ad..f5726fb4a80 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -131,7 +131,7 @@ def under_review(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(FindingStatusFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): earliest_finding = get_earliest_finding(qs) @@ -179,7 +179,7 @@ def sla_violated(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(FindingSLAFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -214,7 +214,7 @@ def sla_violated(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(ProductSLAFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -246,7 +246,7 @@ def cwe_options(queryset): class DojoFilter(FilterSet): def __init__(self, *args, **kwargs): - super(DojoFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) for field in ['tags', 'test__tags', 'test__engagement__tags', 'test__engagement__product__tags', 'not_tags', 'not_test__tags', 'not_test__engagement__tags', 'not_test__engagement__product__tags']: @@ -483,7 +483,7 @@ class DateRangeFilter(ChoiceFilter): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - 
super(DateRangeFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -545,7 +545,7 @@ class DateRangeOmniFilter(ChoiceFilter): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(DateRangeOmniFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -569,7 +569,7 @@ class ReportBooleanFilter(ChoiceFilter): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(ReportBooleanFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -604,7 +604,7 @@ def was_accepted(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(ReportRiskAcceptanceFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: @@ -679,7 +679,7 @@ def past_year(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(MetricsDateRangeFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): if value == 8: @@ -729,7 +729,7 @@ class ComponentFilter(ProductComponentFilter): label="Product") def __init__(self, *args, **kwargs): - super(ComponentFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields[ 'test__engagement__product__prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View) self.form.fields[ @@ -792,7 +792,7 @@ class EngagementDirectFilter(DojoFilter): ) def __init__(self, *args, **kwargs): - super(EngagementDirectFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['product__prod_type'].queryset = 
get_authorized_product_types(Permissions.Product_Type_View) self.form.fields['lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \ .filter(engagement__lead__isnull=False).distinct() @@ -852,7 +852,7 @@ class EngagementFilter(DojoFilter): ) def __init__(self, *args, **kwargs): - super(EngagementFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View) self.form.fields['engagement__lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \ .filter(engagement__lead__isnull=False).distinct() @@ -910,7 +910,7 @@ class ProductEngagementFilter(DojoFilter): ) def __init__(self, *args, **kwargs): - super(ProductEngagementFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['lead'].queryset = get_authorized_users(Permissions.Product_Type_View) \ .filter(engagement__lead__isnull=False).distinct() @@ -1098,7 +1098,7 @@ def __init__(self, *args, **kwargs): if 'user' in kwargs: self.user = kwargs.pop('user') - super(ProductFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View) @@ -1715,7 +1715,7 @@ class Meta: not_tag = CharFilter(field_name='tags__name', lookup_expr='icontains', label='Not tag name contains', exclude=True) def __init__(self, *args, **kwargs): - super(TemplateFindingFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['cwe'].choices = cwe_options(self.queryset) @@ -1950,12 +1950,12 @@ def __init__(self, *args, **kwargs): self.user = None if 'user' in kwargs: self.user = kwargs.pop('user') - super(EndpointFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['product'].queryset = get_authorized_products(Permissions.Product_View) @property def qs(self): - parent = 
super(EndpointFilter, self).qs + parent = super().qs return get_authorized_endpoints(Permissions.Endpoint_View, parent) class Meta: @@ -2348,7 +2348,7 @@ class LogEntryFilter(DojoFilter): timestamp = DateRangeFilter() def __init__(self, *args, **kwargs): - super(LogEntryFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.form.fields['actor'].queryset = get_authorized_users(Permissions.Product_View) class Meta: @@ -2467,7 +2467,7 @@ def choice_question(self, qs, name): def __init__(self, *args, **kwargs): kwargs['choices'] = [ (key, value[0]) for key, value in six.iteritems(self.options)] - super(QuestionTypeFilter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def filter(self, qs, value): try: diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py index 95421553f44..debb209b3f1 100644 --- a/dojo/finding/helper.py +++ b/dojo/finding/helper.py @@ -236,7 +236,7 @@ def get_group_by_group_name(finding, finding_group_by_option): group_name = finding.component_name elif finding_group_by_option == 'component_name+component_version': if finding.component_name or finding.component_version: - group_name = '%s:%s' % ((finding.component_name if finding.component_name else 'None'), + group_name = '{}:{}'.format((finding.component_name if finding.component_name else 'None'), (finding.component_version if finding.component_version else 'None')) elif finding_group_by_option == 'file_path': if finding.file_path: diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 08ce201491d..0adae7ae642 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -1279,8 +1279,7 @@ def close_finding(request, fid): event="other", title="Closing of %s" % finding.title, finding=finding, - description='The finding "%s" was closed by %s' - % (finding.title, request.user), + description=f'The finding "{finding.title}" was closed by {request.user}', url=reverse("view_finding", args=(finding.id,)), ) return HttpResponseRedirect( @@ 
-1443,8 +1442,7 @@ def reopen_finding(request, fid): event="other", title="Reopening of %s" % finding.title, finding=finding, - description='The finding "%s" was reopened by %s' - % (finding.title, request.user), + description=f'The finding "{finding.title}" was reopened by {request.user}', url=reverse("view_finding", args=(finding.id,)), ) return HttpResponseRedirect(reverse("view_finding", args=(finding.id,))) @@ -1501,8 +1499,7 @@ def copy_finding(request, fid): create_notification( event="other", title="Copying of %s" % finding.title, - description='The finding "%s" was copied by %s to %s' - % (finding.title, request.user, test.title), + description=f'The finding "{finding.title}" was copied by {request.user} to {test.title}', product=product, url=request.build_absolute_uri( reverse("copy_finding", args=(finding_copy.id,)) @@ -2274,8 +2271,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True): template.save() new_note = Notes() new_note.entry = ( - "CWE remediation text applied to finding for CWE: %s using template: %s." - % (template.cwe, template.title) + f"CWE remediation text applied to finding for CWE: {template.cwe} using template: {template.title}." 
) new_note.author, _created = User.objects.get_or_create( username="System" @@ -2518,9 +2514,7 @@ def merge_finding_product(request, pid): for finding in findings_to_merge.exclude( pk=finding_to_merge_into.pk ): - notes_entry = "{}\n- {} ({}),".format( - notes_entry, finding.title, finding.id - ) + notes_entry = f"{notes_entry}\n- {finding.title} ({finding.id})," if finding.static_finding: static = finding.static_finding @@ -2528,23 +2522,17 @@ def merge_finding_product(request, pid): dynamic = finding.dynamic_finding if form.cleaned_data["append_description"]: - finding_descriptions = "{}\n{}".format( - finding_descriptions, finding.description - ) + finding_descriptions = f"{finding_descriptions}\n{finding.description}" # Workaround until file path is one to many if finding.file_path: - finding_descriptions = "{}\n**File Path:** {}\n".format( - finding_descriptions, finding.file_path - ) + finding_descriptions = f"{finding_descriptions}\n**File Path:** {finding.file_path}\n" # If checked merge the Reference if ( form.cleaned_data["append_reference"] and finding.references is not None ): - finding_references = "{}\n{}".format( - finding_references, finding.references - ) + finding_references = f"{finding_references}\n{finding.references}" # if checked merge the endpoints if form.cleaned_data["add_endpoints"]: @@ -2566,9 +2554,7 @@ def merge_finding_product(request, pid): # Add merge finding information to the note if set to inactive if form.cleaned_data["finding_action"] == "inactive": single_finding_notes_entry = ("Finding has been set to inactive " - "and merged with the finding: {}.").format( - finding_to_merge_into.title - ) + f"and merged with the finding: {finding_to_merge_into.title}.") note = Notes( entry=single_finding_notes_entry, author=request.user ) @@ -2581,9 +2567,7 @@ def merge_finding_product(request, pid): # Update the finding to merge into if finding_descriptions != "": - finding_to_merge_into.description = "{}\n\n{}".format( - 
finding_to_merge_into.description, finding_descriptions - ) + finding_to_merge_into.description = f"{finding_to_merge_into.description}\n\n{finding_descriptions}" if finding_to_merge_into.static_finding: static = finding.static_finding @@ -2592,9 +2576,7 @@ def merge_finding_product(request, pid): dynamic = finding.dynamic_finding if finding_references != "": - finding_to_merge_into.references = "{}\n{}".format( - finding_to_merge_into.references, finding_references - ) + finding_to_merge_into.references = f"{finding_to_merge_into.references}\n{finding_references}" finding_to_merge_into.static_finding = static finding_to_merge_into.dynamic_finding = dynamic @@ -2624,9 +2606,7 @@ def merge_finding_product(request, pid): findings_to_merge.delete() notes_entry = ("Finding consists of merged findings from the following " - "findings which have been {}: {}").format( - finding_action, notes_entry[:-1] - ) + f"findings which have been {finding_action}: {notes_entry[:-1]}") note = Notes(entry=notes_entry, author=request.user) note.save() finding_to_merge_into.notes.add(note) @@ -2716,18 +2696,14 @@ def finding_bulk_update_all(request, pid=None): if skipped_find_count > 0: add_error_message_to_response( - "Skipped deletion of {} findings because you are not authorized.".format( - skipped_find_count - ) + f"Skipped deletion of {skipped_find_count} findings because you are not authorized." ) if deleted_find_count > 0: messages.add_message( request, messages.SUCCESS, - "Bulk delete of {} findings was successful.".format( - deleted_find_count - ), + f"Bulk delete of {deleted_find_count} findings was successful.", extra_tags="alert-success", ) else: @@ -2748,9 +2724,7 @@ def finding_bulk_update_all(request, pid=None): if skipped_find_count > 0: add_error_message_to_response( - "Skipped update of {} findings because you are not authorized.".format( - skipped_find_count - ) + f"Skipped update of {skipped_find_count} findings because you are not authorized." 
) finds = prefetch_for_findings(finds) @@ -2887,8 +2861,7 @@ def finding_bulk_update_all(request, pid=None): if added: add_success_message_to_response( - "Added %s findings to finding group %s" - % (added, finding_group.name) + f"Added {added} findings to finding group {finding_group.name}" ) return_url = reverse( "view_finding_group", args=(finding_group.id,) @@ -2896,9 +2869,8 @@ def finding_bulk_update_all(request, pid=None): if skipped: add_success_message_to_response( - ("Skipped %s findings when adding to finding group %s, " - "findings already part of another group") - % (skipped, finding_group.name) + f"Skipped {skipped} findings when adding to finding group {finding_group.name}, " + "findings already part of another group" ) # refresh findings from db @@ -2914,8 +2886,7 @@ def finding_bulk_update_all(request, pid=None): if removed: add_success_message_to_response( - "Removed %s findings from finding groups %s" - % ( + "Removed {} findings from finding groups {}".format( removed, ",".join( [ @@ -2958,9 +2929,8 @@ def finding_bulk_update_all(request, pid=None): if skipped: add_success_message_to_response( - ("Skipped %s findings when grouping by %s as these findings " - "were already in an existing group") - % (skipped, finding_group_by_option) + f"Skipped {skipped} findings when grouping by {finding_group_by_option} as these findings " + "were already in an existing group" ) # refresh findings from db @@ -3104,9 +3074,7 @@ def finding_bulk_update_all(request, pid=None): messages.add_message( request, messages.SUCCESS, - "Bulk update of {} findings was successful.".format( - updated_find_count - ), + f"Bulk update of {updated_find_count} findings was successful.", extra_tags="alert-success", ) else: diff --git a/dojo/finding_group/views.py b/dojo/finding_group/views.py index e6f92a71d27..054fb29bfe8 100644 --- a/dojo/finding_group/views.py +++ b/dojo/finding_group/views.py @@ -122,7 +122,7 @@ def delete_finding_group(request, fgid): 
create_notification(event='other', title='Deletion of %s' % finding_group.name, product=product, - description='The finding group "%s" was deleted by %s' % (finding_group.name, request.user), + description=f'The finding group "{finding_group.name}" was deleted by {request.user}', url=request.build_absolute_uri(reverse('view_test', args=(finding_group.test.id,))), icon="exclamation-triangle") return HttpResponseRedirect(reverse('view_test', args=(finding_group.test.id,))) diff --git a/dojo/forms.py b/dojo/forms.py index 1d24ed429ab..8bce183a95c 100755 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -75,7 +75,7 @@ class MultipleSelectWithPop(forms.SelectMultiple): def render(self, name, *args, **kwargs): - html = super(MultipleSelectWithPop, self).render(name, *args, **kwargs) + html = super().render(name, *args, **kwargs) popup_plus = '
' + html + '
' return mark_safe(popup_plus) @@ -154,7 +154,7 @@ def value_from_datadict(self, data, files, name): if y == m == "0": return None if y and m: - return '%s-%s-%s' % (y, m, 1) + return f'{y}-{m}-{1}' return data.get(name, None) @@ -179,7 +179,7 @@ class Meta: class Edit_Product_Type_MemberForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Edit_Product_Type_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product_type'].disabled = True self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name') self.fields['user'].disabled = True @@ -193,7 +193,7 @@ class Add_Product_Type_MemberForm(forms.ModelForm): users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users') def __init__(self, *args, **kwargs): - super(Add_Product_Type_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_members = Product_Type_Member.objects.filter(product_type=self.initial["product_type"]).values_list('user', flat=True) self.fields['users'].queryset = Dojo_User.objects.exclude( Q(is_superuser=True) @@ -209,7 +209,7 @@ class Add_Product_Type_Member_UserForm(forms.ModelForm): product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types') def __init__(self, *args, **kwargs): - super(Add_Product_Type_Member_UserForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_members = Product_Type_Member.objects.filter(user=self.initial['user']).values_list('product_type', flat=True) self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \ .exclude(id__in=current_members) @@ -222,7 +222,7 @@ class Meta: class Delete_Product_Type_MemberForm(Edit_Product_Type_MemberForm): def __init__(self, *args, **kwargs): - super(Delete_Product_Type_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, 
**kwargs) self.fields['role'].disabled = True @@ -263,7 +263,7 @@ class ProductForm(forms.ModelForm): team_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False) def __init__(self, *args, **kwargs): - super(ProductForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_Add_Product) # if this product has findings being asynchronously updated, disable the sla config field @@ -294,7 +294,7 @@ class EditFindingGroupForm(forms.ModelForm): help_text='Leave empty and check push to jira to create a new JIRA issue for this finding group.') def __init__(self, *args, **kwargs): - super(EditFindingGroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) import dojo.jira_link.helper as jira_helper self.fields['push_to_jira'] = forms.BooleanField() @@ -325,7 +325,7 @@ class Meta: class Edit_Product_MemberForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Edit_Product_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'].disabled = True self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name') self.fields['user'].disabled = True @@ -339,7 +339,7 @@ class Add_Product_MemberForm(forms.ModelForm): users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users') def __init__(self, *args, **kwargs): - super(Add_Product_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'].disabled = True current_members = Product_Member.objects.filter(product=self.initial["product"]).values_list('user', flat=True) self.fields['users'].queryset = Dojo_User.objects.exclude( @@ -355,7 +355,7 @@ class Add_Product_Member_UserForm(forms.ModelForm): products = 
forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products') def __init__(self, *args, **kwargs): - super(Add_Product_Member_UserForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_members = Product_Member.objects.filter(user=self.initial["user"]).values_list('product', flat=True) self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \ .exclude(id__in=current_members) @@ -368,7 +368,7 @@ class Meta: class Delete_Product_MemberForm(Edit_Product_MemberForm): def __init__(self, *args, **kwargs): - super(Delete_Product_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['role'].disabled = True @@ -385,14 +385,14 @@ class EditNoteTypeForm(NoteTypeForm): def __init__(self, *args, **kwargs): is_single = kwargs.pop('is_single') - super(EditNoteTypeForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if is_single is False: self.fields['is_single'].widget = forms.HiddenInput() class DisableOrEnableNoteTypeForm(NoteTypeForm): def __init__(self, *args, **kwargs): - super(DisableOrEnableNoteTypeForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['name'].disabled = True self.fields['description'].disabled = True self.fields['is_single'].disabled = True @@ -409,7 +409,7 @@ class DojoMetaDataForm(forms.ModelForm): required=True) def full_clean(self): - super(DojoMetaDataForm, self).full_clean() + super().full_clean() try: self.instance.validate_unique() except ValidationError: @@ -498,7 +498,7 @@ class ImportScanForm(forms.Form): create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True) def __init__(self, *args, **kwargs): - super(ImportScanForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['active'].initial = 
self.active_verified_choices[0] self.fields['verified'].initial = self.active_verified_choices[0] @@ -515,7 +515,7 @@ def clean(self): scan_type = cleaned_data.get("scan_type") file = cleaned_data.get("file") if requires_file(scan_type) and not file: - raise forms.ValidationError('Uploading a Report File is required for {}'.format(scan_type)) + raise forms.ValidationError(f'Uploading a Report File is required for {scan_type}') tool_type = requires_tool_type(scan_type) if tool_type: api_scan_configuration = cleaned_data.get('api_scan_configuration') @@ -596,7 +596,7 @@ class ReImportScanForm(forms.Form): create_finding_groups_for_all_findings = forms.BooleanField(help_text="If unchecked, finding groups will only be created when there is more than one grouped finding", required=False, initial=True) def __init__(self, *args, test=None, **kwargs): - super(ReImportScanForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['active'].initial = self.active_verified_choices[0] self.fields['verified'].initial = self.active_verified_choices[0] self.scan_type = None @@ -653,7 +653,7 @@ class ImportEndpointMetaForm(forms.Form): help_text="Add data from file as Metadata. 
Metadata is used for displaying custom fields",) def __init__(self, *args, **kwargs): - super(ImportEndpointMetaForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class DoneForm(forms.Form): @@ -696,7 +696,7 @@ class MergeFindings(forms.ModelForm): def __init__(self, *args, **kwargs): _ = kwargs.pop('finding') findings = kwargs.pop('findings') - super(MergeFindings, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['finding_to_merge_into'] = forms.ModelChoiceField( queryset=findings, initial=0, required="False", label="Finding to Merge Into", help_text="Findings selected below will be merged into this finding.") @@ -817,7 +817,7 @@ class CheckForm(forms.ModelForm): def __init__(self, *args, **kwargs): findings = kwargs.pop('findings') - super(CheckForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['session_issues'].queryset = findings self.fields['crypto_issues'].queryset = findings self.fields['config_issues'].queryset = findings @@ -868,7 +868,7 @@ def __init__(self, *args, **kwargs): if 'user' in kwargs: self.user = kwargs.pop('user') - super(EngForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if product: self.fields['preset'] = forms.ModelChoiceField(help_text="Settings and notes for performing this engagement.", required=False, queryset=Engagement_Presets.objects.filter(product=product)) @@ -892,7 +892,7 @@ def __init__(self, *args, **kwargs): del self.fields['status'] def is_valid(self): - valid = super(EngForm, self).is_valid() + valid = super().is_valid() # we're done now if not valid if not valid: @@ -943,7 +943,7 @@ def __init__(self, *args, **kwargs): if 'instance' in kwargs: obj = kwargs.get('instance') - super(TestForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if obj: product = get_product(obj) @@ -976,7 +976,7 @@ class CopyTestForm(forms.Form): def __init__(self, *args, **kwargs): authorized_lists = 
kwargs.pop('engagements', None) - super(CopyTestForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['engagement'].queryset = authorized_lists @@ -1024,7 +1024,7 @@ def __init__(self, *args, **kwargs): if 'product' in kwargs: product = kwargs.pop('product') - super(AddFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if product: self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product) @@ -1036,7 +1036,7 @@ def __init__(self, *args, **kwargs): self.endpoints_to_add_list = [] def clean(self): - cleaned_data = super(AddFindingForm, self).clean() + cleaned_data = super().clean() if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']): raise forms.ValidationError('Duplicate findings cannot be' ' verified or active') @@ -1105,7 +1105,7 @@ def __init__(self, *args, **kwargs): if 'product' in kwargs: product = kwargs.pop('product') - super(AdHocFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if product: self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product) @@ -1117,7 +1117,7 @@ def __init__(self, *args, **kwargs): self.endpoints_to_add_list = [] def clean(self): - cleaned_data = super(AdHocFindingForm, self).clean() + cleaned_data = super().clean() if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']): raise forms.ValidationError('Duplicate findings cannot be' ' verified or active') @@ -1173,7 +1173,7 @@ def __init__(self, *args, **kwargs): if 'product' in kwargs: product = kwargs.pop('product') - super(PromoteFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if product: self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product) @@ -1181,7 +1181,7 @@ def __init__(self, *args, **kwargs): self.endpoints_to_add_list = [] def clean(self): - cleaned_data = super(PromoteFindingForm, self).clean() + cleaned_data = super().clean() 
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add']) if errors: @@ -1249,7 +1249,7 @@ def __init__(self, *args, **kwargs): self.can_edit_mitigated_data = kwargs.pop('can_edit_mitigated_data') if 'can_edit_mitigated_data' in kwargs \ else False - super(FindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['endpoints'].queryset = Endpoint.objects.filter(product=self.instance.test.engagement.product) self.fields['mitigated_by'].queryset = get_authorized_users(Permissions.Test_Edit) @@ -1295,7 +1295,7 @@ def __init__(self, *args, **kwargs): self.endpoints_to_add_list = [] def clean(self): - cleaned_data = super(FindingForm, self).clean() + cleaned_data = super().clean() if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']: raise forms.ValidationError('Duplicate findings cannot be' @@ -1316,7 +1316,7 @@ def clean(self): return cleaned_data def _post_clean(self): - super(FindingForm, self)._post_clean() + super()._post_clean() if self.can_edit_mitigated_data: opts = self.instance._meta @@ -1342,7 +1342,7 @@ class Meta: 'date', 'description', 'severity', 'reporter', 'test', 'is_mitigated') def clean(self): - cleaned_data = super(StubFindingForm, self).clean() + cleaned_data = super().clean() if 'title' in cleaned_data: if len(cleaned_data['title']) <= 0: raise forms.ValidationError("The title is required.") @@ -1370,14 +1370,14 @@ class ApplyFindingTemplateForm(forms.Form): tags = TagField(required=False, help_text="Add tags that help describe this finding template. Choose from the list or add new tags. 
Press Enter key to add.", initial=Finding.tags.tag_model.objects.all().order_by('name')) def __init__(self, template=None, *args, **kwargs): - super(ApplyFindingTemplateForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name') self.template = template if template: self.template.vulnerability_ids = '\n'.join(template.vulnerability_ids) def clean(self): - cleaned_data = super(ApplyFindingTemplateForm, self).clean() + cleaned_data = super().clean() if 'title' in cleaned_data: if len(cleaned_data['title']) <= 0: @@ -1409,7 +1409,7 @@ class FindingTemplateForm(forms.ModelForm): field_order = ['title', 'cwe', 'vulnerability_ids', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'references', 'tags', 'template_match', 'template_match_cwe', 'template_match_title', 'apply_to_findings'] def __init__(self, *args, **kwargs): - super(FindingTemplateForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name') class Meta: @@ -1452,13 +1452,13 @@ class FindingBulkUpdateForm(forms.ModelForm): notes = forms.CharField(required=False, max_length=1024, widget=forms.TextInput(attrs={'class': 'form-control'})) def __init__(self, *args, **kwargs): - super(FindingBulkUpdateForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['severity'].required = False # we need to defer initialization to prevent multiple initializations if other forms are shown self.fields['tags'].widget.tag_options = tagulous.models.options.TagOptions(autocomplete_settings={'width': '200px', 'defer': True}) def clean(self): - cleaned_data = super(FindingBulkUpdateForm, self).clean() + cleaned_data = super().clean() if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']: raise forms.ValidationError('Duplicate findings cannot be' @@ 
-1482,7 +1482,7 @@ class Meta: def __init__(self, *args, **kwargs): self.product = None self.endpoint_instance = None - super(EditEndpointForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if 'instance' in kwargs: self.endpoint_instance = kwargs.pop('instance') self.product = self.endpoint_instance.product @@ -1492,7 +1492,7 @@ def __init__(self, *args, **kwargs): def clean(self): - cleaned_data = super(EditEndpointForm, self).clean() + cleaned_data = super().clean() protocol = cleaned_data['protocol'] userinfo = cleaned_data['userinfo'] @@ -1536,7 +1536,7 @@ def __init__(self, *args, **kwargs): product = None if 'product' in kwargs: product = kwargs.pop('product') - super(AddEndpointForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'] = forms.ModelChoiceField(queryset=get_authorized_products(Permissions.Endpoint_Add)) if product is not None: self.fields['product'].initial = product.id @@ -1562,7 +1562,7 @@ def save(self): def clean(self): - cleaned_data = super(AddEndpointForm, self).clean() + cleaned_data = super().clean() if 'endpoint' in cleaned_data and 'product' in cleaned_data: endpoint = cleaned_data['endpoint'] @@ -1606,10 +1606,10 @@ class TypedNoteForm(NoteForm): def __init__(self, *args, **kwargs): queryset = kwargs.pop('available_note_types') - super(TypedNoteForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['note_type'] = forms.ModelChoiceField(queryset=queryset, label='Note Type', required=True) - class Meta(): + class Meta: model = Notes fields = ['note_type', 'entry', 'private'] @@ -1639,7 +1639,7 @@ class CloseFindingForm(forms.ModelForm): def __init__(self, *args, **kwargs): queryset = kwargs.pop('missing_note_types') - super(CloseFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if len(queryset) == 0: self.fields['note_type'].widget = forms.HiddenInput() else: @@ -1654,7 +1654,7 @@ def __init__(self, *args, 
**kwargs): self.fields['mitigated_by'].initial = self.instance.mitigated_by def _post_clean(self): - super(CloseFindingForm, self)._post_clean() + super()._post_clean() if self.can_edit_mitigated_data: opts = self.instance._meta @@ -1676,7 +1676,7 @@ def __init__(self, *args, **kwargs): if 'finding' in kwargs: finding = kwargs.pop('finding') - super(EditPlannedRemediationDateFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['planned_remediation_date'].required = True self.fields['planned_remediation_date'].widget = forms.DateInput(attrs={'class': 'datepicker'}) @@ -1742,7 +1742,7 @@ class ReviewFindingForm(forms.Form): def __init__(self, *args, **kwargs): finding = kwargs.pop("finding", None) user = kwargs.pop("user", None) - super(ReviewFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # Get the list of users if finding is not None: users = get_authorized_users_for_product_and_product_type(None, finding.test.engagement.product, Permissions.Finding_Edit) @@ -1776,7 +1776,7 @@ class WeeklyMetricsForm(forms.Form): dates = forms.ChoiceField() def __init__(self, *args, **kwargs): - super(WeeklyMetricsForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) wmf_options = [] for i in range(6): @@ -1842,7 +1842,7 @@ def __init__(self, *args, **kwargs): exclude_product_types = kwargs.get('exclude_product_types', False) if 'exclude_product_types' in kwargs: del kwargs['exclude_product_types'] - super(MetricsFilterForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if exclude_product_types: del self.fields['exclude_product_types'] @@ -1871,7 +1871,7 @@ class Add_Group_MemberForm(forms.ModelForm): users = forms.ModelMultipleChoiceField(queryset=Dojo_Group_Member.objects.none(), required=True, label='Users') def __init__(self, *args, **kwargs): - super(Add_Group_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) 
self.fields['group'].disabled = True current_members = Dojo_Group_Member.objects.filter(group=self.initial['group']).values_list('user', flat=True) self.fields['users'].queryset = Dojo_User.objects.exclude( @@ -1888,7 +1888,7 @@ class Add_Group_Member_UserForm(forms.ModelForm): groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') def __init__(self, *args, **kwargs): - super(Add_Group_Member_UserForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['user'].disabled = True current_groups = Dojo_Group_Member.objects.filter(user=self.initial['user']).values_list('group', flat=True) self.fields['groups'].queryset = Dojo_Group.objects.exclude(id__in=current_groups) @@ -1901,7 +1901,7 @@ class Meta: class Edit_Group_MemberForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Edit_Group_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['group'].disabled = True self.fields['user'].disabled = True self.fields['role'].queryset = get_group_member_roles() @@ -1913,7 +1913,7 @@ class Meta: class Delete_Group_MemberForm(Edit_Group_MemberForm): def __init__(self, *args, **kwargs): - super(Delete_Group_MemberForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['role'].disabled = True @@ -1921,7 +1921,7 @@ class Add_Product_GroupForm(forms.ModelForm): groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') def __init__(self, *args, **kwargs): - super(Add_Product_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'].disabled = True current_groups = Product_Group.objects.filter(product=self.initial["product"]).values_list('group', flat=True) authorized_groups = get_authorized_groups(Permissions.Group_View) @@ -1937,7 +1937,7 @@ class Add_Product_Group_GroupForm(forms.ModelForm): products = 
forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products') def __init__(self, *args, **kwargs): - super(Add_Product_Group_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_members = Product_Group.objects.filter(group=self.initial["group"]).values_list('product', flat=True) self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \ .exclude(id__in=current_members) @@ -1951,7 +1951,7 @@ class Meta: class Edit_Product_Group_Form(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Edit_Product_Group_Form, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'].disabled = True self.fields['group'].disabled = True @@ -1962,7 +1962,7 @@ class Meta: class Delete_Product_GroupForm(Edit_Product_Group_Form): def __init__(self, *args, **kwargs): - super(Delete_Product_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['role'].disabled = True @@ -1970,7 +1970,7 @@ class Add_Product_Type_GroupForm(forms.ModelForm): groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups') def __init__(self, *args, **kwargs): - super(Add_Product_Type_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_groups = Product_Type_Group.objects.filter(product_type=self.initial["product_type"]).values_list('group', flat=True) authorized_groups = get_authorized_groups(Permissions.Group_View) authorized_groups = authorized_groups.exclude(id__in=current_groups) @@ -1986,7 +1986,7 @@ class Add_Product_Type_Group_GroupForm(forms.ModelForm): product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types') def __init__(self, *args, **kwargs): - super(Add_Product_Type_Group_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) current_members = 
Product_Type_Group.objects.filter(group=self.initial['group']).values_list('product_type', flat=True) self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \ .exclude(id__in=current_members) @@ -2000,7 +2000,7 @@ class Meta: class Edit_Product_Type_Group_Form(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Edit_Product_Type_Group_Form, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product_type'].disabled = True self.fields['group'].disabled = True @@ -2011,13 +2011,13 @@ class Meta: class Delete_Product_Type_GroupForm(Edit_Product_Type_Group_Form): def __init__(self, *args, **kwargs): - super(Delete_Product_Type_GroupForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['role'].disabled = True class DojoUserForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(DojoUserForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if not get_current_user().is_superuser and not get_system_setting('enable_user_profile_editable'): for field in self.fields: self.fields[field].disabled = True @@ -2045,7 +2045,7 @@ def __init__(self, *args, **kwargs): self.user = None if 'user' in kwargs: self.user = kwargs.pop('user') - super(ChangePasswordForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['new_password'].help_text = get_password_requirements_string() def clean(self): @@ -2214,7 +2214,7 @@ class CopyFindingForm(forms.Form): def __init__(self, *args, **kwargs): authorized_lists = kwargs.pop('tests', None) - super(CopyFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['test'].queryset = authorized_lists @@ -2329,7 +2329,7 @@ class JIRAForm(BaseJiraForm): help_text='Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. 
Leave empty to use the default jira_full templates.') def __init__(self, *args, **kwargs): - super(JIRAForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if self.instance: self.fields['password'].required = False @@ -2380,7 +2380,7 @@ class Meta: class Product_API_Scan_ConfigurationForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(Product_API_Scan_ConfigurationForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) tool_configuration = forms.ModelChoiceField( label='Tool Configuration', @@ -2453,7 +2453,7 @@ class Meta: exclude = ['product', 'tags'] def __init__(self, *args, **kwargs): - super(DeleteAppAnalysisForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['name'].disabled = True self.fields['user'].disabled = True self.fields['confidence'].disabled = True @@ -2489,7 +2489,7 @@ def clean(self): class SLAConfigForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(SLAConfigForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # if this sla config has findings being asynchronously updated, disable the days by severity fields if self.instance.async_updating: @@ -2574,7 +2574,7 @@ class Meta: exclude = ['product'] def __init__(self, *args, **kwargs): - super(ObjectSettingsForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def clean(self): form_data = self.cleaned_data @@ -2625,7 +2625,7 @@ class SystemSettingsForm(forms.ModelForm): jira_webhook_secret = forms.CharField(required=False) def __init__(self, *args, **kwargs): - super(SystemSettingsForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['default_group_role'].queryset = get_group_member_roles() def clean(self): @@ -2667,7 +2667,7 @@ class Meta: class ProductNotificationsForm(forms.ModelForm): def __init__(self, *args, **kwargs): - super(ProductNotificationsForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) 
if not self.instance.id: self.initial['engagement_added'] = '' self.initial['close_engagement'] = '' @@ -2837,7 +2837,7 @@ def clean(self): class GITHUBFindingForm(forms.Form): def __init__(self, *args, **kwargs): self.enabled = kwargs.pop('enabled') - super(GITHUBFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['push_to_github'] = forms.BooleanField() self.fields['push_to_github'].required = False self.fields['push_to_github'].help_text = "Checking this will overwrite content of your Github issue, or create one." @@ -2857,7 +2857,7 @@ def __init__(self, *args, **kwargs): if self.instance is None and self.jira_project is None: raise ValueError('either and finding instance or jira_project is needed') - super(JIRAFindingForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['push_to_jira'] = forms.BooleanField() self.fields['push_to_jira'].required = False if is_finding_groups_enabled(): @@ -2892,7 +2892,7 @@ def __init__(self, *args, **kwargs): def clean(self): logger.debug('jform clean') - super(JIRAFindingForm, self).clean() + super().clean() jira_issue_key_new = self.cleaned_data.get('jira_issue') finding = self.instance jira_project = self.jira_project @@ -2968,7 +2968,7 @@ class JIRAImportScanForm(forms.Form): def __init__(self, *args, **kwargs): self.push_all = kwargs.pop('push_all', False) - super(JIRAImportScanForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if self.push_all: # This will show the checkbox as checked and greyed out, this way the user is aware # that issues will be pushed to JIRA, given their product-level settings. 
@@ -2987,7 +2987,7 @@ class JIRAEngagementForm(forms.Form): def __init__(self, *args, **kwargs): self.instance = kwargs.pop('instance', None) - super(JIRAEngagementForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if self.instance: if self.instance.has_jira_issue: @@ -3026,7 +3026,7 @@ class Meta: class AnnouncementRemoveForm(AnnouncementCreateForm): def __init__(self, *args, **kwargs): - super(AnnouncementRemoveForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['dismissable'].disabled = True self.fields['message'].disabled = True self.fields['style'].disabled = True @@ -3069,12 +3069,12 @@ def __init__(self, *args, **kwargs): raise ValueError('Need a question to render') del kwargs['question'] - super(QuestionForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class TextQuestionForm(QuestionForm): def __init__(self, *args, **kwargs): - super(TextQuestionForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # work out initial data @@ -3119,7 +3119,7 @@ def save(self): class ChoiceQuestionForm(QuestionForm): def __init__(self, *args, **kwargs): - super(ChoiceQuestionForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) choices = [(c.id, c.label) for c in self.question.choices.all()] @@ -3326,7 +3326,7 @@ def __init__(self, attrs=None): forms.TextInput(attrs={'data-type': 'choice'}), forms.TextInput(attrs={'data-type': 'choice'}), forms.TextInput(attrs={'data-type': 'choice'})] - super(MultiWidgetBasic, self).__init__(widgets, attrs) + super().__init__(widgets, attrs) def decompress(self, value): if value: @@ -3348,7 +3348,7 @@ def __init__(self, *args, **kwargs): forms.fields.CharField(required=False), forms.fields.CharField(required=False), forms.fields.CharField(required=False)] - super(MultiExampleField, self).__init__(list_fields, *args, **kwargs) + super().__init__(list_fields, *args, **kwargs) def compress(self, values): return 
pickle.dumps(values) @@ -3405,7 +3405,7 @@ def __init__(self, *args, **kwargs): assignee = None if 'assignee' in kwargs: assignee = kwargs.pop('asignees') - super(AssignUserForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) if assignee is None: self.fields['assignee'] = forms.ModelChoiceField(queryset=get_authorized_users(Permissions.Engagement_View), empty_label='Not Assigned', required=False) else: @@ -3424,7 +3424,7 @@ class AddEngagementForm(forms.Form): help_text='Select which product to attach Engagement') def __init__(self, *args, **kwargs): - super(AddEngagementForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.fields['product'].queryset = get_authorized_products(Permissions.Engagement_Add) @@ -3433,7 +3433,7 @@ class ConfigurationPermissionsForm(forms.Form): def __init__(self, *args, **kwargs): self.user = kwargs.pop('user', None) self.group = kwargs.pop('group', None) - super(ConfigurationPermissionsForm, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.permission_fields = get_configuration_permissions_fields() diff --git a/dojo/group/views.py b/dojo/group/views.py index 8cdf17b31c3..d454a8d977d 100644 --- a/dojo/group/views.py +++ b/dojo/group/views.py @@ -419,7 +419,7 @@ def edit_group_member(request, mid): if owners < 1: messages.add_message(request, messages.WARNING, - 'There must be at least one owner for group {}.'.format(member.group.name), + f'There must be at least one owner for group {member.group.name}.', extra_tags='alert-warning') if is_title_in_breadcrumbs('View User'): return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) @@ -461,7 +461,7 @@ def delete_group_member(request, mid): if owners <= 1: messages.add_message(request, messages.WARNING, - 'There must be at least one owner for group {}.'.format(member.group.name), + f'There must be at least one owner for group {member.group.name}.', extra_tags='alert-warning') if 
is_title_in_breadcrumbs('View User'): return HttpResponseRedirect(reverse('view_user', args=(member.user.id, ))) diff --git a/dojo/importers/importer/importer.py b/dojo/importers/importer/importer.py index 337df8d0470..1be23a01e85 100644 --- a/dojo/importers/importer/importer.py +++ b/dojo/importers/importer/importer.py @@ -23,7 +23,7 @@ deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") -class DojoDefaultImporter(object): +class DojoDefaultImporter: def create_test(self, scan_type, test_type_name, engagement, lead, environment, tags=None, scan_date=None, version=None, branch_tag=None, build_id=None, commit_hash=None, now=timezone.now(), diff --git a/dojo/importers/reimporter/reimporter.py b/dojo/importers/reimporter/reimporter.py index 1515b6ba410..0128d0daaa9 100644 --- a/dojo/importers/reimporter/reimporter.py +++ b/dojo/importers/reimporter/reimporter.py @@ -22,7 +22,7 @@ deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") -class DojoDefaultReImporter(object): +class DojoDefaultReImporter: @dojo_async_task @app.task(ignore_result=False) def process_parsed_findings( @@ -106,7 +106,7 @@ def process_parsed_findings( except ValidationError as err: logger.warning( "DefectDojo is storing broken endpoint because cleaning wasn't successful: " - "{}".format(err) + f"{err}" ) item.hash_code = item.compute_hash_code() diff --git a/dojo/importers/utils.py b/dojo/importers/utils.py index 0b98caf551d..5788b5fcf45 100644 --- a/dojo/importers/utils.py +++ b/dojo/importers/utils.py @@ -142,7 +142,7 @@ def add_endpoints_to_unsaved_finding(finding, test, endpoints, **kwargs): endpoint.clean() except ValidationError as e: logger.warning("DefectDojo is storing broken endpoint because cleaning wasn't successful: " - "{}".format(e)) + f"{e}") ep = None try: ep, _created = endpoint_get_or_create( diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index 090c75c8972..eb2b1c3adf2 100644 --- a/dojo/jira_link/helper.py 
+++ b/dojo/jira_link/helper.py @@ -722,7 +722,7 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b return False if not is_jira_configured_and_enabled(obj): - message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' % (obj.id, to_str_typed(obj)) + message = f'Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}.' return failure_to_add_message(message, None, obj) jira_project = get_jira_project(obj) @@ -884,7 +884,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b jira_instance = get_jira_instance(obj) if not is_jira_configured_and_enabled(obj): - message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' % (obj.id, to_str_typed(obj)) + message = f'Object {obj.id} cannot be pushed to JIRA as there is no JIRA configuration for {to_str_typed(obj)}.' return failure_to_update_message(message, None, obj) j_issue = obj.jira_issue @@ -985,7 +985,7 @@ def get_jira_issue_from_jira(find): j_issue = find.jira_issue if not jira_project: logger.error("Unable to retrieve latest status change from JIRA %s for finding %s as there is no JIRA_Project configured for this finding.", j_issue.jira_key, format(find.id)) - log_jira_alert("Unable to retrieve latest status change from JIRA %s for finding %s as there is no JIRA_Project configured for this finding." 
% (j_issue.jira_key, find), find) + log_jira_alert(f"Unable to retrieve latest status change from JIRA {j_issue.jira_key} for finding {find} as there is no JIRA_Project configured for this finding.", find) return False meta = None @@ -1203,7 +1203,7 @@ def close_epic(eng, push_to_jira, **kwargs): auth=HTTPBasicAuth(jira_instance.username, jira_instance.password), json=json_data) if r.status_code != 204: - logger.warning("JIRA close epic failed with error: {}".format(r.text)) + logger.warning(f"JIRA close epic failed with error: {r.text}") return False return True except JIRAError as e: @@ -1349,7 +1349,7 @@ def add_comment(obj, note, force_push=False, **kwargs): j_issue = obj.jira_issue jira.add_comment( j_issue.jira_id, - '(%s): %s' % (note.author.get_full_name() if note.author.get_full_name() else note.author.username, note.entry)) + f'({note.author.get_full_name() if note.author.get_full_name() else note.author.username}): {note.entry}') return True except JIRAError as e: log_jira_generic_alert('Jira Add Comment Error', str(e)) @@ -1580,7 +1580,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign if resolved: if jira_instance and resolution_name in jira_instance.accepted_resolutions: if not finding.risk_accepted: - logger.debug("Marking related finding of {} as accepted. Creating risk acceptance.".format(jira_issue.jira_key)) + logger.debug(f"Marking related finding of {jira_issue.jira_key} as accepted. 
Creating risk acceptance.") finding.active = False finding.mitigated = None finding.is_mitigated = False @@ -1594,7 +1594,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign status_changed = True elif jira_instance and resolution_name in jira_instance.false_positive_resolutions: if not finding.false_p: - logger.debug("Marking related finding of {} as false-positive".format(jira_issue.jira_key)) + logger.debug(f"Marking related finding of {jira_issue.jira_key} as false-positive") finding.active = False finding.verified = False finding.mitigated = None @@ -1605,7 +1605,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign else: # Mitigated by default as before if not finding.is_mitigated: - logger.debug("Marking related finding of {} as mitigated (default)".format(jira_issue.jira_key)) + logger.debug(f"Marking related finding of {jira_issue.jira_key} as mitigated (default)") finding.active = False finding.mitigated = jira_now finding.is_mitigated = True @@ -1617,7 +1617,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign else: if not finding.active: # Reopen / Open Jira issue - logger.debug("Re-opening related finding of {}".format(jira_issue.jira_key)) + logger.debug(f"Re-opening related finding of {jira_issue.jira_key}") finding.active = True finding.mitigated = None finding.is_mitigated = False diff --git a/dojo/management/commands/dupecheck.py b/dojo/management/commands/dupecheck.py index ab7432fdec3..92567173c94 100644 --- a/dojo/management/commands/dupecheck.py +++ b/dojo/management/commands/dupecheck.py @@ -17,13 +17,13 @@ def count_the_duplicates(self, model, column): print(" Table:" + str(model) + " Column: " + column) print("===================================") duplicates = model.objects.values(column).annotate(Count('id')).order_by().filter(id__count__gt=1) - kwargs = {'{0}__{1}'.format(column, 'in'): [item[column] for item in duplicates]} + kwargs = 
{'{}__{}'.format(column, 'in'): [item[column] for item in duplicates]} duplicates = model.objects.filter(**kwargs) if not duplicates: print("No duplicates found") for dupe in duplicates: - print('{0}, Duplicate value: {1}, Object: {2}'.format(dupe.id, getattr(dupe, column), dupe)) + print(f'{dupe.id}, Duplicate value: {getattr(dupe, column)}, Object: {dupe}') def handle(self, *args, **options): self.count_the_duplicates(Product, 'name') diff --git a/dojo/management/commands/import_github_languages.py b/dojo/management/commands/import_github_languages.py index b92ff7921f7..bed838e9f6d 100644 --- a/dojo/management/commands/import_github_languages.py +++ b/dojo/management/commands/import_github_languages.py @@ -33,7 +33,7 @@ def handle(self, *args, **options): try: language_type, created = Language_Type.objects.get_or_create(language=name) except Language_Type.MultipleObjectsReturned: - logger.warning('Language_Type {} exists multiple times'.format(name)) + logger.warning(f'Language_Type {name} exists multiple times') continue if created: @@ -42,4 +42,4 @@ def handle(self, *args, **options): language_type.color = element.get('color', 0) language_type.save() - logger.info('Finished importing languages from GitHub, added {} Language_Types'.format(new_language_types)) + logger.info(f'Finished importing languages from GitHub, added {new_language_types} Language_Types') diff --git a/dojo/management/commands/import_surveys.py b/dojo/management/commands/import_surveys.py index 75c28247262..5fc230fbb4c 100644 --- a/dojo/management/commands/import_surveys.py +++ b/dojo/management/commands/import_surveys.py @@ -28,7 +28,7 @@ def handle(self, *args, **options): # Find the current id in the surveys file path = os.path.dirname(os.path.abspath(__file__)) path = path[:-19] + 'fixtures/initial_surveys.json' - contents = open(path, "rt").readlines() + contents = open(path).readlines() for line in contents: if '"polymorphic_ctype": ' in line: matchedLine = line @@ -37,7 +37,7 @@ def 
handle(self, *args, **options): old_id = ''.join(c for c in matchedLine if c.isdigit()) new_line = matchedLine.replace(old_id, str(ctype_id)) # Replace the all lines in the file - with open(path, "wt") as fout: + with open(path, "w") as fout: for line in contents: fout.write(line.replace(matchedLine, new_line)) # Delete the temp question diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py index 03872c33eda..c7bef42c76e 100644 --- a/dojo/metrics/views.py +++ b/dojo/metrics/views.py @@ -1033,7 +1033,7 @@ def view_engineer(request, eid): severity='Low' ).count() prod = Product.objects.get(id=product) - all_findings_link = "%s" % ( + all_findings_link = "{}".format( reverse('product_open_findings', args=(prod.id,)), escape(prod.name)) update.append([all_findings_link, z_count, o_count, t_count, h_count, z_count + o_count + t_count + h_count]) @@ -1066,7 +1066,7 @@ def view_engineer(request, eid): mitigated__isnull=True, severity='Low').count() prod = Product.objects.get(id=product) - all_findings_link = "%s" % ( + all_findings_link = "{}".format( reverse('product_open_findings', args=(prod.id,)), escape(prod.name)) total_update.append([all_findings_link, z_count, o_count, t_count, h_count, z_count + o_count + t_count + h_count]) diff --git a/dojo/middleware.py b/dojo/middleware.py index 733c66f4cd4..4e54fe9e813 100644 --- a/dojo/middleware.py +++ b/dojo/middleware.py @@ -63,7 +63,7 @@ def __call__(self, request): return self.get_response(request) -class DojoSytemSettingsMiddleware(object): +class DojoSytemSettingsMiddleware: _thread_local = local() def __init__(self, get_response): @@ -106,7 +106,7 @@ class System_Settings_Manager(models.Manager): def get_from_db(self, *args, **kwargs): # logger.debug('refreshing system_settings from db') try: - from_db = super(System_Settings_Manager, self).get(*args, **kwargs) + from_db = super().get(*args, **kwargs) except: from dojo.models import System_Settings # this mimics the existing code that was in filters.py and 
utils.py. diff --git a/dojo/models.py b/dojo/models.py index a59af55ee9a..53d9637e01c 100755 --- a/dojo/models.py +++ b/dojo/models.py @@ -142,7 +142,7 @@ def __init__(self, directory=None, keep_basename=False, keep_ext=True): def __call__(self, model_instance, filename): base, ext = os.path.splitext(filename) - filename = "%s_%s" % (base, uuid4()) if self.keep_basename else str(uuid4()) + filename = f"{base}_{uuid4()}" if self.keep_basename else str(uuid4()) if self.keep_ext: filename += ext if self.directory is None: @@ -219,9 +219,7 @@ def generate_full_name(user): """ Returns the first_name plus the last_name, with a space in between. """ - full_name = '%s %s (%s)' % (user.first_name, - user.last_name, - user.username) + full_name = f'{user.first_name} {user.last_name} ({user.username})' return full_name.strip() @@ -690,12 +688,12 @@ def copy(self): copy.pk = None copy.id = None # Add unique modifier to file name - copy.title = '{} - clone-{}'.format(self.title, str(uuid4())[:8]) + copy.title = f'{self.title} - clone-{str(uuid4())[:8]}' # Create new unique file name current_url = self.file.url _, current_full_filename = current_url.rsplit('/', 1) _, extension = current_full_filename.split('.', 1) - new_file = ContentFile(self.file.read(), name='{}.{}'.format(uuid4(), extension)) + new_file = ContentFile(self.file.read(), name=f'{uuid4()}.{extension}') copy.file = new_file copy.save() @@ -709,11 +707,7 @@ def get_accessible_url(self, obj, obj_id): elif isinstance(obj, Finding): obj_type = 'Finding' - return 'access_file/{file_id}/{obj_id}/{obj_type}'.format( - file_id=self.id, - obj_id=obj_id, - obj_type=obj_type - ) + return f'access_file/{self.id}/{obj_id}/{obj_type}' class Product_Type(models.Model): @@ -857,7 +851,7 @@ def clean(self): raise ValidationError('Metadata entries may not have more than one relation, either a product, an endpoint either or a finding') def __str__(self): - return "%s: %s" % (self.name, self.value) + return f"{self.name}: 
{self.value}" class Meta: unique_together = (('product', 'name'), @@ -899,7 +893,7 @@ def save(self, *args, **kwargs): self.medium = initial_sla_config.medium self.low = initial_sla_config.low - super(SLA_Configuration, self).save(*args, **kwargs) + super().save(*args, **kwargs) # if the initial sla config exists and async finding update is not running if initial_sla_config is not None and not self.async_updating: @@ -917,7 +911,7 @@ def save(self, *args, **kwargs): if len(severities): # set the async updating flag to true for this sla config self.async_updating = True - super(SLA_Configuration, self).save(*args, **kwargs) + super().save(*args, **kwargs) # set the async updating flag to true for all products using this sla config products = Product.objects.filter(sla_configuration=self) for product in products: @@ -1060,7 +1054,7 @@ def save(self, *args, **kwargs): if initial_sla_config and self.async_updating: self.sla_configuration = initial_sla_config - super(Product, self).save(*args, **kwargs) + super().save(*args, **kwargs) # if the initial sla config exists and async finding update is not running if initial_sla_config is not None and not self.async_updating: @@ -1070,7 +1064,7 @@ def save(self, *args, **kwargs): if new_sla_config and (initial_sla_config != new_sla_config): # set the async updating flag to true for this product self.async_updating = True - super(Product, self).save(*args, **kwargs) + super().save(*args, **kwargs) # set the async updating flag to true for the sla config assigned to this product sla_config = getattr(self, 'sla_configuration', None) if sla_config: @@ -1552,7 +1546,7 @@ def age(self): return days if days > 0 else 0 def __str__(self): - return "'{}' on '{}'".format(str(self.finding), str(self.endpoint)) + return f"'{str(self.finding)}' on '{str(self.endpoint)}'" def copy(self, finding=None): copy = self @@ -1617,13 +1611,13 @@ def clean(self): db_type = connection.vendor if self.protocol or self.protocol == '': if not 
re.match(r'^[A-Za-z][A-Za-z0-9\.\-\+]+$', self.protocol): # https://tools.ietf.org/html/rfc3986#section-3.1 - errors.append(ValidationError('Protocol "{}" has invalid format'.format(self.protocol))) + errors.append(ValidationError(f'Protocol "{self.protocol}" has invalid format')) if self.protocol == '': self.protocol = None if self.userinfo or self.userinfo == '': if not re.match(r'^[A-Za-z0-9\.\-_~%\!\$&\'\(\)\*\+,;=:]+$', self.userinfo): # https://tools.ietf.org/html/rfc3986#section-3.2.1 - errors.append(ValidationError('Userinfo "{}" has invalid format'.format(self.userinfo))) + errors.append(ValidationError(f'Userinfo "{self.userinfo}" has invalid format')) if self.userinfo == '': self.userinfo = None @@ -1632,7 +1626,7 @@ def clean(self): try: validate_ipv46_address(self.host) except ValidationError: - errors.append(ValidationError('Host "{}" has invalid format'.format(self.host))) + errors.append(ValidationError(f'Host "{self.host}" has invalid format')) else: errors.append(ValidationError('Host must not be empty')) @@ -1640,10 +1634,10 @@ def clean(self): try: int_port = int(self.port) if not (0 <= int_port < 65536): - errors.append(ValidationError('Port "{}" has invalid format - out of range'.format(self.port))) + errors.append(ValidationError(f'Port "{self.port}" has invalid format - out of range')) self.port = int_port except ValueError: - errors.append(ValidationError('Port "{}" has invalid format - it is not a number'.format(self.port))) + errors.append(ValidationError(f'Port "{self.port}" has invalid format - it is not a number')) if self.path or self.path == '': while len(self.path) > 0 and self.path[0] == "/": # Endpoint store "root-less" path @@ -1654,7 +1648,7 @@ def clean(self): action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' for remove_str in null_char_list: self.path = self.path.replace(remove_str, '%00') - errors.append(ValidationError('Path "{}" has invalid format - It contains the NULL character. 
The following action was taken: {}'.format(old_value, action_string))) + errors.append(ValidationError(f'Path "{old_value}" has invalid format - It contains the NULL character. The following action was taken: {action_string}')) if self.path == '': self.path = None @@ -1667,7 +1661,7 @@ def clean(self): action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' for remove_str in null_char_list: self.query = self.query.replace(remove_str, '%00') - errors.append(ValidationError('Query "{}" has invalid format - It contains the NULL character. The following action was taken: {}'.format(old_value, action_string))) + errors.append(ValidationError(f'Query "{old_value}" has invalid format - It contains the NULL character. The following action was taken: {action_string}')) if self.query == '': self.query = None @@ -1680,7 +1674,7 @@ def clean(self): action_string = 'Postgres does not accept NULL character. Attempting to replace with %00...' for remove_str in null_char_list: self.fragment = self.fragment.replace(remove_str, '%00') - errors.append(ValidationError('Fragment "{}" has invalid format - It contains the NULL character. The following action was taken: {}'.format(old_value, action_string))) + errors.append(ValidationError(f'Fragment "{old_value}" has invalid format - It contains the NULL character. 
The following action was taken: {action_string}')) if self.fragment == '': self.fragment = None @@ -1699,11 +1693,11 @@ def __str__(self): path=tuple(self.path.split('/')) if self.path else (), query=tuple( ( - qe.split(u"=", 1) - if u"=" in qe + qe.split("=", 1) + if "=" in qe else (qe, None) ) - for qe in self.query.split(u"&") + for qe in self.query.split("&") ) if self.query else (), # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1427 fragment=self.fragment or '' ) @@ -1722,19 +1716,19 @@ def __str__(self): except: url = '' if self.protocol: - url += '{}://'.format(self.protocol) + url += f'{self.protocol}://' if self.userinfo: - url += '{}@'.format(self.userinfo) + url += f'{self.userinfo}@' if self.host: url += self.host if self.port: - url += ':{}'.format(self.port) + url += f':{self.port}' if self.path: url += '{}{}'.format('/' if self.path[0] != '/' else '', self.path) if self.query: - url += '?{}'.format(self.query) + url += f'?{self.query}' if self.fragment: - url += '#{}'.format(self.fragment) + url += f'#{self.fragment}' return url def __hash__(self): @@ -1905,7 +1899,7 @@ def from_uri(uri): from urllib.parse import urlparse url = hyperlink.parse(url="//" + urlparse(uri).netloc) except hyperlink.URLParseError as e: - raise ValidationError('Invalid URL format: {}'.format(e)) + raise ValidationError(f'Invalid URL format: {e}') query_parts = [] # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1768 for k, v in url.query: @@ -1913,7 +1907,7 @@ def from_uri(uri): query_parts.append(k) else: query_parts.append(f"{k}={v}") - query_string = u"&".join(query_parts) + query_string = "&".join(query_parts) protocol = url.scheme if url.scheme != '' else None userinfo = ':'.join(url.userinfo) if url.userinfo not in [(), ('',)] else None @@ -2019,7 +2013,7 @@ def test_type_name(self) -> str: def __str__(self): if 
self.title: - return "%s (%s)" % (self.title, self.test_type) + return f"{self.title} ({self.test_type})" return str(self.test_type) def get_breadcrumbs(self): @@ -2561,7 +2555,7 @@ class Meta: ] def __init__(self, *args, **kwargs): - super(Finding, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.unsaved_endpoints = [] self.unsaved_request = None @@ -3053,7 +3047,7 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru self.set_sla_expiration_date() logger.debug("Saving finding of id " + str(self.id) + " dedupe_option:" + str(dedupe_option) + " (self.pk is %s)", "None" if self.pk is None else "not None") - super(Finding, self).save(*args, **kwargs) + super().save(*args, **kwargs) self.found_by.add(self.test.test_type) @@ -3087,7 +3081,7 @@ def get_breadcrumbs(self): return bc def get_valid_request_response_pairs(self): - empty_value = base64.b64encode("".encode()) + empty_value = base64.b64encode(b"") # Get a list of all req/resp pairs all_req_resps = self.burprawrequestresponse_set.all() # Filter away those that do not have any contents @@ -3711,7 +3705,7 @@ class FileAccessToken(models.Model): def save(self, *args, **kwargs): if not self.token: self.token = uuid4() - return super(FileAccessToken, self).save(*args, **kwargs) + return super().save(*args, **kwargs) ANNOUNCEMENT_STYLE_CHOICES = ( diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index 4cfa65bdda2..1ee9a9be207 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -93,7 +93,7 @@ def create_notification(event=None, **kwargs): queryset=Notifications.objects.filter(Q(product_id=product) | Q(product__isnull=True)), to_attr="applicable_notifications" )).annotate(applicable_notifications_count=Count('notifications__id', filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\ - .filter((Q(applicable_notifications_count__gt=0) | Q(is_superuser=True))) + 
.filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True)) # only send to authorized users or admin/superusers logger.debug('Filtering users for the product %s', product) @@ -126,11 +126,11 @@ def create_notification(event=None, **kwargs): def create_description(event, *args, **kwargs): if "description" not in kwargs.keys(): if event == 'product_added': - kwargs["description"] = _('Product %(title)s has been created successfully.' % {'title': kwargs['title']}) + kwargs["description"] = _('Product {title} has been created successfully.'.format(title=kwargs['title'])) elif event == 'product_type_added': - kwargs["description"] = _('Product Type %(title)s has been created successfully.' % {'title': kwargs['title']}) + kwargs["description"] = _('Product Type {title} has been created successfully.'.format(title=kwargs['title'])) else: - kwargs["description"] = _('Event %(event)s has occurred.' % {'event': str(event)}) + kwargs["description"] = _('Event {event} has occurred.'.format(event=str(event))) return kwargs["description"] @@ -227,7 +227,7 @@ def _post_slack_message(channel): # only send notification if we managed to find the slack_user_id if slack_user_id: - channel = '@{}'.format(slack_user_id) + channel = f'@{slack_user_id}' _post_slack_message(channel) else: logger.info("The user %s does not have a email address informed for Slack in profile.", user) @@ -235,7 +235,7 @@ def _post_slack_message(channel): # System scope slack notifications, and not personal would still see this go through if get_system_setting('slack_channel') is not None: channel = get_system_setting('slack_channel') - logger.info("Sending system notification to system channel {}.".format(channel)) + logger.info(f"Sending system notification to system channel {channel}.") _post_slack_message(channel) else: logger.debug('slack_channel not configured: skipping system notification') @@ -354,10 +354,10 @@ def get_slack_user_id(user_email): if user_email == 
user["user"]["profile"]["email"]: if "id" in user["user"]: user_id = user["user"]["id"] - logger.debug("Slack user ID is {}".format(user_id)) + logger.debug(f"Slack user ID is {user_id}") slack_user_is_found = True else: - logger.warning("A user with email {} could not be found in this Slack workspace.".format(user_email)) + logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.") if not slack_user_is_found: logger.warning("The Slack user was not found.") diff --git a/dojo/okta.py b/dojo/okta.py index 27ea21084db..856f6004e8d 100644 --- a/dojo/okta.py +++ b/dojo/okta.py @@ -12,7 +12,7 @@ from social_core.backends.open_id_connect import OpenIdConnectAuth -class OktaMixin(object): +class OktaMixin: def api_url(self): return append_slash(self.setting('API_URL')) diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py index 84bb14c108e..fbc4aae063e 100644 --- a/dojo/product_type/views.py +++ b/dojo/product_type/views.py @@ -139,7 +139,7 @@ def delete_product_type(request, ptid): create_notification(event='other', title='Deletion of %s' % product_type.name, no_users=True, - description='The product type "%s" was deleted by %s' % (product_type.name, request.user), + description=f'The product type "{product_type.name}" was deleted by {request.user}', url=request.build_absolute_uri(reverse('product_type')), icon="exclamation-triangle") return HttpResponseRedirect(reverse('product_type')) diff --git a/dojo/reports/widgets.py b/dojo/reports/widgets.py index 36831c4ad0c..09eb1646203 100644 --- a/dojo/reports/widgets.py +++ b/dojo/reports/widgets.py @@ -57,7 +57,7 @@ def __init__(self, attrs=None): default_attrs = {'style': 'width:100%;min-height:400px'} if attrs: default_attrs.update(attrs) - super(Div, self).__init__(default_attrs) + super().__init__(default_attrs) def render(self, name, value, attrs=None, renderer=None): if value is None: @@ -109,7 +109,7 @@ class Meta: # base Widget class others will inherit from -class 
Widget(object): +class Widget: def __init__(self, *args, **kwargs): self.title = 'Base Widget' self.form = None @@ -130,7 +130,7 @@ def get_option_form(self): class PageBreak(Widget): def __init__(self, *args, **kwargs): - super(PageBreak, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Page Break' self.form = None self.multiple = "true" @@ -151,7 +151,7 @@ def get_option_form(self): class ReportOptions(Widget): def __init__(self, *args, **kwargs): - super(ReportOptions, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Report Options' self.form = CustomReportOptionsForm() self.extra_help = "Choose additional report options. These will apply to the overall report." @@ -172,7 +172,7 @@ def get_option_form(self): class CoverPage(Widget): def __init__(self, *args, **kwargs): - super(CoverPage, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Cover Page' self.form = CoverPageForm() self.help_text = "The cover page includes a page break after its content." @@ -197,7 +197,7 @@ def get_option_form(self): class TableOfContents(Widget): def __init__(self, *args, **kwargs): - super(TableOfContents, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Table Of Contents' self.form = TableOfContentsForm() self.help_text = "The table of contents includes a page break after its content." 
@@ -220,7 +220,7 @@ def get_option_form(self): class WYSIWYGContent(Widget): def __init__(self, *args, **kwargs): - super(WYSIWYGContent, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'WYSIWYG Content' self.form = WYSIWYGContentForm() self.multiple = 'true' @@ -267,7 +267,7 @@ def __init__(self, *args, **kwargs): else: self.finding_images = False - super(FindingList, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Finding List' if hasattr(self.findings, 'form'): @@ -342,7 +342,7 @@ def __init__(self, *args, **kwargs): else: self.finding_images = False - super(EndpointList, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.title = 'Endpoint List' self.form = self.endpoints.form diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index 8034ce713c4..7fcaa796c9a 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -208,8 +208,7 @@ def accepted_message_creator(risk_acceptance, heads_up_days=0): def unaccepted_message_creator(risk_acceptance, heads_up_days=0): if risk_acceptance: - return 'finding was unaccepted/deleted from risk acceptance [(%s)|%s]' % \ - (escape_for_jira(risk_acceptance.name), + return 'finding was unaccepted/deleted from risk acceptance [({})|{}]'.format(escape_for_jira(risk_acceptance.name), get_full_url(reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))) else: return 'Finding is no longer risk accepted' diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 2126ad82d9f..add788caaa7 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1261,10 +1261,10 @@ def saml2_attrib_map_format(dict): env_hashcode_fields_per_scanner = json.loads(env('DD_HASHCODE_FIELDS_PER_SCANNER')) for key, value in env_hashcode_fields_per_scanner.items(): if key in HASHCODE_FIELDS_PER_SCANNER: - logger.info("Replacing {} with value 
{} (previously set to {}) from env var DD_HASHCODE_FIELDS_PER_SCANNER".format(key, value, HASHCODE_FIELDS_PER_SCANNER[key])) + logger.info(f"Replacing {key} with value {value} (previously set to {HASHCODE_FIELDS_PER_SCANNER[key]}) from env var DD_HASHCODE_FIELDS_PER_SCANNER") HASHCODE_FIELDS_PER_SCANNER[key] = value if key not in HASHCODE_FIELDS_PER_SCANNER: - logger.info("Adding {} with value {} from env var DD_HASHCODE_FIELDS_PER_SCANNER".format(key, value)) + logger.info(f"Adding {key} with value {value} from env var DD_HASHCODE_FIELDS_PER_SCANNER") HASHCODE_FIELDS_PER_SCANNER[key] = value @@ -1477,10 +1477,10 @@ def saml2_attrib_map_format(dict): env_dedup_algorithm_per_parser = json.loads(env('DD_DEDUPLICATION_ALGORITHM_PER_PARSER')) for key, value in env_dedup_algorithm_per_parser.items(): if key in DEDUPLICATION_ALGORITHM_PER_PARSER: - logger.info("Replacing {} with value {} (previously set to {}) from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER".format(key, value, DEDUPLICATION_ALGORITHM_PER_PARSER[key])) + logger.info(f"Replacing {key} with value {value} (previously set to {DEDUPLICATION_ALGORITHM_PER_PARSER[key]}) from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER") DEDUPLICATION_ALGORITHM_PER_PARSER[key] = value if key not in DEDUPLICATION_ALGORITHM_PER_PARSER: - logger.info("Adding {} with value {} from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER".format(key, value)) + logger.info(f"Adding {key} with value {value} from env var DD_DEDUPLICATION_ALGORITHM_PER_PARSER") DEDUPLICATION_ALGORITHM_PER_PARSER[key] = value DUPE_DELETE_MAX_PER_RUN = env('DD_DUPE_DELETE_MAX_PER_RUN') diff --git a/dojo/survey/views.py b/dojo/survey/views.py index 02fc9f74d59..02191342a85 100644 --- a/dojo/survey/views.py +++ b/dojo/survey/views.py @@ -484,8 +484,7 @@ def create_question(request): error = True if '_popup' in request.GET and not error: - resp = '' \ - % (escape(created_question._get_pk_val()), escape(created_question.text)) + resp = f'' resp += '' return 
HttpResponse(resp) @@ -577,8 +576,7 @@ def add_choices(request): if '_popup' in request.GET: resp = '' if created: - resp = '' \ - % (escape(choice._get_pk_val()), escape(choice.label)) + resp = f'' resp += '' return HttpResponse(resp) add_breadcrumb(title="Add Choice", top_level=False, request=request) diff --git a/dojo/tasks.py b/dojo/tasks.py index 50d48049a80..25d258f9ee8 100644 --- a/dojo/tasks.py +++ b/dojo/tasks.py @@ -43,7 +43,7 @@ def add_alerts(self, runinterval): for eng in stale_engagements: create_notification(event='stale_engagement', title='Stale Engagement: %s' % eng.name, - description='The engagement "%s" is stale. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")), + description='The engagement "{}" is stale. Target end was {}.'.format(eng.name, eng.target_end.strftime("%b. %d, %Y")), url=reverse('view_engagement', args=(eng.id,)), recipients=[eng.lead]) @@ -57,7 +57,7 @@ def add_alerts(self, runinterval): for eng in unclosed_engagements: create_notification(event='auto_close_engagement', title=eng.name, - description='The engagement "%s" has auto-closed. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")), + description='The engagement "{}" has auto-closed. Target end was {}.'.format(eng.name, eng.target_end.strftime("%b. 
%d, %Y")), url=reverse('view_engagement', args=(eng.id,)), recipients=[eng.lead]) @@ -139,8 +139,8 @@ def async_dupe_delete(*args, **kwargs): originals_with_too_many_duplicates = Finding.objects.filter(id__in=originals_with_too_many_duplicates_ids).order_by('id') # prefetch to make it faster - originals_with_too_many_duplicates = originals_with_too_many_duplicates.prefetch_related((Prefetch("original_finding", - queryset=Finding.objects.filter(duplicate=True).order_by('date')))) + originals_with_too_many_duplicates = originals_with_too_many_duplicates.prefetch_related(Prefetch("original_finding", + queryset=Finding.objects.filter(duplicate=True).order_by('date'))) total_deleted_count = 0 for original in originals_with_too_many_duplicates: @@ -148,7 +148,7 @@ def async_dupe_delete(*args, **kwargs): dupe_count = len(duplicate_list) - dupe_max for finding in duplicate_list: - deduplicationLogger.debug('deleting finding {}:{} ({}))'.format(finding.id, finding.title, finding.hash_code)) + deduplicationLogger.debug(f'deleting finding {finding.id}:{finding.title} ({finding.hash_code}))') finding.delete() total_deleted_count += 1 dupe_count -= 1 @@ -177,7 +177,7 @@ def async_sla_compute_and_notify_task(*args, **kwargs): sla_compute_and_notify(*args, **kwargs) except Exception as e: logger.exception(e) - logger.error("An unexpected error was thrown calling the SLA code: {}".format(e)) + logger.error(f"An unexpected error was thrown calling the SLA code: {e}") @app.task diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index f49fba54bc9..221daf04132 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -135,7 +135,7 @@ def dojo_version(): version = __version__ if settings.FOOTER_VERSION: version = settings.FOOTER_VERSION - return "v. {}".format(version) + return f"v. 
{version}" @register.simple_tag diff --git a/dojo/templatetags/navigation_tags.py b/dojo/templatetags/navigation_tags.py index a6d53f00820..e446dd842de 100644 --- a/dojo/templatetags/navigation_tags.py +++ b/dojo/templatetags/navigation_tags.py @@ -69,7 +69,7 @@ def dojo_sort(request, display='Name', value='title', default=None): return safe(link) -class PaginationNav(object): +class PaginationNav: def __init__(self, page_number=None, display=None, is_current=False): self.page_number = page_number self.is_current = is_current diff --git a/dojo/test/views.py b/dojo/test/views.py index ca3d60e16fa..0ddd4464040 100644 --- a/dojo/test/views.py +++ b/dojo/test/views.py @@ -346,7 +346,7 @@ def copy_test(request, tid): extra_tags='alert-success') create_notification(event='other', title='Copying of %s' % test.title, - description='The test "%s" was copied by %s to %s' % (test.title, request.user, engagement.name), + description=f'The test "{test.title}" was copied by {request.user} to {engagement.name}', product=product, url=request.build_absolute_uri(reverse('view_test', args=(test_copy.id,))), recipients=[test.engagement.lead], @@ -412,7 +412,7 @@ def test_ics(request, tid): _("Set aside for test %(test_type_name)s, on product %(product_name)s. 
Additional detail can be found at %(detail_url)s") % { 'test_type_name': test.test_type.name, 'product_name': test.engagement.product.name, - 'detail_url': request.build_absolute_uri((reverse("view_test", args=(test.id,)))) + 'detail_url': request.build_absolute_uri(reverse("view_test", args=(test.id,))) }, uid) output = cal.serialize() diff --git a/dojo/tools/acunetix/parse_acunetix360_json.py b/dojo/tools/acunetix/parse_acunetix360_json.py index f9fff0b109c..0f8c01c5817 100644 --- a/dojo/tools/acunetix/parse_acunetix360_json.py +++ b/dojo/tools/acunetix/parse_acunetix360_json.py @@ -5,7 +5,7 @@ from dojo.models import Endpoint, Finding -class AcunetixJSONParser(object): +class AcunetixJSONParser: """This parser is written for Acunetix JSON Findings.""" def get_findings(self, filename, test): dupes = dict() diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py index 12ca4100a03..529da45e998 100644 --- a/dojo/tools/acunetix/parse_acunetix_xml.py +++ b/dojo/tools/acunetix/parse_acunetix_xml.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -class AcunetixXMLParser(object): +class AcunetixXMLParser: """This parser is written for Acunetix XML reports""" def get_findings(self, filename, test): dupes = dict() @@ -54,7 +54,7 @@ def get_findings(self, filename, test): for reference in item.findall("References/Reference"): url = reference.findtext("URL") db = reference.findtext("Database") or url - references.append(" * [{}]({})".format(db, url)) + references.append(f" * [{db}]({url})") if len(references) > 0: finding.references = "\n".join(references) if item.findtext("CVSS3/Descriptor"): @@ -128,9 +128,7 @@ def get_findings(self, filename, test): find.unsaved_req_resp.extend(finding.unsaved_req_resp) find.nb_occurences += finding.nb_occurences logger.debug( - "Duplicate finding : {defectdojo_title}".format( - defectdojo_title=finding.title - ) + f"Duplicate finding : {finding.title}" ) else: dupes[dupe_key] = finding 
diff --git a/dojo/tools/acunetix/parser.py b/dojo/tools/acunetix/parser.py index 9d0ee771230..272f295acf4 100644 --- a/dojo/tools/acunetix/parser.py +++ b/dojo/tools/acunetix/parser.py @@ -2,7 +2,7 @@ from dojo.tools.acunetix.parse_acunetix_xml import AcunetixXMLParser -class AcunetixParser(object): +class AcunetixParser: """Parser for Acunetix XML files and Acunetix 360 JSON files.""" def get_scan_types(self): diff --git a/dojo/tools/anchore_engine/parser.py b/dojo/tools/anchore_engine/parser.py index 3b9e0bc546d..f734b4a4890 100644 --- a/dojo/tools/anchore_engine/parser.py +++ b/dojo/tools/anchore_engine/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class AnchoreEngineParser(object): +class AnchoreEngineParser: def get_scan_types(self): return ["Anchore Engine Scan"] diff --git a/dojo/tools/anchore_enterprise/parser.py b/dojo/tools/anchore_enterprise/parser.py index 899e600a51e..82dad174d74 100644 --- a/dojo/tools/anchore_enterprise/parser.py +++ b/dojo/tools/anchore_enterprise/parser.py @@ -69,9 +69,7 @@ def get_findings(self, filename, test): test=test, description=description, severity=severity, - references="Policy ID: {}\nTrigger ID: {}".format( - policyid, triggerid - ), + references=f"Policy ID: {policyid}\nTrigger ID: {triggerid}", file_path=search_filepath(description), component_name=repo, component_version=tag, @@ -86,9 +84,7 @@ def get_findings(self, filename, test): items.append(find) except (KeyError, IndexError) as err: raise ValueError( - "Invalid format: {} key not found".format( - err - ) + f"Invalid format: {err} key not found" ) except AttributeError as err: # import empty policies without error (e.g. 
policies or images diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py index 9854bf34d5b..ad04aaf3b75 100644 --- a/dojo/tools/anchore_grype/parser.py +++ b/dojo/tools/anchore_grype/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class AnchoreGrypeParser(object): +class AnchoreGrypeParser: """Anchore Grype JSON report format generated with `-o json` option. command: `grype defectdojo/defectdojo-django:1.13.1 -o json > many_vulns.json` diff --git a/dojo/tools/anchorectl_policies/parser.py b/dojo/tools/anchorectl_policies/parser.py index 1df2fa94f95..3de3f5d0c19 100644 --- a/dojo/tools/anchorectl_policies/parser.py +++ b/dojo/tools/anchorectl_policies/parser.py @@ -54,9 +54,7 @@ def get_findings(self, filename, test): test=test, description=description, severity=severity, - references="Policy ID: {}\nTrigger ID: {}".format( - policy_id, trigger_id - ), + references=f"Policy ID: {policy_id}\nTrigger ID: {trigger_id}", file_path=search_filepath(description), component_name=repo, component_version=tag, @@ -69,7 +67,7 @@ def get_findings(self, filename, test): items.append(find) except (KeyError, IndexError) as err: raise ValueError( - "Invalid format: {} key not found".format(err) + f"Invalid format: {err} key not found" ) except AttributeError as err: # import empty policies without error (e.g. 
policies or images diff --git a/dojo/tools/anchorectl_vulns/parser.py b/dojo/tools/anchorectl_vulns/parser.py index 77c350b56bd..652ac821afb 100644 --- a/dojo/tools/anchorectl_vulns/parser.py +++ b/dojo/tools/anchorectl_vulns/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class AnchoreCTLVulnsParser(object): +class AnchoreCTLVulnsParser: def get_scan_types(self): return ["AnchoreCTL Vuln Report"] diff --git a/dojo/tools/api_blackduck/api_client.py b/dojo/tools/api_blackduck/api_client.py index fb8058bc8ba..a2895dc5bee 100644 --- a/dojo/tools/api_blackduck/api_client.py +++ b/dojo/tools/api_blackduck/api_client.py @@ -17,9 +17,7 @@ def __init__(self, tool_config): ) else: raise ValueError( - "Authentication type {} not supported".format( - tool_config.authentication_type - ) + f"Authentication type {tool_config.authentication_type} not supported" ) # TODO diff --git a/dojo/tools/api_blackduck/importer.py b/dojo/tools/api_blackduck/importer.py index cf7a143bb9c..af0917a0e5e 100644 --- a/dojo/tools/api_blackduck/importer.py +++ b/dojo/tools/api_blackduck/importer.py @@ -4,7 +4,7 @@ from .api_client import BlackduckAPI -class BlackduckApiImporter(object): +class BlackduckApiImporter: """ Import from BlackDuck API """ diff --git a/dojo/tools/api_blackduck/parser.py b/dojo/tools/api_blackduck/parser.py index be76f28c6ef..0be66807877 100644 --- a/dojo/tools/api_blackduck/parser.py +++ b/dojo/tools/api_blackduck/parser.py @@ -7,7 +7,7 @@ SCAN_TYPE_ID = "BlackDuck API" -class ApiBlackduckParser(object): +class ApiBlackduckParser: """ Import from Synopsys BlackDuck API /findings """ diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py index ddb73f0b0ae..f1207bd77a4 100644 --- a/dojo/tools/api_bugcrowd/api_client.py +++ b/dojo/tools/api_bugcrowd/api_client.py @@ -19,14 +19,12 @@ def __init__(self, tool_config): if tool_config.authentication_type == "API": self.api_token = tool_config.api_key self.session.headers.update( - 
{"Authorization": "Token {}".format(self.api_token)} + {"Authorization": f"Token {self.api_token}"} ) self.session.headers.update(self.default_headers) else: raise Exception( - "bugcrowd Authentication type {} not supported".format( - tool_config.authentication_type - ) + f"bugcrowd Authentication type {tool_config.authentication_type} not supported" ) def get_findings(self, program, target): @@ -53,9 +51,7 @@ def get_findings(self, program, target): else: params_encoded = urlencode(params_default) - next = "{}/submissions?{}".format( - self.bugcrowd_api_url, params_encoded - ) + next = f"{self.bugcrowd_api_url}/submissions?{params_encoded}" while next != "": response = self.session.get(url=next) response.raise_for_status() @@ -79,13 +75,13 @@ def get_findings(self, program, target): def test_connection(self): # Request programs response_programs = self.session.get( - url="{}/programs".format(self.bugcrowd_api_url) + url=f"{self.bugcrowd_api_url}/programs" ) response_programs.raise_for_status() # Request submissions to validate the org token response_subs = self.session.get( - url="{}/submissions".format(self.bugcrowd_api_url) + url=f"{self.bugcrowd_api_url}/submissions" ) response_subs.raise_for_status() if response_programs.ok and response_subs.ok: @@ -99,7 +95,7 @@ def test_connection(self): ) # Request targets to validate the org token response_targets = self.session.get( - url="{}/targets".format(self.bugcrowd_api_url) + url=f"{self.bugcrowd_api_url}/targets" ) response_targets.raise_for_status() if response_targets.ok: @@ -119,16 +115,12 @@ def test_connection(self): else: raise Exception( "Bugcrowd API test not successful, no targets were defined in Bugcrowd which is used for " - "filtering, check your configuration, HTTP response was: {}".format( - response_targets.text - ) + f"filtering, check your configuration, HTTP response was: {response_targets.text}" ) else: raise Exception( "Bugcrowd API test not successful, could not retrieve the programs or 
submissions, check your " - "configuration, HTTP response for programs was: {}, HTTP response for submissions was: {}".format( - response_programs.text, response_subs.text - ) + f"configuration, HTTP response for programs was: {response_programs.text}, HTTP response for submissions was: {response_subs.text}" ) def test_product_connection(self, api_scan_configuration): diff --git a/dojo/tools/api_bugcrowd/importer.py b/dojo/tools/api_bugcrowd/importer.py index 3e41b6be20a..56b7ca1a228 100644 --- a/dojo/tools/api_bugcrowd/importer.py +++ b/dojo/tools/api_bugcrowd/importer.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -class BugcrowdApiImporter(object): +class BugcrowdApiImporter: """ Import from Bugcrowd API """ @@ -14,9 +14,7 @@ class BugcrowdApiImporter(object): def get_findings(self, test): client, config = self.prepare_client(test) logger.debug( - "Fetching submissions program {} and target {}".format( - str(config.service_key_1), str(config.service_key_2) - ) + f"Fetching submissions program {str(config.service_key_1)} and target {str(config.service_key_2)}" ) submissions_paged = client.get_findings( @@ -29,7 +27,7 @@ def get_findings(self, test): for page in submissions_paged: submissions += page counter += 1 - logger.debug("{} Bugcrowd submissions pages fetched".format(counter)) + logger.debug(f"{counter} Bugcrowd submissions pages fetched") return submissions, config diff --git a/dojo/tools/api_bugcrowd/parser.py b/dojo/tools/api_bugcrowd/parser.py index 17cc04f84d5..df119ed576e 100644 --- a/dojo/tools/api_bugcrowd/parser.py +++ b/dojo/tools/api_bugcrowd/parser.py @@ -16,7 +16,7 @@ logger = logging.getLogger(__name__) -class ApiBugcrowdParser(object): +class ApiBugcrowdParser: """ Import from Bugcrowd API /submissions """ @@ -158,15 +158,11 @@ def get_findings(self, file, test): finding.unsaved_endpoints = [bug_endpoint] except Exception as e: logger.error( - "{} bug url from bugcrowd failed to parse to endpoint, error= {}".format( - 
str(bug_endpoint), e - ) + f"{str(bug_endpoint)} bug url from bugcrowd failed to parse to endpoint, error= {e}" ) except ValidationError: logger.error( - "Broken Bugcrowd endpoint {} was skipped.".format( - bug_endpoint.host - ) + f"Broken Bugcrowd endpoint {bug_endpoint.host} was skipped." ) findings.append(finding) diff --git a/dojo/tools/api_cobalt/api_client.py b/dojo/tools/api_cobalt/api_client.py index f51cef1dbee..e18ed6f01f6 100644 --- a/dojo/tools/api_cobalt/api_client.py +++ b/dojo/tools/api_cobalt/api_client.py @@ -15,9 +15,7 @@ def __init__(self, tool_config): self.org_token = tool_config.extras else: raise Exception( - "Cobalt.io Authentication type {} not supported".format( - tool_config.authentication_type - ) + f"Cobalt.io Authentication type {tool_config.authentication_type} not supported" ) def get_asset(self, asset_id): @@ -32,12 +30,12 @@ def get_asset(self, asset_id): if asset["resource"]["id"] == asset_id: return asset - raise Exception("Asset {} not found in organisation".format(asset_id)) + raise Exception(f"Asset {asset_id} not found in organisation") def get_assets(self): """Returns all org assets""" response = self.session.get( - url="{}/assets?limit=1000".format(self.cobalt_api_url), + url=f"{self.cobalt_api_url}/assets?limit=1000", headers=self.get_headers(), ) @@ -57,9 +55,7 @@ def get_findings(self, asset_id): :return: """ response = self.session.get( - url="{}/findings?limit=1000&asset={}".format( - self.cobalt_api_url, asset_id - ), + url=f"{self.cobalt_api_url}/findings?limit=1000&asset={asset_id}", headers=self.get_headers(), ) @@ -75,13 +71,13 @@ def get_findings(self, asset_id): def test_connection(self): # Request orgs for the org name response_orgs = self.session.get( - url="{}/orgs".format(self.cobalt_api_url), + url=f"{self.cobalt_api_url}/orgs", headers=self.get_headers(), ) # Request assets to validate the org token response_assets = self.session.get( - url="{}/assets".format(self.cobalt_api_url), + 
url=f"{self.cobalt_api_url}/assets", headers=self.get_headers(), ) @@ -111,7 +107,7 @@ def test_product_connection(self, api_scan_configuration): def get_headers(self): headers = { "accept": "application/vnd.cobalt.v1+json", - "Authorization": "Bearer {}".format(self.api_token), + "Authorization": f"Bearer {self.api_token}", "User-Agent": "DefectDojo", } diff --git a/dojo/tools/api_cobalt/importer.py b/dojo/tools/api_cobalt/importer.py index 93ba6a06e0c..9f3a291f4af 100644 --- a/dojo/tools/api_cobalt/importer.py +++ b/dojo/tools/api_cobalt/importer.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -class CobaltApiImporter(object): +class CobaltApiImporter: """ Import from Cobalt.io API """ diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py index 0e77b0d279b..6be1a4e855a 100644 --- a/dojo/tools/api_cobalt/parser.py +++ b/dojo/tools/api_cobalt/parser.py @@ -8,7 +8,7 @@ SCAN_COBALTIO_API = "Cobalt.io API Import" -class ApiCobaltParser(object): +class ApiCobaltParser: """ Import from Cobalt.io API /findings """ diff --git a/dojo/tools/api_edgescan/api_client.py b/dojo/tools/api_edgescan/api_client.py index 8e2f11ad095..a49c7686ebf 100644 --- a/dojo/tools/api_edgescan/api_client.py +++ b/dojo/tools/api_edgescan/api_client.py @@ -3,7 +3,7 @@ from json.decoder import JSONDecodeError -class EdgescanAPI(object): +class EdgescanAPI: """ A simple client for the Edgescan API """ @@ -17,9 +17,7 @@ def __init__(self, tool_config): self.options = self.get_extra_options(tool_config) else: raise Exception( - "Edgescan Authentication type {} not supported".format( - tool_config.authentication_type - ) + f"Edgescan Authentication type {tool_config.authentication_type} not supported" ) @staticmethod diff --git a/dojo/tools/api_edgescan/importer.py b/dojo/tools/api_edgescan/importer.py index dc97edf82a8..5857b188ab3 100644 --- a/dojo/tools/api_edgescan/importer.py +++ b/dojo/tools/api_edgescan/importer.py @@ -3,7 +3,7 @@ from .api_client import 
EdgescanAPI -class EdgescanImporter(object): +class EdgescanImporter: """ Import from Edgescan API """ diff --git a/dojo/tools/api_edgescan/parser.py b/dojo/tools/api_edgescan/parser.py index 3e186e6d6b0..8442c7cc666 100644 --- a/dojo/tools/api_edgescan/parser.py +++ b/dojo/tools/api_edgescan/parser.py @@ -8,7 +8,7 @@ SCANTYPE_EDGESCAN = "Edgescan Scan" -class ApiEdgescanParser(object): +class ApiEdgescanParser: """ Import from Edgescan API or JSON file """ diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py index 31a5c62e77c..6d4d1577f59 100644 --- a/dojo/tools/api_sonarqube/importer.py +++ b/dojo/tools/api_sonarqube/importer.py @@ -14,7 +14,7 @@ logger = logging.getLogger(__name__) -class SonarQubeApiImporter(object): +class SonarQubeApiImporter: """ This class imports from SonarQube (SQ) all open/confirmed SQ issues related to the project related to the test as findings. diff --git a/dojo/tools/api_sonarqube/parser.py b/dojo/tools/api_sonarqube/parser.py index f4e7162d311..8a57a8d80b3 100644 --- a/dojo/tools/api_sonarqube/parser.py +++ b/dojo/tools/api_sonarqube/parser.py @@ -4,7 +4,7 @@ SCAN_SONARQUBE_API = "SonarQube API Import" -class ApiSonarQubeParser(object): +class ApiSonarQubeParser: def get_scan_types(self): return [SCAN_SONARQUBE_API] diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py index b4815687730..4cbf28dc3e2 100644 --- a/dojo/tools/api_sonarqube/updater.py +++ b/dojo/tools/api_sonarqube/updater.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -class SonarQubeApiUpdater(object): +class SonarQubeApiUpdater: """ This class updates in SonarQube, a SonarQube issue previously imported as a DefectDojo Findings. 
This class maps the finding status to a SQ issue status and later on it transitions the issue @@ -119,9 +119,7 @@ def update_sonarqube_finding(self, finding): return logger.debug( - "Checking if finding '{}' needs to be updated in SonarQube".format( - finding - ) + f"Checking if finding '{finding}' needs to be updated in SonarQube" ) client, _ = SonarQubeApiImporter.prepare_client(finding.test) @@ -142,9 +140,7 @@ def update_sonarqube_finding(self, finding): current_status = issue.get("status") logger.debug( - "--> SQ Current status: {}. Current target status: {}".format( - current_status, target_status - ) + f"--> SQ Current status: {current_status}. Current target status: {target_status}" ) transitions = self.get_sonarqube_required_transitions_for( @@ -152,7 +148,7 @@ def update_sonarqube_finding(self, finding): ) if transitions: logger.info( - "Updating finding '{}' in SonarQube".format(finding) + f"Updating finding '{finding}' in SonarQube" ) for transition in transitions: diff --git a/dojo/tools/api_sonarqube/updater_from_source.py b/dojo/tools/api_sonarqube/updater_from_source.py index 137c55dbd79..ee8fdb33d6a 100644 --- a/dojo/tools/api_sonarqube/updater_from_source.py +++ b/dojo/tools/api_sonarqube/updater_from_source.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -class SonarQubeApiUpdaterFromSource(object): +class SonarQubeApiUpdaterFromSource: """ The responsibility of this class is to update the Finding status if current SonarQube issue status doesn't match. @@ -40,9 +40,7 @@ def update(self, finding): current_status = issue.get("resolution") or issue.get("status") current_finding_status = self.get_sonarqube_status_for(finding) logger.debug( - "--> SQ Current status: {}. Finding status: {}".format( - current_status, current_finding_status - ) + f"--> SQ Current status: {current_status}. 
Finding status: {current_finding_status}" ) if ( @@ -50,9 +48,7 @@ def update(self, finding): and current_finding_status != current_status ): logger.info( - "Original SonarQube issue '{}' has changed. Updating DefectDojo finding '{}'...".format( - sonarqube_issue, finding - ) + f"Original SonarQube issue '{sonarqube_issue}' has changed. Updating DefectDojo finding '{finding}'..." ) self.update_finding_status(finding, current_status) diff --git a/dojo/tools/api_vulners/api_client.py b/dojo/tools/api_vulners/api_client.py index c12996abbc5..9441fada342 100644 --- a/dojo/tools/api_vulners/api_client.py +++ b/dojo/tools/api_vulners/api_client.py @@ -16,9 +16,7 @@ def __init__(self, tool_config): self.vulners_api_url = tool_config.url else: raise Exception( - "Vulners.com Authentication type {} not supported".format( - tool_config.authentication_type - ) + f"Vulners.com Authentication type {tool_config.authentication_type} not supported" ) def get_client(self): diff --git a/dojo/tools/api_vulners/importer.py b/dojo/tools/api_vulners/importer.py index fef0d40c669..0017122f97d 100644 --- a/dojo/tools/api_vulners/importer.py +++ b/dojo/tools/api_vulners/importer.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -class VulnersImporter(object): +class VulnersImporter: """ Import from Vulners API """ diff --git a/dojo/tools/api_vulners/parser.py b/dojo/tools/api_vulners/parser.py index deba3c5762f..50674e5130a 100644 --- a/dojo/tools/api_vulners/parser.py +++ b/dojo/tools/api_vulners/parser.py @@ -17,7 +17,7 @@ } -class ApiVulnersParser(object): +class ApiVulnersParser: """Parser that can load data from Vulners Scanner API""" def get_scan_types(self): diff --git a/dojo/tools/appspider/parser.py b/dojo/tools/appspider/parser.py index 4d3e5eccc7b..6b76df3718a 100644 --- a/dojo/tools/appspider/parser.py +++ b/dojo/tools/appspider/parser.py @@ -4,7 +4,7 @@ from dojo.models import Endpoint, Finding -class AppSpiderParser(object): +class AppSpiderParser: """Parser for Rapid7 
AppSpider reports""" def get_scan_types(self): diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py index d29d6128a6a..d6ea61edc9a 100644 --- a/dojo/tools/aqua/parser.py +++ b/dojo/tools/aqua/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class AquaParser(object): +class AquaParser: def get_scan_types(self): return ["Aqua Scan"] @@ -61,7 +61,7 @@ def get_item(resource, vuln, test): score = vuln.get("aqua_severity") severity = aqua_severity_of(score) used_for_classification = ( - "Aqua security score ({}) used for classification.\n".format(score) + f"Aqua security score ({score}) used for classification.\n" ) severity_justification = vuln.get("aqua_severity_classification") if "nvd_score_v3" in vuln: @@ -70,17 +70,17 @@ def get_item(resource, vuln, test): if "aqua_score" in vuln: score = vuln.get("aqua_score") used_for_classification = ( - "Aqua score ({}) used for classification.\n".format(score) + f"Aqua score ({score}) used for classification.\n" ) elif "vendor_score" in vuln: score = vuln.get("vendor_score") used_for_classification = ( - "Vendor score ({}) used for classification.\n".format(score) + f"Vendor score ({score}) used for classification.\n" ) elif "nvd_score_v3" in vuln: score = vuln.get("nvd_score_v3") used_for_classification = ( - "NVD score v3 ({}) used for classification.\n".format(score) + f"NVD score v3 ({score}) used for classification.\n" ) severity_justification += "\nNVD v3 vectors: {}".format( vuln.get("nvd_vectors_v3") @@ -90,13 +90,13 @@ def get_item(resource, vuln, test): elif "nvd_score" in vuln: score = vuln.get("nvd_score") used_for_classification = ( - "NVD score v2 ({}) used for classification.\n".format(score) + f"NVD score v2 ({score}) used for classification.\n" ) severity_justification += "\nNVD v2 vectors: {}".format( vuln.get("nvd_vectors") ) severity = severity_of(score) - severity_justification += "\n{}".format(used_for_classification) + severity_justification += f"\n{used_for_classification}" 
finding = Finding( title=vulnerability_id diff --git a/dojo/tools/arachni/parser.py b/dojo/tools/arachni/parser.py index 22e67fe1b89..334b671b01e 100755 --- a/dojo/tools/arachni/parser.py +++ b/dojo/tools/arachni/parser.py @@ -7,7 +7,7 @@ from dojo.models import Endpoint, Finding -class ArachniParser(object): +class ArachniParser: """Arachni Web Scanner (http://arachni-scanner.com/wiki) Reports are generated with arachni_reporter tool: diff --git a/dojo/tools/asff/parser.py b/dojo/tools/asff/parser.py index c5831d025b3..1f4d96f5679 100644 --- a/dojo/tools/asff/parser.py +++ b/dojo/tools/asff/parser.py @@ -13,7 +13,7 @@ } -class AsffParser(object): +class AsffParser: def get_scan_types(self): return ["AWS Security Finding Format (ASFF) Scan"] diff --git a/dojo/tools/auditjs/parser.py b/dojo/tools/auditjs/parser.py index 69031dc16bb..6249f1b045c 100644 --- a/dojo/tools/auditjs/parser.py +++ b/dojo/tools/auditjs/parser.py @@ -6,7 +6,7 @@ import cvss.parser -class AuditJSParser(object): +class AuditJSParser: """Parser for AuditJS Scan tool""" def get_scan_types(self): diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py index b7320039308..9659262fb8b 100644 --- a/dojo/tools/aws_prowler/parser.py +++ b/dojo/tools/aws_prowler/parser.py @@ -10,7 +10,7 @@ from dojo.models import Finding -class AWSProwlerParser(object): +class AWSProwlerParser: def get_scan_types(self): return ["AWS Prowler Scan"] diff --git a/dojo/tools/aws_prowler_v3/parser.py b/dojo/tools/aws_prowler_v3/parser.py index c36c87ad9be..e0e6910fd7c 100644 --- a/dojo/tools/aws_prowler_v3/parser.py +++ b/dojo/tools/aws_prowler_v3/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding -class AWSProwlerV3Parser(object): +class AWSProwlerV3Parser: SCAN_TYPE = ["AWS Prowler V3"] def get_scan_types(self): diff --git a/dojo/tools/aws_scout2/parser.py b/dojo/tools/aws_scout2/parser.py index 55b6d31afaa..8fd45eba17c 100644 --- a/dojo/tools/aws_scout2/parser.py +++ 
b/dojo/tools/aws_scout2/parser.py @@ -6,7 +6,7 @@ from html2text import html2text -class AWSScout2Parser(object): +class AWSScout2Parser: # FIXME bad very bad item_data = "" pdepth = 0 @@ -31,20 +31,17 @@ def get_findings(self, filename, test): test_description = "" aws_account_id = data["aws_account_id"] - test_description = "%s **AWS Account:** %s\n" % ( - test_description, - aws_account_id, - ) + test_description = f"{test_description} **AWS Account:** {aws_account_id}\n" last_run = data["last_run"] - test_description = "%s **Ruleset:** %s\n" % ( + test_description = "{} **Ruleset:** {}\n".format( test_description, last_run["ruleset_name"], ) - test_description = "%s **Ruleset Description:** %s\n" % ( + test_description = "{} **Ruleset Description:** {}\n".format( test_description, last_run["ruleset_about"], ) - test_description = "%s **Command:** %s\n" % ( + test_description = "{} **Command:** {}\n".format( test_description, last_run["cmd"], ) @@ -52,27 +49,24 @@ def get_findings(self, filename, test): # Summary for AWS Services test_description = "%s\n**AWS Services** \n\n" % (test_description) for service, items in list(last_run["summary"].items()): - test_description = "%s\n**%s** \n" % ( - test_description, - service.upper(), - ) - test_description = "%s\n* **Checked Items:** %s\n" % ( + test_description = f"{test_description}\n**{service.upper()}** \n" + test_description = "{}\n* **Checked Items:** {}\n".format( test_description, items["checked_items"], ) - test_description = "%s* **Flagged Items:** %s\n" % ( + test_description = "{}* **Flagged Items:** {}\n".format( test_description, items["flagged_items"], ) - test_description = "%s* **Max Level:** %s\n" % ( + test_description = "{}* **Max Level:** {}\n".format( test_description, items["max_level"], ) - test_description = "%s* **Resource Count:** %s\n" % ( + test_description = "{}* **Resource Count:** {}\n".format( test_description, items["resources_count"], ) - test_description = "%s* **Rules Count:** 
%s\n\n" % ( + test_description = "{}* **Rules Count:** {}\n\n".format( test_description, items["rules_count"], ) @@ -178,7 +172,7 @@ def tabs(n): self.item_data = ( self.item_data + self.formatview(depth) - + "**%s:** %s\n\n" % (key.title(), src) + + f"**{key.title()}:** {src}\n\n" ) else: self.item_data = ( diff --git a/dojo/tools/awssecurityhub/compliance.py b/dojo/tools/awssecurityhub/compliance.py index 3898442d69e..914a196b802 100644 --- a/dojo/tools/awssecurityhub/compliance.py +++ b/dojo/tools/awssecurityhub/compliance.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class Compliance(object): +class Compliance: def get_item(self, finding: dict, test): finding_id = finding.get("Id", "") title = finding.get("Title", "") diff --git a/dojo/tools/awssecurityhub/guardduty.py b/dojo/tools/awssecurityhub/guardduty.py index 3b22498ddc3..7a663dcf1d8 100644 --- a/dojo/tools/awssecurityhub/guardduty.py +++ b/dojo/tools/awssecurityhub/guardduty.py @@ -2,7 +2,7 @@ from dojo.models import Finding, Endpoint -class GuardDuty(object): +class GuardDuty: def get_item(self, finding: dict, test): finding_id = finding.get("Id", "") title = finding.get("Title", "") diff --git a/dojo/tools/awssecurityhub/inspector.py b/dojo/tools/awssecurityhub/inspector.py index 2c4c79db4ed..ce0b7701adb 100644 --- a/dojo/tools/awssecurityhub/inspector.py +++ b/dojo/tools/awssecurityhub/inspector.py @@ -2,7 +2,7 @@ from dojo.models import Finding, Endpoint -class Inspector(object): +class Inspector: def get_item(self, finding: dict, test): finding_id = finding.get("Id", "") title = finding.get("Title", "") diff --git a/dojo/tools/awssecurityhub/parser.py b/dojo/tools/awssecurityhub/parser.py index 7380ece6954..d7110c6daf7 100644 --- a/dojo/tools/awssecurityhub/parser.py +++ b/dojo/tools/awssecurityhub/parser.py @@ -4,7 +4,7 @@ from dojo.tools.awssecurityhub.compliance import Compliance -class AwsSecurityHubParser(object): +class AwsSecurityHubParser: def get_scan_types(self): return ["AWS 
Security Hub Scan"] diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py index 9d90519fb21..e2e9faf5ebe 100644 --- a/dojo/tools/azure_security_center_recommendations/parser.py +++ b/dojo/tools/azure_security_center_recommendations/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class AzureSecurityCenterRecommendationsParser(object): +class AzureSecurityCenterRecommendationsParser: def get_scan_types(self): return ["Azure Security Center Recommendations Scan"] diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py index 18b03967ad2..e1f83277563 100644 --- a/dojo/tools/bandit/parser.py +++ b/dojo/tools/bandit/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class BanditParser(object): +class BanditParser: def get_scan_types(self): return ["Bandit Scan"] diff --git a/dojo/tools/bearer_cli/parser.py b/dojo/tools/bearer_cli/parser.py index 9c0126c3a6c..856b752c28f 100644 --- a/dojo/tools/bearer_cli/parser.py +++ b/dojo/tools/bearer_cli/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class BearerParser(object): +class BearerParser: """ Bearer CLI tool is a SAST scanner for multiple languages """ diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py index 6cf5eb95d94..8ab2c0d89d6 100644 --- a/dojo/tools/blackduck/importer.py +++ b/dojo/tools/blackduck/importer.py @@ -33,7 +33,7 @@ def _process_csvfile(self, report): No file information then. 
""" security_issues = dict() - with open(str(report), "r") as f: + with open(str(report)) as f: security_issues = self.__partition_by_key(f) project_ids = set(security_issues.keys()) @@ -80,7 +80,7 @@ def _process_project_findings( path = file_entry_dict.get("Path") archive_context = file_entry_dict.get("Archive context") if archive_context: - full_path = "{}{}".format(archive_context, path[1:]) + full_path = f"{archive_context}{path[1:]}" else: full_path = path diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py index 804bb1bf5a7..94c35e9881a 100644 --- a/dojo/tools/blackduck/parser.py +++ b/dojo/tools/blackduck/parser.py @@ -4,7 +4,7 @@ from .importer import BlackduckImporter -class BlackduckParser(object): +class BlackduckParser: """ Can import as exported from Blackduck: - from a zip file containing a security.csv and files.csv @@ -45,16 +45,14 @@ def ingest_findings(self, normalized_findings, test): references = self.format_reference(i) dupe_key = hashlib.md5( - "{} | {}".format(title, i.vuln_source).encode("utf-8") + f"{title} | {i.vuln_source}".encode() ).hexdigest() if dupe_key in dupes: finding = dupes[dupe_key] if finding.description: finding.description += ( - "Vulnerability ID: {}\n {}\n".format( - vulnerability_id, i.vuln_source - ) + f"Vulnerability ID: {vulnerability_id}\n {i.vuln_source}\n" ) dupes[dupe_key] = finding else: @@ -87,31 +85,27 @@ def format_title(self, i): else: component_title = i.component_origin_id - return "{} - {}".format(i.vuln_id, component_title) + return f"{i.vuln_id} - {component_title}" def format_description(self, i): - description = "Published on: {}\n\n".format(str(i.published_date)) - description += "Updated on: {}\n\n".format(str(i.updated_date)) - description += "Base score: {}\n\n".format(str(i.base_score)) - description += "Exploitability: {}\n\n".format(str(i.exploitability)) - description += "Description: {}\n".format(i.description) + description = f"Published on: 
{str(i.published_date)}\n\n" + description += f"Updated on: {str(i.updated_date)}\n\n" + description += f"Base score: {str(i.base_score)}\n\n" + description += f"Exploitability: {str(i.exploitability)}\n\n" + description += f"Description: {i.description}\n" return description def format_mitigation(self, i): - mitigation = "Remediation status: {}\n".format(i.remediation_status) - mitigation += "Remediation target date: {}\n".format( - i.remediation_target_date - ) - mitigation += "Remediation actual date: {}\n".format( - i.remediation_actual_date - ) - mitigation += "Remediation comment: {}\n".format(i.remediation_comment) + mitigation = f"Remediation status: {i.remediation_status}\n" + mitigation += f"Remediation target date: {i.remediation_target_date}\n" + mitigation += f"Remediation actual date: {i.remediation_actual_date}\n" + mitigation += f"Remediation comment: {i.remediation_comment}\n" return mitigation def format_reference(self, i): - reference = "Source: {}\n".format(i.vuln_source) - reference += "URL: {}\n".format(i.url) + reference = f"Source: {i.vuln_source}\n" + reference += f"URL: {i.url}\n" return reference diff --git a/dojo/tools/blackduck_binary_analysis/importer.py b/dojo/tools/blackduck_binary_analysis/importer.py index fcbe4d49a88..3060838d7e8 100644 --- a/dojo/tools/blackduck_binary_analysis/importer.py +++ b/dojo/tools/blackduck_binary_analysis/importer.py @@ -27,7 +27,7 @@ def _process_csvfile(self, report, orig_report_name): If passed a CSV file, process. 
""" vulnerabilities = dict() - with open(str(report), "r") as f: + with open(str(report)) as f: vulnerabilities = self.__partition_by_key(f) sha1_hash_keys = set(vulnerabilities.keys()) diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py index 55049312569..baab6cd9352 100644 --- a/dojo/tools/blackduck_binary_analysis/parser.py +++ b/dojo/tools/blackduck_binary_analysis/parser.py @@ -5,7 +5,7 @@ from cvss import CVSS2, CVSS3 -class BlackduckBinaryAnalysisParser(object): +class BlackduckBinaryAnalysisParser: """ Report type(s) from Blackduck Binary Analysis compatible with DefectDojo: - Single CSV file containing vulnerable components @@ -66,7 +66,7 @@ def ingest_findings(self, sorted_findings, test): references = self.format_references(i) unique_finding_key = hashlib.sha256( - "{}".format(file_path + object_sha1 + title).encode("utf-8") + f"{file_path + object_sha1 + title}".encode() ).hexdigest() if unique_finding_key in findings: @@ -105,11 +105,7 @@ def ingest_findings(self, sorted_findings, test): return findings.values() def format_title(self, i): - title = "{}: {} {} Vulnerable".format( - i.object_name, - i.component, - i.version, - ) + title = f"{i.object_name}: {i.component} {i.version} Vulnerable" if i.cve is not None: title += f" to {i.cve}" @@ -117,47 +113,30 @@ def format_title(self, i): return title def format_description(self, i): - description = "CSV Result: {}\n".format(str(i.report_name)) - description += "Vulnerable Component: {}\n".format(str(i.component)) - description += "Vulnerable Component Version in Use: {}\n".format(str(i.version)) - description += "Vulnerable Component Latest Version: {}\n".format( - str(i.latest_version) - ) - description += "Matching Type: {}\n".format(str(i.matching_type)) - description += "Object Name: {}\n".format( - str(i.object_name) - ) - description += "Object Extraction Path: {}\n".format( - str(i.object_full_path) - ) - description += "Object Compilation 
Date: {}\n".format( - str(i.object_compilation_date) - ) - description += "Object SHA1: {}\n".format(str(i.object_sha1)) - description += "CVE: {}\n".format(str(i.cve)) - description += "CVE Publication Date: {}\n".format( - str(i.cve_publication_date) - ) - description += "Distribution Package: {}\n".format( - str(i.distribution_package) - ) - description += "Missing Exploit Mitigations: {}\n".format( - str(i.missing_exploit_mitigations) - ) - description += "BDSA: {}\n".format(str(i.bdsa)) - description += "Summary:\n{}\n".format(str(i.summary)) - description += "Note Type:\n{}\n".format(str(i.note_type)) - description += "Note Reason:\n{}\n".format(str(i.note_reason)) - description += "Triage Vectors:\n{}\n".format(str(i.triage_vectors)) - description += "Unresolving Triage Vectors:\n{}\n".format(str(i.triage_vectors)) + description = f"CSV Result: {str(i.report_name)}\n" + description += f"Vulnerable Component: {str(i.component)}\n" + description += f"Vulnerable Component Version in Use: {str(i.version)}\n" + description += f"Vulnerable Component Latest Version: {str(i.latest_version)}\n" + description += f"Matching Type: {str(i.matching_type)}\n" + description += f"Object Name: {str(i.object_name)}\n" + description += f"Object Extraction Path: {str(i.object_full_path)}\n" + description += f"Object Compilation Date: {str(i.object_compilation_date)}\n" + description += f"Object SHA1: {str(i.object_sha1)}\n" + description += f"CVE: {str(i.cve)}\n" + description += f"CVE Publication Date: {str(i.cve_publication_date)}\n" + description += f"Distribution Package: {str(i.distribution_package)}\n" + description += f"Missing Exploit Mitigations: {str(i.missing_exploit_mitigations)}\n" + description += f"BDSA: {str(i.bdsa)}\n" + description += f"Summary:\n{str(i.summary)}\n" + description += f"Note Type:\n{str(i.note_type)}\n" + description += f"Note Reason:\n{str(i.note_reason)}\n" + description += f"Triage Vectors:\n{str(i.triage_vectors)}\n" + description += 
f"Unresolving Triage Vectors:\n{str(i.triage_vectors)}\n" return description def format_mitigation(self, i): - mitigation = "Upgrade {} to latest version: {}.\n".format( - str(i.component), - str(i.latest_version) - ) + mitigation = f"Upgrade {str(i.component)} to latest version: {str(i.latest_version)}.\n" return mitigation @@ -171,7 +150,7 @@ def format_impact(self, i): return impact def format_references(self, i): - references = "BDSA: {}\n".format(str(i.bdsa)) - references += "NIST CVE Details: {}\n".format(str(i.vulnerability_url)) + references = f"BDSA: {str(i.bdsa)}\n" + references += f"NIST CVE Details: {str(i.vulnerability_url)}\n" return references diff --git a/dojo/tools/blackduck_component_risk/importer.py b/dojo/tools/blackduck_component_risk/importer.py index c1c26d8dc4e..da1f8c53cc7 100644 --- a/dojo/tools/blackduck_component_risk/importer.py +++ b/dojo/tools/blackduck_component_risk/importer.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -class BlackduckCRImporter(object): +class BlackduckCRImporter: """ Importer for blackduck. 
V3 is different in that it creates a Finding in defect dojo for each vulnerable component version used in a project, for each license that is diff --git a/dojo/tools/blackduck_component_risk/parser.py b/dojo/tools/blackduck_component_risk/parser.py index 644d525bcd5..7f6916962b0 100644 --- a/dojo/tools/blackduck_component_risk/parser.py +++ b/dojo/tools/blackduck_component_risk/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class BlackduckComponentRiskParser(object): +class BlackduckComponentRiskParser: """ Can import as exported from Blackduck: - from a zip file containing a security.csv, sources.csv and components.csv diff --git a/dojo/tools/brakeman/parser.py b/dojo/tools/brakeman/parser.py index 77e32603f1a..50d130a13fb 100644 --- a/dojo/tools/brakeman/parser.py +++ b/dojo/tools/brakeman/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding -class BrakemanParser(object): +class BrakemanParser: def get_scan_types(self): return ["Brakeman Scan"] diff --git a/dojo/tools/bugcrowd/parser.py b/dojo/tools/bugcrowd/parser.py index 941b55fd694..1414e711a1b 100644 --- a/dojo/tools/bugcrowd/parser.py +++ b/dojo/tools/bugcrowd/parser.py @@ -6,7 +6,7 @@ from dojo.models import Endpoint, Finding -class BugCrowdParser(object): +class BugCrowdParser: def get_scan_types(self): return ["BugCrowd Scan"] diff --git a/dojo/tools/bundler_audit/parser.py b/dojo/tools/bundler_audit/parser.py index 8d567af2ec5..a098e6e3e63 100644 --- a/dojo/tools/bundler_audit/parser.py +++ b/dojo/tools/bundler_audit/parser.py @@ -6,7 +6,7 @@ from dojo.models import Finding -class BundlerAuditParser(object): +class BundlerAuditParser: def get_scan_types(self): return ["Bundler-Audit Scan"] diff --git a/dojo/tools/burp/parser.py b/dojo/tools/burp/parser.py index bd599598672..82b24118d7b 100755 --- a/dojo/tools/burp/parser.py +++ b/dojo/tools/burp/parser.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -class BurpParser(object): +class BurpParser: """ The objective of this class is 
to parse an xml file generated by the burp tool. diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py index 7d62f9b637a..01f30faffba 100644 --- a/dojo/tools/burp_api/parser.py +++ b/dojo/tools/burp_api/parser.py @@ -15,7 +15,7 @@ """ -class BurpApiParser(object): +class BurpApiParser: """Parser that can load data from Burp API""" def get_scan_types(self): diff --git a/dojo/tools/burp_dastardly/parser.py b/dojo/tools/burp_dastardly/parser.py index e546c83978c..70ee436a813 100755 --- a/dojo/tools/burp_dastardly/parser.py +++ b/dojo/tools/burp_dastardly/parser.py @@ -5,7 +5,7 @@ logger = logging.getLogger(__name__) -class BurpDastardlyParser(object): +class BurpDastardlyParser: def get_scan_types(self): return ["Burp Dastardly Scan"] diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py index b80e0c54b7d..b54603ea995 100644 --- a/dojo/tools/burp_enterprise/parser.py +++ b/dojo/tools/burp_enterprise/parser.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -class BurpEnterpriseParser(object): +class BurpEnterpriseParser: def get_scan_types(self): return ["Burp Enterprise Scan"] diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py index 90d91c640c3..34ebfbdbdf6 100644 --- a/dojo/tools/burp_graphql/parser.py +++ b/dojo/tools/burp_graphql/parser.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -class BurpGraphQLParser(object): +class BurpGraphQLParser: def get_scan_types(self): return ["Burp GraphQL API"] diff --git a/dojo/tools/cargo_audit/parser.py b/dojo/tools/cargo_audit/parser.py index fddf3be36a0..ccff968cc68 100644 --- a/dojo/tools/cargo_audit/parser.py +++ b/dojo/tools/cargo_audit/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class CargoAuditParser(object): +class CargoAuditParser: """ A class that can be used to parse the cargo audit JSON report file """ diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py index 
1c2a7220cef..3f67f97a51f 100755 --- a/dojo/tools/checkmarx/parser.py +++ b/dojo/tools/checkmarx/parser.py @@ -11,7 +11,7 @@ logger = logging.getLogger(__name__) -class CheckmarxParser(object): +class CheckmarxParser: def get_scan_types(self): return ["Checkmarx Scan", "Checkmarx Scan detailed"] @@ -68,23 +68,17 @@ def _get_findings_xml(self, filename, test): for result in query.findall("Result"): if categories is not None: - findingdetail = "{}**Category:** {}\n".format( - findingdetail, categories - ) + findingdetail = f"{findingdetail}**Category:** {categories}\n" if language is not None: - findingdetail = "{}**Language:** {}\n".format( - findingdetail, language - ) + findingdetail = f"{findingdetail}**Language:** {language}\n" if language not in language_list: language_list[language] = 1 else: language_list[language] = language_list[language] + 1 if group is not None: - findingdetail = "{}**Group:** {}\n".format( - findingdetail, group - ) + findingdetail = f"{findingdetail}**Group:** {group}\n" if result.get("Status") is not None: findingdetail = "{}**Status:** {}\n".format( @@ -94,9 +88,7 @@ def _get_findings_xml(self, filename, test): deeplink = "[{}]({})".format( result.get("DeepLink"), result.get("DeepLink") ) - findingdetail = "{}**Finding Link:** {}\n".format( - findingdetail, deeplink - ) + findingdetail = f"{findingdetail}**Finding Link:** {deeplink}\n" if self.mode == "detailed": self._process_result_detailed( @@ -154,7 +146,7 @@ def _process_result_file_name_aggregated( title = titleStart false_p = result.get("FalsePositive") sev = result.get("Severity") - aggregateKeys = "{}{}{}".format(cwe, sev, sinkFilename) + aggregateKeys = f"{cwe}{sev}{sinkFilename}" state = result.get("state") active = self.isActive(state) verified = self.isVerified(state) @@ -190,16 +182,8 @@ def _process_result_file_name_aggregated( find = dupes[aggregateKeys] find.nb_occurences = find.nb_occurences + 1 if find.nb_occurences == 2: - find.description = "### 1. 
{}\n{}".format( - find.title, find.description - ) - find.description = "{}\n\n-----\n### {}. {}\n{}\n{}".format( - find.description, - find.nb_occurences, - title, - findingdetail, - description, - ) + find.description = f"### 1. {find.title}\n{find.description}" + find.description = f"{find.description}\n\n-----\n### {find.nb_occurences}. {title}\n{findingdetail}\n{description}" if queryId not in vuln_ids_from_tool[aggregateKeys]: vuln_ids_from_tool[aggregateKeys].append(queryId) # If at least one of the findings in the aggregate is exploitable, @@ -236,12 +220,8 @@ def get_description_file_name_aggregated(self, query, result): sinkFilename, sinkLineNumber, sinkObject = self.get_pathnode_elements( pathnode ) - description = "Source file: {} (line {})\nSource object: {}".format( - sourceFilename, sourceLineNumber, sourceObject - ) - description = "{}\nSink file: {} (line {})\nSink object: {}".format( - description, sinkFilename, sinkLineNumber, sinkObject - ) + description = f"Source file: {sourceFilename} (line {sourceLineNumber})\nSource object: {sourceObject}" + description = f"{description}\nSink file: {sinkFilename} (line {sinkLineNumber})\nSink object: {sinkObject}" return description, pathnode def _process_result_detailed( @@ -273,7 +253,7 @@ def _process_result_detailed( similarityId = str(path.get("SimilarityId")) path_id = str(path.get("PathId")) pathId = similarityId + path_id - findingdetail = "{}-----\n".format(findingdetail) + findingdetail = f"{findingdetail}-----\n" # Loop over function calls / assignments in the data flow graph for pathnode in path.findall("PathNode"): findingdetail = self.get_description_detailed( @@ -294,9 +274,7 @@ def _process_result_detailed( ) = self.get_pathnode_elements(pathnode) # pathId is the unique id from tool which means that there is # basically no aggregation except real duplicates - aggregateKeys = "{}{}{}{}{}".format( - categories, cwe, name, sinkFilename, pathId - ) + aggregateKeys = 
f"{categories}{cwe}{name}{sinkFilename}{pathId}" if title and sinkFilename: title = "{} ({})".format(title, sinkFilename.split("/")[-1]) @@ -355,7 +333,7 @@ def get_description_detailed(self, pathnode, findingdetail): codefragment.find("Code").text.strip(), ) - findingdetail = "{}-----\n".format(findingdetail) + findingdetail = f"{findingdetail}-----\n" return findingdetail # Get name, cwe and categories from the global query tag (1 query = 1 type diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py index 699ac64e42a..8769a2220fc 100644 --- a/dojo/tools/checkmarx_one/parser.py +++ b/dojo/tools/checkmarx_one/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class CheckmarxOneParser(object): +class CheckmarxOneParser: def get_scan_types(self): return ["Checkmarx One Scan"] diff --git a/dojo/tools/checkmarx_osa/parser.py b/dojo/tools/checkmarx_osa/parser.py index 30ae18e0f45..c61ce2f8689 100644 --- a/dojo/tools/checkmarx_osa/parser.py +++ b/dojo/tools/checkmarx_osa/parser.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -class CheckmarxOsaParser(object): +class CheckmarxOsaParser: def get_scan_types(self): return ["Checkmarx OSA"] @@ -51,7 +51,7 @@ def get_findings(self, filehandle, test): status = item["state"]["name"] vulnerability_id = item.get("cveName", "NC") finding_item = Finding( - title="{0} {1} | {2}".format( + title="{} {} | {}".format( library["name"], library["version"], vulnerability_id ), severity=item["severity"]["name"], diff --git a/dojo/tools/checkov/parser.py b/dojo/tools/checkov/parser.py index c98e94537fa..ad4878d389e 100644 --- a/dojo/tools/checkov/parser.py +++ b/dojo/tools/checkov/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class CheckovParser(object): +class CheckovParser: def get_scan_types(self): return ["Checkov Scan"] diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py index adf85eb5eaa..30feaa586e3 100644 --- a/dojo/tools/chefinspect/parser.py +++ 
b/dojo/tools/chefinspect/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class ChefInspectParser(object): +class ChefInspectParser: def get_scan_types(self): return ["Chef Inspect Log"] diff --git a/dojo/tools/clair/clair_parser.py b/dojo/tools/clair/clair_parser.py index 55fc94ad392..381a1b97ede 100644 --- a/dojo/tools/clair/clair_parser.py +++ b/dojo/tools/clair/clair_parser.py @@ -3,7 +3,7 @@ logger = logging.getLogger(__name__) -class ClairScan(object): +class ClairScan: def get_items_clair(self, tree, test): items = {} for node in tree: diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py index 5a24dbb05f9..263c18872fa 100644 --- a/dojo/tools/clair/clairklar_parser.py +++ b/dojo/tools/clair/clairklar_parser.py @@ -3,7 +3,7 @@ logger = logging.getLogger(__name__) -class ClairKlarScan(object): +class ClairKlarScan: def get_items_klar(self, tree, test): items = list() clair_severities = [ diff --git a/dojo/tools/clair/parser.py b/dojo/tools/clair/parser.py index b0701de287f..269bbcf5a05 100644 --- a/dojo/tools/clair/parser.py +++ b/dojo/tools/clair/parser.py @@ -3,7 +3,7 @@ from dojo.tools.clair.clairklar_parser import ClairKlarScan -class ClairParser(object): +class ClairParser: def get_scan_types(self): return ["Clair Scan"] diff --git a/dojo/tools/cloudsploit/parser.py b/dojo/tools/cloudsploit/parser.py index 38e518fc6ed..b7b7d346e1c 100644 --- a/dojo/tools/cloudsploit/parser.py +++ b/dojo/tools/cloudsploit/parser.py @@ -8,7 +8,7 @@ # from urllib.parse import urlparse -class CloudsploitParser(object): +class CloudsploitParser: """ AquaSecurity CloudSploit https://github.com/aquasecurity/cloudsploit """ diff --git a/dojo/tools/cobalt/parser.py b/dojo/tools/cobalt/parser.py index 172982dd67d..11592d2ab9c 100644 --- a/dojo/tools/cobalt/parser.py +++ b/dojo/tools/cobalt/parser.py @@ -7,7 +7,7 @@ __author__ = "dr3dd589" -class CobaltParser(object): +class CobaltParser: def get_scan_types(self): return ["Cobalt.io 
Scan"] diff --git a/dojo/tools/codechecker/parser.py b/dojo/tools/codechecker/parser.py index 4866145c02e..f73302e7da0 100644 --- a/dojo/tools/codechecker/parser.py +++ b/dojo/tools/codechecker/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class CodeCheckerParser(object): +class CodeCheckerParser: def get_scan_types(self): return ["Codechecker Report native"] @@ -48,7 +48,7 @@ def get_item(vuln): if "type" in vuln: vuln_type = vuln.get("type", "None") if vuln_type != "None": - description += "Type: {}\n".format(vuln_type) + description += f"Type: {vuln_type}\n" if "message" in vuln: description += "{}\n".format(vuln["message"]) @@ -57,15 +57,13 @@ def get_item(vuln): file_path = location["path"] if "path" in location else None if file_path: - description += "File path: {}\n".format(file_path) + description += f"File path: {file_path}\n" line = vuln["line"] if "line" in vuln else None column = vuln["column"] if "column" in vuln else None if line is not None and column is not None: - description += "Location in file: line {}, column {}\n".format( - line, column - ) + description += f"Location in file: line {line}, column {column}\n" sast_source_line = line diff --git a/dojo/tools/contrast/parser.py b/dojo/tools/contrast/parser.py index f689b392dac..e5352606581 100644 --- a/dojo/tools/contrast/parser.py +++ b/dojo/tools/contrast/parser.py @@ -7,7 +7,7 @@ from dojo.models import Endpoint, Finding -class ContrastParser(object): +class ContrastParser: """Contrast Scanner CSV Report""" def get_scan_types(self): @@ -80,7 +80,7 @@ def get_findings(self, filename, test): ) dupe_key = hashlib.sha256( - f"{finding.vuln_id_from_tool}".encode("utf-8") + f"{finding.vuln_id_from_tool}".encode() ).digest() if dupe_key in dupes: diff --git a/dojo/tools/coverity_api/parser.py b/dojo/tools/coverity_api/parser.py index e25f819a8df..38c361e882b 100644 --- a/dojo/tools/coverity_api/parser.py +++ b/dojo/tools/coverity_api/parser.py @@ -4,7 +4,7 @@ from dojo.models import 
Finding -class CoverityApiParser(object): +class CoverityApiParser: """Parser that can load data from Synopsys Coverity API""" def get_scan_types(self): diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py index 0ac2b37c0b4..efe086ed4f1 100755 --- a/dojo/tools/crashtest_security/parser.py +++ b/dojo/tools/crashtest_security/parser.py @@ -8,7 +8,7 @@ from dojo.models import Finding -class CrashtestSecurityJsonParser(object): +class CrashtestSecurityJsonParser: """ The objective of this class is to parse a json file generated by the crashtest security suite. @@ -141,7 +141,7 @@ def get_severity(self, cvss_base_score): return "Critical" -class CrashtestSecurityXmlParser(object): +class CrashtestSecurityXmlParser: """ The objective of this class is to parse an xml file generated by the crashtest security suite. @@ -224,7 +224,7 @@ def get_items(self, tree, test): return items -class CrashtestSecurityParser(object): +class CrashtestSecurityParser: """SSLYze support JSON and XML""" def get_scan_types(self): diff --git a/dojo/tools/cred_scan/parser.py b/dojo/tools/cred_scan/parser.py index 2a2e616f44d..9a8ab21e5df 100644 --- a/dojo/tools/cred_scan/parser.py +++ b/dojo/tools/cred_scan/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class CredScanParser(object): +class CredScanParser: """ Credential Scanner (aka CredScan) is a tool developed and maintained by Microsoft to identify credential leaks such as those in source code and diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py index e1a841e29a3..fade8ddb42e 100644 --- a/dojo/tools/crunch42/parser.py +++ b/dojo/tools/crunch42/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class Crunch42Parser(object): +class Crunch42Parser: def get_scan_types(self): return ["Crunch42 Scan"] diff --git a/dojo/tools/cyclonedx/helpers.py b/dojo/tools/cyclonedx/helpers.py index ed64843c7d2..98a76bfe105 100644 --- a/dojo/tools/cyclonedx/helpers.py +++ 
b/dojo/tools/cyclonedx/helpers.py @@ -3,7 +3,7 @@ LOGGER = logging.getLogger(__name__) -class Cyclonedxhelper(object): +class Cyclonedxhelper: def _get_cvssv3(self, raw_vector): if raw_vector is None or "" == raw_vector: return None diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py index 1c4b0490b49..9ee31275fc9 100644 --- a/dojo/tools/cyclonedx/json_parser.py +++ b/dojo/tools/cyclonedx/json_parser.py @@ -6,7 +6,7 @@ LOGGER = logging.getLogger(__name__) -class CycloneDXJSONParser(object): +class CycloneDXJSONParser: def _get_findings_json(self, file, test): """Load a CycloneDX file in JSON format""" data = json.load(file) @@ -126,9 +126,7 @@ def _get_findings_json(self, file, test): if detail: finding.mitigation = ( finding.mitigation - + "\n**This vulnerability is mitigated and/or suppressed:** {}\n".format( - detail - ) + + f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n" ) findings.append(finding) return findings diff --git a/dojo/tools/cyclonedx/parser.py b/dojo/tools/cyclonedx/parser.py index dfb01b8a38e..8fe80a51136 100644 --- a/dojo/tools/cyclonedx/parser.py +++ b/dojo/tools/cyclonedx/parser.py @@ -2,7 +2,7 @@ from dojo.tools.cyclonedx.xml_parser import CycloneDXXMLParser -class CycloneDXParser(object): +class CycloneDXParser: """CycloneDX is a lightweight software bill of materials (SBOM) standard designed for use in application security contexts and supply chain component analysis. 
https://www.cyclonedx.org/ diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py index 5e0bda3985b..517dbc54780 100644 --- a/dojo/tools/cyclonedx/xml_parser.py +++ b/dojo/tools/cyclonedx/xml_parser.py @@ -7,7 +7,7 @@ LOGGER = logging.getLogger(__name__) -class CycloneDXXMLParser(object): +class CycloneDXXMLParser: def _get_findings_xml(self, file, test): nscan = ElementTree.parse(file) root = nscan.getroot() @@ -294,9 +294,7 @@ def _manage_vulnerability_xml( if detail: finding.mitigation = ( finding.mitigation - + "\n**This vulnerability is mitigated and/or suppressed:** {}\n".format( - detail - ) + + f"\n**This vulnerability is mitigated and/or suppressed:** {detail}\n" ) findings.append(finding) return findings diff --git a/dojo/tools/dawnscanner/parser.py b/dojo/tools/dawnscanner/parser.py index e191d2da062..98b91d36ad9 100644 --- a/dojo/tools/dawnscanner/parser.py +++ b/dojo/tools/dawnscanner/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class DawnScannerParser(object): +class DawnScannerParser: CVE_REGEX = re.compile(r"CVE-\d{4}-\d{4,7}") def get_scan_types(self): diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py index 89b634d13c6..01ff28e2fd9 100644 --- a/dojo/tools/dependency_check/parser.py +++ b/dojo/tools/dependency_check/parser.py @@ -13,7 +13,7 @@ logger = logging.getLogger(__name__) -class DependencyCheckParser(object): +class DependencyCheckParser: SEVERITY_MAPPING = { "info": "Info", "low": "Low", @@ -232,9 +232,7 @@ def get_finding_from_vulnerability( if component_name is None: logger.warning( - "component_name was None for File: {}, using dependency file name instead.".format( - dependency_filename - ) + f"component_name was None for File: {dependency_filename}, using dependency file name instead." 
) component_name = dependency_filename @@ -296,18 +294,14 @@ def get_finding_from_vulnerability( if related_dependency is not None: tags.append("related") - if vulnerability.tag == "{}suppressedVulnerability".format(namespace): + if vulnerability.tag == f"{namespace}suppressedVulnerability": if notes is None: notes = "Document on why we are suppressing this vulnerability is missing!" tags.append("no_suppression_document") - mitigation = "**This vulnerability is mitigated and/or suppressed:** {}\n".format( - notes - ) + mitigation = f"**This vulnerability is mitigated and/or suppressed:** {notes}\n" mitigation = ( mitigation - + "Update {}:{} to at least the version recommended in the description".format( - component_name, component_version - ) + + f"Update {component_name}:{component_version} to at least the version recommended in the description" ) mitigated = datetime.utcnow() is_Mitigated = True @@ -315,9 +309,7 @@ def get_finding_from_vulnerability( tags.append("suppressed") else: - mitigation = "Update {}:{} to at least the version recommended in the description".format( - component_name, component_version - ) + mitigation = f"Update {component_name}:{component_version} to at least the version recommended in the description" description += "\n**Filepath:** " + str(dependency_filepath) active = True diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py index 965e3e32362..c4e3dad351d 100644 --- a/dojo/tools/dependency_track/parser.py +++ b/dojo/tools/dependency_track/parser.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -class DependencyTrackParser(object): +class DependencyTrackParser: """ A class that can be used to parse the JSON Finding Packaging Format (FPF) export from OWASP Dependency Track. 
@@ -138,8 +138,7 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin else: version_description = '' - title = "{component_name}:{version_description} affected by: {vuln_id} ({source})"\ - .format(vuln_id=vuln_id, source=source, version_description=version_description, component_name=component_name) + title = f"{component_name}:{version_description} affected by: {vuln_id} ({source})" # We should collect all the vulnerability ids, the FPF format can add additional IDs as aliases # we add these aliases in the vulnerability_id list making sure duplicate findings get correctly deduplicated @@ -168,17 +167,16 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin # Build the description of the Dojo finding # We already know (from above) that the version information is not always present if component_version is not None: - component_description = "Version {component_version} of the {component_name} component".format(component_version=component_version, component_name=component_name) + component_description = f"Version {component_version} of the {component_name} component" else: - component_description = "The {component_name} component".format(component_name=component_name) + component_description = f"The {component_name} component" vulnerability_description = "You are using a component with a known vulnerability. " \ - "{component_description} is affected by the vulnerability with an id of {vuln_id} as " \ - "identified by {source}." \ - .format(component_description=component_description, vuln_id=vuln_id, source=source) + f"{component_description} is affected by the vulnerability with an id of {vuln_id} as " \ + f"identified by {source}." 
# Append purl info if it is present if 'purl' in dependency_track_finding['component'] and dependency_track_finding['component']['purl'] is not None: component_purl = dependency_track_finding['component']['purl'] - vulnerability_description = vulnerability_description + "\nThe purl of the affected component is: {purl}.".format(purl=component_purl) + vulnerability_description = vulnerability_description + f"\nThe purl of the affected component is: {component_purl}." # there is no file_path in the report, but defect dojo needs it otherwise it skips deduplication: # see https://github.com/DefectDojo/django-DefectDojo/issues/3647 # might be no longer needed in the future, and is not needed if people use the default diff --git a/dojo/tools/detect_secrets/parser.py b/dojo/tools/detect_secrets/parser.py index 0da274ba9f7..3e450a02a1c 100644 --- a/dojo/tools/detect_secrets/parser.py +++ b/dojo/tools/detect_secrets/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class DetectSecretsParser(object): +class DetectSecretsParser: """ A class that can be used to parse the detect-secrets JSON report file """ diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py index 870c3bc31be..a00db912d2c 100644 --- a/dojo/tools/dockerbench/parser.py +++ b/dojo/tools/dockerbench/parser.py @@ -4,7 +4,7 @@ from datetime import datetime -class DockerBenchParser(object): +class DockerBenchParser: def get_scan_types(self): return ["docker-bench-security Scan"] @@ -90,7 +90,7 @@ def get_item(vuln, test, test_start, test_end, description): description += unique_id_from_tool if reason: description += "\n" - description += "desc: {}\n".format(reason) + description += f"desc: {reason}\n" if vuln.get("details"): description += "\n" description += vuln["details"] diff --git a/dojo/tools/dockle/parser.py b/dojo/tools/dockle/parser.py index 5c07472bedd..e2d0be9256f 100644 --- a/dojo/tools/dockle/parser.py +++ b/dojo/tools/dockle/parser.py @@ -3,7 +3,7 @@ from dojo.models 
import Finding -class DockleParser(object): +class DockleParser: """ A class that can be used to parse the Dockle JSON report files """ diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py index eeeed1e5e17..158da541bd3 100644 --- a/dojo/tools/drheader/parser.py +++ b/dojo/tools/drheader/parser.py @@ -3,7 +3,7 @@ from dojo.models import Endpoint, Finding -class DrHeaderParser(object): +class DrHeaderParser: def get_scan_types(self): return ["DrHeader JSON Importer"] diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py index c3e2167b8c3..df8628f5330 100644 --- a/dojo/tools/eslint/parser.py +++ b/dojo/tools/eslint/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class ESLintParser(object): +class ESLintParser: def get_scan_types(self): return ["ESLint Scan"] @@ -37,7 +37,7 @@ def get_findings(self, filename, test): for message in item["messages"]: if message["message"] is None: - title = str("Finding Not defined") + title = "Finding Not defined" else: title = str(message["message"]) diff --git a/dojo/tools/fortify/fpr_parser.py b/dojo/tools/fortify/fpr_parser.py index de745d236d6..8110a23cb9c 100644 --- a/dojo/tools/fortify/fpr_parser.py +++ b/dojo/tools/fortify/fpr_parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class FortifyFPRParser(object): +class FortifyFPRParser: def parse_fpr(self, filename, test): if str(filename.__class__) == "": input_zip = zipfile.ZipFile(filename.name, 'r') diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py index 7ef6a07f859..3eef4071d55 100644 --- a/dojo/tools/fortify/parser.py +++ b/dojo/tools/fortify/parser.py @@ -2,7 +2,7 @@ from dojo.tools.fortify.fpr_parser import FortifyFPRParser -class FortifyParser(object): +class FortifyParser: def get_scan_types(self): return ["Fortify Scan"] diff --git a/dojo/tools/fortify/xml_parser.py b/dojo/tools/fortify/xml_parser.py index 80b54f9f850..a6f1d11d4ae 100644 --- a/dojo/tools/fortify/xml_parser.py +++ 
b/dojo/tools/fortify/xml_parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class FortifyXMLParser(object): +class FortifyXMLParser: def parse_xml(self, filename, test): fortify_scan = ElementTree.parse(filename) root = fortify_scan.getroot() @@ -120,7 +120,7 @@ def format_description(self, issue, meta_info) -> str: ) ) if explanation: - desc += "##Explanation:\n {}".format(explanation) + desc += f"##Explanation:\n {explanation}" return desc def format_title(self, category, filename, line_no): @@ -131,7 +131,7 @@ def format_title(self, category, filename, line_no): :param line_no: Line number of offending line :return: str """ - return "{} - {}: {}".format(category, filename, line_no) + return f"{category} - {filename}: {line_no}" def format_mitigation(self, issue, meta_info) -> str: """ @@ -144,8 +144,8 @@ def format_mitigation(self, issue, meta_info) -> str: mitigation = "" recommendation = meta_info[issue["Category"]].get("Recommendations") if recommendation: - mitigation += "###Recommendation:\n {}\n".format(recommendation) + mitigation += f"###Recommendation:\n {recommendation}\n" tips = meta_info[issue["Category"]].get("Tips") if tips: - mitigation += "###Tips:\n {}".format(tips) + mitigation += f"###Tips:\n {tips}" return mitigation diff --git a/dojo/tools/gcloud_artifact_scan/parser.py b/dojo/tools/gcloud_artifact_scan/parser.py index 9785d12d678..833d0b21e44 100644 --- a/dojo/tools/gcloud_artifact_scan/parser.py +++ b/dojo/tools/gcloud_artifact_scan/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class GCloudArtifactScanParser(object): +class GCloudArtifactScanParser: def get_scan_types(self): return ["Google Cloud Artifact Vulnerability Scan"] diff --git a/dojo/tools/generic/parser.py b/dojo/tools/generic/parser.py index 03144e71d00..07d1d5144d8 100644 --- a/dojo/tools/generic/parser.py +++ b/dojo/tools/generic/parser.py @@ -9,7 +9,7 @@ from dojo.tools.parser_test import ParserTest -class GenericParser(object): +class GenericParser: ID
= "Generic Findings Import" def get_scan_types(self): @@ -232,7 +232,7 @@ def _get_findings_csv(self, filename): # manage internal de-duplication key = hashlib.sha256( - f"{finding.severity}|{finding.title}|{finding.description}".encode("utf-8") + f"{finding.severity}|{finding.title}|{finding.description}".encode() ).hexdigest() if key in dupes: find = dupes[key] diff --git a/dojo/tools/ggshield/parser.py b/dojo/tools/ggshield/parser.py index 3d6373c87e9..a5b362f00b0 100755 --- a/dojo/tools/ggshield/parser.py +++ b/dojo/tools/ggshield/parser.py @@ -4,7 +4,7 @@ from dateutil import parser -class GgshieldParser(object): +class GgshieldParser: """ A class that can be used to parse the Gitleaks JSON report files """ diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py index ac44d815e86..da848f1b7c3 100644 --- a/dojo/tools/github_vulnerability/parser.py +++ b/dojo/tools/github_vulnerability/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class GithubVulnerabilityParser(object): +class GithubVulnerabilityParser: def get_scan_types(self): return ["Github Vulnerability Scan"] @@ -42,7 +42,7 @@ def get_findings(self, filename, test): + "/security/dependabot/{}".format(alert["number"]) ) description = ( - "[{}]({})\n".format(dependabot_url, dependabot_url) + f"[{dependabot_url}]({dependabot_url})\n" + description ) finding = Finding( diff --git a/dojo/tools/gitlab_api_fuzzing/parser.py b/dojo/tools/gitlab_api_fuzzing/parser.py index 270abdc0536..a0992b0e516 100644 --- a/dojo/tools/gitlab_api_fuzzing/parser.py +++ b/dojo/tools/gitlab_api_fuzzing/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class GitlabAPIFuzzingParser(object): +class GitlabAPIFuzzingParser: """ GitLab API Fuzzing Report diff --git a/dojo/tools/gitlab_container_scan/parser.py b/dojo/tools/gitlab_container_scan/parser.py index 0912d2fd2e6..65ceb27f281 100644 --- a/dojo/tools/gitlab_container_scan/parser.py +++ 
b/dojo/tools/gitlab_container_scan/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class GitlabContainerScanParser(object): +class GitlabContainerScanParser: """ GitLab's container scanning report See more: https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/blob/master/dist/container-scanning-report-format.json diff --git a/dojo/tools/gitlab_dast/parser.py b/dojo/tools/gitlab_dast/parser.py index 5627a0a06ef..0c6eab1dbe2 100644 --- a/dojo/tools/gitlab_dast/parser.py +++ b/dojo/tools/gitlab_dast/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding, Endpoint -class GitlabDastParser(object): +class GitlabDastParser: """ Import GitLab DAST Report in JSON format """ diff --git a/dojo/tools/gitlab_dep_scan/parser.py b/dojo/tools/gitlab_dep_scan/parser.py index 16692e88199..3928d82d1d7 100644 --- a/dojo/tools/gitlab_dep_scan/parser.py +++ b/dojo/tools/gitlab_dep_scan/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class GitlabDepScanParser(object): +class GitlabDepScanParser: def get_scan_types(self): return ["GitLab Dependency Scanning Report"] diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py index 91fec1e1451..b00a04a5e63 100644 --- a/dojo/tools/gitlab_sast/parser.py +++ b/dojo/tools/gitlab_sast/parser.py @@ -4,7 +4,7 @@ from dojo.tools.parser_test import ParserTest -class GitlabSastParser(object): +class GitlabSastParser: def get_scan_types(self): return ["GitLab SAST Report"] diff --git a/dojo/tools/gitlab_secret_detection_report/parser.py b/dojo/tools/gitlab_secret_detection_report/parser.py index f6e89adb844..ce72f04bf92 100644 --- a/dojo/tools/gitlab_secret_detection_report/parser.py +++ b/dojo/tools/gitlab_secret_detection_report/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class GitlabSecretDetectionReportParser(object): +class GitlabSecretDetectionReportParser: """ GitLab's secret detection report See more: 
https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/blob/master/dist/secret-detection-report-format.json diff --git a/dojo/tools/gitleaks/parser.py b/dojo/tools/gitleaks/parser.py index 513d43dd752..ae53b192279 100644 --- a/dojo/tools/gitleaks/parser.py +++ b/dojo/tools/gitleaks/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class GitleaksParser(object): +class GitleaksParser: """ A class that can be used to parse the Gitleaks JSON report files """ diff --git a/dojo/tools/gosec/parser.py b/dojo/tools/gosec/parser.py index 4d3824913bc..3efcbd93850 100644 --- a/dojo/tools/gosec/parser.py +++ b/dojo/tools/gosec/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class GosecParser(object): +class GosecParser: def get_scan_types(self): return ["Gosec Scanner"] @@ -33,11 +33,9 @@ def get_findings(self, filename, test): title = item["details"] + " - rule " + item["rule_id"] # Finding details information - findingdetail += "Filename: {}\n\n".format(filename) - findingdetail += "Line number: {}\n\n".format(str(line)) - findingdetail += "Issue Confidence: {}\n\n".format( - scanner_confidence - ) + findingdetail += f"Filename: {filename}\n\n" + findingdetail += f"Line number: {str(line)}\n\n" + findingdetail += f"Issue Confidence: {scanner_confidence}\n\n" findingdetail += "Code:\n\n" findingdetail += "```{}```".format(item["code"]) diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index 8d3409799f9..48c367684fa 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -7,7 +7,7 @@ __author__ = "Kirill Gotsman" -class H1Parser(object): +class H1Parser: """ A class that can be used to parse the Get All Reports JSON export from HackerOne API.
""" @@ -53,9 +53,7 @@ def get_findings(self, file, test): issue_tracker_url = content["attributes"][ "issue_tracker_reference_url" ] - references = "[{}]({})\n".format( - issue_tracker_id, issue_tracker_url - ) + references = f"[{issue_tracker_id}]({issue_tracker_url})\n" except Exception: references = "" @@ -72,7 +70,7 @@ def get_findings(self, file, test): ref_link = "https://hackerone.com/reports/{}".format( content.get("id") ) - references += "[{}]({})".format(ref_link, ref_link) + references += f"[{ref_link}]({ref_link})" # Set active state of the Dojo finding if content["attributes"]["state"] in ["triaged", "new"]: @@ -132,7 +130,7 @@ def build_description(self, content): # Build the description of the Dojo finding description = "#" + content["attributes"]["title"] - description += "\nSubmitted: {}\nBy: {}\n".format(date, reporter) + description += f"\nSubmitted: {date}\nBy: {reporter}\n" # Add triaged date if triaged_date is not None: @@ -140,14 +138,14 @@ def build_description(self, content): datetime.strptime(triaged_date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d", ) - description += "Triaged: {}\n".format(triaged_date) + description += f"Triaged: {triaged_date}\n" # Try to grab CVSS try: cvss = content["relationships"]["severity"]["data"]["attributes"][ "score" ] - description += "CVSS: {}\n".format(cvss) + description += f"CVSS: {cvss}\n" except Exception: pass @@ -164,9 +162,7 @@ def build_description(self, content): weakness_desc = content["relationships"]["weakness"]["data"][ "attributes" ]["description"] - description += "\n##Weakness: {}\n{}".format( - weakness_title, weakness_desc - ) + description += f"\n##Weakness: {weakness_title}\n{weakness_desc}" except Exception: pass diff --git a/dojo/tools/hadolint/parser.py b/dojo/tools/hadolint/parser.py index 9e907160fc5..4624dcbf994 100644 --- a/dojo/tools/hadolint/parser.py +++ b/dojo/tools/hadolint/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class HadolintParser(object): +class 
HadolintParser: def get_scan_types(self): return ["Hadolint Dockerfile check"] diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py index 7f5d2b88986..5d7db07ed18 100644 --- a/dojo/tools/harbor_vulnerability/parser.py +++ b/dojo/tools/harbor_vulnerability/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class HarborVulnerabilityParser(object): +class HarborVulnerabilityParser: """ Read JSON data from Harbor compatible format and import it to DefectDojo """ diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py index 8f559acc53e..b29cc465d7a 100755 --- a/dojo/tools/hcl_appscan/parser.py +++ b/dojo/tools/hcl_appscan/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding, Endpoint -class HCLAppScanParser(object): +class HCLAppScanParser: def get_scan_types(self): return ["HCLAppScan XML"] diff --git a/dojo/tools/horusec/parser.py b/dojo/tools/horusec/parser.py index 8eeecc1dbc6..a64fa2f091a 100644 --- a/dojo/tools/horusec/parser.py +++ b/dojo/tools/horusec/parser.py @@ -6,7 +6,7 @@ from dojo.tools.parser_test import ParserTest -class HorusecParser(object): +class HorusecParser: """Horusec (https://github.com/ZupIT/horusec)""" ID = "Horusec" diff --git a/dojo/tools/humble/parser.py b/dojo/tools/humble/parser.py index 68ec2741bd3..c99619dcfd9 100644 --- a/dojo/tools/humble/parser.py +++ b/dojo/tools/humble/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding, Endpoint -class HumbleParser(object): +class HumbleParser: """Humble (https://github.com/rfc-st/humble)""" def get_scan_types(self): diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py index 455204bd524..be0caeeb3a0 100644 --- a/dojo/tools/huskyci/parser.py +++ b/dojo/tools/huskyci/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class HuskyCIParser(object): +class HuskyCIParser: """ Read JSON data from huskyCI compatible format and import it to DefectDojo """ diff --git 
a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py index f24160ac7f6..3abb2b1c1b4 100644 --- a/dojo/tools/hydra/parser.py +++ b/dojo/tools/hydra/parser.py @@ -20,7 +20,7 @@ def __init__(self, generator): self.server = generator.get("server") -class HydraParser(object): +class HydraParser: """ Weak password findings from THC-Hydra (https://github.com/vanhauser-thc/thc-hydra) """ diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py index 8e4147a2282..f4389a9c5bb 100644 --- a/dojo/tools/ibm_app/parser.py +++ b/dojo/tools/ibm_app/parser.py @@ -8,7 +8,7 @@ LOGGER = logging.getLogger(__name__) -class IbmAppParser(object): +class IbmAppParser: def get_scan_types(self): return ["IBM AppScan DAST"] diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py index 6265d1f6203..9b7af871d93 100644 --- a/dojo/tools/immuniweb/parser.py +++ b/dojo/tools/immuniweb/parser.py @@ -7,7 +7,7 @@ __author__ = "properam" -class ImmuniwebParser(object): +class ImmuniwebParser: def get_scan_types(self): return ["Immuniweb Scan"] diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py index 2c97225fae2..7b69598c802 100644 --- a/dojo/tools/intsights/parser.py +++ b/dojo/tools/intsights/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding -class IntSightsParser(object): +class IntSightsParser: """ IntSights Threat Intelligence Report """ diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py index f62d3532297..09a6066d047 100644 --- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py +++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py @@ -8,7 +8,7 @@ from dojo.models import Finding -class JFrogXrayApiSummaryArtifactParser(object): +class JFrogXrayApiSummaryArtifactParser: # This function return a list of all the scan_type supported by your parser def get_scan_types(self): return ["JFrog Xray API Summary Artifact Scan"] diff --git 
a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py index b6901c289c1..0839ef5acfa 100644 --- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py +++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py @@ -6,7 +6,7 @@ from dojo.models import Finding -class JFrogXrayOnDemandBinaryScanParser(object): +class JFrogXrayOnDemandBinaryScanParser: """jfrog_xray_scan JSON reports""" def get_scan_types(self): diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py index 23e739101cd..c7e48897a2d 100644 --- a/dojo/tools/jfrog_xray_unified/parser.py +++ b/dojo/tools/jfrog_xray_unified/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class JFrogXrayUnifiedParser(object): +class JFrogXrayUnifiedParser: """JFrog Xray JSON reports""" def get_scan_types(self): @@ -131,9 +131,7 @@ def get_item(vulnerability, test): component_name=component_name, component_version=component_version, file_path=vulnerability["path"], - severity_justification="CVSS v3 base score: {}\nCVSS v2 base score: {}".format( - cvss_v3, cvss_v2 - ), + severity_justification=f"CVSS v3 base score: {cvss_v3}\nCVSS v2 base score: {cvss_v2}", static_finding=True, dynamic_finding=False, references=references, diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py index 9f45abd6bed..5017bdb7d3e 100644 --- a/dojo/tools/jfrogxray/parser.py +++ b/dojo/tools/jfrogxray/parser.py @@ -6,7 +6,7 @@ from dojo.models import Finding -class JFrogXrayParser(object): +class JFrogXrayParser: """JFrog Xray JSON reports""" def get_scan_types(self): diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py index 3f5a279762f..7974db2ba55 100644 --- a/dojo/tools/kics/parser.py +++ b/dojo/tools/kics/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class KICSParser(object): +class KICSParser: """ A class that can be used to parse the KICS JSON report file """ diff --git 
a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py index a79c828ecda..98ba9b29529 100644 --- a/dojo/tools/kiuwan/parser.py +++ b/dojo/tools/kiuwan/parser.py @@ -25,7 +25,7 @@ def eval_column(self, column_value): self.severity = "Info" -class KiuwanParser(object): +class KiuwanParser: def get_scan_types(self): return ["Kiuwan Scan"] diff --git a/dojo/tools/kubeaudit/parser.py b/dojo/tools/kubeaudit/parser.py index 63a998f8acf..4b37d6d8ee5 100644 --- a/dojo/tools/kubeaudit/parser.py +++ b/dojo/tools/kubeaudit/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class KubeAuditParser(object): +class KubeAuditParser: def get_scan_types(self): return ["Kubeaudit Scan"] diff --git a/dojo/tools/kubebench/parser.py b/dojo/tools/kubebench/parser.py index a54bcaf480e..f288da95426 100644 --- a/dojo/tools/kubebench/parser.py +++ b/dojo/tools/kubebench/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class KubeBenchParser(object): +class KubeBenchParser: def get_scan_types(self): return ["kube-bench Scan"] diff --git a/dojo/tools/kubehunter/parser.py b/dojo/tools/kubehunter/parser.py index 95cc6cddb5c..c7688033dd9 100644 --- a/dojo/tools/kubehunter/parser.py +++ b/dojo/tools/kubehunter/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class KubeHunterParser(object): +class KubeHunterParser: """ kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was developed to increase awareness and visibility for security issues in Kubernetes environments. 
""" diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py index 716244a3231..8f4c790a869 100644 --- a/dojo/tools/kubescape/parser.py +++ b/dojo/tools/kubescape/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class KubescapeParser(object): +class KubescapeParser: def get_scan_types(self): return ["Kubescape JSON Importer"] diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py index c6d59340882..965a725d32c 100644 --- a/dojo/tools/mend/parser.py +++ b/dojo/tools/mend/parser.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -class MendParser(object): +class MendParser: def get_scan_types(self): return ["Mend Scan"] diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py index e47cb469011..1fee4cbc863 100644 --- a/dojo/tools/meterian/parser.py +++ b/dojo/tools/meterian/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class MeterianParser(object): +class MeterianParser: def get_scan_types(self): return ["Meterian Scan"] diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py index 65032214c2d..c6c5ccd7a49 100644 --- a/dojo/tools/microfocus_webinspect/parser.py +++ b/dojo/tools/microfocus_webinspect/parser.py @@ -7,7 +7,7 @@ from dojo.models import Endpoint, Finding -class MicrofocusWebinspectParser(object): +class MicrofocusWebinspectParser: """Micro Focus Webinspect XML report parser""" def get_scan_types(self): @@ -82,7 +82,7 @@ def get_findings(self, file, test): # make dupe hash key dupe_key = hashlib.sha256( - f"{finding.description}|{finding.title}|{finding.severity}".encode("utf-8") + f"{finding.description}|{finding.title}|{finding.severity}".encode() ).hexdigest() # check if dupes are present. 
if dupe_key in dupes: diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py index 7c11a242396..02e43944f3a 100644 --- a/dojo/tools/mobsf/parser.py +++ b/dojo/tools/mobsf/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding -class MobSFParser(object): +class MobSFParser: def get_scan_types(self): return ["MobSF Scan"] @@ -30,45 +30,45 @@ def get_findings(self, filename, test): if "name" in data: test_description = "**Info:**\n" if "packagename" in data: - test_description = "%s **Package Name:** %s\n" % (test_description, data["packagename"]) + test_description = "{} **Package Name:** {}\n".format(test_description, data["packagename"]) if "mainactivity" in data: - test_description = "%s **Main Activity:** %s\n" % (test_description, data["mainactivity"]) + test_description = "{} **Main Activity:** {}\n".format(test_description, data["mainactivity"]) if "pltfm" in data: - test_description = "%s **Platform:** %s\n" % (test_description, data["pltfm"]) + test_description = "{} **Platform:** {}\n".format(test_description, data["pltfm"]) if "sdk" in data: - test_description = "%s **SDK:** %s\n" % (test_description, data["sdk"]) + test_description = "{} **SDK:** {}\n".format(test_description, data["sdk"]) if "min" in data: - test_description = "%s **Min SDK:** %s\n" % (test_description, data["min"]) + test_description = "{} **Min SDK:** {}\n".format(test_description, data["min"]) if "targetsdk" in data: - test_description = "%s **Target SDK:** %s\n" % (test_description, data["targetsdk"]) + test_description = "{} **Target SDK:** {}\n".format(test_description, data["targetsdk"]) if "minsdk" in data: - test_description = "%s **Min SDK:** %s\n" % (test_description, data["minsdk"]) + test_description = "{} **Min SDK:** {}\n".format(test_description, data["minsdk"]) if "maxsdk" in data: - test_description = "%s **Max SDK:** %s\n" % (test_description, data["maxsdk"]) + test_description = "{} **Max SDK:** {}\n".format(test_description, data["maxsdk"]) 
test_description = "%s\n**File Information:**\n" % (test_description) if "name" in data: - test_description = "%s **Name:** %s\n" % (test_description, data["name"]) + test_description = "{} **Name:** {}\n".format(test_description, data["name"]) if "md5" in data: - test_description = "%s **MD5:** %s\n" % (test_description, data["md5"]) + test_description = "{} **MD5:** {}\n".format(test_description, data["md5"]) if "sha1" in data: - test_description = "%s **SHA-1:** %s\n" % (test_description, data["sha1"]) + test_description = "{} **SHA-1:** {}\n".format(test_description, data["sha1"]) if "sha256" in data: - test_description = "%s **SHA-256:** %s\n" % (test_description, data["sha256"]) + test_description = "{} **SHA-256:** {}\n".format(test_description, data["sha256"]) if "size" in data: - test_description = "%s **Size:** %s\n" % (test_description, data["size"]) + test_description = "{} **Size:** {}\n".format(test_description, data["size"]) if "urls" in data: curl = "" @@ -77,10 +77,10 @@ def get_findings(self, filename, test): curl = "%s\n" % (curl) if curl: - test_description = "%s\n**URL's:**\n %s\n" % (test_description, curl) + test_description = f"{test_description}\n**URL's:**\n {curl}\n" if "bin_anal" in data: - test_description = "%s \n**Binary Analysis:** %s\n" % (test_description, data["bin_anal"]) + test_description = "{} \n**Binary Analysis:** {}\n".format(test_description, data["bin_anal"]) test.description = html2text(test_description) diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py index 58514eaea80..b65f63c9802 100644 --- a/dojo/tools/mobsfscan/parser.py +++ b/dojo/tools/mobsfscan/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class MobsfscanParser(object): +class MobsfscanParser: """ A class that can be used to parse the mobsfscan (https://github.com/MobSF/mobsfscan) JSON report file. 
""" diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py index 72e6a6d6236..2f901de698a 100644 --- a/dojo/tools/mozilla_observatory/parser.py +++ b/dojo/tools/mozilla_observatory/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class MozillaObservatoryParser(object): +class MozillaObservatoryParser: """Mozilla Observatory See: https://observatory.mozilla.org diff --git a/dojo/tools/ms_defender/parser.py b/dojo/tools/ms_defender/parser.py index 3bcdf56e074..ab6346fac0a 100644 --- a/dojo/tools/ms_defender/parser.py +++ b/dojo/tools/ms_defender/parser.py @@ -3,7 +3,7 @@ import zipfile -class MSDefenderParser(object): +class MSDefenderParser: """ Import from MSDefender findings """ diff --git a/dojo/tools/nancy/parser.py b/dojo/tools/nancy/parser.py index 4f8a65c4ed6..2c3111cb9fa 100644 --- a/dojo/tools/nancy/parser.py +++ b/dojo/tools/nancy/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class NancyParser(object): +class NancyParser: def get_scan_types(self): return ["Nancy Scan"] diff --git a/dojo/tools/netsparker/parser.py b/dojo/tools/netsparker/parser.py index b79e043c814..37714c2c0fd 100644 --- a/dojo/tools/netsparker/parser.py +++ b/dojo/tools/netsparker/parser.py @@ -6,7 +6,7 @@ from dojo.models import Finding, Endpoint -class NetsparkerParser(object): +class NetsparkerParser: def get_scan_types(self): return ["Netsparker Scan"] diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py index 17be7635686..b4e77abb11a 100644 --- a/dojo/tools/neuvector/parser.py +++ b/dojo/tools/neuvector/parser.py @@ -10,7 +10,7 @@ NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME = "NV container scan" -class NeuVectorJsonParser(object): +class NeuVectorJsonParser: def parse(self, json_output, test): tree = self.parse_json(json_output) items = [] @@ -102,9 +102,7 @@ def get_item(vulnerability, test): duplicate=False, out_of_scope=False, mitigated=None, - severity_justification="{} (CVSS v3 base score: 
{})\n".format( - vector, score_v3 - ), + severity_justification=f"{vector} (CVSS v3 base score: {score_v3})\n", impact=severity, ) finding.unsaved_vulnerability_ids = [vulnerability.get("name")] @@ -129,7 +127,7 @@ def convert_severity(severity): return severity.title() -class NeuVectorParser(object): +class NeuVectorParser: def get_scan_types(self): return [NEUVECTOR_SCAN_NAME] diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py index 74e5e515fd1..c16ecebe4bb 100644 --- a/dojo/tools/neuvector_compliance/parser.py +++ b/dojo/tools/neuvector_compliance/parser.py @@ -80,28 +80,26 @@ def get_item(node, test): test_profile = node.get("profile", "profile unknown") - full_description = "{} ({}), {}:\n".format( - test_number, category, test_profile - ) - full_description += "{}\n".format(test_description) - full_description += "Audit: {}\n".format(test_severity) + full_description = f"{test_number} ({category}), {test_profile}:\n" + full_description += f"{test_description}\n" + full_description += f"Audit: {test_severity}\n" if "evidence" in node: full_description += "Evidence:\n{}\n".format(node.get("evidence")) if "location" in node: full_description += "Location:\n{}\n".format(node.get("location")) - full_description += "Mitigation:\n{}\n".format(mitigation) + full_description += f"Mitigation:\n{mitigation}\n" tags = node.get("tags", []) if len(tags) > 0: full_description += "Tags:\n" for t in tags: - full_description += "{}\n".format(str(t).rstrip()) + full_description += f"{str(t).rstrip()}\n" messages = node.get("message", []) if len(messages) > 0: full_description += "Messages:\n" for m in messages: - full_description += "{}\n".format(str(m).rstrip()) + full_description += f"{str(m).rstrip()}\n" finding = Finding( title=title, @@ -135,7 +133,7 @@ def convert_severity(severity): return severity.title() -class NeuVectorComplianceParser(object): +class NeuVectorComplianceParser: def get_scan_types(self): return 
[NEUVECTOR_SCAN_NAME] diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py index 4fd1c85b677..8a9e5dfdb8d 100644 --- a/dojo/tools/nexpose/parser.py +++ b/dojo/tools/nexpose/parser.py @@ -8,7 +8,7 @@ from dojo.models import Finding, Endpoint -class NexposeParser(object): +class NexposeParser: """ The objective of this class is to parse Nexpose's XML 2.0 Report. diff --git a/dojo/tools/nikto/json_parser.py b/dojo/tools/nikto/json_parser.py index 2d06902e468..dc463d8385e 100644 --- a/dojo/tools/nikto/json_parser.py +++ b/dojo/tools/nikto/json_parser.py @@ -2,7 +2,7 @@ from dojo.models import Endpoint, Finding -class NiktoJSONParser(object): +class NiktoJSONParser: def process_json(self, file, test): data = json.load(file) if len(data) == 1 and isinstance(data, list): diff --git a/dojo/tools/nikto/parser.py b/dojo/tools/nikto/parser.py index 99ea2f26975..414a0a882db 100644 --- a/dojo/tools/nikto/parser.py +++ b/dojo/tools/nikto/parser.py @@ -2,7 +2,7 @@ from dojo.tools.nikto.xml_parser import NiktoXMLParser -class NiktoParser(object): +class NiktoParser: """Nikto web server scanner - https://cirt.net/Nikto2 The current parser support 3 sources: diff --git a/dojo/tools/nikto/xml_parser.py b/dojo/tools/nikto/xml_parser.py index 3480ffadd86..2686c3e8f65 100644 --- a/dojo/tools/nikto/xml_parser.py +++ b/dojo/tools/nikto/xml_parser.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -class NiktoXMLParser(object): +class NiktoXMLParser: def process_xml(self, file, test): dupes = dict() tree = ET.parse(file) diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py index 171795126c9..6c62a1af887 100755 --- a/dojo/tools/nmap/parser.py +++ b/dojo/tools/nmap/parser.py @@ -5,7 +5,7 @@ from dojo.models import Endpoint, Finding -class NmapParser(object): +class NmapParser: def get_scan_types(self): return ["Nmap Scan"] @@ -54,7 +54,7 @@ def get_findings(self, file, test): "**Host OS:** %s\n" % os_match.attrib["name"] ) if "accuracy" in 
os_match.attrib: - host_info += "**Accuracy:** {0}%\n".format( + host_info += "**Accuracy:** {}%\n".format( os_match.attrib["accuracy"] ) @@ -74,12 +74,9 @@ def get_findings(self, file, test): # filter on open ports if "open" != port_element.find("state").attrib.get("state"): continue - title = "Open port: %s/%s" % (endpoint.port, endpoint.protocol) + title = f"Open port: {endpoint.port}/{endpoint.protocol}" description = host_info - description += "**Port/Protocol:** %s/%s\n" % ( - endpoint.port, - endpoint.protocol, - ) + description += f"**Port/Protocol:** {endpoint.port}/{endpoint.protocol}\n" service_info = "\n\n" if port_element.find("service") is not None: diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py index acb28056f10..c475b93d103 100644 --- a/dojo/tools/noseyparker/parser.py +++ b/dojo/tools/noseyparker/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class NoseyParkerParser(object): +class NoseyParkerParser: """ Scanning secrets from repos """ diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py index a9b6dfe9bf9..f1255d54755 100644 --- a/dojo/tools/npm_audit/parser.py +++ b/dojo/tools/npm_audit/parser.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -class NpmAuditParser(object): +class NpmAuditParser: def get_scan_types(self): return ["NPM Audit Scan"] diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py index 1a500b8e3b9..42abc86330b 100644 --- a/dojo/tools/npm_audit_7_plus/parser.py +++ b/dojo/tools/npm_audit_7_plus/parser.py @@ -20,7 +20,7 @@ ''' -class NpmAudit7PlusParser(object): +class NpmAudit7PlusParser: """Represents the parser class.""" def get_scan_types(self): @@ -57,8 +57,8 @@ def parse_json(self, json_output): if tree.get("audit"): if not tree.get("audit").get("auditReportVersion"): raise ValueError( - ("This parser only supports output from npm audit version" - " 7 and above.") + "This parser only supports output from npm 
audit version" + " 7 and above." ) subtree = tree.get("audit").get("vulnerabilities") # output from npm audit --dry-run --json @@ -67,8 +67,8 @@ def parse_json(self, json_output): else: if not tree.get("auditReportVersion"): raise ValueError( - ("This parser only supports output from npm audit version" - " 7 and above.") + "This parser only supports output from npm audit version" + " 7 and above." ) subtree = tree.get("vulnerabilities") @@ -125,7 +125,7 @@ def get_item(item_node, tree, test): if isinstance(item_node["fixAvailable"], dict): fix_name = item_node["fixAvailable"]["name"] fix_version = item_node["fixAvailable"]["version"] - mitigation = "Update {0} to version {1}".format(fix_name, fix_version) + mitigation = f"Update {fix_name} to version {fix_version}" else: mitigation = "No specific mitigation provided by tool." @@ -187,8 +187,7 @@ def get_vuln_description(item_node, tree): if isinstance(item_node["fixAvailable"], dict): fix_name = item_node["fixAvailable"]["name"] fix_version = item_node["fixAvailable"]["version"] - mitigation = "Fix Available: Update {0} to version {1}".format( - fix_name, fix_version) + mitigation = f"Fix Available: Update {fix_name} to version {fix_version}" else: mitigation = "No specific mitigation provided by tool." 
diff --git a/dojo/tools/nsp/parser.py b/dojo/tools/nsp/parser.py index 40a7dcb66ab..35c0fca9760 100644 --- a/dojo/tools/nsp/parser.py +++ b/dojo/tools/nsp/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class NspParser(object): +class NspParser: def get_scan_types(self): return ["Node Security Platform Scan"] diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py index 76ed959eac6..f26f888811a 100644 --- a/dojo/tools/nuclei/parser.py +++ b/dojo/tools/nuclei/parser.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) -class NucleiParser(object): +class NucleiParser: """ A class that can be used to parse the nuclei (https://github.com/projectdiscovery/nuclei) JSON report file """ diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py index 9f3ba66132d..25bb392cdd2 100644 --- a/dojo/tools/openscap/parser.py +++ b/dojo/tools/openscap/parser.py @@ -8,7 +8,7 @@ from django.core.exceptions import ValidationError -class OpenscapParser(object): +class OpenscapParser: def get_scan_types(self): return ["Openscap Vulnerability Scan"] @@ -36,26 +36,26 @@ def get_findings(self, file, test): # read rules rules = {} - for rule in root.findall(".//{0}Rule".format(namespace)): + for rule in root.findall(f".//{namespace}Rule"): rules[rule.attrib["id"]] = { - "title": rule.findtext("./{0}title".format(namespace)) + "title": rule.findtext(f"./{namespace}title") } # go to test result - test_result = tree.find("./{0}TestResult".format(namespace)) + test_result = tree.find(f"./{namespace}TestResult") ips = [] # append all target in a list. 
- for ip in test_result.findall("./{0}target".format(namespace)): + for ip in test_result.findall(f"./{namespace}target"): ips.append(ip.text) - for ip in test_result.findall("./{0}target-address".format(namespace)): + for ip in test_result.findall(f"./{namespace}target-address"): ips.append(ip.text) dupes = dict() # run both rule, and rule-result in parallel so that we can get title # for failed test from rule. for rule_result in test_result.findall( - "./{0}rule-result".format(namespace) + f"./{namespace}rule-result" ): - result = rule_result.findtext("./{0}result".format(namespace)) + result = rule_result.findtext(f"./{namespace}result") # find only failed report. if "fail" in result: # get rule corresponding to rule-result @@ -69,9 +69,7 @@ def get_findings(self, file, test): ) vulnerability_ids = [] for vulnerability_id in rule_result.findall( - "./{0}ident[@system='http://cve.mitre.org']".format( - namespace - ) + f"./{namespace}ident[@system='http://cve.mitre.org']" ): vulnerability_ids.append(vulnerability_id.text) # get severity. @@ -86,7 +84,7 @@ def get_findings(self, file, test): references = "" # get references. 
for check_content in rule_result.findall( - "./{0}check/{0}check-content-ref".format(namespace) + f"./{namespace}check/{namespace}check-content-ref" ): references += ( "**name:** : " + check_content.attrib["name"] + "\n" diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py index 1d2eb4428a2..00c5c09991d 100644 --- a/dojo/tools/openvas/csv_parser.py +++ b/dojo/tools/openvas/csv_parser.py @@ -6,7 +6,7 @@ from dojo.models import Finding, Endpoint -class ColumnMappingStrategy(object): +class ColumnMappingStrategy: mapped_column = None def __init__(self): @@ -37,7 +37,7 @@ def process_column(self, column_name, column_value, finding): class DateColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "timestamp" - super(DateColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.date = parse(column_value).date() @@ -46,7 +46,7 @@ def map_column_value(self, finding, column_value): class TitleColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "nvt name" - super(TitleColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.title = column_value @@ -55,7 +55,7 @@ def map_column_value(self, finding, column_value): class CweColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "cweid" - super(CweColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if column_value.isdigit(): @@ -65,7 +65,7 @@ def map_column_value(self, finding, column_value): class PortColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "port" - super(PortColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if column_value.isdigit(): @@ -75,7 +75,7 @@ def map_column_value(self, finding, column_value): class 
CveColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "cves" - super(CveColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if column_value != "": @@ -90,7 +90,7 @@ def map_column_value(self, finding, column_value): class NVDCVEColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "nvt oid" - super(NVDCVEColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): cve_pattern = r'CVE-\d{4}-\d{4,7}' @@ -102,7 +102,7 @@ def map_column_value(self, finding, column_value): class ProtocolColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "port protocol" - super(ProtocolColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if column_value: # do not store empty protocol @@ -112,7 +112,7 @@ def map_column_value(self, finding, column_value): class IpColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "ip" - super(IpColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if not finding.unsaved_endpoints[ @@ -124,7 +124,7 @@ def map_column_value(self, finding, column_value): class HostnameColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "hostname" - super(HostnameColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if column_value: # do not override IP if hostname is empty @@ -139,7 +139,7 @@ def is_valid_severity(severity): def __init__(self): self.mapped_column = "severity" - super(SeverityColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): if self.is_valid_severity(column_value): @@ -151,7 +151,7 @@ def map_column_value(self, finding, column_value): class 
DescriptionColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "summary" - super(DescriptionColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.description = column_value @@ -160,7 +160,7 @@ def map_column_value(self, finding, column_value): class MitigationColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "solution" - super(MitigationColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.mitigation = column_value @@ -169,7 +169,7 @@ def map_column_value(self, finding, column_value): class ImpactColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "vulnerability insight" - super(ImpactColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.impact = column_value @@ -178,7 +178,7 @@ def map_column_value(self, finding, column_value): class ReferencesColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "specific result" - super(ReferencesColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.references = column_value @@ -187,7 +187,7 @@ def map_column_value(self, finding, column_value): class ActiveColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "active" - super(ActiveColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.active = self.evaluate_bool_value(column_value) @@ -196,7 +196,7 @@ def map_column_value(self, finding, column_value): class VerifiedColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "verified" - super(VerifiedColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, 
column_value): finding.verified = self.evaluate_bool_value(column_value) @@ -205,7 +205,7 @@ def map_column_value(self, finding, column_value): class FalsePositiveColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "falsepositive" - super(FalsePositiveColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.false_p = self.evaluate_bool_value(column_value) @@ -214,13 +214,13 @@ def map_column_value(self, finding, column_value): class DuplicateColumnMappingStrategy(ColumnMappingStrategy): def __init__(self): self.mapped_column = "duplicate" - super(DuplicateColumnMappingStrategy, self).__init__() + super().__init__() def map_column_value(self, finding, column_value): finding.duplicate = self.evaluate_bool_value(column_value) -class OpenVASCSVParser(object): +class OpenVASCSVParser: def create_chain(self): date_column_strategy = DateColumnMappingStrategy() title_column_strategy = TitleColumnMappingStrategy() diff --git a/dojo/tools/openvas/parser.py b/dojo/tools/openvas/parser.py index 6a1399f28ef..ce548db5878 100755 --- a/dojo/tools/openvas/parser.py +++ b/dojo/tools/openvas/parser.py @@ -2,7 +2,7 @@ from dojo.tools.openvas.xml_parser import OpenVASXMLParser -class OpenVASParser(object): +class OpenVASParser: def get_scan_types(self): return ["OpenVAS Parser"] diff --git a/dojo/tools/openvas/xml_parser.py b/dojo/tools/openvas/xml_parser.py index bc2c63dd828..5937b3c11bf 100644 --- a/dojo/tools/openvas/xml_parser.py +++ b/dojo/tools/openvas/xml_parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class OpenVASXMLParser(object): +class OpenVASXMLParser: def get_findings(self, filename, test): findings = [] tree = ET.parse(filename) diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py index d2811d3e170..d59b48be430 100644 --- a/dojo/tools/ort/parser.py +++ b/dojo/tools/ort/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding -class OrtParser(object): 
+class OrtParser: """Oss Review Toolkit Parser""" def get_scan_types(self): diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py index 8d04bac2d48..f5ada25b3cf 100644 --- a/dojo/tools/ossindex_devaudit/parser.py +++ b/dojo/tools/ossindex_devaudit/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class OssIndexDevauditParser(object): +class OssIndexDevauditParser: """OssIndex Devaudit Results Parser Parses files created by the Sonatype OssIndex Devaudit tool https://github.com/sonatype-nexus-community/DevAudit diff --git a/dojo/tools/osv_scanner/parser.py b/dojo/tools/osv_scanner/parser.py index 4d6fff7ab43..9afbefb02dc 100644 --- a/dojo/tools/osv_scanner/parser.py +++ b/dojo/tools/osv_scanner/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class OSVScannerParser(object): +class OSVScannerParser: def get_scan_types(self): return ["OSV Scan"] diff --git a/dojo/tools/outpost24/parser.py b/dojo/tools/outpost24/parser.py index af07759f1ec..097664e7578 100644 --- a/dojo/tools/outpost24/parser.py +++ b/dojo/tools/outpost24/parser.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -class Outpost24Parser(object): +class Outpost24Parser: def get_scan_types(self): return ["Outpost24 Scan"] @@ -57,9 +57,7 @@ def get_findings(self, file, test): else: severity = "Critical" cvss_description = detail.findtext("cvss_vector_description") - severity_justification = "{}\n{}".format( - cvss_score, cvss_description - ) + severity_justification = f"{cvss_score}\n{cvss_description}" finding = Finding( title=title, test=test, diff --git a/dojo/tools/parser_test.py b/dojo/tools/parser_test.py index 34dec50e359..6a28a08998c 100644 --- a/dojo/tools/parser_test.py +++ b/dojo/tools/parser_test.py @@ -1,4 +1,4 @@ -class ParserTest(object): +class ParserTest: def __init__(self, name: str, type: str, version: str): self.name = name self.type = type diff --git a/dojo/tools/php_security_audit_v2/parser.py 
b/dojo/tools/php_security_audit_v2/parser.py index f1ee8022c1a..36fe5b48a71 100644 --- a/dojo/tools/php_security_audit_v2/parser.py +++ b/dojo/tools/php_security_audit_v2/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class PhpSecurityAuditV2Parser(object): +class PhpSecurityAuditV2Parser: def get_scan_types(self): return ["PHP Security Audit v2"] diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py index c5fb5118804..3691e60639a 100644 --- a/dojo/tools/php_symfony_security_check/parser.py +++ b/dojo/tools/php_symfony_security_check/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class PhpSymfonySecurityCheckParser(object): +class PhpSymfonySecurityCheckParser: def get_scan_types(self): return ["PHP Symfony Security Check"] diff --git a/dojo/tools/pmd/parser.py b/dojo/tools/pmd/parser.py index e34c1c21f5e..2eea92daae2 100644 --- a/dojo/tools/pmd/parser.py +++ b/dojo/tools/pmd/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class PmdParser(object): +class PmdParser: def get_scan_types(self): return ["PMD Scan"] @@ -54,7 +54,7 @@ def get_findings(self, filename, test): finding.mitigation = "No mitigation provided" key = hashlib.sha256( - f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode("utf-8") + f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode() ).hexdigest() if key not in dupes: diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py index 67e176a9110..12548d0dcaf 100644 --- a/dojo/tools/popeye/parser.py +++ b/dojo/tools/popeye/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class PopeyeParser(object): +class PopeyeParser: """ Popeye is a kubernetes cluster resource analyzer. 
""" diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py index 0b5a942eb41..ac0b0a0912a 100644 --- a/dojo/tools/pwn_sast/parser.py +++ b/dojo/tools/pwn_sast/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class PWNSASTParser(object): +class PWNSASTParser: """ A class that can be used to parse pwn_sast source code scanning results in JSON format. See https://github.com/0dayinc/pwn for additional details. """ diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index a415b4487f5..b8498962f1a 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -187,9 +187,7 @@ def parse_finding(host, tree): # DefectDojo does not support cvssv2 _temp["CVSS_vector"] = None - search = ".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{}']".format( - _gid - ) + search = f".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{_gid}']" vuln_item = tree.find(search) if vuln_item is not None: finding = Finding() @@ -298,7 +296,7 @@ def qualys_parser(qualys_xml_file): return finding_list -class QualysParser(object): +class QualysParser: def get_scan_types(self): return ["Qualys Scan"] diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py index e60084619a7..1cd6f2bf146 100644 --- a/dojo/tools/qualys_infrascan_webgui/parser.py +++ b/dojo/tools/qualys_infrascan_webgui/parser.py @@ -126,7 +126,7 @@ def qualys_convert_severity(raw_val): return "Info" -class QualysInfrascanWebguiParser(object): +class QualysInfrascanWebguiParser: def get_scan_types(self): return ["Qualys Infrastructure Scan (WebGUI XML)"] diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py index c564c76cd22..4cd236f7395 100644 --- a/dojo/tools/qualys_webapp/parser.py +++ b/dojo/tools/qualys_webapp/parser.py @@ -464,7 +464,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False): return items -class QualysWebAppParser(object): +class QualysWebAppParser: 
def get_scan_types(self): return ["Qualys Webapp Scan"] diff --git a/dojo/tools/redhatsatellite/parser.py b/dojo/tools/redhatsatellite/parser.py index dcfb67e71ad..1168205b06f 100644 --- a/dojo/tools/redhatsatellite/parser.py +++ b/dojo/tools/redhatsatellite/parser.py @@ -2,7 +2,7 @@ from dojo.models import Finding -class RedHatSatelliteParser(object): +class RedHatSatelliteParser: def get_scan_types(self): return ["Red Hat Satellite"] diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py index 2482d517dc2..aaf038f8985 100644 --- a/dojo/tools/retirejs/parser.py +++ b/dojo/tools/retirejs/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class RetireJsParser(object): +class RetireJsParser: def get_scan_types(self): return ["Retire.js Scan"] diff --git a/dojo/tools/risk_recon/api.py b/dojo/tools/risk_recon/api.py index 0ac61f805d5..8b1a07dfbf0 100644 --- a/dojo/tools/risk_recon/api.py +++ b/dojo/tools/risk_recon/api.py @@ -29,7 +29,7 @@ def __init__(self, api_key, endpoint, data): def map_toes(self): response = self.session.get( - url="{}/toes".format(self.url), + url=f"{self.url}/toes", headers={"accept": "application/json", "Authorization": self.key}, ) @@ -51,9 +51,7 @@ def map_toes(self): self.toe_map[toe_id] = filters if filters else self.data else: raise Exception( - "Unable to query Target of Evaluations due to {} - {}".format( - response.status_code, response.content - ) + f"Unable to query Target of Evaluations due to {response.status_code} - {response.content}" ) def filter_finding(self, finding): @@ -71,7 +69,7 @@ def filter_finding(self, finding): def get_findings(self): for toe in self.toe_map.keys(): response = self.session.get( - url="{}/findings/{}".format(self.url, toe), + url=f"{self.url}/findings/{toe}", headers={ "accept": "application/json", "Authorization": self.key, @@ -85,7 +83,5 @@ def get_findings(self): self.findings.append(finding) else: raise Exception( - "Unable to collect findings from toe: {} due to {} - 
{}".format( - toe, response.status_code, response.content - ) + f"Unable to collect findings from toe: {toe} due to {response.status_code} - {response.content}" ) diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py index 8c70496d691..21bbfbbdbeb 100644 --- a/dojo/tools/risk_recon/parser.py +++ b/dojo/tools/risk_recon/parser.py @@ -5,7 +5,7 @@ from dojo.tools.risk_recon.api import RiskReconAPI -class RiskReconParser(object): +class RiskReconParser: def get_scan_types(self): return ["Risk Recon API Importer"] diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py index da0baa6c83e..0488270e095 100644 --- a/dojo/tools/rusty_hog/parser.py +++ b/dojo/tools/rusty_hog/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class RustyhogParser(object): +class RustyhogParser: def get_scan_types(self): return ["Rusty Hog Scan"] @@ -27,7 +27,7 @@ def get_items(self, json_output, scanner, test): vulnerabilities=self.parse_json(json_output), scanner=scanner ) for finding in findings: - unique_key = "Finding {}".format(finding) + unique_key = f"Finding {finding}" items[unique_key] = finding return list(items.values()) @@ -81,9 +81,7 @@ def __getitem(self, vulnerabilities, scanner): elif scanner == "Choctaw Hog": """Choctaw Hog""" found_secret_string = vulnerability.get("stringsFound") - description = "**This string was found:** {}".format( - found_secret_string - ) + description = f"**This string was found:** {found_secret_string}" if vulnerability.get("commit") is not None: description += "\n**Commit message:** {}".format( vulnerability.get("commit") @@ -119,9 +117,7 @@ def __getitem(self, vulnerabilities, scanner): elif scanner == "Duroc Hog": """Duroc Hog""" found_secret_string = vulnerability.get("stringsFound") - description = "**This string was found:** {}".format( - found_secret_string - ) + description = f"**This string was found:** {found_secret_string}" if vulnerability.get("path") is not None: description += 
"\n**Path of Issue:** {}".format( vulnerability.get("path") @@ -137,9 +133,7 @@ def __getitem(self, vulnerabilities, scanner): elif scanner == "Gottingen Hog": """Gottingen Hog""" found_secret_string = vulnerability.get("stringsFound") - description = "**This string was found:** {}".format( - found_secret_string - ) + description = f"**This string was found:** {found_secret_string}" if vulnerability.get("issue_id") is not None: description += "\n**JIRA Issue ID:** {}".format( vulnerability.get("issue_id") @@ -154,9 +148,7 @@ def __getitem(self, vulnerabilities, scanner): ) elif scanner == "Essex Hog": found_secret_string = vulnerability.get("stringsFound") - description = "**This string was found:** {}".format( - found_secret_string - ) + description = f"**This string was found:** {found_secret_string}" if vulnerability.get("page_id") is not None: description += "\n**Confluence URL:** [{}]({})".format( vulnerability.get("url"), vulnerability.get("url") diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py index e7963612b44..67af4d28bf8 100644 --- a/dojo/tools/sarif/parser.py +++ b/dojo/tools/sarif/parser.py @@ -13,7 +13,7 @@ CWE_REGEX = r"cwe-\d+" -class SarifParser(object): +class SarifParser: """OASIS Static Analysis Results Interchange Format (SARIF) for version 2.1.0 only. 
https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=sarif @@ -267,9 +267,9 @@ def get_description(result, rule): message = get_message_from_multiformatMessageString( result["message"], rule ) - description += "**Result message:** {}\n".format(message) + description += f"**Result message:** {message}\n" if get_snippet(result) is not None: - description += "**Snippet:**\n```{}```\n".format(get_snippet(result)) + description += f"**Snippet:**\n```{get_snippet(result)}```\n" if rule is not None: if "name" in rule: description += f"**{_('Rule name')}:** {rule.get('name')}\n" diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py index d4b1e6c0766..0da36bbe195 100644 --- a/dojo/tools/scantist/parser.py +++ b/dojo/tools/scantist/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class ScantistParser(object): +class ScantistParser: """ Scantist Parser: Scantist does a deep scan of source code and binaries for vulnerabilities and has reports following three main categories diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py index 1d019606e54..9e2d305c66c 100644 --- a/dojo/tools/scout_suite/parser.py +++ b/dojo/tools/scout_suite/parser.py @@ -6,7 +6,7 @@ from dojo.tools.parser_test import ParserTest -class ScoutSuiteParser(object): +class ScoutSuiteParser: """"ScoutSuite Wiki: https://github.com/nccgroup/ScoutSuite/wiki""" ID = "Scout Suite" @@ -34,19 +34,16 @@ def get_tests(self, scan_type, handle): last_run = data["last_run"] test_description = "" - test_description = "%s**Account:** `%s`\n" % ( - test_description, - account_id, - ) - test_description = "%s**Provider:** %s\n" % ( + test_description = f"{test_description}**Account:** `{account_id}`\n" + test_description = "{}**Provider:** {}\n".format( test_description, data["provider_name"], ) - test_description = "%s**Ruleset:** `%s`\n" % ( + test_description = "{}**Ruleset:** `{}`\n".format( test_description, last_run["ruleset_name"], ) - test_description = 
"%s**Ruleset Description:** %s\n" % ( + test_description = "{}**Ruleset Description:** {}\n".format( test_description, last_run["ruleset_about"], ) @@ -177,7 +174,7 @@ def tabs(n): self.item_data = ( self.item_data + self.formatview(depth) - + "**%s:** %s\n\n" % (key.title(), src) + + f"**{key.title()}:** {src}\n\n" ) else: self.item_data = ( diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index f22364854ab..b61cba1d137 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class SemgrepParser(object): +class SemgrepParser: def get_scan_types(self): return ["Semgrep JSON Report"] @@ -97,14 +97,14 @@ def get_description(self, item): description = "" message = item["extra"]["message"] - description += "**Result message:** {}\n".format(message) + description += f"**Result message:** {message}\n" snippet = item["extra"].get("lines") if snippet is not None: if "{4}" + "**CVE:** {} ({})\n" + "CVS Score: {} ({})\n" + "Summary: \n>{}" "\n\n-----\n\n".format( xml_node.attrib["cve_id"], xml_node.attrib.get("first_found_date"), diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py index 7134615d98f..c660a66ca20 100644 --- a/dojo/tools/veracode_sca/parser.py +++ b/dojo/tools/veracode_sca/parser.py @@ -12,7 +12,7 @@ from dojo.models import Finding -class VeracodeScaParser(object): +class VeracodeScaParser: vc_severity_mapping = { 1: "Info", 2: "Low", @@ -79,8 +79,8 @@ def _get_findings_json(self, file, test): severity = self.__cvss_to_severity(cvss_score) description = ( - "Project name: {0}\n" - "Title: \n>{1}" + "Project name: {}\n" + "Title: \n>{}" "\n\n-----\n\n".format( issue.get("project_name"), vulnerability.get("title") ) @@ -189,8 +189,8 @@ def get_findings_csv(self, file, test): date = None description = ( - "Project name: {0}\n" - "Title: \n>{1}" + "Project name: {}\n" + "Title: \n>{}" "\n\n-----\n\n".format(row.get("Project"), 
row.get("Title")) ) diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py index 85925de9905..3703e2226d8 100644 --- a/dojo/tools/wapiti/parser.py +++ b/dojo/tools/wapiti/parser.py @@ -10,7 +10,7 @@ logger = logging.getLogger(__name__) -class WapitiParser(object): +class WapitiParser: """The web-application vulnerability scanner see: https://wapiti.sourceforge.io/ diff --git a/dojo/tools/wazuh/parser.py b/dojo/tools/wazuh/parser.py index 762847b81a8..f4899d9cca1 100644 --- a/dojo/tools/wazuh/parser.py +++ b/dojo/tools/wazuh/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding, Endpoint -class WazuhParser(object): +class WazuhParser: """ The vulnerabilities with condition "Package unfixed" are skipped because there is no fix out yet. https://github.com/wazuh/wazuh/issues/14560 diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py index eb6b3186694..6fcbf62b1f3 100644 --- a/dojo/tools/wfuzz/parser.py +++ b/dojo/tools/wfuzz/parser.py @@ -5,7 +5,7 @@ from dojo.models import Finding, Endpoint -class WFuzzParser(object): +class WFuzzParser: """ A class that can be used to parse the WFuzz JSON report files """ diff --git a/dojo/tools/whispers/parser.py b/dojo/tools/whispers/parser.py index 42b79ee7c70..5c819df6acc 100644 --- a/dojo/tools/whispers/parser.py +++ b/dojo/tools/whispers/parser.py @@ -3,7 +3,7 @@ from dojo.models import Finding -class WhispersParser(object): +class WhispersParser: """ Identify hardcoded secrets in static structured text """ diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index 82596b33b8f..8286ad3ca01 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding, Endpoint -class WhiteHatSentinelParser(object): +class WhiteHatSentinelParser: """ A class to parse WhiteHat Sentinel vulns from the WhiteHat Sentinel API vuln?query_site=[ 
SITE_ID]&format=json&display_attack_vectors=all&display_custom_risk=1&display_risk=1&display_description=custom diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py index bec5fee29c3..07fbf8e2d12 100644 --- a/dojo/tools/wiz/parser.py +++ b/dojo/tools/wiz/parser.py @@ -4,7 +4,7 @@ from dojo.models import Finding -class WizParser(object): +class WizParser: def get_scan_types(self): return ["Wiz Scan"] diff --git a/dojo/tools/wpscan/parser.py b/dojo/tools/wpscan/parser.py index b6f3bd01afe..6fc25a43856 100644 --- a/dojo/tools/wpscan/parser.py +++ b/dojo/tools/wpscan/parser.py @@ -5,7 +5,7 @@ from dojo.models import Endpoint, Finding -class WpscanParser(object): +class WpscanParser: """WPScan - WordPress Security Scanner""" def get_scan_types(self): diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py index 791aec06efc..80b3096b4e6 100644 --- a/dojo/tools/xanitizer/parser.py +++ b/dojo/tools/xanitizer/parser.py @@ -7,7 +7,7 @@ from dojo.models import Finding -class XanitizerParser(object): +class XanitizerParser: def get_scan_types(self): return ["Xanitizer Scan"] @@ -36,9 +36,7 @@ def parse_xml(self, filename): root = tree.getroot() if "XanitizerFindingsList" not in root.tag: raise ValueError( - "'{}' is not a valid Xanitizer findings list report XML file.".format( - filename - ) + f"'{filename}' is not a valid Xanitizer findings list report XML file." 
) return root @@ -90,14 +88,14 @@ def generate_title(self, finding, line): file = finding.find("file") if pckg is not None and cl is not None: if line: - title = "{} ({}.{}:{})".format(title, pckg.text, cl.text, line) + title = f"{title} ({pckg.text}.{cl.text}:{line})" else: - title = "{} ({}.{})".format(title, pckg.text, cl.text) + title = f"{title} ({pckg.text}.{cl.text})" else: if line: - title = "{} ({}:{})".format(title, file.text, line) + title = f"{title} ({file.text}:{line})" else: - title = "{} ({})".format(title, file.text) + title = f"{title} ({file.text})" return title @@ -109,7 +107,7 @@ def generate_description(self, finding): if finding.find("startNode") is not None: startnode = finding.find("startNode") endnode = finding.find("endNode") - description = "{}\n-----\n".format(description) + description = f"{description}\n-----\n" description = "{}\n**Starting at:** {} - **Line** {}".format( description, startnode.get("classFQN"), startnode.get("lineNo") ) @@ -120,19 +118,15 @@ def generate_description(self, finding): description = self.add_code(endnode, True, description) elif finding.find("node") is not None: node = finding.find("node") - description = "{}\n-----\n".format(description) + description = f"{description}\n-----\n" line = node.get("lineNo") location = node.get("classFQN") if location is None: location = node.get("relativePath") if line is not None and int(line) > 0: - description = "{}\n**Finding at:** {} - **Line** {}".format( - description, location, line - ) + description = f"{description}\n**Finding at:** {location} - **Line** {line}" else: - description = "{}\n**Finding at:** {}".format( - description, location - ) + description = f"{description}\n**Finding at:** {location}" description = self.add_code(node, True, description) return description @@ -146,12 +140,10 @@ def add_code(self, node, showline, description): if showline or len(codelines) == 1: for code in codelines: if code.get("finding") == "true": - description = 
"{}\n**Finding Line:** {}".format( - description, code.text - ) + description = f"{description}\n**Finding Line:** {code.text}" if len(codelines) > 1: - description = "{}\n**Code Excerpt:** ".format(description) + description = f"{description}\n**Code Excerpt:** " for code in codelines: if code.text: description = "{}\n{}: {}".format( diff --git a/dojo/tools/yarn_audit/parser.py b/dojo/tools/yarn_audit/parser.py index 5f0ae8b39a1..a4feda6c465 100644 --- a/dojo/tools/yarn_audit/parser.py +++ b/dojo/tools/yarn_audit/parser.py @@ -4,7 +4,7 @@ from dojo.tools.utils import get_npm_cwe -class YarnAuditParser(object): +class YarnAuditParser: def get_scan_types(self): return ["Yarn Audit Scan"] diff --git a/dojo/tools/zap/parser.py b/dojo/tools/zap/parser.py index f7411daea21..b835c1543e0 100755 --- a/dojo/tools/zap/parser.py +++ b/dojo/tools/zap/parser.py @@ -4,7 +4,7 @@ from dojo.models import Endpoint, Finding -class ZapParser(object): +class ZapParser: """Parser for XML file generated by the OWASP Zed Attacl Proxy (ZAP) tool https://www.zaproxy.org/.""" MAPPING_SEVERITY = {"0": "Info", "1": "Low", "2": "Medium", "3": "High"} diff --git a/dojo/user/validators.py b/dojo/user/validators.py index c3eee9cfbee..767bc560b57 100644 --- a/dojo/user/validators.py +++ b/dojo/user/validators.py @@ -6,7 +6,7 @@ from dojo.utils import get_system_setting -class MinLengthValidator(object): +class MinLengthValidator: def validate(self, password, user=None): if len(password) < get_system_setting('minimum_password_length'): raise ValidationError( @@ -20,7 +20,7 @@ def get_help_text(self): minimum_length=get_system_setting('minimum_password_length'))) -class MaxLengthValidator(object): +class MaxLengthValidator: def validate(self, password, user=None): if len(password) > get_system_setting('maximum_password_length'): raise ValidationError( @@ -34,7 +34,7 @@ def get_help_text(self): maximum_length=get_system_setting('maximum_password_length'))) -class NumberValidator(object): +class 
NumberValidator: def validate(self, password, user=None): if not re.findall(r'\d', password) and get_system_setting('number_character_required'): raise ValidationError( @@ -47,7 +47,7 @@ def get_help_text(self): return gettext('Password must contain at least 1 digit, 0-9.') -class UppercaseValidator(object): +class UppercaseValidator: def validate(self, password, user=None): if not re.findall('[A-Z]', password) and get_system_setting('uppercase_character_required'): raise ValidationError( @@ -60,7 +60,7 @@ def get_help_text(self): return gettext('Password must contain at least 1 uppercase letter, A-Z.') -class LowercaseValidator(object): +class LowercaseValidator: def validate(self, password, user=None): if not re.findall('[a-z]', password) and get_system_setting('lowercase_character_required'): raise ValidationError( @@ -73,7 +73,7 @@ def get_help_text(self): return gettext('Password must contain at least 1 lowercase letter, a-z.') -class SymbolValidator(object): +class SymbolValidator: def validate(self, password, user=None): contains_special_character = re.findall(r'[(){}\[\]|~!@#$%^&*_\-+=;:\'",\`<>\./?]', password) if not contains_special_character and get_system_setting('special_character_required'): diff --git a/dojo/utils.py b/dojo/utils.py index 3e7c0af47ef..e442730fe72 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -251,7 +251,7 @@ def are_endpoints_duplicates(new_finding, to_duplicate_finding): list1 = get_endpoints_as_url(new_finding) list2 = get_endpoints_as_url(to_duplicate_finding) - deduplicationLogger.debug("Starting deduplication by endpoint fields for finding {} with urls {} and finding {} with urls {}".format(new_finding.id, list1, to_duplicate_finding.id, list2)) + deduplicationLogger.debug(f"Starting deduplication by endpoint fields for finding {new_finding.id} with urls {list1} and finding {to_duplicate_finding.id} with urls {list2}") if list1 == [] and list2 == []: return True @@ -1173,7 +1173,7 @@ def opened_in_period(start_date, 
end_date, **kwargs): return oip -class FileIterWrapper(object): +class FileIterWrapper: def __init__(self, flo, chunk_size=1024**2): self.flo = flo self.chunk_size = chunk_size @@ -1287,23 +1287,21 @@ def handle_uploaded_threat(f, eng): if not os.path.isdir(settings.MEDIA_ROOT + '/threat/'): # Create the folder os.mkdir(settings.MEDIA_ROOT + '/threat/') - with open(settings.MEDIA_ROOT + '/threat/%s%s' % (eng.id, extension), + with open(settings.MEDIA_ROOT + f'/threat/{eng.id}{extension}', 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) - eng.tmodel_path = settings.MEDIA_ROOT + '/threat/%s%s' % (eng.id, - extension) + eng.tmodel_path = settings.MEDIA_ROOT + f'/threat/{eng.id}{extension}' eng.save() def handle_uploaded_selenium(f, cred): _name, extension = os.path.splitext(f.name) - with open(settings.MEDIA_ROOT + '/selenium/%s%s' % (cred.id, extension), + with open(settings.MEDIA_ROOT + f'/selenium/{cred.id}{extension}', 'wb+') as destination: for chunk in f.chunks(): destination.write(chunk) - cred.selenium_script = settings.MEDIA_ROOT + '/selenium/%s%s' % (cred.id, - extension) + cred.selenium_script = settings.MEDIA_ROOT + f'/selenium/{cred.id}{extension}' cred.save() @@ -1513,8 +1511,7 @@ def calculate_grade(product, *args, **kwargs): low = severity_count['numerical_severity__count'] aeval = Interpreter() aeval(system_settings.product_grade) - grade_product = "grade_product(%s, %s, %s, %s)" % ( - critical, high, medium, low) + grade_product = f"grade_product({critical}, {high}, {medium}, {low})" product.prod_numeric_grade = aeval(grade_product) super(Product, product).save() @@ -1561,7 +1558,7 @@ def get_work_days(start: date, end: date): # Used to display the counts and enabled tabs in the product view -class Product_Tab(): +class Product_Tab: def __init__(self, product, title=None, tab=None): self.product = product self.title = title @@ -1896,7 +1893,7 @@ def _create_notifications(): findings_list.append(n.finding) # producing a 
"combined" SLA breach notification - title_combined = "SLA alert (%s): product type '%s', product '%s'" % (kind, pt, p) + title_combined = f"SLA alert ({kind}): product type '{pt}', product '{p}'" product = combined_notifications[pt][p][kind][0].finding.test.engagement.product create_notification( event='sla_breach_combined', @@ -1920,20 +1917,14 @@ def _create_notifications(): try: if system_settings.enable_finding_sla: logger.info("About to process findings for SLA notifications.") - logger.debug("Active {}, Verified {}, Has JIRA {}, pre-breach {}, post-breach {}".format( - system_settings.enable_notify_sla_active, - system_settings.enable_notify_sla_active_verified, - system_settings.enable_notify_sla_jira_only, - settings.SLA_NOTIFY_PRE_BREACH, - settings.SLA_NOTIFY_POST_BREACH, - )) + logger.debug(f"Active {system_settings.enable_notify_sla_active}, Verified {system_settings.enable_notify_sla_active_verified}, Has JIRA {system_settings.enable_notify_sla_jira_only}, pre-breach {settings.SLA_NOTIFY_PRE_BREACH}, post-breach {settings.SLA_NOTIFY_POST_BREACH}") query = None if system_settings.enable_notify_sla_active_verified: query = Q(active=True, verified=True, is_mitigated=False, duplicate=False) elif system_settings.enable_notify_sla_active: query = Q(active=True, is_mitigated=False, duplicate=False) - logger.debug("My query: {}".format(query)) + logger.debug(f"My query: {query}") no_jira_findings = {} if system_settings.enable_notify_sla_jira_only: @@ -1966,7 +1957,7 @@ def _create_notifications(): if (sla_age < 0) and (settings.SLA_NOTIFY_POST_BREACH < abs(sla_age)): post_breach_no_notify_count += 1 # Skip finding notification if breached for too long - logger.debug("Finding {} breached the SLA {} days ago. Skipping notifications.".format(finding.id, abs(sla_age))) + logger.debug(f"Finding {finding.id} breached the SLA {abs(sla_age)} days ago. 
Skipping notifications.") continue do_jira_sla_comment = False @@ -1980,29 +1971,26 @@ def _create_notifications(): jira_count += 1 jira_instance = jira_helper.get_jira_instance(finding) if jira_instance is not None: - logger.debug("JIRA config for finding is {}".format(jira_instance)) + logger.debug(f"JIRA config for finding is {jira_instance}") # global config or product config set, product level takes precedence try: # TODO: see new property from #2649 to then replace, somehow not working with prefetching though. product_jira_sla_comment_enabled = jira_helper.get_jira_project(finding).product_jira_sla_notification except Exception as e: logger.error("The product is not linked to a JIRA configuration! Something is weird here.") - logger.error("Error is: {}".format(e)) + logger.error(f"Error is: {e}") jiraconfig_sla_notification_enabled = jira_instance.global_jira_sla_notification if jiraconfig_sla_notification_enabled or product_jira_sla_comment_enabled: - logger.debug("Global setting {} -- Product setting {}".format( - jiraconfig_sla_notification_enabled, - product_jira_sla_comment_enabled - )) + logger.debug(f"Global setting {jiraconfig_sla_notification_enabled} -- Product setting {product_jira_sla_comment_enabled}") do_jira_sla_comment = True - logger.debug("JIRA issue is {}".format(jira_issue.jira_key)) + logger.debug(f"JIRA issue is {jira_issue.jira_key}") - logger.debug("Finding {} has {} days left to breach SLA.".format(finding.id, sla_age)) + logger.debug(f"Finding {finding.id} has {sla_age} days left to breach SLA.") if (sla_age < 0): post_breach_count += 1 - logger.info("Finding {} has breached by {} days.".format(finding.id, abs(sla_age))) + logger.info(f"Finding {finding.id} has breached by {abs(sla_age)} days.") abs_sla_age = abs(sla_age) if not system_settings.enable_notify_sla_exponential_backoff or abs_sla_age == 1 or (abs_sla_age & (abs_sla_age - 1) == 0): _add_notification(finding, 'breached') @@ -2011,23 +1999,16 @@ def _create_notifications(): 
# The finding is within the pre-breach period elif (sla_age > 0) and (sla_age <= settings.SLA_NOTIFY_PRE_BREACH): pre_breach_count += 1 - logger.info("Security SLA pre-breach warning for finding ID {}. Days remaining: {}".format(finding.id, sla_age)) + logger.info(f"Security SLA pre-breach warning for finding ID {finding.id}. Days remaining: {sla_age}") _add_notification(finding, 'prebreach') # The finding breaches the SLA today elif (sla_age == 0): at_breach_count += 1 - logger.info("Security SLA breach warning. Finding ID {} breaching today ({})".format(finding.id, sla_age)) + logger.info(f"Security SLA breach warning. Finding ID {finding.id} breaching today ({sla_age})") _add_notification(finding, 'breaching') _create_notifications() - logger.info("SLA run results: Pre-breach: {}, at-breach: {}, post-breach: {}, post-breach-no-notify: {}, with-jira: {}, TOTAL: {}".format( - pre_breach_count, - at_breach_count, - post_breach_count, - post_breach_no_notify_count, - jira_count, - total_count - )) + logger.info(f"SLA run results: Pre-breach: {pre_breach_count}, at-breach: {at_breach_count}, post-breach: {post_breach_count}, post-breach-no-notify: {post_breach_no_notify_count}, with-jira: {jira_count}, TOTAL: {total_count}") except System_Settings.DoesNotExist: logger.info("Findings SLA is not enabled.") @@ -2213,7 +2194,7 @@ def mass_model_updater(model_type, models, function, fields, page_size=1000, ord def to_str_typed(obj): """ for code that handles multiple types of objects, print not only __str__ but prefix the type of the object""" - return '%s: %s' % (type(obj), obj) + return f'{type(obj)}: {obj}' def get_product(obj): @@ -2266,7 +2247,7 @@ def get_enabled_notifications_list(): # Alerts need to enabled by default enabled = ['alert'] for choice in NOTIFICATION_CHOICES: - if get_system_setting('enable_{}_notifications'.format(choice[0])): + if get_system_setting(f'enable_{choice[0]}_notifications'): enabled.append(choice[0]) return enabled @@ -2276,7 +2257,7 @@ 
def is_finding_groups_enabled(): return get_system_setting("enable_finding_groups") -class async_delete(): +class async_delete: def __init__(self, *args, **kwargs): self.mapping = { 'Product_Type': [ diff --git a/dojo/views.py b/dojo/views.py index 829f749d64d..f6df1bdac58 100755 --- a/dojo/views.py +++ b/dojo/views.py @@ -216,8 +216,6 @@ def access_file(request, fid, oid, obj_type, url=False): raise Http404() # If reaching this far, user must have permission to get file file = get_object_or_404(FileUpload, pk=fid) - redirect_url = '{media_root}/{file_name}'.format( - media_root=settings.MEDIA_ROOT, - file_name=file.file.url.lstrip(settings.MEDIA_URL)) + redirect_url = f'{settings.MEDIA_ROOT}/{file.file.url.lstrip(settings.MEDIA_URL)}' print(redirect_url) return FileResponse(open(redirect_url, "rb")) diff --git a/dojo/wsgi.py b/dojo/wsgi.py index 49ae1d27be3..83f57ec1723 100644 --- a/dojo/wsgi.py +++ b/dojo/wsgi.py @@ -38,7 +38,7 @@ def is_debugger_listening(port): # Checking for RUN_MAIN for those that want to run the app locally with the python interpreter instead of uwsgi if os.environ.get("DD_DEBUG") == "True" and not os.getenv("RUN_MAIN") and is_debugger_listening(debugpy_port) != 0: - logger.info("DD_DEBUG is set to True, setting remote debugging on port {}".format(debugpy_port)) + logger.info(f"DD_DEBUG is set to True, setting remote debugging on port {debugpy_port}") try: import debugpy @@ -49,7 +49,7 @@ def is_debugger_listening(port): }) debugpy.listen(("0.0.0.0", debugpy_port)) if os.environ.get("DD_DEBUG_WAIT_FOR_CLIENT") == "True": - logger.info("Waiting for the debugging client to connect on port {}".format(debugpy_port)) + logger.info(f"Waiting for the debugging client to connect on port {debugpy_port}") debugpy.wait_for_client() print("Debugging client connected, resuming execution") except RuntimeError as e: diff --git a/pyproject.toml b/pyproject.toml index b8965e31682..0ced12b1861 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ # 
Enable the pycodestyle (`E`) and Pyflakes (`F`) rules by default. # Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or # McCabe complexity (`C901`) by default. - lint.select = ["E", "F", "FLY", "TRY004", "TRY2"] + lint.select = ["E", "F", "UP", "FLY", "TRY004", "TRY2"] lint.ignore = ["E501", "E722", "F821"] # Allow autofix for all enabled rules (when `--fix`) is provided. diff --git a/tests/announcement_banner_test.py b/tests/announcement_banner_test.py index 05658982e23..d2b6bd6c532 100644 --- a/tests/announcement_banner_test.py +++ b/tests/announcement_banner_test.py @@ -11,7 +11,7 @@ class AnnouncementBannerTest(BaseTestCase): def __init__(self, method_name, type): - super(AnnouncementBannerTest, self).__init__(method_name) + super().__init__(method_name) self.type = type def test_setup(self): diff --git a/tests/base_test_class.py b/tests/base_test_class.py index 64ce2189c45..a418a5b4645 100644 --- a/tests/base_test_class.py +++ b/tests/base_test_class.py @@ -464,7 +464,7 @@ def tearDownDriver(cls): dd_driver.quit() -class WebdriverOnlyNewLogFacade(object): +class WebdriverOnlyNewLogFacade: last_timestamp = 0 diff --git a/tests/notifications_test.py b/tests/notifications_test.py index 4c82f3e3ac2..24069547f38 100644 --- a/tests/notifications_test.py +++ b/tests/notifications_test.py @@ -12,14 +12,14 @@ class NotificationTest(BaseTestCase): def __init__(self, method_name, type): - super(NotificationTest, self).__init__(method_name) + super().__init__(method_name) self.type = type def enable_notification(self): driver = self.driver # Navigate to the System Settings driver.get(self.base_url + "system_settings") - mail_control = driver.find_element(By.ID, "id_enable_{}_notifications".format(self.type)) + mail_control = driver.find_element(By.ID, f"id_enable_{self.type}_notifications") if not mail_control.is_selected(): mail_control.click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() @@ -28,7 +28,7 @@ def 
disable_notification(self): driver = self.driver # Navigate to the System Settings driver.get(self.base_url + "system_settings") - mail_control = driver.find_element(By.ID, "id_enable_{}_notifications".format(self.type)) + mail_control = driver.find_element(By.ID, f"id_enable_{self.type}_notifications") if mail_control.is_selected(): mail_control.click() driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click() @@ -42,7 +42,7 @@ def test_disable_personal_notification(self): self.disable_notification() driver.get(self.base_url + "notifications") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert False except NoSuchElementException: assert True @@ -55,7 +55,7 @@ def test_enable_personal_notification(self): self.enable_notification() driver.get(self.base_url + "notifications") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert True except NoSuchElementException: if self.type == 'msteams': @@ -73,7 +73,7 @@ def test_disable_system_notification(self): self.disable_notification() driver.get(self.base_url + "notifications/system") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert False except NoSuchElementException: assert True @@ -86,7 +86,7 @@ def test_enable_system_notification(self): self.enable_notification() driver.get(self.base_url + "notifications/system") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert True except NoSuchElementException: assert 
False @@ -100,7 +100,7 @@ def test_disable_template_notification(self): self.disable_notification() driver.get(self.base_url + "notifications/template") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert False except NoSuchElementException: assert True @@ -113,7 +113,7 @@ def test_enable_template_notification(self): self.enable_notification() driver.get(self.base_url + "notifications/template") try: - driver.find_element(By.XPATH, "//input[@name='product_added' and @value='{}']".format(self.type)) + driver.find_element(By.XPATH, f"//input[@name='product_added' and @value='{self.type}']") assert True except NoSuchElementException: if self.type == 'msteams': diff --git a/tests/product_test.py b/tests/product_test.py index b50ce7395b3..efe868dde9c 100644 --- a/tests/product_test.py +++ b/tests/product_test.py @@ -8,7 +8,7 @@ from notifications_test import NotificationTest -class WaitForPageLoad(object): +class WaitForPageLoad: def __init__(self, browser, timeout): self.browser = browser self.timeout = time.time() + timeout @@ -27,7 +27,7 @@ def __exit__(self, *_): else: time.sleep(0.2) raise Exception( - 'Timeout waiting for {}s'.format(self.timeout) + f'Timeout waiting for {self.timeout}s' ) diff --git a/tests/zap.py b/tests/zap.py index 18a5d7b6f8a..a62aa546913 100755 --- a/tests/zap.py +++ b/tests/zap.py @@ -14,12 +14,12 @@ class Main: address = "127.0.0.1" port = 8080 - print(("Checking if ZAP is running, connecting to ZAP on http://" + address + ":" + str(port))) + print("Checking if ZAP is running, connecting to ZAP on http://" + address + ":" + str(port)) s = socket.socket() try: s.connect((address, port)) - except socket.error: + except OSError: print("Error connecting to ZAP, exiting.") sys.exit(0) @@ -35,19 +35,19 @@ class Main: # Defining context name as hostname from URL and creating context using it. 
contextname = urlparse(targetURL).netloc - print(("Context Name: " + contextname)) + print("Context Name: " + contextname) # Step1: Create context contextid = zap.context.new_context(contextname, apikey) - print(("ContextID: " + contextid)) + print("ContextID: " + contextid) # Step2: Include in the context result = zap.context.include_in_context(contextname, targetURLregex, apikey) - print(("URL regex defined in context: " + result)) + print("URL regex defined in context: " + result) # Step3: Session Management - Default is cookieBasedSessionManagement result = zap.sessionManagement.set_session_management_method(contextid, "cookieBasedSessionManagement", None, apikey) - print(("Session method defined: " + result)) + print("Session method defined: " + result) loginUrl = "http://os.environ['DD_BASE_URL']/login" # loginUrlregex = "\Q" + loginUrl + "\E.*" @@ -58,14 +58,14 @@ class Main: # Wait for passive scanning to complete while (int(zap.pscan.records_to_scan) > 0): - print(('Records to passive scan : ' + zap.pscan.records_to_scan)) + print('Records to passive scan : ' + zap.pscan.records_to_scan) time.sleep(15) print('Passive scanning complete') - print(('Actively Scanning target ' + targetURL)) + print('Actively Scanning target ' + targetURL) ascan_id = zap.ascan.scan(targetURL, None, None, None, None, None, apikey) # Can provide more options for active scan here instead of using None. 
while (int(zap.ascan.status(ascan_id)) < 100): - print(('Scan progress %: ' + zap.ascan.status(ascan_id))) + print('Scan progress %: ' + zap.ascan.status(ascan_id)) time.sleep(15) print('Scan completed') diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py index 62c9f465465..97683a4968d 100644 --- a/unittests/dojo_test_case.py +++ b/unittests/dojo_test_case.py @@ -27,7 +27,7 @@ def get_unit_tests_path(): return os.path.dirname(os.path.realpath(__file__)) -class DojoTestUtilsMixin(object): +class DojoTestUtilsMixin: def get_test_admin(self, *args, **kwargs): return User.objects.get(username='admin') @@ -377,7 +377,7 @@ def assert_jira_updated_map_changed(self, test_id, updated_map): findings = Test.objects.get(id=test_id).finding_set.all() for finding in findings: logger.debug('finding!') - self.assertNotEquals(jira_helper.get_jira_updated(finding), updated_map[finding.id]) + self.assertNotEqual(jira_helper.get_jira_updated(finding), updated_map[finding.id]) # Toggle epic mapping on jira product def toggle_jira_project_epic_mapping(self, obj, value): diff --git a/unittests/test_adminsite.py b/unittests/test_adminsite.py index bc48a7ea454..eb639f279eb 100644 --- a/unittests/test_adminsite.py +++ b/unittests/test_adminsite.py @@ -13,7 +13,7 @@ def test_is_model_defined(self): if subclass.__module__ == 'dojo.models': if not ((subclass.__name__[:9] == "Tagulous_") and (subclass.__name__[-5:] == "_tags")): with self.subTest(type="base", subclass=subclass): - self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'admin.site' in models.py".format(subclass)) + self.assertIn(subclass, admin.site._registry.keys(), f"{subclass} is not registered in 'admin.site' in models.py") else: with self.subTest(type="tag", subclass=subclass): - self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'tagulous.admin' in models.py".format(subclass)) + self.assertIn(subclass, admin.site._registry.keys(), f"{subclass} is not 
registered in 'tagulous.admin' in models.py") diff --git a/unittests/test_bulk_risk_acceptance_api.py b/unittests/test_bulk_risk_acceptance_api.py index f19ccb96022..9fdf1f90ee7 100644 --- a/unittests/test_bulk_risk_acceptance_api.py +++ b/unittests/test_bulk_risk_acceptance_api.py @@ -39,29 +39,29 @@ def setUpTestData(cls): target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) def create_finding(test: Test, reporter: User, cve: str) -> Finding: - return Finding(test=test, title='Finding {}'.format(cve), cve=cve, severity='High', verified=True, + return Finding(test=test, title=f'Finding {cve}', cve=cve, severity='High', verified=True, description='Hello world!', mitigation='Delete system32', impact='Everything', reporter=reporter, numerical_severity='S1', static_finding=True, dynamic_finding=False) Finding.objects.bulk_create( - map(lambda i: create_finding(cls.test_a, cls.user, 'CVE-1999-{}'.format(i)), range(50, 150, 3))) + map(lambda i: create_finding(cls.test_a, cls.user, f'CVE-1999-{i}'), range(50, 150, 3))) for finding in Finding.objects.filter(test=cls.test_a): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - map(lambda i: create_finding(cls.test_b, cls.user, 'CVE-1999-{}'.format(i)), range(51, 150, 3))) + map(lambda i: create_finding(cls.test_b, cls.user, f'CVE-1999-{i}'), range(51, 150, 3))) for finding in Finding.objects.filter(test=cls.test_b): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - map(lambda i: create_finding(cls.test_c, cls.user, 'CVE-1999-{}'.format(i)), range(52, 150, 3))) + map(lambda i: create_finding(cls.test_c, cls.user, f'CVE-1999-{i}'), range(52, 150, 3))) for finding in Finding.objects.filter(test=cls.test_c): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) 
Finding.objects.bulk_create( - map(lambda i: create_finding(cls.test_d, cls.user, 'CVE-2000-{}'.format(i)), range(50, 150, 3))) + map(lambda i: create_finding(cls.test_d, cls.user, f'CVE-2000-{i}'), range(50, 150, 3))) for finding in Finding.objects.filter(test=cls.test_d): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) Finding.objects.bulk_create( - map(lambda i: create_finding(cls.test_e, cls.user, 'CVE-1999-{}'.format(i)), range(50, 150, 3))) + map(lambda i: create_finding(cls.test_e, cls.user, f'CVE-1999-{i}'), range(50, 150, 3))) for finding in Finding.objects.filter(test=cls.test_e): Vulnerability_Id.objects.get_or_create(finding=finding, vulnerability_id=finding.cve) @@ -70,7 +70,7 @@ def setUp(self) -> None: self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key) def test_test_accept_risks(self): - accepted_risks = [{'vulnerability_id': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes', + accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', 'accepted_by': 'King of the Internet'} for i in range(100, 150)] result = self.client.post(reverse('test-accept-risks', kwargs={'pk': self.test_a.id}), data=accepted_risks, format='json') @@ -83,7 +83,7 @@ def test_test_accept_risks(self): self.assertEqual(self.engagement_2a.risk_acceptance.count(), 0) def test_engagement_accept_risks(self): - accepted_risks = [{'vulnerability_id': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes', + accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', 'accepted_by': 'King of the Internet'} for i in range(100, 150)] result = self.client.post(reverse('engagement-accept-risks', kwargs={'pk': self.engagement.id}), data=accepted_risks, format='json') @@ -94,7 +94,7 @@ def test_engagement_accept_risks(self): self.assertEqual(self.engagement_2a.unaccepted_open_findings.count(), 34) def test_finding_accept_risks(self): 
- accepted_risks = [{'vulnerability_id': 'CVE-1999-{}'.format(i), 'justification': 'Demonstration purposes', + accepted_risks = [{'vulnerability_id': f'CVE-1999-{i}', 'justification': 'Demonstration purposes', 'accepted_by': 'King of the Internet'} for i in range(60, 140)] result = self.client.post(reverse('finding-accept-risks'), data=accepted_risks, format='json') self.assertEqual(len(result.json()), 106) diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py index 86d05f67c73..5b7ae6bdd3c 100644 --- a/unittests/test_deduplication_logic.py +++ b/unittests/test_deduplication_logic.py @@ -1158,12 +1158,12 @@ def log_findings(self, findings): else: logger.debug('\t\t' + 'findings:') for finding in findings: - logger.debug('\t\t\t{:4.4}'.format(str(finding.id)) + ': "' + '{:20.20}'.format(finding.title) + '": ' + '{:5.5}'.format(finding.severity) + ': act: ' + '{:5.5}'.format(str(finding.active)) - + ': ver: ' + '{:5.5}'.format(str(finding.verified)) + ': mit: ' + '{:5.5}'.format(str(finding.is_mitigated)) - + ': dup: ' + '{:5.5}'.format(str(finding.duplicate)) + ': dup_id: ' - + ('{:4.4}'.format(str(finding.duplicate_finding.id)) if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) + logger.debug(f'\t\t\t{str(finding.id):4.4}' + ': "' + f'{finding.title:20.20}' + '": ' + f'{finding.severity:5.5}' + ': act: ' + f'{str(finding.active):5.5}' + + ': ver: ' + f'{str(finding.verified):5.5}' + ': mit: ' + f'{str(finding.is_mitigated):5.5}' + + ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: ' + + (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) + ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) - + ': uid: ' + '{:5.5}'.format(str(finding.unique_id_from_tool)) + (' fp' if finding.false_p else '') + + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if 
finding.false_p else '') ) logger.debug('\t\tendpoints') diff --git a/unittests/test_endpoint_meta_import.py b/unittests/test_endpoint_meta_import.py index e0ec437c42f..23981ab0b7d 100644 --- a/unittests/test_endpoint_meta_import.py +++ b/unittests/test_endpoint_meta_import.py @@ -12,7 +12,7 @@ # test methods to be used both by API Test and UI Test -class EndpointMetaImportMixin(object): +class EndpointMetaImportMixin: def __init__(self, *args, **kwargs): self.meta_import_full = 'endpoint_meta_import/full_endpoint_meta_import.csv' self.meta_import_no_hostname = 'endpoint_meta_import/no_hostname_endpoint_meta_import.csv' diff --git a/unittests/test_endpoint_model.py b/unittests/test_endpoint_model.py index 71535e07321..634130bb3a1 100644 --- a/unittests/test_endpoint_model.py +++ b/unittests/test_endpoint_model.py @@ -37,7 +37,7 @@ def test_truncates_large_attributes(self): path = "foo" * 1000 query = "bar" * 1000 fragment = "baz" * 1000 - endpoint = Endpoint.from_uri('http://alice@foo.bar:8080/{}?{}#{}'.format(path, query, fragment)) + endpoint = Endpoint.from_uri(f'http://alice@foo.bar:8080/{path}?{query}#{fragment}') self.assertEqual(len(endpoint.path), 500) self.assertEqual(len(endpoint.query), 1000) self.assertEqual(len(endpoint.fragment), 500) diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py index 853737850f8..808af00f746 100644 --- a/unittests/test_false_positive_history_logic.py +++ b/unittests/test_false_positive_history_logic.py @@ -1675,12 +1675,12 @@ def log_findings(self, findings): else: logger.debug('\t\t' + 'findings:') for finding in findings: - logger.debug('\t\t\t{:4.4}'.format(str(finding.id)) + ': "' + '{:20.20}'.format(finding.title) + '": ' + '{:5.5}'.format(finding.severity) + ': act: ' + '{:5.5}'.format(str(finding.active)) - + ': ver: ' + '{:5.5}'.format(str(finding.verified)) + ': mit: ' + '{:5.5}'.format(str(finding.is_mitigated)) - + ': dup: ' + 
'{:5.5}'.format(str(finding.duplicate)) + ': dup_id: ' - + ('{:4.4}'.format(str(finding.duplicate_finding.id)) if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) + logger.debug(f'\t\t\t{str(finding.id):4.4}' + ': "' + f'{finding.title:20.20}' + '": ' + f'{finding.severity:5.5}' + ': act: ' + f'{str(finding.active):5.5}' + + ': ver: ' + f'{str(finding.verified):5.5}' + ': mit: ' + f'{str(finding.is_mitigated):5.5}' + + ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: ' + + (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code) + ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()]) - + ': uid: ' + '{:5.5}'.format(str(finding.unique_id_from_tool)) + (' fp' if finding.false_p else '') + + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else '') ) logger.debug('\t\tendpoints') @@ -1734,7 +1734,7 @@ def copy_and_reset_product(self, id): org = Product.objects.get(id=id) new = org new.pk = None - new.name = '%s (Copy %s)' % (org.name, datetime.now()) + new.name = f'{org.name} (Copy {datetime.now()})' # return unsaved new product and reloaded existing product return new, Product.objects.get(id=id) @@ -1746,7 +1746,7 @@ def change_finding_unique_id(self, finding): return finding def change_finding_title(self, finding): - finding.title = '%s (Copy %s)' % (finding.title, datetime.now()) + finding.title = f'{finding.title} (Copy {datetime.now()})' return finding def change_finding_severity(self, finding): diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 535bc488d19..79524465f48 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -50,7 +50,7 @@ # 5 active sev medium # test methods to be used both by API Test and UI Test -class ImportReimportMixin(object): +class ImportReimportMixin: def __init__(self, *args, 
**kwargs): self.scans_path = '/scans/' diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py index 096539b2920..a51b21e4560 100644 --- a/unittests/test_importers_importer.py +++ b/unittests/test_importers_importer.py @@ -297,7 +297,7 @@ def test_import_with_invalid_parameters(self): Product_Type.objects.create(name=another_product_type_name) import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, ["Product '%s' doesn't exist in Product_Type '%s'" % (PRODUCT_NAME_DEFAULT, another_product_type_name)]) + self.assertEqual(import0, [f"Product '{PRODUCT_NAME_DEFAULT}' doesn't exist in Product_Type '{another_product_type_name}'"]) with self.subTest('invalid engagement'): import0 = self.import_scan_with_params(NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, @@ -505,7 +505,7 @@ def test_reimport_with_invalid_parameters(self): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, engagement=None, product_type_name=another_product_type_name, product_name=PRODUCT_NAME_DEFAULT, engagement_name='valentijn', expected_http_status_code=400) - self.assertEqual(import0, ["Product '%s' doesn't exist in Product_Type '%s'" % (PRODUCT_NAME_DEFAULT, another_product_type_name)]) + self.assertEqual(import0, [f"Product '{PRODUCT_NAME_DEFAULT}' doesn't exist in Product_Type '{another_product_type_name}'"]) with self.subTest('invalid engagement'): import0 = self.reimport_scan_with_params(None, NPM_AUDIT_NO_VULN_FILENAME, scan_type=NPM_AUDIT_SCAN_TYPE, diff --git a/unittests/test_jira_config_engagement.py b/unittests/test_jira_config_engagement.py index 9506a264f60..0fede4ca68c 100644 --- a/unittests/test_jira_config_engagement.py +++ 
b/unittests/test_jira_config_engagement.py @@ -11,7 +11,7 @@ logger = logging.getLogger(__name__) -class JIRAConfigEngagementBase(object): +class JIRAConfigEngagementBase: def get_new_engagement_with_jira_project_data(self): return { 'name': 'new engagement', diff --git a/unittests/test_jira_import_and_pushing_api.py b/unittests/test_jira_import_and_pushing_api.py index dd50794c0a2..76593c19677 100644 --- a/unittests/test_jira_import_and_pushing_api.py +++ b/unittests/test_jira_import_and_pushing_api.py @@ -44,7 +44,7 @@ def assert_cassette_played(self): self.assertTrue(self.cassette.all_played) def _get_vcr(self, **kwargs): - my_vcr = super(JIRAImportAndPushTestApi, self)._get_vcr(**kwargs) + my_vcr = super()._get_vcr(**kwargs) my_vcr.record_mode = 'once' my_vcr.path_transformer = VCR.ensure_suffix('.yaml') my_vcr.filter_headers = ['Authorization', 'X-Atlassian-Token'] diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py index ac2b75730c0..afd849aa7cb 100644 --- a/unittests/test_parsers.py +++ b/unittests/test_parsers.py @@ -79,7 +79,7 @@ def test_file_existence(self): if file.is_file() and file.name != '__pycache__' and file.name != "__init__.py": f = os.path.join(basedir, 'dojo', 'tools', parser_dir.name, file.name) read_true = False - for line in open(f, "r").readlines(): + for line in open(f).readlines(): if read_true is True: if ('"utf-8"' in str(line) or "'utf-8'" in str(line) or '"utf-8-sig"' in str(line) or "'utf-8-sig'" in str(line)) and i <= 4: read_true = False diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index 1120419c894..377cf1dcbf3 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -111,7 +111,7 @@ def format_url(path): return f"{BASE_API_URL}{path}" -class SchemaChecker(): +class SchemaChecker: def __init__(self, components): self._prefix = [] self._has_failed = False @@ -272,7 +272,7 @@ class TestType(Enum): CONFIGURATION_PERMISSIONS = 3 -class BaseClass(): 
+class BaseClass: class RESTEndpointTest(DojoAPITestCase): def __init__(self, *args, **kwargs): DojoAPITestCase.__init__(self, *args, **kwargs) @@ -1043,7 +1043,7 @@ def test_request_response_post_and_download(self): length = FileUpload.objects.count() payload = { "title": level, - "file": open(f'{str(self.path)}/scans/acunetix/one_finding.xml', 'r') + "file": open(f'{str(self.path)}/scans/acunetix/one_finding.xml') } response = self.client.post(f'/api/v2/{level}/files/', payload) self.assertEqual(201, response.status_code, response.data) @@ -1051,7 +1051,7 @@ def test_request_response_post_and_download(self): # Save the ID of the newly created file object self.url_levels[level] = response.data.get('id') # Test the download - with open(f'{str(self.path)}/scans/acunetix/one_finding.xml', 'r') as file: + with open(f'{str(self.path)}/scans/acunetix/one_finding.xml') as file: file_data = file.read() for level, file_id in self.url_levels.items(): response = self.client.get(f'/api/v2/{level}/files/download/{file_id}/') diff --git a/unittests/test_utils.py b/unittests/test_utils.py index 30b47216217..3cad8f7e003 100644 --- a/unittests/test_utils.py +++ b/unittests/test_utils.py @@ -162,7 +162,7 @@ def test_user_post_save_email_pattern_does_not_match(self, mock_notifications, m save_mock_member.save.assert_not_called() -class assertNumOfModelsCreated(): +class assertNumOfModelsCreated: def __init__(self, test_case, queryset, num): self.test_case = test_case self.queryset = queryset diff --git a/unittests/tools/test_asff_parser.py b/unittests/tools/test_asff_parser.py index bcbe7bb56b5..df8c38cd357 100644 --- a/unittests/tools/test_asff_parser.py +++ b/unittests/tools/test_asff_parser.py @@ -12,7 +12,7 @@ def sample_path(file_name): class TestAsffParser(DojoTestCase): def load_sample_json(self, file_name): - with open(sample_path(file_name), "r") as file: + with open(sample_path(file_name)) as file: return json.load(file) def common_check_finding(self, finding, data, index, 
guarddutydate=False): @@ -35,7 +35,7 @@ def common_check_finding(self, finding, data, index, guarddutydate=False): def test_asff_one_vuln(self): data = self.load_sample_json("one_vuln.json") - with open(sample_path("one_vuln.json"), "r") as file: + with open(sample_path("one_vuln.json")) as file: parser = AsffParser() findings = parser.get_findings(file, Test()) self.assertEqual(1, len(findings)) @@ -43,7 +43,7 @@ def test_asff_one_vuln(self): def test_asff_many_vulns(self): data = self.load_sample_json("many_vulns.json") - with open(sample_path("many_vulns.json"), "r") as file: + with open(sample_path("many_vulns.json")) as file: parser = AsffParser() findings = parser.get_findings(file, Test()) self.assertEqual(len(findings), 5) @@ -52,7 +52,7 @@ def test_asff_many_vulns(self): def test_asff_guardduty(self): data = self.load_sample_json("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json") - with open(sample_path("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json"), "r") as file: + with open(sample_path("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json")) as file: parser = AsffParser() findings = parser.get_findings(file, Test()) self.assertEqual(len(findings), 1) diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py index 7963acdcd7a..0d22d7e1b0d 100644 --- a/unittests/tools/test_dependency_check_parser.py +++ b/unittests/tools/test_dependency_check_parser.py @@ -8,7 +8,7 @@ from ..dojo_test_case import DojoTestCase -class TestFile(object): +class TestFile: def read(self): return self.content diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py index de4b8252ece..fd10079b07a 100644 --- a/unittests/tools/test_generic_parser.py +++ b/unittests/tools/test_generic_parser.py @@ -4,7 +4,7 @@ from dojo.tools.generic.parser import GenericParser -class TestFile(object): +class TestFile: def 
read(self): return self.content diff --git a/unittests/tools/test_tenable_parser.py b/unittests/tools/test_tenable_parser.py index b79c48c3d76..7ad54858510 100644 --- a/unittests/tools/test_tenable_parser.py +++ b/unittests/tools/test_tenable_parser.py @@ -109,7 +109,7 @@ def test_parse_some_findings_csv_bytes_nessus_legacy(self): for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() - testfile = open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), "rt") + testfile = open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv")) parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: diff --git a/unittests/tools/test_vcg_parser.py b/unittests/tools/test_vcg_parser.py index 5b030be780b..7e7abf55b3a 100644 --- a/unittests/tools/test_vcg_parser.py +++ b/unittests/tools/test_vcg_parser.py @@ -10,7 +10,7 @@ from dojo.tools.vcg.parser import VCGXmlParser -class TestFile(object): +class TestFile: def read(self): return self.content