diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 87ea0003d49..e7c49012723 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -370,10 +370,7 @@ def to_representation(self, value): if not isinstance(value, RequestResponseDict): if not isinstance(value, list): # this will trigger when a queryset is found... - if self.order_by: - burps = value.all().order_by(*self.order_by) - else: - burps = value.all() + burps = value.all().order_by(*self.order_by) if self.order_by else value.all() value = [ { "request": burp.get_request(), @@ -507,10 +504,7 @@ def update(self, instance, validated_data): return instance def create(self, validated_data): - if "password" in validated_data: - password = validated_data.pop("password") - else: - password = None + password = validated_data.pop("password") if "password" in validated_data else None new_configuration_permissions = None if ( @@ -536,10 +530,7 @@ def create(self, validated_data): return user def validate(self, data): - if self.instance is not None: - instance_is_superuser = self.instance.is_superuser - else: - instance_is_superuser = False + instance_is_superuser = self.instance.is_superuser if self.instance is not None else False data_is_superuser = data.get("is_superuser", False) if not self.context["request"].user.is_superuser and ( instance_is_superuser or data_is_superuser @@ -1184,7 +1175,7 @@ class Meta: def validate(self, data): - if not self.context["request"].method == "PATCH": + if self.context["request"].method != "PATCH": if "product" not in data: msg = "Product is required" raise serializers.ValidationError(msg) @@ -2447,7 +2438,7 @@ def set_context( """ context = dict(data) # update some vars - context["scan"] = data.get("file", None) + context["scan"] = data.get("file") context["environment"] = Development_Environment.objects.get( name=data.get("environment", "Development"), ) diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index ae77e923553..9253dbe68fe 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -1457,9 +1457,7 @@ def metadata(self, request, pk=None): return self._get_metadata(request, finding) if request.method == "POST": return self._add_metadata(request, finding) - if request.method == "PUT": - return self._edit_metadata(request, finding) - if request.method == "PATCH": + if request.method in ["PUT", "PATCH"]: return self._edit_metadata(request, finding) if request.method == "DELETE": return self._remove_metadata(request, finding) @@ -3027,24 +3025,15 @@ def report_generate(request, obj, options): if eng.name: engagement_name = eng.name engagement_target_start = eng.target_start - if eng.target_end: - engagement_target_end = eng.target_end - else: - engagement_target_end = "ongoing" + engagement_target_end = eng.target_end or "ongoing" if eng.test_set.all(): for t in eng.test_set.all(): test_type_name = t.test_type.name if t.environment: test_environment_name = t.environment.name test_target_start = t.target_start - if t.target_end: - test_target_end = t.target_end - else: - test_target_end = "ongoing" - if eng.test_strategy: - test_strategy_ref = eng.test_strategy - else: - test_strategy_ref = "" + test_target_end = t.target_end or "ongoing" + test_strategy_ref = eng.test_strategy or "" total_findings = len(findings.qs.all()) elif type(obj).__name__ == "Product": @@ -3054,20 +3043,14 @@ def report_generate(request, obj, options): if eng.name: engagement_name = eng.name engagement_target_start = eng.target_start - if eng.target_end: - 
engagement_target_end = eng.target_end - else: - engagement_target_end = "ongoing" + engagement_target_end = eng.target_end or "ongoing" if eng.test_set.all(): for t in eng.test_set.all(): test_type_name = t.test_type.name if t.environment: test_environment_name = t.environment.name - if eng.test_strategy: - test_strategy_ref = eng.test_strategy - else: - test_strategy_ref = "" + test_strategy_ref = eng.test_strategy or "" total_findings = len(findings.qs.all()) elif type(obj).__name__ == "Engagement": @@ -3075,38 +3058,26 @@ def report_generate(request, obj, options): if eng.name: engagement_name = eng.name engagement_target_start = eng.target_start - if eng.target_end: - engagement_target_end = eng.target_end - else: - engagement_target_end = "ongoing" + engagement_target_end = eng.target_end or "ongoing" if eng.test_set.all(): for t in eng.test_set.all(): test_type_name = t.test_type.name if t.environment: test_environment_name = t.environment.name - if eng.test_strategy: - test_strategy_ref = eng.test_strategy - else: - test_strategy_ref = "" + test_strategy_ref = eng.test_strategy or "" total_findings = len(findings.qs.all()) elif type(obj).__name__ == "Test": t = obj test_type_name = t.test_type.name test_target_start = t.target_start - if t.target_end: - test_target_end = t.target_end - else: - test_target_end = "ongoing" + test_target_end = t.target_end or "ongoing" total_findings = len(findings.qs.all()) if t.engagement.name: engagement_name = t.engagement.name engagement_target_start = t.engagement.target_start - if t.engagement.target_end: - engagement_target_end = t.engagement.target_end - else: - engagement_target_end = "ongoing" + engagement_target_end = t.engagement.target_end or "ongoing" else: pass # do nothing diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py index 8f013b60061..f2873dc5bce 100644 --- a/dojo/authorization/authorization.py +++ b/dojo/authorization/authorization.py @@ -46,7 +46,7 @@ def user_has_permission(user, obj, permission): if user.is_superuser: return True - if isinstance(obj, Product_Type) or isinstance(obj, Product): + if isinstance(obj, (Product_Type, Product)): # Global roles are only relevant for product types, products and their # dependent objects if user_has_global_permission(user, permission): @@ -97,13 +97,9 @@ def user_has_permission(user, obj, permission): and permission in Permissions.get_test_permissions() ): return user_has_permission(user, obj.engagement.product, permission) - if ( - isinstance(obj, Finding) or isinstance(obj, Stub_Finding) - ) and permission in Permissions.get_finding_permissions(): - return user_has_permission( - user, obj.test.engagement.product, permission, - ) - if ( + if (( + isinstance(obj, (Finding, Stub_Finding)) + ) and permission in Permissions.get_finding_permissions()) or ( isinstance(obj, Finding_Group) and permission in Permissions.get_finding_group_permissions() ): @@ -113,23 +109,17 @@ def user_has_permission(user, obj, permission): if ( isinstance(obj, Endpoint) and permission in Permissions.get_endpoint_permissions() - ): - return user_has_permission(user, obj.product, permission) - if ( + ) or ( isinstance(obj, Languages) and permission in Permissions.get_language_permissions() - ): - return user_has_permission(user, obj.product, permission) - if ( + ) or (( isinstance(obj, App_Analysis) and permission in Permissions.get_technology_permissions() - ): - return user_has_permission(user, obj.product, permission) - if ( + ) or ( isinstance(obj, 
Product_API_Scan_Configuration) and permission in Permissions.get_product_api_scan_configuration_permissions() - ): + )): return user_has_permission(user, obj.product, permission) if ( isinstance(obj, Product_Type_Member) @@ -351,10 +341,7 @@ def get_product_groups_dict(user): .select_related("role") .filter(group__users=user) ): - if pg_dict.get(product_group.product.id) is None: - pgu_list = [] - else: - pgu_list = pg_dict[product_group.product.id] + pgu_list = [] if pg_dict.get(product_group.product.id) is None else pg_dict[product_group.product.id] pgu_list.append(product_group) pg_dict[product_group.product.id] = pgu_list return pg_dict diff --git a/dojo/benchmark/views.py b/dojo/benchmark/views.py index 04f4fb68e3b..0d0c7174b96 100644 --- a/dojo/benchmark/views.py +++ b/dojo/benchmark/views.py @@ -1,3 +1,4 @@ +import contextlib import logging from crum import get_current_user @@ -37,10 +38,8 @@ def add_benchmark(queryset, product): benchmark_product.control = requirement requirements.append(benchmark_product) - try: + with contextlib.suppress(Exception): Benchmark_Product.objects.bulk_create(requirements) - except Exception: - pass @user_is_authorized(Product, Permissions.Benchmark_Edit, "pid") diff --git a/dojo/cred/queries.py b/dojo/cred/queries.py index 28419772328..beb84129bab 100644 --- a/dojo/cred/queries.py +++ b/dojo/cred/queries.py @@ -11,10 +11,7 @@ def get_authorized_cred_mappings(permission, queryset=None): if user is None: return Cred_Mapping.objects.none() - if queryset is None: - cred_mappings = Cred_Mapping.objects.all().order_by("id") - else: - cred_mappings = queryset + cred_mappings = Cred_Mapping.objects.all().order_by("id") if queryset is None else queryset if user.is_superuser: return cred_mappings diff --git a/dojo/cred/views.py b/dojo/cred/views.py index 2fc373c3ac9..f8f7756e340 100644 --- a/dojo/cred/views.py +++ b/dojo/cred/views.py @@ -1,3 +1,4 @@ +import contextlib import logging from django.contrib import messages @@ -585,10 +586,8 @@ def new_cred_finding(request, fid): @user_is_authorized(Cred_User, Permissions.Credential_Delete, "ttid") def delete_cred_controller(request, destination_url, id, ttid): cred = None - try: + with contextlib.suppress(Exception): cred = Cred_Mapping.objects.get(pk=ttid) - except: - pass if request.method == "POST": tform = CredMappingForm(request.POST, instance=cred) message = "" diff --git a/dojo/endpoint/queries.py b/dojo/endpoint/queries.py index 684eeab7b1a..4a6f2ae56cd 100644 --- a/dojo/endpoint/queries.py +++ b/dojo/endpoint/queries.py @@ -20,10 +20,7 @@ def get_authorized_endpoints(permission, queryset=None, user=None): if user is None: return Endpoint.objects.none() - if queryset is None: - endpoints = Endpoint.objects.all().order_by("id") - else: - endpoints = queryset + endpoints = Endpoint.objects.all().order_by("id") if queryset is None else queryset if user.is_superuser: return endpoints @@ -66,10 +63,7 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None): if user is None: return Endpoint_Status.objects.none() - if queryset is None: - endpoint_status = Endpoint_Status.objects.all().order_by("id") - else: - endpoint_status = queryset + endpoint_status = Endpoint_Status.objects.all().order_by("id") if queryset is None else queryset if user.is_superuser: return endpoint_status diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py index d5c378e5e97..0d584ff6c6c 100644 --- a/dojo/endpoint/utils.py +++ b/dojo/endpoint/utils.py @@ -20,20 +20,11 @@ def endpoint_filter(**kwargs): qs = 
Endpoint.objects.all() - if kwargs.get("protocol"): - qs = qs.filter(protocol__iexact=kwargs["protocol"]) - else: - qs = qs.filter(protocol__isnull=True) + qs = qs.filter(protocol__iexact=kwargs["protocol"]) if kwargs.get("protocol") else qs.filter(protocol__isnull=True) - if kwargs.get("userinfo"): - qs = qs.filter(userinfo__exact=kwargs["userinfo"]) - else: - qs = qs.filter(userinfo__isnull=True) + qs = qs.filter(userinfo__exact=kwargs["userinfo"]) if kwargs.get("userinfo") else qs.filter(userinfo__isnull=True) - if kwargs.get("host"): - qs = qs.filter(host__iexact=kwargs["host"]) - else: - qs = qs.filter(host__isnull=True) + qs = qs.filter(host__iexact=kwargs["host"]) if kwargs.get("host") else qs.filter(host__isnull=True) if kwargs.get("port"): if (kwargs.get("protocol")) and \ @@ -48,20 +39,11 @@ def endpoint_filter(**kwargs): else: qs = qs.filter(port__isnull=True) - if kwargs.get("path"): - qs = qs.filter(path__exact=kwargs["path"]) - else: - qs = qs.filter(path__isnull=True) + qs = qs.filter(path__exact=kwargs["path"]) if kwargs.get("path") else qs.filter(path__isnull=True) - if kwargs.get("query"): - qs = qs.filter(query__exact=kwargs["query"]) - else: - qs = qs.filter(query__isnull=True) + qs = qs.filter(query__exact=kwargs["query"]) if kwargs.get("query") else qs.filter(query__isnull=True) - if kwargs.get("fragment"): - qs = qs.filter(fragment__exact=kwargs["fragment"]) - else: - qs = qs.filter(fragment__isnull=True) + qs = qs.filter(fragment__exact=kwargs["fragment"]) if kwargs.get("fragment") else qs.filter(fragment__isnull=True) if kwargs.get("product"): qs = qs.filter(product__exact=kwargs["product"]) @@ -267,12 +249,11 @@ def validate_endpoints_to_add(endpoints_to_add): endpoints = endpoints_to_add.split() for endpoint in endpoints: try: - if "://" in endpoint: # is it full uri? - endpoint_ins = Endpoint.from_uri(endpoint) # from_uri validate URI format + split to components - else: - # from_uri parse any '//localhost', '//127.0.0.1:80', '//foo.bar/path' correctly - # format doesn't follow RFC 3986 but users use it - endpoint_ins = Endpoint.from_uri("//" + endpoint) + # is it full uri? + # 1. from_uri validate URI format + split to components + # 2. 
from_uri parse any '//localhost', '//127.0.0.1:80', '//foo.bar/path' correctly + # format doesn't follow RFC 3986 but users use it + endpoint_ins = Endpoint.from_uri(endpoint) if "://" in endpoint else Endpoint.from_uri("//" + endpoint) endpoint_ins.clean() endpoint_list.append([ endpoint_ins.protocol, diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py index 06ee7ac24a1..cd2b82eabfc 100644 --- a/dojo/endpoint/views.py +++ b/dojo/endpoint/views.py @@ -61,10 +61,7 @@ def process_endpoints_view(request, host_view=False, vulnerable=False): paged_endpoints = get_page_items(request, endpoints.qs, 25) - if vulnerable: - view_name = "Vulnerable" - else: - view_name = "All" + view_name = "Vulnerable" if vulnerable else "All" if host_view: view_name += " Hosts" diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index d9d3cef0340..0eb03c8376a 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -316,10 +316,7 @@ def edit_engagement(request, eid): logger.debug("showing jira-epic-form") jira_epic_form = JIRAEngagementForm(instance=engagement) - if is_ci_cd: - title = "Edit CI/CD Engagement" - else: - title = "Edit Interactive Engagement" + title = "Edit CI/CD Engagement" if is_ci_cd else "Edit Interactive Engagement" product_tab = Product_Tab(engagement.product, title=title, tab="engagements") product_tab.setEngagement(engagement) @@ -465,10 +462,7 @@ def get(self, request, eid, *args, **kwargs): available_note_types = find_available_notetypes(notes) form = DoneForm() files = eng.files.all() - if note_type_activation: - form = TypedNoteForm(available_note_types=available_note_types) - else: - form = NoteForm() + form = TypedNoteForm(available_note_types=available_note_types) if note_type_activation else NoteForm() creds = Cred_Mapping.objects.filter( product=eng.product).select_related("cred_id").order_by("cred_id") @@ -551,10 +545,7 @@ def post(self, request, eid, *args, **kwargs): new_note.date = timezone.now() new_note.save() eng.notes.add(new_note) - if note_type_activation: - form = TypedNoteForm(available_note_types=available_note_types) - else: - form = NoteForm() + form = TypedNoteForm(available_note_types=available_note_types) if note_type_activation else NoteForm() title = f"Engagement: {eng.name} on {eng.product.name}" messages.add_message(request, messages.SUCCESS, diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py index 47386e43f86..f71866dbd0d 100644 --- a/dojo/finding/queries.py +++ b/dojo/finding/queries.py @@ -45,10 +45,7 @@ def get_authorized_findings(permission, queryset=None, user=None): user = get_current_user() if user is None: return Finding.objects.none() - if queryset is None: - findings = Finding.objects.all().order_by("id") - else: - findings = queryset + findings = Finding.objects.all().order_by("id") if queryset is None else queryset if user.is_superuser: return findings @@ -114,10 +111,7 @@ def get_authorized_vulnerability_ids(permission, queryset=None, user=None): if user is None: return Vulnerability_Id.objects.none() - if queryset is None: - vulnerability_ids = Vulnerability_Id.objects.all() - else: - vulnerability_ids = queryset + vulnerability_ids = Vulnerability_Id.objects.all() if queryset is None else queryset if user.is_superuser: return vulnerability_ids diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 8d453ab5fed..93a0e90eff7 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -1217,10 +1217,7 @@ def close_finding(request, fid): # in order to close a finding, we need to capture 
why it was closed # we can do this with a Note note_type_activation = Note_Type.objects.filter(is_active=True) - if len(note_type_activation): - missing_note_types = get_missing_mandatory_notetypes(finding) - else: - missing_note_types = note_type_activation + missing_note_types = get_missing_mandatory_notetypes(finding) if len(note_type_activation) else note_type_activation form = CloseFindingForm(missing_note_types=missing_note_types) if request.method == "POST": form = CloseFindingForm(request.POST, missing_note_types=missing_note_types) @@ -2274,10 +2271,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True): cwe=title_template.cwe, title__icontains=title_template.title, ).values_list("id", flat=True) - if result_list is None: - result_list = finding_ids - else: - result_list = list(chain(result_list, finding_ids)) + result_list = finding_ids if result_list is None else list(chain(result_list, finding_ids)) # If result_list is None the filter exclude won't work if result_list: @@ -2379,16 +2373,7 @@ def edit_template(request, tid): count = apply_cwe_mitigation( form.cleaned_data["apply_to_findings"], template, ) - if count > 0: - apply_message = ( - " and " - + str(count) - + " " - + pluralize(count, "finding,findings") - + " " - ) - else: - apply_message = "" + apply_message = " and " + str(count) + " " + pluralize(count, "finding,findings") + " " if count > 0 else "" messages.add_message( request, diff --git a/dojo/finding_group/queries.py b/dojo/finding_group/queries.py index 39b91c02665..987cf7f6901 100644 --- a/dojo/finding_group/queries.py +++ b/dojo/finding_group/queries.py @@ -13,10 +13,7 @@ def get_authorized_finding_groups(permission, queryset=None, user=None): if user is None: return Finding_Group.objects.none() - if queryset is None: - finding_groups = Finding_Group.objects.all() - else: - finding_groups = queryset + finding_groups = Finding_Group.objects.all() if queryset is None else queryset if user.is_superuser: return finding_groups diff --git a/dojo/finding_group/views.py b/dojo/finding_group/views.py index 546dae93763..3b7dfed2c6e 100644 --- a/dojo/finding_group/views.py +++ b/dojo/finding_group/views.py @@ -77,7 +77,7 @@ def view_finding_group(request, fgid): if jira_issue.startswith(jira_instance.url + "/browse/"): jira_issue = jira_issue[len(jira_instance.url + "/browse/"):] - if finding_group.has_jira_issue and not jira_issue == jira_helper.get_jira_key(finding_group): + if finding_group.has_jira_issue and jira_issue != jira_helper.get_jira_key(finding_group): jira_helper.unlink_jira(request, finding_group) jira_helper.finding_group_link_jira(request, finding_group, jira_issue) elif not finding_group.has_jira_issue: diff --git a/dojo/forms.py b/dojo/forms.py index cdff2b53d52..ed5df8be8ce 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -175,10 +175,7 @@ def render(self, name, value, attrs=None, renderer=None): output = [] - if "id" in self.attrs: - id_ = self.attrs["id"] - else: - id_ = f"id_{name}" + id_ = self.attrs.get("id", f"id_{name}") month_choices = list(MONTHS.items()) if not (self.required and value): @@ -3218,10 +3215,7 @@ def __init__(self, *args, **kwargs): question=self.question, ) - if initial_answer.exists(): - initial_answer = initial_answer[0].answer - else: - initial_answer = "" + initial_answer = initial_answer[0].answer if initial_answer.exists() else "" self.fields["answer"] = forms.CharField( label=self.question.text, diff --git a/dojo/group/utils.py b/dojo/group/utils.py index 1a6bc68b137..d2245dac2a6 100644 --- 
a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -11,10 +11,7 @@ def get_auth_group_name(group, attempt=0): if attempt > 999: msg = f"Cannot find name for authorization group for Dojo_Group {group.name}, aborted after 999 attempts." raise Exception(msg) - if attempt == 0: - auth_group_name = group.name - else: - auth_group_name = group.name + "_" + str(attempt) + auth_group_name = group.name if attempt == 0 else group.name + "_" + str(attempt) try: # Attempt to fetch an existing group before moving forward with the real operation diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py index 9f2a1cb7e76..a527c4fda2b 100644 --- a/dojo/importers/auto_create_context.py +++ b/dojo/importers/auto_create_context.py @@ -49,7 +49,7 @@ def process_object_fields( test such that passing the whole object, or just the ID will suffice """ - if object_id := data.get(key, None): + if object_id := data.get(key): # Convert to just the ID if the whole object as passed if isinstance(object_id, object_type): object_id = object_id.id diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index ee844280555..f052e3f1f9b 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -174,13 +174,11 @@ def get_jira_project(obj, use_inheritance=True): if obj.jira_project: return obj.jira_project # some old jira_issue records don't have a jira_project, so try to go via the finding instead - if hasattr(obj, "finding") and obj.finding: - return get_jira_project(obj.finding, use_inheritance=use_inheritance) - if hasattr(obj, "engagement") and obj.engagement: + if (hasattr(obj, "finding") and obj.finding) or (hasattr(obj, "engagement") and obj.engagement): return get_jira_project(obj.finding, use_inheritance=use_inheritance) return None - if isinstance(obj, Finding) or isinstance(obj, Stub_Finding): + if isinstance(obj, (Finding, Stub_Finding)): finding = obj return get_jira_project(finding.test) @@ -264,10 +262,7 @@ def get_jira_issue_url(issue): def get_jira_project_url(obj): logger.debug("getting jira project url") - if not isinstance(obj, JIRA_Project): - jira_project = get_jira_project(obj) - else: - jira_project = obj + jira_project = get_jira_project(obj) if not isinstance(obj, JIRA_Project) else obj if jira_project: logger.debug("getting jira project url2") @@ -323,14 +318,14 @@ def get_jira_issue_template(obj): def get_jira_creation(obj): - if isinstance(obj, Finding) or isinstance(obj, Engagement) or isinstance(obj, Finding_Group): + if isinstance(obj, (Finding, Engagement, Finding_Group)): if obj.has_jira_issue: return obj.jira_issue.jira_creation return None def get_jira_change(obj): - if isinstance(obj, Finding) or isinstance(obj, Engagement) or isinstance(obj, Finding_Group): + if isinstance(obj, (Finding, Engagement, Finding_Group)): if obj.has_jira_issue: return obj.jira_issue.jira_change else: @@ -350,7 +345,7 @@ def has_jira_issue(obj): def get_jira_issue(obj): - if isinstance(obj, Finding) or isinstance(obj, Engagement) or isinstance(obj, Finding_Group): + if isinstance(obj, (Finding, Engagement, Finding_Group)): try: return obj.jira_issue except JIRA_Issue.DoesNotExist: @@ -571,7 +566,7 @@ def get_labels(obj): def get_tags(obj): # Update Label with system setttings label tags = [] - if isinstance(obj, Finding) or isinstance(obj, Engagement): + if isinstance(obj, (Finding, Engagement)): obj_tags = obj.tags.all() if obj_tags: for tag in obj_tags: @@ -1051,11 +1046,8 @@ def issue_from_jira_is_active(issue_from_jira): if not 
issue_from_jira.fields.resolution: return True - if issue_from_jira.fields.resolution == "None": - return True - # some kind of resolution is present that is not null or None - return False + return issue_from_jira.fields.resolution == "None" def push_status_to_jira(obj, jira_instance, jira, issue, save=False): diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py index 631130b58da..f5e25a89c4f 100644 --- a/dojo/metrics/views.py +++ b/dojo/metrics/views.py @@ -118,7 +118,7 @@ def metrics(request, mtype): punchcard = [] ticks = [] - if "view" in request.GET and "dashboard" == request.GET["view"]: + if "view" in request.GET and request.GET["view"] == "dashboard": punchcard, ticks = get_punchcard_data(all_findings, filters["start_date"], filters["weeks_between"], view) page_name = _("%(team_name)s Metrics") % {"team_name": get_system_setting("team_name")} template = "dojo/dashboard-metrics.html" diff --git a/dojo/models.py b/dojo/models.py index 2346c1e916c..e7e04b8f239 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -1521,17 +1521,11 @@ def copy(self): return copy def is_overdue(self): - if self.engagement_type == "CI/CD": - overdue_grace_days = 10 - else: - overdue_grace_days = 0 + overdue_grace_days = 10 if self.engagement_type == "CI/CD" else 0 max_end_date = timezone.now() - relativedelta(days=overdue_grace_days) - if self.target_end < max_end_date.date(): - return True - - return False + return self.target_end < max_end_date.date() def get_breadcrumbs(self): bc = self.product.get_breadcrumbs() @@ -1622,10 +1616,7 @@ def copy(self, finding=None): @property def age(self): - if self.mitigated: - diff = self.mitigated_time.date() - self.date - else: - diff = get_current_date() - self.date + diff = self.mitigated_time.date() - self.date if self.mitigated else get_current_date() - self.date days = diff.days return max(0, days) @@ -1820,9 +1811,7 @@ def is_broken(self): except: return True else: - if self.product: - return False - return True + return not self.product @property def mitigated(self): @@ -2910,7 +2899,7 @@ def get_number_severity(severity): @staticmethod def get_severity(num_severity): severities = {0: "Info", 1: "Low", 2: "Medium", 3: "High", 4: "Critical"} - if num_severity in severities.keys(): + if num_severity in severities: return severities[num_severity] return None diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index ce3f52bf1a5..c3f431d5749 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -137,7 +137,7 @@ def create_notification(event=None, **kwargs): def create_description(event, *args, **kwargs): - if "description" not in kwargs.keys(): + if "description" not in kwargs: if event == "product_added": kwargs["description"] = _("Product %s has been created successfully.") % kwargs["title"] elif event == "product_type_added": @@ -431,10 +431,7 @@ def send_webhooks_notification(event, user=None, *args, **kwargs): continue # HTTP request passed successfully but we still need to check status code - if 500 <= res.status_code < 600 or res.status_code == 429: - error = ERROR_TEMPORARY - else: - error = ERROR_PERMANENT + error = ERROR_TEMPORARY if 500 <= res.status_code < 600 or res.status_code == 429 else ERROR_PERMANENT endpoint.note = f"Response status code: {res.status_code}" logger.error(f"Error when sending message to Webhooks '{endpoint.name}' (status: {res.status_code}): {res.text}") @@ -569,10 +566,7 @@ def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[ title = "Created/Updated 
" + str(updated_count) + " findings for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) - if updated_count == 0: - event = "scan_added_empty" - else: - event = "scan_added" + event = "scan_added_empty" if updated_count == 0 else "scan_added" create_notification(event=event, title=title, findings_new=new_findings, findings_mitigated=findings_mitigated, findings_reactivated=findings_reactivated, finding_count=updated_count, test=test, engagement=test.engagement, product=test.engagement.product, findings_untouched=findings_untouched, diff --git a/dojo/pipeline.py b/dojo/pipeline.py index ee2dc0ae186..6410902826f 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -103,9 +103,7 @@ def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): def is_group_id(group): - if re.search(r"^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$", group): - return True - return False + return bool(re.search("^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$", group)) def assign_user_to_groups(user, group_names, social_provider): diff --git a/dojo/product/views.py b/dojo/product/views.py index 6680c2e5340..0d010200f4e 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -1,6 +1,7 @@ # # product import base64 import calendar as tcalendar +import contextlib import logging from collections import OrderedDict from datetime import date, datetime, timedelta @@ -349,9 +350,7 @@ def identify_view(request): return view msg = 'invalid view, view must be "Endpoint" or "Finding"' raise ValueError(msg) - if get_data.get("finding__severity", None): - return "Endpoint" - if get_data.get("false_positive", None): + if get_data.get("finding__severity", None) or get_data.get("false_positive", None): return "Endpoint" referer = request.META.get("HTTP_REFERER", None) if referer: @@ -608,13 +607,11 @@ def view_product_metrics(request, pid): open_close_weekly[unix_timestamp] = {"closed": 0, "open": 1, "accepted": 0} open_close_weekly[unix_timestamp]["week"] = html_date - if view == "Finding": - severity = finding.get("severity") - elif view == "Endpoint": + if view == "Finding" or view == "Endpoint": severity = finding.get("severity") finding_age = calculate_finding_age(finding) - if open_objs_by_age.get(finding_age, None): + if open_objs_by_age.get(finding_age): open_objs_by_age[finding_age] += 1 else: open_objs_by_age[finding_age] = 1 @@ -909,10 +906,7 @@ def new_product(request, ptid=None): if get_system_setting("enable_jira"): jira_project_form = JIRAProjectForm() - if get_system_setting("enable_github"): - gform = GITHUB_Product_Form() - else: - gform = None + gform = GITHUB_Product_Form() if get_system_setting("enable_github") else None add_breadcrumb(title=_("New Product"), top_level=False, request=request) return render(request, "dojo/new_product.html", @@ -959,10 +953,8 @@ def edit_product(request, pid): if get_system_setting("enable_github") and github_inst: gform = GITHUB_Product_Form(request.POST, instance=github_inst) # need to handle delete - try: + with contextlib.suppress(Exception): gform.save() - except: - pass elif get_system_setting("enable_github"): gform = GITHUB_Product_Form(request.POST) if gform.is_valid(): @@ -986,10 +978,7 @@ def edit_product(request, pid): jform = None if github_enabled: - if github_inst is not None: - gform = GITHUB_Product_Form(instance=github_inst) - else: - gform = GITHUB_Product_Form() + gform = GITHUB_Product_Form(instance=github_inst) if 
github_inst is not None else GITHUB_Product_Form() else: gform = None @@ -1119,10 +1108,7 @@ def new_eng_for_app(request, pid, cicd=False): logger.debug("showing jira-epic-form") jira_epic_form = JIRAEngagementForm() - if cicd: - title = _("New CI/CD Engagement") - else: - title = _("New Interactive Engagement") + title = _("New CI/CD Engagement") if cicd else _("New Interactive Engagement") product_tab = Product_Tab(product, title=title, tab="engagements") return render(request, "dojo/new_eng.html", { diff --git a/dojo/search/views.py b/dojo/search/views.py index 604e9ecd68c..ad78daedcda 100644 --- a/dojo/search/views.py +++ b/dojo/search/views.py @@ -189,10 +189,10 @@ def simple_search(request): # some over the top tag displaying happening... findings.object_list = findings.object_list.prefetch_related("test__engagement__product__tags") - tag = operators["tag"] if "tag" in operators else keywords - tags = operators["tags"] if "tags" in operators else keywords - not_tag = operators["not-tag"] if "not-tag" in operators else keywords - not_tags = operators["not-tags"] if "not-tags" in operators else keywords + tag = operators.get("tag", keywords) + tags = operators.get("tags", keywords) + not_tag = operators.get("not-tag", keywords) + not_tags = operators.get("not-tags", keywords) if (search_tags and tag) or tags or not_tag or not_tags: logger.debug("searching tags") @@ -544,7 +544,7 @@ def apply_vulnerability_id_filter(qs, operators): def perform_keyword_search_for_operator(qs, operators, operator, keywords_query): watson_results = None operator_query = "" - keywords_query = "" if not keywords_query else keywords_query + keywords_query = keywords_query or "" if operator in operators: operator_query = " ".join(operators[operator]) diff --git a/dojo/templatetags/authorization_tags.py b/dojo/templatetags/authorization_tags.py index fd4d17d60de..befe2cf27ae 100644 --- a/dojo/templatetags/authorization_tags.py +++ b/dojo/templatetags/authorization_tags.py @@ -21,10 +21,7 @@ def has_global_permission(permission): @register.filter def has_configuration_permission(permission, request): - if request is None: - user = crum.get_current_user() - else: - user = crum.get_current_user() or request.user + user = crum.get_current_user() if request is None else crum.get_current_user() or request.user return configuration_permission(user, permission) @@ -36,10 +33,7 @@ def get_user_permissions(user): @register.filter def user_has_configuration_permission_without_group(user, codename): permissions = get_user_permissions(user) - for permission in permissions: - if permission.codename == codename: - return True - return False + return any(permission.codename == codename for permission in permissions) @cache_for_request @@ -49,10 +43,7 @@ def get_group_permissions(group): @register.filter def group_has_configuration_permission(group, codename): - for permission in get_group_permissions(group): - if permission.codename == codename: - return True - return False + return any(permission.codename == codename for permission in get_group_permissions(group)) @register.simple_tag diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 483e16fe4f2..b79b7920124 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -1,4 +1,5 @@ import base64 +import contextlib import datetime import logging import mimetypes @@ -175,10 +176,8 @@ def remove_string(string, value): def percentage(fraction, value): return_value = "" if int(value) > 0: - try: + with 
contextlib.suppress(ValueError): return_value = "%.1f%%" % ((float(fraction) / float(value)) * 100) - except ValueError: - pass return return_value @@ -327,7 +326,7 @@ def action_log_entry(value, autoescape=None): import json history = json.loads(value) text = "" - for k in history.keys(): + for k in history: text += k.capitalize() + ' changed from "' + \ history[k][0] + '" to "' + history[k][1] + '"\n' return text @@ -696,9 +695,7 @@ def get_severity_count(id, table): if table == "test": display_counts.append("Total: " + str(total) + " Findings") - elif table == "engagement": - display_counts.append("Total: " + str(total) + " Active Findings") - elif table == "product": + elif table == "engagement" or table == "product": display_counts.append("Total: " + str(total) + " Active Findings") return ", ".join([str(item) for item in display_counts]) @@ -767,10 +764,7 @@ def has_vulnerability_url(vulnerability_id): if not vulnerability_id: return False - for key in settings.VULNERABILITY_URLS: - if vulnerability_id.upper().startswith(key): - return True - return False + return any(vulnerability_id.upper().startswith(key) for key in settings.VULNERABILITY_URLS) @register.filter @@ -923,7 +917,7 @@ def esc(x): """ jira_project_no_inheritance = jira_helper.get_jira_project(product_or_engagement, use_inheritance=False) - inherited = True if not jira_project_no_inheritance else False + inherited = bool(not jira_project_no_inheritance) icon = "fa-bug" color = "" diff --git a/dojo/templatetags/event_tags.py b/dojo/templatetags/event_tags.py index ff1ffe8f068..bf66abdd761 100644 --- a/dojo/templatetags/event_tags.py +++ b/dojo/templatetags/event_tags.py @@ -63,8 +63,7 @@ def is_file(field): @register.filter def is_text(field): - return isinstance(field.field.widget, forms.TextInput) or \ - isinstance(field.field.widget, forms.Textarea) + return isinstance(field.field.widget, (forms.TextInput, forms.Textarea)) @register.filter diff --git a/dojo/templatetags/get_config_setting.py b/dojo/templatetags/get_config_setting.py index ca917968b75..08ab9251dbd 100644 --- a/dojo/templatetags/get_config_setting.py +++ b/dojo/templatetags/get_config_setting.py @@ -7,7 +7,5 @@ @register.filter def get_config_setting(config_setting): if hasattr(settings, config_setting): - if getattr(settings, config_setting, None): - return True - return False + return bool(getattr(settings, config_setting, None)) return False diff --git a/dojo/templatetags/get_endpoint_status.py b/dojo/templatetags/get_endpoint_status.py index 42a5bdb8eaa..37ca83d7070 100644 --- a/dojo/templatetags/get_endpoint_status.py +++ b/dojo/templatetags/get_endpoint_status.py @@ -8,7 +8,7 @@ @register.filter(name="has_endpoints") def has_endpoints(finding): - return True if finding.endpoints.all() else False + return bool(finding.endpoints.all()) @register.filter(name="get_vulnerable_endpoints") diff --git a/dojo/test/views.py b/dojo/test/views.py index b93ebe12933..a194d66ab73 100644 --- a/dojo/test/views.py +++ b/dojo/test/views.py @@ -871,10 +871,7 @@ def handle_request( # by default we keep a trace of the scan_type used to create the test # if it's not here, we use the "name" of the test type # this feature exists to provide custom label for tests for some parsers - if test.scan_type: - scan_type = test.scan_type - else: - scan_type = test.test_type.name + scan_type = test.scan_type or test.test_type.name # Set the product tab product_tab = Product_Tab(test.engagement.product, title=_("Re-upload a %s") % scan_type, tab="engagements") 
product_tab.setEngagement(test.engagement) diff --git a/dojo/tools/acunetix/parse_acunetix360_json.py b/dojo/tools/acunetix/parse_acunetix360_json.py index fcff232a553..848770cbb3d 100644 --- a/dojo/tools/acunetix/parse_acunetix360_json.py +++ b/dojo/tools/acunetix/parse_acunetix360_json.py @@ -49,10 +49,7 @@ def get_findings(self, filename, test): + references ) url = item["Url"] - if item["Impact"] is not None: - impact = text_maker.handle(item.get("Impact", "")) - else: - impact = None + impact = text_maker.handle(item.get("Impact", "")) if item["Impact"] is not None else None dupe_key = title request = item["HttpRequest"]["Content"] if request is None or len(request) <= 0: diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py index 4b86d947318..386ca39fa7d 100644 --- a/dojo/tools/acunetix/parse_acunetix_xml.py +++ b/dojo/tools/acunetix/parse_acunetix_xml.py @@ -22,7 +22,7 @@ def get_findings(self, filename, test): if ":" not in start_url: start_url = "//" + start_url # get report date - if scan.findtext("StartTime") and "" != scan.findtext("StartTime"): + if scan.findtext("StartTime") and scan.findtext("StartTime") != "": report_date = dateutil.parser.parse( scan.findtext("StartTime"), ).date() @@ -41,11 +41,11 @@ def get_findings(self, filename, test): dynamic_finding=False, nb_occurences=1, ) - if item.findtext("Impact") and "" != item.findtext("Impact"): + if item.findtext("Impact") and item.findtext("Impact") != "": finding.impact = item.findtext("Impact") - if item.findtext("Recommendation") and "" != item.findtext( + if item.findtext("Recommendation") and item.findtext( "Recommendation", - ): + ) != "": finding.mitigation = item.findtext("Recommendation") if report_date: finding.date = report_date @@ -103,7 +103,7 @@ def get_findings(self, filename, test): port=url.port, path=item.findtext("Affects"), ) - if url.scheme is not None and "" != url.scheme: + if url.scheme is not None and url.scheme != "": endpoint.protocol = url.scheme finding.unsaved_endpoints = [endpoint] dupe_key = hashlib.sha256( @@ -169,6 +169,4 @@ def get_false_positive(self, false_p): :param false_p: :return: """ - if false_p: - return True - return False + return bool(false_p) diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py index 48e18b686e0..c65b87a7ca2 100644 --- a/dojo/tools/anchore_grype/parser.py +++ b/dojo/tools/anchore_grype/parser.py @@ -185,9 +185,7 @@ def get_findings(self, file, test): return list(dupes.values()) def _convert_severity(self, val): - if "Unknown" == val: - return "Info" - if "Negligible" == val: + if val == "Unknown" or val == "Negligible": return "Info" return val.title() diff --git a/dojo/tools/api_blackduck/parser.py b/dojo/tools/api_blackduck/parser.py index ccd228c89c6..5eab851b1c2 100644 --- a/dojo/tools/api_blackduck/parser.py +++ b/dojo/tools/api_blackduck/parser.py @@ -37,10 +37,7 @@ def api_scan_configuration_hint(self): ) def get_findings(self, file, test): - if file is None: - data = BlackduckApiImporter().get_findings(test) - else: - data = json.load(file) + data = BlackduckApiImporter().get_findings(test) if file is None else json.load(file) findings = [] for entry in data: vulnerability_id = entry["vulnerabilityWithRemediation"][ diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py index 5ec50de6c45..8ec9fab7a23 100644 --- a/dojo/tools/api_cobalt/parser.py +++ b/dojo/tools/api_cobalt/parser.py @@ -39,10 +39,7 @@ def api_scan_configuration_hint(self): ) def 
get_findings(self, file, test): - if file is None: - data = CobaltApiImporter().get_findings(test) - else: - data = json.load(file) + data = CobaltApiImporter().get_findings(test) if file is None else json.load(file) findings = [] for entry in data["data"]: @@ -130,9 +127,7 @@ def include_finding(self, resource): "wont_fix", # Risk of finding has been accepted ] - if resource["state"] in allowed_states: - return True - return False + return resource["state"] in allowed_states def convert_endpoints(self, affected_targets): """Convert Cobalt affected_targets into DefectDojo endpoints""" diff --git a/dojo/tools/api_edgescan/parser.py b/dojo/tools/api_edgescan/parser.py index 66b00f92465..24e22fb9bfb 100644 --- a/dojo/tools/api_edgescan/parser.py +++ b/dojo/tools/api_edgescan/parser.py @@ -34,10 +34,7 @@ def api_scan_configuration_hint(self): return "In the field Service key 1, provide the Edgescan asset ID(s). Leaving it blank will import all assets' findings." def get_findings(self, file, test): - if file: - data = json.load(file) - else: - data = EdgescanImporter().get_findings(test) + data = json.load(file) if file else EdgescanImporter().get_findings(test) return self.process_vulnerabilities(test, data) @@ -68,7 +65,7 @@ def make_finding(self, test, vulnerability): finding.severity = ES_SEVERITIES[vulnerability["severity"]] finding.description = vulnerability["description"] finding.mitigation = vulnerability["remediation"] - finding.active = True if vulnerability["status"] == "open" else False + finding.active = vulnerability["status"] == "open" if vulnerability["asset_tags"]: finding.tags = vulnerability["asset_tags"].split(",") finding.unique_id_from_tool = vulnerability["id"] diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py index 7e5856707d4..f507a67b26b 100644 --- a/dojo/tools/api_sonarqube/importer.py +++ b/dojo/tools/api_sonarqube/importer.py @@ -141,10 +141,7 @@ def import_issues(self, test): continue issue_type = issue["type"] - if len(issue["message"]) > 511: - title = issue["message"][0:507] + "..." - else: - title = issue["message"] + title = issue["message"][0:507] + "..." 
if len(issue["message"]) > 511 else issue["message"] component_key = issue["component"] line = issue.get("line") rule_id = issue["rule"] diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py index c8bcd7e0664..d4f6166bd6b 100644 --- a/dojo/tools/api_sonarqube/updater.py +++ b/dojo/tools/api_sonarqube/updater.py @@ -61,10 +61,7 @@ def get_sonarqube_status_for(finding): elif finding.risk_accepted: target_status = "RESOLVED / WONTFIX" elif finding.active: - if finding.verified: - target_status = "CONFIRMED" - else: - target_status = "REOPENED" + target_status = "CONFIRMED" if finding.verified else "REOPENED" return target_status def get_sonarqube_required_transitions_for( diff --git a/dojo/tools/api_sonarqube/updater_from_source.py b/dojo/tools/api_sonarqube/updater_from_source.py index 93afa04c4ad..37b9ad155b6 100644 --- a/dojo/tools/api_sonarqube/updater_from_source.py +++ b/dojo/tools/api_sonarqube/updater_from_source.py @@ -63,10 +63,7 @@ def get_sonarqube_status_for(finding): elif finding.risk_accepted: target_status = "WONTFIX" elif finding.active: - if finding.verified: - target_status = "CONFIRMED" - else: - target_status = "REOPENED" + target_status = "CONFIRMED" if finding.verified else "REOPENED" return target_status @staticmethod diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py index 076c2d71dc5..a3b80281d0e 100644 --- a/dojo/tools/aqua/parser.py +++ b/dojo/tools/aqua/parser.py @@ -20,10 +20,7 @@ def get_findings(self, json_output, test): def get_items(self, tree, test): self.items = {} if isinstance(tree, list): # Aqua Scan Report coming from Azure Devops jobs. - if tree: - vulnerabilitytree = tree[0]["results"]["resources"] - else: - vulnerabilitytree = [] + vulnerabilitytree = tree[0]["results"]["resources"] if tree else [] self.vulnerability_tree(vulnerabilitytree, test) elif "resources" in tree: # Aqua Scan Report not from Azure Devops jobs. vulnerabilitytree = tree["resources"] diff --git a/dojo/tools/arachni/parser.py b/dojo/tools/arachni/parser.py index 7b28d7e9f0a..72864750ae8 100644 --- a/dojo/tools/arachni/parser.py +++ b/dojo/tools/arachni/parser.py @@ -85,9 +85,7 @@ def get_item(self, item_node, report_date): description = html2text.html2text(description) remediation = ( - item_node["remedy_guidance"] - if "remedy_guidance" in item_node - else "n/a" + item_node.get("remedy_guidance", "n/a") ) if remediation: remediation = html2text.html2text(remediation) @@ -103,7 +101,7 @@ def get_item(self, item_node, report_date): references = html2text.html2text(references) severity = item_node.get("severity", "Info").capitalize() - if "Informational" == severity: + if severity == "Informational": severity = "Info" # Finding and Endpoint objects returned have not been saved to the diff --git a/dojo/tools/asff/parser.py b/dojo/tools/asff/parser.py index ccd5eb3110c..4e90bed1e87 100644 --- a/dojo/tools/asff/parser.py +++ b/dojo/tools/asff/parser.py @@ -46,10 +46,7 @@ def get_findings(self, file, test): else: mitigation = None references = None - if item.get("RecordState") and item.get("RecordState") == "ACTIVE": - active = True - else: - active = False + active = bool(item.get("RecordState") and item.get("RecordState") == "ACTIVE") # Adding the Resources:0/Id value to the description. 
# diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py index 7093a596012..e24ac155296 100644 --- a/dojo/tools/aws_prowler/parser.py +++ b/dojo/tools/aws_prowler/parser.py @@ -65,16 +65,10 @@ def process_csv(self, file, test): # title = re.sub(r"\[.*\]\s", "", result_extended) control = re.sub(r"\[.*\]\s", "", title_text) sev = self.getCriticalityRating(result, level, severity) - if result == "INFO" or result == "PASS": - active = False - else: - active = True + active = not (result == "INFO" or result == "PASS") # creating description early will help with duplication control - if not level: - level = "" - else: - level = ", " + level + level = "" if not level else ", " + level description = ( "**Issue:** " + str(result_extended) @@ -160,10 +154,7 @@ def process_json(self, file, test): sev = self.getCriticalityRating("FAIL", level, severity) # creating description early will help with duplication control - if not level: - level = "" - else: - level = ", " + level + level = "" if not level else ", " + level description = ( "**Issue:** " + str(result_extended) @@ -221,19 +212,15 @@ def formatview(self, depth): # Criticality rating def getCriticalityRating(self, result, level, severity): - criticality = "Info" if result == "INFO" or result == "PASS": - criticality = "Info" - elif result == "FAIL": + return "Info" + if result == "FAIL": if severity: # control is failing but marked as Info so we want to mark as # Low to appear in the Dojo if severity == "Informational": return "Low" return severity - if level == "Level 1": - criticality = "Critical" - else: - criticality = "High" + return "Critical" if level == "Level 1" else "High" - return criticality + return "Info" diff --git a/dojo/tools/awssecurityhub/compliance.py b/dojo/tools/awssecurityhub/compliance.py index 5fea1a8a786..2e17081a3f2 100644 --- a/dojo/tools/awssecurityhub/compliance.py +++ b/dojo/tools/awssecurityhub/compliance.py @@ -29,7 +29,7 @@ def get_item(self, finding: dict, test): if finding.get("Compliance", {}).get("Status", "PASSED") == "PASSED": is_Mitigated = True active = False - if finding.get("LastObservedAt", None): + if finding.get("LastObservedAt"): try: mitigated = datetime.strptime(finding.get("LastObservedAt"), "%Y-%m-%dT%H:%M:%S.%fZ") except Exception: diff --git a/dojo/tools/awssecurityhub/guardduty.py b/dojo/tools/awssecurityhub/guardduty.py index 40b26649500..637c7f4eccd 100644 --- a/dojo/tools/awssecurityhub/guardduty.py +++ b/dojo/tools/awssecurityhub/guardduty.py @@ -23,7 +23,7 @@ def get_item(self, finding: dict, test): mitigated = None else: is_Mitigated = True - if finding.get("LastObservedAt", None): + if finding.get("LastObservedAt"): try: mitigated = datetime.strptime(finding.get("LastObservedAt"), "%Y-%m-%dT%H:%M:%S.%fZ") except Exception: diff --git a/dojo/tools/awssecurityhub/inspector.py b/dojo/tools/awssecurityhub/inspector.py index 60c27e0b600..f856cc7ebed 100644 --- a/dojo/tools/awssecurityhub/inspector.py +++ b/dojo/tools/awssecurityhub/inspector.py @@ -43,7 +43,7 @@ def get_item(self, finding: dict, test): else: is_Mitigated = True active = False - if finding.get("LastObservedAt", None): + if finding.get("LastObservedAt"): try: mitigated = datetime.strptime(finding.get("LastObservedAt"), "%Y-%m-%dT%H:%M:%S.%fZ") except Exception: diff --git a/dojo/tools/awssecurityhub/parser.py b/dojo/tools/awssecurityhub/parser.py index 3d07d2554c7..8e841adc450 100644 --- a/dojo/tools/awssecurityhub/parser.py +++ b/dojo/tools/awssecurityhub/parser.py @@ -47,7 +47,7 @@ def 
get_findings(self, filehandle, test): def get_items(self, tree: dict, test): items = {} - findings = tree.get("Findings", tree.get("findings", None)) + findings = tree.get("Findings", tree.get("findings")) if not isinstance(findings, list): msg = "Incorrect Security Hub report format" raise TypeError(msg) diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py index 9838f65ae58..5fc42a7a193 100644 --- a/dojo/tools/azure_security_center_recommendations/parser.py +++ b/dojo/tools/azure_security_center_recommendations/parser.py @@ -35,7 +35,7 @@ def process_csv(self, file, test): findings = [] for row in reader: - if "unhealthy" == row.get("state").lower(): + if row.get("state").lower() == "unhealthy": subscription_id = row.get("subscriptionId") subscription_name = row.get("subscriptionName") resource_group = row.get("resourceGroup") diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py index 3e4e54fcd8b..6beca950ef9 100644 --- a/dojo/tools/bandit/parser.py +++ b/dojo/tools/bandit/parser.py @@ -64,10 +64,10 @@ def get_findings(self, filename, test): return results def convert_confidence(self, value): - if "high" == value.lower(): + if value.lower() == "high": return 2 - if "medium" == value.lower(): + if value.lower() == "medium": return 3 - if "low" == value.lower(): + if value.lower() == "low": return 6 return None diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py index 80db2714490..240bc2eab7f 100644 --- a/dojo/tools/blackduck/importer.py +++ b/dojo/tools/blackduck/importer.py @@ -78,10 +78,7 @@ def _process_project_findings( file_entry_dict = dict(file_entry) path = file_entry_dict.get("Path") archive_context = file_entry_dict.get("Archive context") - if archive_context: - full_path = f"{archive_context}{path[1:]}" - else: - full_path = path + full_path = f"{archive_context}{path[1:]}" if archive_context else path # 4000 character limit on this field total_len = len(full_path) @@ -127,10 +124,7 @@ def __partition_by_key(self, csv_file): findings = defaultdict(set) # Backwards compatibility. Newer versions of Blackduck use Component # id. - if "Project id" in records.fieldnames: - key = "Project id" - else: - key = "Component id" + key = "Project id" if "Project id" in records.fieldnames else "Component id" for record in records: findings[record.get(key)].add(frozenset(record.items())) return findings diff --git a/dojo/tools/bugcrowd/parser.py b/dojo/tools/bugcrowd/parser.py index a643499976a..7cad12ead9d 100644 --- a/dojo/tools/bugcrowd/parser.py +++ b/dojo/tools/bugcrowd/parser.py @@ -249,8 +249,5 @@ def convert_severity(self, sev_num): def get_endpoint(self, url): stripped_url = url.strip() - if "://" in stripped_url: # is the host full uri? - endpoint = Endpoint.from_uri(stripped_url) - else: - endpoint = Endpoint.from_uri("//" + stripped_url) - return endpoint + # is the host full uri? 
+ return Endpoint.from_uri(stripped_url) if "://" in stripped_url else Endpoint.from_uri("//" + stripped_url) diff --git a/dojo/tools/bundler_audit/parser.py b/dojo/tools/bundler_audit/parser.py index c960bb374c8..1534e170ed7 100644 --- a/dojo/tools/bundler_audit/parser.py +++ b/dojo/tools/bundler_audit/parser.py @@ -43,10 +43,7 @@ def get_findings(self, filename, test): advisory_id = field.replace("GHSA: ", "") elif field.startswith("Criticality"): criticality = field.replace("Criticality: ", "") - if criticality.lower() == "unknown": - sev = "Medium" - else: - sev = criticality + sev = "Medium" if criticality.lower() == "unknown" else criticality elif field.startswith("URL"): advisory_url = field.replace("URL: ", "") elif field.startswith("Title"): diff --git a/dojo/tools/burp/parser.py b/dojo/tools/burp/parser.py index 1edfdf0b0ed..9a1460f0803 100644 --- a/dojo/tools/burp/parser.py +++ b/dojo/tools/burp/parser.py @@ -235,7 +235,7 @@ def get_item(item_node, test): references = text_maker.handle(references) severity = item_node.findall("severity")[0].text - if "information" == severity.lower(): + if severity.lower() == "information": severity = "Info" scanner_confidence = item_node.findall("confidence")[0].text diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py index c54726f384e..f449d18a723 100644 --- a/dojo/tools/burp_api/parser.py +++ b/dojo/tools/burp_api/parser.py @@ -35,7 +35,7 @@ def get_findings(self, file, test): # for each issue found for issue_event in tree.get("issue_events", []): if ( - "issue_found" == issue_event.get("type") + issue_event.get("type") == "issue_found" and "issue" in issue_event ): issue = issue_event.get("issue") @@ -51,7 +51,7 @@ def get_findings(self, file, test): ) false_p = False # manage special case of false positives - if "false_positive" == issue.get("severity", "undefined"): + if issue.get("severity", "undefined") == "false_positive": false_p = True finding = Finding( @@ -157,10 +157,10 @@ def convert_confidence(issue): }, """ value = issue.get("confidence", "undefined").lower() - if "certain" == value: + if value == "certain": return 2 - if "firm" == value: + if value == "firm": return 3 - if "tentative" == value: + if value == "tentative": return 6 return None diff --git a/dojo/tools/cargo_audit/parser.py b/dojo/tools/cargo_audit/parser.py index 1447bf59081..8fb9a9187c5 100644 --- a/dojo/tools/cargo_audit/parser.py +++ b/dojo/tools/cargo_audit/parser.py @@ -26,12 +26,7 @@ def get_findings(self, filename, test): advisory = item.get("advisory") vuln_id = advisory.get("id") vulnerability_ids = [advisory.get("id")] - if "categories" in advisory: - categories = ( - f"**Categories:** {', '.join(advisory['categories'])}" - ) - else: - categories = "" + categories = f"**Categories:** {', '.join(advisory['categories'])}" if "categories" in advisory else "" description = ( categories + f"\n**Description:** `{advisory.get('description')}`" @@ -63,10 +58,7 @@ def get_findings(self, filename, test): package_version = item.get("package").get("version") title = f"[{package_name} {package_version}] {advisory.get('title')}" severity = "High" - if "keywords" in advisory: - tags = advisory.get("keywords") - else: - tags = [] + tags = advisory.get("keywords") if "keywords" in advisory else [] try: mitigation = f"**Update {package_name} to** {', '.join(item['versions']['patched'])}" except KeyError: diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py index c278612344e..e1347d4c642 100644 --- 
a/dojo/tools/checkmarx/parser.py +++ b/dojo/tools/checkmarx/parser.py @@ -139,10 +139,7 @@ def _process_result_file_name_aggregated( query, result, ) sinkFilename = lastPathnode.find("FileName").text - if sinkFilename: - title = "{} ({})".format(titleStart, sinkFilename.split("/")[-1]) - else: - title = titleStart + title = "{} ({})".format(titleStart, sinkFilename.split("/")[-1]) if sinkFilename else titleStart false_p = result.get("FalsePositive") sev = result.get("Severity") aggregateKeys = f"{cwe}{sev}{sinkFilename}" diff --git a/dojo/tools/checkov/parser.py b/dojo/tools/checkov/parser.py index 5628e53576a..d6fbd832cf6 100644 --- a/dojo/tools/checkov/parser.py +++ b/dojo/tools/checkov/parser.py @@ -63,7 +63,7 @@ def get_items(self, tree, test, check_type): def get_item(vuln, test, check_type): title = ( - vuln["check_name"] if "check_name" in vuln else "check_name not found" + vuln.get("check_name", "check_name not found") ) description = f"Check Type: {check_type}\n" if "check_id" in vuln: @@ -71,7 +71,7 @@ def get_item(vuln, test, check_type): if "check_name" in vuln: description += f"{vuln['check_name']}\n" - file_path = vuln["file_path"] if "file_path" in vuln else None + file_path = vuln.get("file_path", None) source_line = None if "file_line_range" in vuln: lines = vuln["file_line_range"] @@ -87,7 +87,7 @@ def get_item(vuln, test, check_type): mitigation = "" - references = vuln["guideline"] if "guideline" in vuln else "" + references = vuln.get("guideline", "") return Finding( title=title, test=test, diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py index bc168fbabab..3934599bf6a 100644 --- a/dojo/tools/clair/clairklar_parser.py +++ b/dojo/tools/clair/clairklar_parser.py @@ -45,9 +45,7 @@ def get_items_clairklar(self, tree_severity, test): def get_item_clairklar(self, item_node, test): if item_node["Severity"] == "Negligible": severity = "Info" - elif item_node["Severity"] == "Unknown": - severity = "Critical" - elif item_node["Severity"] == "Defcon1": + elif item_node["Severity"] == "Unknown" or item_node["Severity"] == "Defcon1": severity = "Critical" else: severity = item_node["Severity"] diff --git a/dojo/tools/codechecker/parser.py b/dojo/tools/codechecker/parser.py index 5e96c75be35..7bdccf5b36e 100644 --- a/dojo/tools/codechecker/parser.py +++ b/dojo/tools/codechecker/parser.py @@ -56,13 +56,13 @@ def get_item(vuln): description += "{}\n".format(vuln["message"]) location = vuln["file"] - file_path = location["path"] if "path" in location else None + file_path = location.get("path", None) if file_path: description += f"File path: {file_path}\n" - line = vuln["line"] if "line" in vuln else None - column = vuln["column"] if "column" in vuln else None + line = vuln.get("line", None) + column = vuln.get("column", None) if line is not None and column is not None: description += f"Location in file: line {line}, column {column}\n" diff --git a/dojo/tools/coverity_api/parser.py b/dojo/tools/coverity_api/parser.py index 194939de987..fda8a5cbea3 100644 --- a/dojo/tools/coverity_api/parser.py +++ b/dojo/tools/coverity_api/parser.py @@ -26,7 +26,7 @@ def get_findings(self, file, test): items = [] for issue in tree["viewContentsV1"]["rows"]: # get only security findings - if "Security" != issue.get("displayIssueKind"): + if issue.get("displayIssueKind") != "Security": continue description_formated = "\n".join( @@ -65,17 +65,17 @@ def get_findings(self, file, test): else: finding.nb_occurences = 1 - if "New" == issue.get("status"): + if 
issue.get("status") == "New": finding.active = True finding.verified = False - elif "Triaged" == issue.get("status"): + elif issue.get("status") == "Triaged": finding.active = True finding.verified = True - elif "Fixed" == issue.get("status"): + elif issue.get("status") == "Fixed": finding.active = False finding.verified = True else: - if "False Positive" == issue.get("classification"): + if issue.get("classification") == "False Positive": finding.false_p = True if "lastTriaged" in issue: ds = issue["lastTriaged"][0:10] @@ -91,13 +91,13 @@ def get_findings(self, file, test): def convert_displayImpact(self, val): if val is None: return "Info" - if "Audit" == val: + if val == "Audit": return "Info" - if "Low" == val: + if val == "Low": return "Low" - if "Medium" == val: + if val == "Medium": return "Medium" - if "High" == val: + if val == "High": return "High" msg = f"Unknown value for Coverity displayImpact {val}" raise ValueError(msg) @@ -105,17 +105,17 @@ def convert_displayImpact(self, val): def convert_severity(self, val): if val is None: return "Info" - if "Unspecified" == val: + if val == "Unspecified": return "Info" - if "Severe" == val: + if val == "Severe": return "Critical" - if "Major" == val: + if val == "Major": return "High" - if "Minor" == val: + if val == "Minor": return "Medium" - if "New Value" == val: + if val == "New Value": return "Info" - if "Various" == val: + if val == "Various": return "Info" msg = f"Unknown value for Coverity severity {val}" raise ValueError(msg) diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py index 71278115ecb..a9649ef52fa 100644 --- a/dojo/tools/crashtest_security/parser.py +++ b/dojo/tools/crashtest_security/parser.py @@ -186,10 +186,7 @@ def get_items(self, tree, test): title = re.sub(r" \([0-9]*\)$", "", title) # Attache CVEs - if "CVE" in title: - vulnerability_id = re.findall(r"CVE-\d{4}-\d{4,10}", title)[0] - else: - vulnerability_id = None + vulnerability_id = re.findall("CVE-\\d{4}-\\d{4,10}", title)[0] if "CVE" in title else None description = failure.get("message") severity = failure.get("type").capitalize() diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py index d4d19ff35e4..d83e8eb24e2 100644 --- a/dojo/tools/crunch42/parser.py +++ b/dojo/tools/crunch42/parser.py @@ -57,7 +57,7 @@ def get_items(self, tree, test): def get_item(self, issue, title, test): fingerprint = issue["fingerprint"] pointer = issue["pointer"] - message = issue["specificDescription"] if "specificDescription" in issue else title + message = issue.get("specificDescription", title) score = issue["score"] criticality = issue["criticality"] if criticality == 1: diff --git a/dojo/tools/cyclonedx/helpers.py b/dojo/tools/cyclonedx/helpers.py index 8e2bd29d24a..1bc11399f05 100644 --- a/dojo/tools/cyclonedx/helpers.py +++ b/dojo/tools/cyclonedx/helpers.py @@ -7,7 +7,7 @@ class Cyclonedxhelper: def _get_cvssv3(self, raw_vector): - if raw_vector is None or "" == raw_vector: + if raw_vector is None or raw_vector == "": return None if not raw_vector.startswith("CVSS:3"): raw_vector = "CVSS:3.1/" + raw_vector @@ -34,6 +34,6 @@ def fix_severity(self, severity): severity = severity.capitalize() if severity is None: severity = "Medium" - elif "Unknown" == severity or "None" == severity: + elif severity == "Unknown" or severity == "None": severity = "Info" return severity diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py index 6a329cfdfa7..64bd02b5dd7 100644 --- 
a/dojo/tools/cyclonedx/json_parser.py +++ b/dojo/tools/cyclonedx/json_parser.py @@ -115,13 +115,13 @@ def _get_findings_json(self, file, test): state = analysis.get("state") if state: if ( - "resolved" == state - or "resolved_with_pedigree" == state - or "not_affected" == state + state == "resolved" + or state == "resolved_with_pedigree" + or state == "not_affected" ): finding.is_mitigated = True finding.active = False - elif "false_positive" == state: + elif state == "false_positive": finding.false_p = True finding.active = False if not finding.active: diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py index 320e0ed34e6..70682c0c6a8 100644 --- a/dojo/tools/cyclonedx/xml_parser.py +++ b/dojo/tools/cyclonedx/xml_parser.py @@ -141,7 +141,7 @@ def manage_vulnerability_legacy( for rating in vulnerability.findall( "v:ratings/v:rating", namespaces=ns, ): - if "CVSSv3" == rating.findtext("v:method", namespaces=ns): + if rating.findtext("v:method", namespaces=ns) == "CVSSv3": raw_vector = rating.findtext("v:vector", namespaces=ns) severity = rating.findtext("v:severity", namespaces=ns) cvssv3 = Cyclonedxhelper()._get_cvssv3(raw_vector) @@ -253,7 +253,7 @@ def _manage_vulnerability_xml( "b:ratings/b:rating", namespaces=ns, ): method = rating.findtext("b:method", namespaces=ns) - if "CVSSv3" == method or "CVSSv31" == method: + if method == "CVSSv3" or method == "CVSSv31": raw_vector = rating.findtext("b:vector", namespaces=ns) severity = rating.findtext("b:severity", namespaces=ns) cvssv3 = Cyclonedxhelper()._get_cvssv3(raw_vector) @@ -280,13 +280,13 @@ def _manage_vulnerability_xml( state = analysis[0].findtext("b:state", namespaces=ns) if state: if ( - "resolved" == state - or "resolved_with_pedigree" == state - or "not_affected" == state + state == "resolved" + or state == "resolved_with_pedigree" + or state == "not_affected" ): finding.is_mitigated = True finding.active = False - elif "false_positive" == state: + elif state == "false_positive": finding.false_p = True finding.active = False if not finding.active: diff --git a/dojo/tools/deepfence_threatmapper/compliance.py b/dojo/tools/deepfence_threatmapper/compliance.py index f948a18c929..3e0ebb9172c 100644 --- a/dojo/tools/deepfence_threatmapper/compliance.py +++ b/dojo/tools/deepfence_threatmapper/compliance.py @@ -42,9 +42,7 @@ def get_findings(self, row, headers, test): ) def compliance_severity(self, input): - if input == "pass": - output = "Info" - elif input == "info": + if input == "pass" or input == "info": output = "Info" elif input == "warn": output = "Medium" diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py index eecc09670a3..fd0f3336ab5 100644 --- a/dojo/tools/dependency_track/parser.py +++ b/dojo/tools/dependency_track/parser.py @@ -137,10 +137,7 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin component_version = dependency_track_finding["component"]["version"] else: component_version = None - if component_version is not None: - version_description = component_version - else: - version_description = "" + version_description = component_version if component_version is not None else "" title = f"{component_name}:{version_description} affected by: {vuln_id} ({source})" @@ -211,18 +208,12 @@ def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_fin # Use the analysis state from Dependency Track to determine if the finding has already been marked as a false positive upstream analysis = 
dependency_track_finding.get("analysis") - is_false_positive = True if analysis is not None and analysis.get("state") == "FALSE_POSITIVE" else False + is_false_positive = bool(analysis is not None and analysis.get("state") == "FALSE_POSITIVE") # Get the EPSS details - if "epssPercentile" in dependency_track_finding["vulnerability"]: - epss_percentile = dependency_track_finding["vulnerability"]["epssPercentile"] - else: - epss_percentile = None + epss_percentile = dependency_track_finding["vulnerability"].get("epssPercentile", None) - if "epssScore" in dependency_track_finding["vulnerability"]: - epss_score = dependency_track_finding["vulnerability"]["epssScore"] - else: - epss_score = None + epss_score = dependency_track_finding["vulnerability"].get("epssScore", None) # Build and return Finding model finding = Finding( diff --git a/dojo/tools/dockle/parser.py b/dojo/tools/dockle/parser.py index 6bb70769dd0..e4b668d0b98 100644 --- a/dojo/tools/dockle/parser.py +++ b/dojo/tools/dockle/parser.py @@ -34,10 +34,7 @@ def get_findings(self, filename, test): title = item["title"] if dockle_severity == "IGNORE": continue - if dockle_severity in self.SEVERITY: - severity = self.SEVERITY[dockle_severity] - else: - severity = "Medium" + severity = self.SEVERITY.get(dockle_severity, "Medium") description = sorted(item.get("alerts", [])) description = "\n".join(description) dupe_key = hashlib.sha256( diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py index bf8435f63ab..7fa515b4855 100644 --- a/dojo/tools/drheader/parser.py +++ b/dojo/tools/drheader/parser.py @@ -15,10 +15,7 @@ def get_description_for_scan_types(self, scan_type): def return_finding(self, test, finding, url=None): title = "Header : " + finding["rule"] - if url is not None: - message = finding["message"] + "\nURL : " + url - else: - message = finding["message"] + message = finding["message"] + "\nURL : " + url if url is not None else finding["message"] if finding.get("value") is not None: message += "\nObserved values: " + finding["value"] if finding.get("expected") is not None: diff --git a/dojo/tools/dsop/parser.py b/dojo/tools/dsop/parser.py index 0d01e5b8d8d..bf729cb006c 100644 --- a/dojo/tools/dsop/parser.py +++ b/dojo/tools/dsop/parser.py @@ -43,10 +43,7 @@ def __parse_disa(self, test, items, sheet): continue title = row[headers["title"]] unique_id = row[headers["ruleid"]] - if row[headers["severity"]] == "unknown": - severity = "Info" - else: - severity = row[headers["severity"]].title() + severity = "Info" if row[headers["severity"]] == "unknown" else row[headers["severity"]].title() references = row[headers["refs"]] description = row[headers["desc"]] impact = row[headers["rationale"]] diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py index 329e2fac751..613545e214b 100644 --- a/dojo/tools/eslint/parser.py +++ b/dojo/tools/eslint/parser.py @@ -35,10 +35,7 @@ def get_findings(self, filename, test): continue for message in item["messages"]: - if message["message"] is None: - title = "Finding Not defined" - else: - title = str(message["message"]) + title = "Finding Not defined" if message["message"] is None else str(message["message"]) if message["ruleId"] is not None: title = title + " Test ID: " + str(message["ruleId"]) diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py index c0ad99ac9a4..e6680cfbad1 100644 --- a/dojo/tools/github_vulnerability/parser.py +++ b/dojo/tools/github_vulnerability/parser.py @@ -63,7 +63,7 @@ def get_findings(self, 
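Several of the hunks above (Dependency Track's EPSS fields, Dockle's severity map) replace a `key in dict` check plus a separate index with a single `dict.get(key, default)` call. A sketch under made-up data; the SEVERITY mapping and the item are illustrative only.

SEVERITY = {"FATAL": "Critical", "WARN": "Medium"}  # illustrative mapping
item = {"level": "INFO"}                            # hypothetical record

# Before: test for the key, then index.
if item["level"] in SEVERITY:
    severity = SEVERITY[item["level"]]
else:
    severity = "Medium"

# After: one lookup with an explicit fallback.
severity = SEVERITY.get(item["level"], "Medium")
assert severity == "Medium"

Because None is already the implicit default, d.get(key, None) and d.get(key) are interchangeable, which is presumably why the GitLab parser hunks further down drop the explicit None from calls like switcher.get(confidence).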
filename, test): if "createdAt" in alert: finding.date = dateutil.parser.parse(alert["createdAt"]) if "state" in alert and ( - "FIXED" == alert["state"] or "DISMISSED" == alert["state"] + alert["state"] == "FIXED" or alert["state"] == "DISMISSED" ): finding.active = False finding.is_mitigated = True @@ -136,10 +136,7 @@ def get_findings(self, filename, test): for vuln in data: url = vuln["url"] html_url = vuln["html_url"] - if vuln["state"] == "open": - active = True - else: - active = False + active = vuln["state"] == "open" ruleid = vuln["rule"]["id"] ruleseverity = vuln["rule"]["severity"] ruledescription = vuln["rule"]["description"] diff --git a/dojo/tools/gitlab_container_scan/parser.py b/dojo/tools/gitlab_container_scan/parser.py index 7dd65305e8a..a8fe218b1a6 100644 --- a/dojo/tools/gitlab_container_scan/parser.py +++ b/dojo/tools/gitlab_container_scan/parser.py @@ -22,7 +22,7 @@ def get_description_for_scan_types(self, scan_type): return "GitLab Container Scan report file can be imported in JSON format (option --json)." def _get_dependency_version(self, dependency): - return dependency["version"] if "version" in dependency else "" + return dependency.get("version", "") def _get_dependency_name(self, dependency): if "package" in dependency and "name" in dependency["package"]: diff --git a/dojo/tools/gitlab_dast/parser.py b/dojo/tools/gitlab_dast/parser.py index 7728dd00ef5..6f92ea60bdb 100644 --- a/dojo/tools/gitlab_dast/parser.py +++ b/dojo/tools/gitlab_dast/parser.py @@ -58,7 +58,7 @@ def get_confidence_numeric(self, confidence): "Unknown": 8, # Tentative "Ignore": 10, # Tentative } - return switcher.get(confidence, None) + return switcher.get(confidence) # iterating through properties of each vulnerability def get_item(self, vuln, test, scanner): @@ -97,7 +97,7 @@ def get_item(self, vuln, test, scanner): # title finding.title = ( - vuln["name"] if "name" in vuln else finding.unique_id_from_tool + vuln.get("name", finding.unique_id_from_tool) ) # cwe for identifier in vuln["identifiers"]: diff --git a/dojo/tools/gitlab_dep_scan/parser.py b/dojo/tools/gitlab_dep_scan/parser.py index cc365c8acba..d2999002f79 100644 --- a/dojo/tools/gitlab_dep_scan/parser.py +++ b/dojo/tools/gitlab_dep_scan/parser.py @@ -46,12 +46,9 @@ def get_items(self, tree, test): return list(items.values()) def get_item(self, vuln, test, scan): - if "id" in vuln: - unique_id_from_tool = vuln["id"] - else: - # If the new unique id is not provided, fall back to deprecated - # "cve" fingerprint (old version) - unique_id_from_tool = vuln["cve"] + # If the new unique id is not provided, fall back to deprecated + # "cve" fingerprint (old version) + unique_id_from_tool = vuln["id"] if "id" in vuln else vuln["cve"] title = "" if "name" in vuln: @@ -74,21 +71,17 @@ def get_item(self, vuln, test, scan): description += f"{vuln['description']}\n" location = vuln["location"] - file_path = location["file"] if "file" in location else None + file_path = location.get("file", None) component_name = None component_version = None if "dependency" in location: component_version = ( - location["dependency"]["version"] - if "version" in location["dependency"] - else None + location["dependency"].get("version", None) ) if "package" in location["dependency"]: component_name = ( - location["dependency"]["package"]["name"] - if "name" in location["dependency"]["package"] - else None + location["dependency"]["package"].get("name", None) ) severity = vuln["severity"] diff --git a/dojo/tools/gitlab_sast/parser.py 
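Where an if/else only ever assigned True or False, the hunks above collapse it to the comparison itself (github_vulnerability's active flag) or wrap the expression in bool() when it mixes non-boolean operands (Dependency Track's false-positive flag). A minimal sketch with hypothetical fields:

vuln = {"state": "open"}                 # hypothetical record
analysis = {"state": "FALSE_POSITIVE"}   # hypothetical analysis block

# A comparison already yields a bool, so no if/else is needed.
active = vuln["state"] == "open"

# bool() keeps the result an explicit bool even if the expression
# short-circuits on a non-boolean operand.
is_false_positive = bool(
    analysis is not None and analysis.get("state") == "FALSE_POSITIVE",
)

assert active is True
assert is_false_positive is True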
b/dojo/tools/gitlab_sast/parser.py index ebe5071ce6e..1e8aa3fa00f 100644 --- a/dojo/tools/gitlab_sast/parser.py +++ b/dojo/tools/gitlab_sast/parser.py @@ -70,7 +70,7 @@ def get_confidence_numeric(self, argument): "Low": 6, # Tentative "Experimental": 7, # Tentative } - return switcher.get(argument, None) + return switcher.get(argument) def get_item(self, vuln, scanner): unique_id_from_tool = vuln["id"] if "id" in vuln else vuln["cve"] @@ -92,9 +92,9 @@ def get_item(self, vuln, scanner): description += f"{vuln['description']}\n" location = vuln["location"] - file_path = location["file"] if "file" in location else None + file_path = location.get("file", None) - line = location["start_line"] if "start_line" in location else None + line = location.get("start_line", None) sast_object = None sast_source_file_path = None @@ -121,7 +121,7 @@ def get_item(self, vuln, scanner): severity = "Info" scanner_confidence = self.get_confidence_numeric(vuln.get("confidence", "Unkown")) - mitigation = vuln["solution"] if "solution" in vuln else "" + mitigation = vuln.get("solution", "") cwe = None vulnerability_id = None references = "" diff --git a/dojo/tools/gitleaks/parser.py b/dojo/tools/gitleaks/parser.py index 9f32cd26a52..7038befa682 100644 --- a/dojo/tools/gitleaks/parser.py +++ b/dojo/tools/gitleaks/parser.py @@ -107,10 +107,7 @@ def get_finding_legacy(self, issue, test, dupes): def get_finding_current(self, issue, test, dupes): reason = issue.get("Description") line = issue.get("StartLine") - if line: - line = int(line) - else: - line = 0 + line = int(line) if line else 0 match = issue.get("Match") secret = issue.get("Secret") file_path = issue.get("File") diff --git a/dojo/tools/gosec/parser.py b/dojo/tools/gosec/parser.py index cbcf3b4507d..20ccbcae062 100644 --- a/dojo/tools/gosec/parser.py +++ b/dojo/tools/gosec/parser.py @@ -58,10 +58,7 @@ def get_findings(self, filename, test): if "-" in line: # if this is a range, only point to the beginning. line = line.split("-", 1)[0] - if line.isdigit(): - line = int(line) - else: - line = None + line = int(line) if line.isdigit() else None dupe_key = title + item["file"] + str(line) diff --git a/dojo/tools/govulncheck/parser.py b/dojo/tools/govulncheck/parser.py index 0c5bb4191b4..f8764cca319 100644 --- a/dojo/tools/govulncheck/parser.py +++ b/dojo/tools/govulncheck/parser.py @@ -41,7 +41,7 @@ def get_finding_trace_info(self, data, osv_id): # Browse the findings to look for matching OSV-id. If the OSV-id is matching, extract traces. trace_info_strs = [] for elem in data: - if "finding" in elem.keys(): + if "finding" in elem: finding = elem["finding"] if finding.get("osv") == osv_id: trace_info = finding.get("trace", []) @@ -59,12 +59,12 @@ def get_finding_trace_info(self, data, osv_id): def get_affected_version(self, data, osv_id): # Browse the findings to look for matching OSV-id. If the OSV-id is matching, extract the first affected version. 
for elem in data: - if "finding" in elem.keys(): + if "finding" in elem: finding = elem["finding"] if finding.get("osv") == osv_id: trace_info = finding.get("trace", []) for trace in trace_info: - if "version" in trace.keys(): + if "version" in trace: return trace.get("version") return "" @@ -127,7 +127,7 @@ def get_findings(self, scan_file, test): elif isinstance(data, list): # Parsing for new govulncheck output format for elem in data: - if "osv" in elem.keys(): + if "osv" in elem: cve = elem["osv"]["aliases"][0] osv_data = elem["osv"] affected_package = osv_data["affected"][0]["package"] @@ -179,10 +179,7 @@ def get_findings(self, scan_file, test): affected_version = self.get_affected_version(data, osv_data["id"]) - if "severity" in elem["osv"].keys(): - severity = elem["osv"]["severity"] - else: - severity = SEVERITY + severity = elem["osv"].get("severity", SEVERITY) d = { "cve": cve, diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py index 457e01c06f5..7a28f6f8e17 100644 --- a/dojo/tools/h1/parser.py +++ b/dojo/tools/h1/parser.py @@ -73,10 +73,7 @@ def get_findings(self, file, test): references += f"[{ref_link}]({ref_link})" # Set active state of the Dojo finding - if content["attributes"]["state"] in ["triaged", "new"]: - active = True - else: - active = False + active = content["attributes"]["state"] in ["triaged", "new"] # Set CWE of the Dojo finding try: diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py index b1f2ab23633..b0a9d68b89c 100644 --- a/dojo/tools/harbor_vulnerability/parser.py +++ b/dojo/tools/harbor_vulnerability/parser.py @@ -1,3 +1,4 @@ +import contextlib import json from dojo.models import Finding @@ -27,15 +28,12 @@ def get_findings(self, filename, test): # When doing dictionary, we can detect duplications dupes = {} - try: - vulnerability = data["vulnerabilities"] # json output of https://pypi.org/project/harborapi/ - except (KeyError): - pass + # json output of https://pypi.org/project/harborapi/ + with contextlib.suppress(KeyError): + vulnerability = data["vulnerabilities"] # To be compatible with update in version - try: + with contextlib.suppress(KeyError, StopIteration, TypeError): vulnerability = data[next(iter(data.keys()))]["vulnerabilities"] - except (KeyError, StopIteration, TypeError): - pass # Early exit if empty if "vulnerability" not in locals() or vulnerability is None: @@ -54,10 +52,7 @@ def get_findings(self, filename, test): title = f"{id} - {package_name} ({package_version})" severity = transpose_severity(severity) - if fix_version: - mitigation = f"Upgrade {package_name} to version {fix_version}" - else: - mitigation = None + mitigation = f"Upgrade {package_name} to version {fix_version}" if fix_version else None if links: references = "" @@ -66,15 +61,9 @@ def get_findings(self, filename, test): else: references = None - if cwe_ids and cwe_ids[0] != "": - cwe = cwe_ids[0].strip("CWE-") - else: - cwe = None + cwe = cwe_ids[0].strip("CWE-") if cwe_ids and cwe_ids[0] != "" else None - if id and id.startswith("CVE"): - vulnerability_id = id - else: - vulnerability_id = None + vulnerability_id = id if id and id.startswith("CVE") else None dupe_key = title diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py index eaff922e2e8..7d4d51b8d9a 100644 --- a/dojo/tools/hcl_appscan/parser.py +++ b/dojo/tools/hcl_appscan/parser.py @@ -42,10 +42,7 @@ def get_findings(self, file, test): match item.tag: case "severity": output = self.xmltreehelper(item) - if output is None: 
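The govulncheck hunks above drop `.keys()` from membership tests: `in` on a dict already checks the keys, so the extra call only builds a throwaway view object. Sketch with an invented entry:

elem = {"finding": {"osv": "GO-2023-0001"}}   # hypothetical govulncheck entry

assert ("finding" in elem) == ("finding" in elem.keys())  # same answer either way

# Preferred: membership on the dict itself.
if "finding" in elem:
    finding = elem["finding"]

The same reasoning covers iteration: `for key in some_dict` walks the keys directly, which is what the risk_recon and SonarQube hunks later in this diff rely on.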
- severity = "Info" - else: - severity = output.strip(" ").capitalize() + severity = "Info" if output is None else output.strip(" ").capitalize() case "cwe": cwe = int(self.xmltreehelper(item)) case "remediation": diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py index 53242fcd2f4..88261692460 100644 --- a/dojo/tools/immuniweb/parser.py +++ b/dojo/tools/immuniweb/parser.py @@ -41,10 +41,7 @@ def get_findings(self, file, test): cwe = "".join( i for i in vulnerability.find("CWE-ID").text if i.isdigit() ) - if cwe: - cwe = cwe - else: - cwe = None + cwe = cwe or None vulnerability_id = vulnerability.find("CVE-ID").text steps_to_reproduce = vulnerability.find("PoC").text # just to make sure severity is in the recognised sentence casing diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py index 74eda25dc80..f8ee971fc58 100644 --- a/dojo/tools/intsights/parser.py +++ b/dojo/tools/intsights/parser.py @@ -58,7 +58,7 @@ def get_findings(self, file, test): alert = Finding( title=alert["title"], test=test, - active=False if alert["status"] == "Closed" else True, + active=alert["status"] != "Closed", verified=True, description=self._build_finding_description(alert), severity=alert["severity"], diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py index 5261b802f23..5d4301d3ab7 100644 --- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py +++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py @@ -1,3 +1,4 @@ +import contextlib import hashlib import json import re @@ -65,10 +66,7 @@ def get_item( impact_path = ImpactPath("", "", "") if "severity" in vulnerability: - if vulnerability["severity"] == "Unknown": - severity = "Informational" - else: - severity = vulnerability["severity"].title() + severity = "Informational" if vulnerability["severity"] == "Unknown" else vulnerability["severity"].title() else: severity = "Informational" @@ -81,12 +79,10 @@ def get_item( cwe = decode_cwe_number(cves[0].get("cwe", [])[0]) if "cvss_v3" in cves[0]: cvss_v3 = cves[0]["cvss_v3"] - try: + # Note: Xray sometimes takes over malformed cvss scores like `5.9` that can not be parsed. + # Without the try-except block here the whole import of all findings would fail. + with contextlib.suppress(CVSS3RHScoreDoesNotMatch, CVSS3RHMalformedError): cvssv3 = CVSS3.from_rh_vector(cvss_v3).clean_vector() - except (CVSS3RHScoreDoesNotMatch, CVSS3RHMalformedError): - # Note: Xray sometimes takes over malformed cvss scores like `5.9` that can not be parsed. - # Without the try-except block here the whole import of all findings would fail. 
- pass impact_paths = vulnerability.get("impact_path", []) if len(impact_paths) > 0: diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py index 456b23a7330..6834f690cfb 100644 --- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py +++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py @@ -47,10 +47,7 @@ def get_component_name_version(name): def get_severity(vulnerability): if "severity" in vulnerability: - if vulnerability["severity"] == "Unknown": - severity = "Info" - else: - severity = vulnerability["severity"].title() + severity = "Info" if vulnerability["severity"] == "Unknown" else vulnerability["severity"].title() else: severity = "Info" return severity diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py index 12efe1afffd..a30fdf8561a 100644 --- a/dojo/tools/jfrog_xray_unified/parser.py +++ b/dojo/tools/jfrog_xray_unified/parser.py @@ -53,10 +53,7 @@ def get_item(vulnerability, test): # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss if "severity" in vulnerability: - if vulnerability["severity"] == "Unknown": - severity = "Info" - else: - severity = vulnerability["severity"].title() + severity = "Info" if vulnerability["severity"] == "Unknown" else vulnerability["severity"].title() # TODO: Needs UNKNOWN new status in the model. else: severity = "Info" diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py index a1351dc0777..264e5c26ed3 100644 --- a/dojo/tools/jfrogxray/parser.py +++ b/dojo/tools/jfrogxray/parser.py @@ -65,10 +65,7 @@ def decode_cwe_number(value): def get_item(vulnerability, test): # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss if "severity" in vulnerability: - if vulnerability["severity"] == "Unknown": - severity = "Info" - else: - severity = vulnerability["severity"].title() + severity = "Info" if vulnerability["severity"] == "Unknown" else vulnerability["severity"].title() # TODO: Needs UNKNOWN new status in the model. else: severity = "Info" diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py index 27080473991..b884c9fc8c3 100644 --- a/dojo/tools/kics/parser.py +++ b/dojo/tools/kics/parser.py @@ -32,10 +32,7 @@ def get_findings(self, filename, test): for query in data["queries"]: name = query.get("query_name") query_url = query.get("query_url") - if query.get("severity") in self.SEVERITY: - severity = self.SEVERITY[query.get("severity")] - else: - severity = "Medium" + severity = self.SEVERITY.get(query.get("severity"), "Medium") platform = query.get("platform") category = query.get("category") for item in query.get("files"): diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py index 5d91e5a315e..34601b05aae 100644 --- a/dojo/tools/kiuwan/parser.py +++ b/dojo/tools/kiuwan/parser.py @@ -1,3 +1,4 @@ +import contextlib import csv import hashlib import io @@ -62,7 +63,7 @@ def get_findings(self, filename, test): + row["Software characteristic"] + "\n\n" + "**Vulnerability type** : " - + (row["Vulnerability type"] if "Vulnerability type" in row else "") + + (row.get("Vulnerability type", "")) + "\n\n" + "**CWE Scope** : " + row["CWE Scope"] @@ -104,10 +105,8 @@ def get_findings(self, filename, test): finding.mitigation = "Not provided!" 
finding.severity = findingdict["severity"] finding.static_finding = True - try: + with contextlib.suppress(Exception): finding.cwe = int(row["CWE"]) - except Exception: - pass if finding is not None: if finding.title is None: diff --git a/dojo/tools/kubehunter/parser.py b/dojo/tools/kubehunter/parser.py index 61d8a8a052c..158cee73f2a 100644 --- a/dojo/tools/kubehunter/parser.py +++ b/dojo/tools/kubehunter/parser.py @@ -40,10 +40,7 @@ def get_findings(self, file, test): # Finding severity severity = item.get("severity", "info") allowed_severity = ["info", "low", "medium", "high", "critical"] - if severity.lower() in allowed_severity: - severity = severity.capitalize() - else: - severity = "Info" + severity = severity.capitalize() if severity.lower() in allowed_severity else "Info" # Finding mitigation and reference avd_reference = item.get("avd_reference") diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py index c371f477901..0a864b51062 100644 --- a/dojo/tools/kubescape/parser.py +++ b/dojo/tools/kubescape/parser.py @@ -77,10 +77,7 @@ def get_findings(self, filename, test): else: severity = self.severity_mapper(controlSummary.get("scoreFactor", 0)) # Define mitigation if available - if "mitigation" in controlSummary: - mitigation = controlSummary["mitigation"] - else: - mitigation = "" + mitigation = controlSummary.get("mitigation", "") armoLink = f"https://hub.armosec.io/docs/{controlID.lower()}" description = "**Summary:** " + f"The ressource '{resourceid}' has failed the control '{control_name}'." + "\n" diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py index 60ad8931098..da5f6cb2f1d 100644 --- a/dojo/tools/mend/parser.py +++ b/dojo/tools/mend/parser.py @@ -61,16 +61,10 @@ def _build_common_output(node, lib_name=None): description = node.get("description") cve = node.get("name") - if cve is None: - title = "CVE-None | " + lib_name - else: - title = cve + " | " + lib_name + title = "CVE-None | " + lib_name if cve is None else cve + " | " + lib_name # cvss2 by default in CLI, but cvss3 in UI. Adapting to have # homogeneous behavior. 
- if "cvss3_severity" in node: - cvss_sev = node.get("cvss3_severity") - else: - cvss_sev = node.get("severity") + cvss_sev = node.get("cvss3_severity") if "cvss3_severity" in node else node.get("severity") severity = cvss_sev.lower().capitalize() cvss3_score = node.get("cvss3_score", None) diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py index cb5f0193be5..333120d379a 100644 --- a/dojo/tools/meterian/parser.py +++ b/dojo/tools/meterian/parser.py @@ -73,7 +73,7 @@ def do_get_findings(self, single_security_report, scan_date, test): ) if "cve" in advisory: - if "N/A" != advisory["cve"]: + if advisory["cve"] != "N/A": finding.unsaved_vulnerability_ids = [advisory["cve"]] if "cwe" in advisory: diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py index d5a2611f95c..8bc7ac42375 100644 --- a/dojo/tools/microfocus_webinspect/parser.py +++ b/dojo/tools/microfocus_webinspect/parser.py @@ -58,7 +58,7 @@ def get_findings(self, file, test): for content in classifications.findall("Classification"): # detect CWE number # TODO: support more than one CWE number - if "kind" in content.attrib and "CWE" == content.attrib["kind"]: + if "kind" in content.attrib and content.attrib["kind"] == "CWE": cwe = MicrofocusWebinspectParser.get_cwe(content.attrib["identifier"]) description += "\n\n" + content.text + "\n" diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py index b86d7bf041b..53f62cfb3c2 100644 --- a/dojo/tools/mobsf/parser.py +++ b/dojo/tools/mobsf/parser.py @@ -207,7 +207,7 @@ def get_findings(self, filename, test): if isinstance(data["binary_analysis"], list): for details in data["binary_analysis"]: for binary_analysis_type in details: - if "name" != binary_analysis_type: + if binary_analysis_type != "name": mobsf_item = { "category": "Binary Analysis", "title": details[binary_analysis_type]["description"].split(".")[0], @@ -376,7 +376,7 @@ def getSeverityForPermission(self, status): signature => Info (it's positive so... Info) signatureOrSystem => Info (it's positive so... 
Info) """ - if "dangerous" == status: + if status == "dangerous": return "High" return "Info" diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py index bd9fd5167a3..8db8ef08642 100644 --- a/dojo/tools/mobsfscan/parser.py +++ b/dojo/tools/mobsfscan/parser.py @@ -47,10 +47,7 @@ def get_findings(self, filename, test): ], ) references = metadata.get("reference") - if metadata.get("severity") in self.SEVERITY: - severity = self.SEVERITY[metadata.get("severity")] - else: - severity = "Info" + severity = self.SEVERITY.get(metadata.get("severity"), "Info") finding = Finding( title=f"{key}", diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py index 19e4c7febd7..77a5abd8b7c 100644 --- a/dojo/tools/mozilla_observatory/parser.py +++ b/dojo/tools/mozilla_observatory/parser.py @@ -25,10 +25,7 @@ def get_description_for_scan_types(self, scan_type): def get_findings(self, file, test): data = json.load(file) # format from the CLI - if "tests" in data: - nodes = data["tests"] - else: - nodes = data + nodes = data.get("tests", data) findings = [] for key in nodes: diff --git a/dojo/tools/ms_defender/parser.py b/dojo/tools/ms_defender/parser.py index ccf348cb468..748cbd0e23b 100644 --- a/dojo/tools/ms_defender/parser.py +++ b/dojo/tools/ms_defender/parser.py @@ -37,9 +37,9 @@ def get_findings(self, file, test): vulnerabilityfiles = [] machinefiles = [] for content in list(zipdata): - if "vulnerabilities/" in content and "vulnerabilities/" != content: + if "vulnerabilities/" in content and content != "vulnerabilities/": vulnerabilityfiles.append(content) - if "machines/" in content and "machines/" != content: + if "machines/" in content and content != "machines/": machinefiles.append(content) vulnerabilities = [] machines = {} diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py index b3bd18bf6cf..d9e0b21dacd 100644 --- a/dojo/tools/neuvector_compliance/parser.py +++ b/dojo/tools/neuvector_compliance/parser.py @@ -36,10 +36,7 @@ def get_items(tree, test): # /v1/host/{id}/compliance or similar. thus, we need to support items in a # bit different leafs. 
testsTree = None - if "report" in tree: - testsTree = tree.get("report").get("checks", []) - else: - testsTree = tree.get("items", []) + testsTree = tree.get("report").get("checks", []) if "report" in tree else tree.get("items", []) for node in testsTree: item = get_item(node, test) @@ -121,11 +118,7 @@ def convert_severity(severity): return "Medium" if severity.lower() == "info": return "Low" - if severity.lower() == "pass": - return "Info" - if severity.lower() == "note": - return "Info" - if severity.lower() == "error": + if severity.lower() in ["pass", "note", "error"]: return "Info" return severity.title() diff --git a/dojo/tools/nikto/json_parser.py b/dojo/tools/nikto/json_parser.py index 4a4c24cc530..fa33cf384f3 100644 --- a/dojo/tools/nikto/json_parser.py +++ b/dojo/tools/nikto/json_parser.py @@ -30,7 +30,7 @@ def process_json(self, file, test): references=vulnerability.get("references"), ) # manage if we have an ID from OSVDB - if "OSVDB" in vulnerability and "0" != vulnerability.get("OSVDB"): + if "OSVDB" in vulnerability and vulnerability.get("OSVDB") != "0": finding.unique_id_from_tool = "OSVDB-" + vulnerability.get( "OSVDB", ) diff --git a/dojo/tools/nikto/xml_parser.py b/dojo/tools/nikto/xml_parser.py index bb831b7c3cf..8a356ee361d 100644 --- a/dojo/tools/nikto/xml_parser.py +++ b/dojo/tools/nikto/xml_parser.py @@ -35,10 +35,7 @@ def process_scandetail(self, scan, test, dupes): sentences = re.split( r"(? 0: - titleText = sentences[0][:900] - else: - titleText = description[:900] + titleText = sentences[0][:900] if len(sentences) > 0 else description[:900] # Description description = "\n".join( [ @@ -49,7 +46,7 @@ def process_scandetail(self, scan, test, dupes): ) # Manage severity the same way with JSON severity = "Info" # Nikto doesn't assign severity, default to Info - if item.get("osvdbid") is not None and "0" != item.get("osvdbid"): + if item.get("osvdbid") is not None and item.get("osvdbid") != "0": severity = "Medium" finding = Finding( title=titleText, diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py index f1a62892c69..0d1182d5cda 100644 --- a/dojo/tools/nmap/parser.py +++ b/dojo/tools/nmap/parser.py @@ -1,3 +1,4 @@ +import contextlib import datetime from cpe import CPE @@ -25,12 +26,10 @@ def get_findings(self, file, test): raise ValueError(msg) report_date = None - try: + with contextlib.suppress(ValueError): report_date = datetime.datetime.fromtimestamp( int(root.attrib["start"]), ) - except ValueError: - pass for host in root.findall("host"): host_info = "### Host\n\n" @@ -74,7 +73,7 @@ def get_findings(self, file, test): endpoint.port = int(port_element.attrib["portid"]) # filter on open ports - if "open" != port_element.find("state").attrib.get("state"): + if port_element.find("state").attrib.get("state") != "open": continue title = f"Open port: {endpoint.port}/{endpoint.protocol}" description = host_info @@ -198,7 +197,7 @@ def manage_vulner_script( # manage if CVE is in metadata if ( "type" in vuln_attributes - and "cve" == vuln_attributes["type"] + and vuln_attributes["type"] == "cve" ): finding.unsaved_vulnerability_ids = [vuln_attributes["id"]] diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py index f5143f72af3..6296477a971 100644 --- a/dojo/tools/npm_audit/parser.py +++ b/dojo/tools/npm_audit/parser.py @@ -86,9 +86,7 @@ def get_item(item_node, test): for npm_finding in item_node["findings"]: # use first version as component_version component_version = ( - npm_finding["version"] - if not component_version - else 
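Chains of equality branches that share one outcome are merged into a single membership test, as in the NeuVector severity mapping just above (and again later for Semgrep and Twistlock). A simplified sketch of the idea, not the full mapping used by any of these parsers:

def convert_severity(severity):
    # The diff uses list literals for these small constant sets, which is fine;
    # a tuple or set would behave the same for a handful of values.
    if severity.lower() in ("pass", "note", "error"):
        return "Info"
    return severity.title()

assert convert_severity("PASS") == "Info"
assert convert_severity("critical") == "Critical"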
component_version + component_version or npm_finding["version"] ) paths += ( "\n - " diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py index 88198844a04..296d9de3280 100644 --- a/dojo/tools/npm_audit_7_plus/parser.py +++ b/dojo/tools/npm_audit_7_plus/parser.py @@ -121,10 +121,7 @@ def get_item(item_node, tree, test): elif item_node["via"] and isinstance(item_node["via"][0], dict): title = item_node["via"][0]["title"] component_name = item_node["nodes"][0] - if len(item_node["via"][0]["cwe"]) > 0: - cwe = item_node["via"][0]["cwe"][0] - else: - cwe = None + cwe = item_node["via"][0]["cwe"][0] if len(item_node["via"][0]["cwe"]) > 0 else None references.append(item_node["via"][0]["url"]) unique_id_from_tool = str(item_node["via"][0]["source"]) cvssv3 = item_node["via"][0]["cvss"]["vectorString"] diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py index 4c843c6dca1..c1ccd71c3ad 100644 --- a/dojo/tools/nuclei/parser.py +++ b/dojo/tools/nuclei/parser.py @@ -60,10 +60,7 @@ def get_findings(self, filename, test): if item_type is None: item_type = "" matched = item.get("matched", item.get("matched-at", "")) - if "://" in matched: - endpoint = Endpoint.from_uri(matched) - else: - endpoint = Endpoint.from_uri("//" + matched) + endpoint = Endpoint.from_uri(matched) if "://" in matched else Endpoint.from_uri("//" + matched) finding = Finding( title=f"{name}", diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py index 186243526b8..0795abf6ce2 100644 --- a/dojo/tools/openscap/parser.py +++ b/dojo/tools/openscap/parser.py @@ -108,10 +108,7 @@ def get_findings(self, file, test): validate_ipv46_address(ip) endpoint = Endpoint(host=ip) except ValidationError: - if "://" in ip: - endpoint = Endpoint.from_uri(ip) - else: - endpoint = Endpoint.from_uri("//" + ip) + endpoint = Endpoint.from_uri(ip) if "://" in ip else Endpoint.from_uri("//" + ip) finding.unsaved_endpoints.append(endpoint) dupe_key = hashlib.sha256( diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py index 9f0927bd1cf..67294cd4001 100644 --- a/dojo/tools/ort/parser.py +++ b/dojo/tools/ort/parser.py @@ -127,10 +127,7 @@ def get_rule_violation_model( for id in project_ids: project_names.append(get_name_id_for_package(packages, id)) package = find_package_by_id(packages, rule_violation_unresolved["pkg"]) - if "license" in rule_violation_unresolved: - license_tmp = rule_violation_unresolved["license"] - else: - license_tmp = "unset" + license_tmp = rule_violation_unresolved.get("license", "unset") if "license_source" not in rule_violation_unresolved: rule_violation_unresolved["license_source"] = "unset" license_id = find_license_id(licenses, license_tmp) diff --git a/dojo/tools/osv_scanner/parser.py b/dojo/tools/osv_scanner/parser.py index 42e9408825c..2e6ff2a1dc8 100644 --- a/dojo/tools/osv_scanner/parser.py +++ b/dojo/tools/osv_scanner/parser.py @@ -15,14 +15,7 @@ def get_description_for_scan_types(self, scan_type): return "OSV scan output can be imported in JSON format (option --format json)." 
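Where the fallback should kick in for any falsy value, the diff prefers `x or default` over a conditional expression, as in the npm_audit component_version just above and immuniweb's `cwe = cwe or None` earlier. One caveat worth keeping in mind: `or` treats "" , 0 and empty containers the same as None, which is the intent in these parsers but is not a drop-in replacement for an `is None` check. Sketch with invented values:

component_version = ""            # hypothetical: no version recorded yet
npm_version = "1.2.3"

# Keep the existing value if it is truthy, otherwise take the new one.
component_version = component_version or npm_version
assert component_version == "1.2.3"

sev = ""                          # hypothetical severity field from a report
sev = sev or "Info"               # empty string falls back to "Info"
assert sev == "Info"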
def classify_severity(self, input): - if input != "": - if input == "MODERATE": - severity = "Medium" - else: - severity = input.lower().capitalize() - else: - severity = "Low" - return severity + return ("Medium" if input == "MODERATE" else input.lower().capitalize()) if input != "" else "Low" def get_findings(self, file, test): try: diff --git a/dojo/tools/pip_audit/parser.py b/dojo/tools/pip_audit/parser.py index b3e023d3c66..5bb935dcd3d 100644 --- a/dojo/tools/pip_audit/parser.py +++ b/dojo/tools/pip_audit/parser.py @@ -26,16 +26,8 @@ def requires_file(self, scan_type): def get_findings(self, scan_file, test): """Return the collection of Findings ingested.""" data = json.load(scan_file) - findings = None # this parser can handle two distinct formats see sample scan files - if "dependencies" in data: - # new format of report - findings = get_file_findings(data, test) - else: - # legacy format of report - findings = get_legacy_findings(data, test) - - return findings + return get_file_findings(data, test) if "dependencies" in data else get_legacy_findings(data, test) def get_file_findings(data, test): diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py index 2f88814b447..8d4cd237edb 100644 --- a/dojo/tools/qualys/csv_parser.py +++ b/dojo/tools/qualys/csv_parser.py @@ -44,9 +44,7 @@ def get_report_findings(csv_reader) -> [dict]: report_findings = [] for row in csv_reader: - if row.get("Title") and row["Title"] != "Title": - report_findings.append(row) - elif row.get("VULN TITLE"): + if (row.get("Title") and row["Title"] != "Title") or row.get("VULN TITLE"): report_findings.append(row) return report_findings diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index 96f14a9441b..59e2c64afb2 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -129,10 +129,7 @@ def parse_finding(host, tree): issue_row["fqdn"] = host.findtext("DNS") # Create Endpoint - if issue_row["fqdn"]: - ep = Endpoint(host=issue_row["fqdn"]) - else: - ep = Endpoint(host=issue_row["ip_address"]) + ep = Endpoint(host=issue_row["fqdn"]) if issue_row["fqdn"] else Endpoint(host=issue_row["ip_address"]) # OS NAME issue_row["os"] = host.findtext("OPERATING_SYSTEM") diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py index 2d26eeafc06..047d48ff2bf 100644 --- a/dojo/tools/qualys_infrascan_webgui/parser.py +++ b/dojo/tools/qualys_infrascan_webgui/parser.py @@ -31,10 +31,7 @@ def issue_r(raw_row, vuln, scan_date): _port = raw_row.get("port") # Create Endpoint - if issue_row["fqdn"]: - ep = Endpoint(host=issue_row["fqdn"]) - else: - ep = Endpoint(host=issue_row["ip_address"]) + ep = Endpoint(host=issue_row["fqdn"]) if issue_row["fqdn"] else Endpoint(host=issue_row["ip_address"]) # OS NAME issue_row["os"] = raw_row.findtext("OS") @@ -112,15 +109,15 @@ def issue_r(raw_row, vuln, scan_date): def qualys_convert_severity(raw_val): val = str(raw_val).strip() - if "1" == val: + if val == "1": return "Info" - if "2" == val: + if val == "2": return "Low" - if "3" == val: + if val == "3": return "Medium" - if "4" == val: + if val == "4": return "High" - if "5" == val: + if val == "5": return "Critical" return "Info" diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py index 9565f1dd1ee..e17c7fc48ad 100644 --- a/dojo/tools/qualys_webapp/parser.py +++ b/dojo/tools/qualys_webapp/parser.py @@ -200,10 +200,7 @@ def get_unique_vulnerabilities( if access_path is not None: urls += [url.text for 
url in access_path.iter("URL")] payloads = vuln.find("PAYLOADS") - if payloads is not None: - req_resps = get_request_response(payloads) - else: - req_resps = [[], []] + req_resps = get_request_response(payloads) if payloads is not None else [[], []] if is_info: raw_finding_date = vuln.findtext("LAST_TIME_DETECTED") @@ -267,10 +264,7 @@ def get_vulnerabilities( if access_path is not None: urls += [url.text for url in access_path.iter("URL")] payloads = vuln.find("PAYLOADS") - if payloads is not None: - req_resps = get_request_response(payloads) - else: - req_resps = [[], []] + req_resps = get_request_response(payloads) if payloads is not None else [[], []] if is_info: raw_finding_date = vuln.findtext("LAST_TIME_DETECTED") @@ -292,7 +286,7 @@ def get_vulnerabilities( else: finding_date = None - finding = findings.get(qid, None) + finding = findings.get(qid) findings[qid] = attach_extras( urls, req_resps[0], req_resps[1], finding, finding_date, qid, test, ) diff --git a/dojo/tools/risk_recon/api.py b/dojo/tools/risk_recon/api.py index 898db341ec7..49e5e936dd4 100644 --- a/dojo/tools/risk_recon/api.py +++ b/dojo/tools/risk_recon/api.py @@ -49,7 +49,7 @@ def map_toes(self): toe_id = item.get("toe_id", None) name = item.get("toe_short_name", None) if not comps or name in name_list: - filters = comps.get(name, None) + filters = comps.get(name) self.toe_map[toe_id] = filters or self.data else: msg = f"Unable to query Target of Evaluations due to {response.status_code} - {response.content}" @@ -60,7 +60,7 @@ def filter_finding(self, finding): if not filters: return False - for filter_item in filters.keys(): + for filter_item in filters: filter_list = filters.get(filter_item, None) if filter_list and finding[filter_item] not in filter_list: return True @@ -68,7 +68,7 @@ def filter_finding(self, finding): return False def get_findings(self): - for toe in self.toe_map.keys(): + for toe in self.toe_map: response = self.session.get( url=f"{self.url}/findings/{toe}", headers={ diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py index 4ddcf64e16c..24b4661021d 100644 --- a/dojo/tools/risk_recon/parser.py +++ b/dojo/tools/risk_recon/parser.py @@ -83,7 +83,7 @@ def _get_findings_internal(self, findings, test): date = dateutil.parser.parse(item.get("first_seen")) sev = item.get("severity", "").capitalize() - sev = "Info" if not sev else sev + sev = sev or "Info" tags = ( item.get("security_domain")[:20] diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py index eb83977f0c9..33a6a1fdee9 100644 --- a/dojo/tools/sarif/parser.py +++ b/dojo/tools/sarif/parser.py @@ -342,11 +342,11 @@ def get_severity(result, rule): if "defaultConfiguration" in rule: severity = rule["defaultConfiguration"].get("level") - if "note" == severity: + if severity == "note": return "Info" - if "warning" == severity: + if severity == "warning": return "Medium" - if "error" == severity: + if severity == "error": return "High" return "Medium" diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py index a9afb107426..8017f9b9f99 100644 --- a/dojo/tools/semgrep/parser.py +++ b/dojo/tools/semgrep/parser.py @@ -130,13 +130,13 @@ def get_findings(self, filename, test): return list(dupes.values()) def convert_severity(self, val): - if "CRITICAL" == val.upper(): + if val.upper() == "CRITICAL": return "Critical" - if "WARNING" == val.upper(): + if val.upper() == "WARNING": return "Medium" - if "ERROR" == val.upper() or "HIGH" == val.upper(): + if val.upper() in ["ERROR", "HIGH"]: return 
"High" - if "INFO" == val.upper(): + if val.upper() == "INFO": return "Info" msg = f"Unknown value for severity: {val}" raise ValueError(msg) diff --git a/dojo/tools/sonarqube/sonarqube_restapi_zip.py b/dojo/tools/sonarqube/sonarqube_restapi_zip.py index 3ad7ba0a9cf..338c5316e68 100644 --- a/dojo/tools/sonarqube/sonarqube_restapi_zip.py +++ b/dojo/tools/sonarqube/sonarqube_restapi_zip.py @@ -6,7 +6,7 @@ class SonarQubeRESTAPIZIP: def get_items(self, files, test, mode): total_findings_per_file = [] - for dictkey in files.keys(): + for dictkey in files: if dictkey.endswith(".json"): json_content = json.loads(files[dictkey].decode("ascii")) total_findings_per_file += SonarQubeRESTAPIJSON().get_json_items(json_content, test, mode) diff --git a/dojo/tools/spotbugs/parser.py b/dojo/tools/spotbugs/parser.py index 65ecac21535..e6629275dd2 100644 --- a/dojo/tools/spotbugs/parser.py +++ b/dojo/tools/spotbugs/parser.py @@ -85,10 +85,7 @@ def get_findings(self, filename, test): desc += message + "\n" shortmessage_extract = bug.find("ShortMessage") - if shortmessage_extract is not None: - title = shortmessage_extract.text - else: - title = bug.get("type") + title = shortmessage_extract.text if shortmessage_extract is not None else bug.get("type") severity = SEVERITY[bug.get("priority")] description = desc diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py index 621ded3daf1..218bba9d302 100644 --- a/dojo/tools/sslscan/parser.py +++ b/dojo/tools/sslscan/parser.py @@ -88,9 +88,6 @@ def get_findings(self, file, test): dupes[dupe_key] = finding if host: - if "://" in host: - endpoint = Endpoint.from_uri(host) - else: - endpoint = Endpoint(host=host, port=port) + endpoint = Endpoint.from_uri(host) if "://" in host else Endpoint(host=host, port=port) finding.unsaved_endpoints.append(endpoint) return dupes.values() diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index c1ea9fc2c8d..cf7183f2afb 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -15,7 +15,7 @@ class TenableCSVParser: def _validated_severity(self, severity): - if severity not in Finding.SEVERITIES.keys(): + if severity not in Finding.SEVERITIES: severity = "Info" return severity @@ -189,10 +189,7 @@ def get_findings(self, filename: str, test: Test): if isinstance(port, str) and port in ["", "0"]: port = None # Update the endpoints - if "://" in host: - endpoint = Endpoint.from_uri(host) - else: - endpoint = Endpoint(protocol=protocol, host=host, port=port) + endpoint = Endpoint.from_uri(host) if "://" in host else Endpoint(protocol=protocol, host=host, port=port) # Add the list to be processed later find.unsaved_endpoints.append(endpoint) diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py index 7094e82d626..c976ed9c538 100644 --- a/dojo/tools/tenable/xml_format.py +++ b/dojo/tools/tenable/xml_format.py @@ -23,7 +23,7 @@ def get_text_severity(self, severity_id): elif severity_id == 1: severity = "Low" # Ensure the severity is a valid choice. 
Fall back to info otherwise - if severity not in Finding.SEVERITIES.keys(): + if severity not in Finding.SEVERITIES: severity = "Info" return severity diff --git a/dojo/tools/terrascan/parser.py b/dojo/tools/terrascan/parser.py index c8b07f4e143..bc93829e82c 100644 --- a/dojo/tools/terrascan/parser.py +++ b/dojo/tools/terrascan/parser.py @@ -36,10 +36,7 @@ def get_findings(self, filename, test): for item in data.get("results").get("violations"): rule_name = item.get("rule_name") description = item.get("description") - if item.get("severity") in self.SEVERITY: - severity = self.SEVERITY[item.get("severity")] - else: - severity = "Info" + severity = self.SEVERITY.get(item.get("severity"), "Info") rule_id = item.get("rule_id") category = item.get("category") resource_name = item.get("resource_name") diff --git a/dojo/tools/tfsec/parser.py b/dojo/tools/tfsec/parser.py index d0bc390f3aa..32ad0c5a4dd 100644 --- a/dojo/tools/tfsec/parser.py +++ b/dojo/tools/tfsec/parser.py @@ -51,14 +51,8 @@ def get_findings(self, filename, test): ) impact = item.get("impact") resolution = item.get("resolution") - if item.get("links", None) is not None: - references = "\n".join(item.get("links")) - else: - references = item.get("link", None) - if item.get("severity").upper() in self.SEVERITY: - severity = self.SEVERITY[item.get("severity").upper()] - else: - severity = "Low" + references = "\n".join(item.get("links")) if item.get("links", None) is not None else item.get("link", None) + severity = self.SEVERITY.get(item.get("severity").upper(), "Low") dupe_key = hashlib.sha256( ( diff --git a/dojo/tools/threagile/parser.py b/dojo/tools/threagile/parser.py index 796d260e6d9..93f78ee2f83 100644 --- a/dojo/tools/threagile/parser.py +++ b/dojo/tools/threagile/parser.py @@ -80,7 +80,7 @@ def get_items(self, tree, test): findings = [] for item in tree: for field in self.REQUIRED_FIELDS: - if field not in item.keys(): + if field not in item: msg = f"Invalid ThreAgile risks file, missing field {field}" raise ValueError(msg) severity = item.get("severity", "info").capitalize() diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py index 1fde84a80f0..de06a150a84 100644 --- a/dojo/tools/trivy/parser.py +++ b/dojo/tools/trivy/parser.py @@ -196,10 +196,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): package_version = vuln.get("InstalledVersion", "") references = "\n".join(vuln.get("References", [])) mitigation = vuln.get("FixedVersion", "") - if len(vuln.get("CweIDs", [])) > 0: - cwe = int(vuln["CweIDs"][0].split("-")[1]) - else: - cwe = 0 + cwe = int(vuln["CweIDs"][0].split("-")[1]) if len(vuln.get("CweIDs", [])) > 0 else 0 type = target_data.get("Type", "") title = f"{vuln_id} {package_name} {package_version}" description = DESCRIPTION_TEMPLATE.format( diff --git a/dojo/tools/trufflehog3/parser.py b/dojo/tools/trufflehog3/parser.py index c4879bc4ccb..fa013cce35e 100644 --- a/dojo/tools/trufflehog3/parser.py +++ b/dojo/tools/trufflehog3/parser.py @@ -101,10 +101,7 @@ def get_finding_current(self, json_data, test, dupes): severity = severity.capitalize() file = json_data.get("path") line = json_data.get("line") - if line: - line = int(line) - else: - line = 0 + line = int(line) if line else 0 secret = json_data.get("secret") context = json_data.get("context") json_data.get("id") diff --git a/dojo/tools/trustwave/parser.py b/dojo/tools/trustwave/parser.py index 4e0d1562ccd..71517715f1b 100644 --- a/dojo/tools/trustwave/parser.py +++ b/dojo/tools/trustwave/parser.py @@ -41,11 
+41,9 @@ def get_findings(self, filename, test): if host is None or host == "": host = row.get("IP") finding.unsaved_endpoints = [Endpoint(host=host)] - if row.get("Port") is not None and not "" == row.get("Port"): + if row.get("Port") is not None and row.get("Port") != "": finding.unsaved_endpoints[0].port = int(row["Port"]) - if row.get("Protocol") is not None and not "" == row.get( - "Protocol", - ): + if row.get("Protocol") is not None and row.get("Protocol") != "": finding.unsaved_endpoints[0].protocol = row["Protocol"] finding.title = row["Vulnerability Name"] finding.description = row["Description"] @@ -53,10 +51,7 @@ def get_findings(self, filename, test): finding.mitigation = row.get("Remediation") # manage severity - if row["Severity"] in severity_mapping: - finding.severity = severity_mapping[row["Severity"]] - else: - finding.severity = "Low" + finding.severity = severity_mapping.get(row["Severity"], "Low") finding.unsaved_vulnerability_ids = [row.get("CVE")] dupes_key = hashlib.sha256( diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py index 740d72f8e68..2719d51ae5a 100644 --- a/dojo/tools/twistlock/parser.py +++ b/dojo/tools/twistlock/parser.py @@ -135,24 +135,16 @@ def get_item(vulnerability, test): else "Info" ) vector = ( - vulnerability["vector"] - if "vector" in vulnerability - else "CVSS vector not provided. " + vulnerability.get("vector", "CVSS vector not provided. ") ) status = ( - vulnerability["status"] - if "status" in vulnerability - else "There seems to be no fix yet. Please check description field." + vulnerability.get("status", "There seems to be no fix yet. Please check description field.") ) cvss = ( - vulnerability["cvss"] - if "cvss" in vulnerability - else "No CVSS score yet." + vulnerability.get("cvss", "No CVSS score yet.") ) riskFactors = ( - vulnerability["riskFactors"] - if "riskFactors" in vulnerability - else "No risk factors." + vulnerability.get("riskFactors", "No risk factors.") ) # create the finding object @@ -192,11 +184,7 @@ def convert_severity(severity): return "High" if severity.lower() == "moderate": return "Medium" - if severity.lower() == "information": - return "Info" - if severity.lower() == "informational": - return "Info" - if severity == "": + if severity.lower() in ["information", "informational", ""]: return "Info" return severity.title() diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py index fe48bbb46fd..4b75e10e981 100644 --- a/dojo/tools/veracode/json_parser.py +++ b/dojo/tools/veracode/json_parser.py @@ -144,10 +144,7 @@ def add_static_details(self, finding, finding_details, backup_title=None) -> Fin finding.dynamic_finding = False finding.static_finding = True # Get the finding category to get the high level info about the vuln - if category := finding_details.get("finding_category"): - category_title = category.get("name") - else: - category_title = None + category_title = category.get("name") if (category := finding_details.get("finding_category")) else None # Set the title of the finding to the name of the finding category. # If not present, fall back on CWE title. 
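The Veracode hunk above (and its twin in the dynamic-details method that follows) folds an if/else that already used an assignment expression into a conditional expression whose condition is the walrus itself. A hedged sketch with a made-up payload; this relies on Python 3.8+ semantics, where the condition is evaluated first and the bound name is then visible in the chosen branch:

finding_details = {"finding_category": {"name": "SQL Injection"}}  # hypothetical

category_title = (
    category.get("name")
    if (category := finding_details.get("finding_category"))
    else None
)
assert category_title == "SQL Injection"

# With the key missing, the walrus binds None, the condition is falsy,
# and the fallback branch is taken.
category_title = (
    category.get("name")
    if (category := {}.get("finding_category"))
    else None
)
assert category_title is None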
If that is not present, do nothing if category_title: @@ -183,10 +180,7 @@ def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Fi finding.dynamic_finding = True finding.static_finding = False # Get the finding category to get the high level info about the vuln - if category := finding_details.get("finding_category"): - category_title = category.get("name") - else: - category_title = None + category_title = category.get("name") if (category := finding_details.get("finding_category")) else None # Set the title of the finding to the name of the finding category. # If not present, fall back on CWE title. If that is not present, do nothing if category_title: diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py index 66c7e36ca89..81dcb48d589 100644 --- a/dojo/tools/veracode_sca/parser.py +++ b/dojo/tools/veracode_sca/parser.py @@ -229,7 +229,7 @@ def fix_severity(self, severity): severity = severity.capitalize() if severity is None: severity = "Medium" - elif "Unknown" == severity or "None" == severity: + elif severity == "Unknown" or severity == "None": severity = "Info" return severity diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py index 3b6c6dfd4fd..adcd0ef4778 100644 --- a/dojo/tools/wapiti/parser.py +++ b/dojo/tools/wapiti/parser.py @@ -64,10 +64,7 @@ def get_findings(self, file, test): title = category + ": " + entry.findtext("info") # get numerical severity. num_severity = entry.findtext("level") - if num_severity in severity_mapping: - severity = severity_mapping[num_severity] - else: - severity = "Info" + severity = severity_mapping.get(num_severity, "Info") finding = Finding( title=title, diff --git a/dojo/tools/wazuh/parser.py b/dojo/tools/wazuh/parser.py index ae4bf98c22f..e2057c18561 100644 --- a/dojo/tools/wazuh/parser.py +++ b/dojo/tools/wazuh/parser.py @@ -48,10 +48,7 @@ def get_findings(self, file, test): agent_ip = item.get("agent_ip") detection_time = item.get("detection_time").split("T")[0] - if links: - references = "\n".join(links) - else: - references = None + references = "\n".join(links) if links else None title = ( item.get("title") + " (version: " + package_version + ")" diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py index 2042fe5c17d..f9e7e77f2be 100644 --- a/dojo/tools/wfuzz/parser.py +++ b/dojo/tools/wfuzz/parser.py @@ -19,7 +19,7 @@ def severity_mapper(self, input): return "Low" if 400 <= int(input) <= 499: return "Medium" - if 500 <= int(input): + if int(input) >= 500: return "Low" return None @@ -38,10 +38,7 @@ def get_findings(self, filename, test): for item in data: url = hyperlink.parse(item["url"]) return_code = item.get("code", None) - if return_code is None: - severity = "Low" - else: - severity = self.severity_mapper(input=return_code) + severity = "Low" if return_code is None else self.severity_mapper(input=return_code) description = f"The URL {url.to_text()} must not be exposed\n Please review your configuration\n" dupe_key = hashlib.sha256( (url.to_text() + str(return_code)).encode("utf-8"), diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index 0c5ca4ff024..a1bde0b87be 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -34,7 +34,7 @@ def get_findings(self, file, test): # Make sure the findings key exists in the dictionary and that it is # not null or an empty list if ( - "collection" not in findings_collection.keys() + "collection" not in findings_collection or 
not findings_collection["collection"] ): msg = "collection key not present or there were not findings present." diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py index b6a7cabdd55..b4eca87b40f 100644 --- a/dojo/tools/xanitizer/parser.py +++ b/dojo/tools/xanitizer/parser.py @@ -85,15 +85,9 @@ def generate_title(self, finding, line): cl = finding.find("class") file = finding.find("file") if pckg is not None and cl is not None: - if line: - title = f"{title} ({pckg.text}.{cl.text}:{line})" - else: - title = f"{title} ({pckg.text}.{cl.text})" + title = f"{title} ({pckg.text}.{cl.text}:{line})" if line else f"{title} ({pckg.text}.{cl.text})" else: - if line: - title = f"{title} ({file.text}:{line})" - else: - title = f"{title} ({file.text})" + title = f"{title} ({file.text}:{line})" if line else f"{title} ({file.text})" return title diff --git a/dojo/user/views.py b/dojo/user/views.py index f43b6b7b600..0f8914e4adf 100644 --- a/dojo/user/views.py +++ b/dojo/user/views.py @@ -227,10 +227,7 @@ def view_profile(request): group_members = get_authorized_group_members_for_user(user) user_contact = user.usercontactinfo if hasattr(user, "usercontactinfo") else None - if user_contact is None: - contact_form = UserContactInfoForm() - else: - contact_form = UserContactInfoForm(instance=user_contact) + contact_form = UserContactInfoForm() if user_contact is None else UserContactInfoForm(instance=user_contact) global_role = user.global_role if hasattr(user, "global_role") else None if global_role is None: @@ -393,16 +390,10 @@ def edit_user(request, uid): form = EditDojoUserForm(instance=user) user_contact = user.usercontactinfo if hasattr(user, "usercontactinfo") else None - if user_contact is None: - contact_form = UserContactInfoForm() - else: - contact_form = UserContactInfoForm(instance=user_contact) + contact_form = UserContactInfoForm() if user_contact is None else UserContactInfoForm(instance=user_contact) global_role = user.global_role if hasattr(user, "global_role") else None - if global_role is None: - global_role_form = GlobalRoleForm() - else: - global_role_form = GlobalRoleForm(instance=global_role) + global_role_form = GlobalRoleForm() if global_role is None else GlobalRoleForm(instance=global_role) if request.method == "POST": form = EditDojoUserForm(request.POST, instance=user) diff --git a/dojo/utils.py b/dojo/utils.py index 683bec737fc..915a06f1662 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -778,11 +778,7 @@ def is_title_in_breadcrumbs(title): if breadcrumbs is None: return False - for breadcrumb in breadcrumbs: - if breadcrumb.get("title") == title: - return True - - return False + return any(breadcrumb.get("title") == title for breadcrumb in breadcrumbs) def get_punchcard_data(objs, start_date, weeks, view="Finding"): @@ -1261,15 +1257,9 @@ def build_query(query_string, search_fields): for field_name in search_fields: q = Q(**{f"{field_name}__icontains": term}) - if or_query: - or_query = or_query | q - else: - or_query = q + or_query = or_query | q if or_query else q - if query: - query = query & or_query - else: - query = or_query + query = query & or_query if query else or_query return query @@ -1779,7 +1769,7 @@ def get_return_url(request): return_url = request.POST.get("return_url", None) if return_url is None or not return_url.strip(): # for some reason using request.GET.get('return_url') never works - return_url = request.GET["return_url"] if "return_url" in request.GET else None + return_url = request.GET["return_url"] if "return_url" in 
request.GET else None # noqa: SIM401 return return_url or None @@ -1978,7 +1968,7 @@ def _create_notifications(): if sla_age is None: sla_age = 0 - if (sla_age < 0) and (settings.SLA_NOTIFY_POST_BREACH < abs(sla_age)): + if (sla_age < 0) and (abs(sla_age) > settings.SLA_NOTIFY_POST_BREACH): post_breach_no_notify_count += 1 # Skip finding notification if breached for too long logger.debug(f"Finding {finding.id} breached the SLA {abs(sla_age)} days ago. Skipping notifications.") @@ -2229,7 +2219,7 @@ def get_product(obj): if not obj: return None - if isinstance(obj, Finding) or isinstance(obj, Finding_Group): + if isinstance(obj, (Finding, Finding_Group)): return obj.test.engagement.product if isinstance(obj, Test): diff --git a/tests/base_test_class.py b/tests/base_test_class.py index bea137e4844..64dbbcbd2a1 100644 --- a/tests/base_test_class.py +++ b/tests/base_test_class.py @@ -1,3 +1,4 @@ +import contextlib import logging import os import re @@ -238,10 +239,8 @@ def goto_all_findings_list(self, driver): def wait_for_datatable_if_content(self, no_content_id, wrapper_id): no_content = None - try: + with contextlib.suppress(Exception): no_content = self.driver.find_element(By.ID, no_content_id) - except: - pass if no_content is None: # wait for product_wrapper div as datatables javascript modifies the DOM on page load. diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 2d68989c180..f8cf8d6c1e9 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -1188,27 +1188,12 @@ def test_import_6_reimport_6_gitlab_dep_scan_component_name_and_version(self): count = 0 for finding in active_findings_after["results"]: - if "v0.0.0-20190219172222-a4c6cb3142f2" == finding["component_version"]: + if finding["component_version"] == "v0.0.0-20190219172222-a4c6cb3142f2" or finding["component_version"] == "v0.0.0-20190308221718-c2843e01d9a2" or finding["component_version"] == "v0.0.0-20200302210943-78000ba7a073": self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) self.assertEqual("golang.org/x/crypto", finding["component_name"]) count = count + 1 - elif "v0.0.0-20190308221718-c2843e01d9a2" == finding["component_version"]: - self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) - self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) - self.assertEqual("golang.org/x/crypto", finding["component_name"]) - count = count + 1 - elif "v0.0.0-20200302210943-78000ba7a073" == finding["component_version"]: - self.assertEqual("CVE-2020-29652: Nil Pointer Dereference", finding["title"]) - self.assertEqual("CVE-2020-29652", finding["vulnerability_ids"][0]["vulnerability_id"]) - self.assertEqual("golang.org/x/crypto", finding["component_name"]) - count = count + 1 - elif "v0.3.0" == finding["component_version"]: - self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding["title"]) - self.assertEqual("CVE-2020-14040", finding["vulnerability_ids"][0]["vulnerability_id"]) - self.assertEqual("golang.org/x/text", finding["component_name"]) - count = count + 1 - elif "v0.3.2" == finding["component_version"]: + elif finding["component_version"] == "v0.3.0" or finding["component_version"] == "v0.3.2": self.assertEqual("CVE-2020-14040: Loop With Unreachable Exit Condition (Infinite Loop)", finding["title"]) self.assertEqual("CVE-2020-14040", 
finding["vulnerability_ids"][0]["vulnerability_id"]) self.assertEqual("golang.org/x/text", finding["component_name"]) diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index ee758ddaedb..a33f52a0cb7 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -279,7 +279,7 @@ def _check_helper(check): _check_helper(isinstance(obj, list)) return None if schema_type == TYPE_OBJECT: - _check_helper(isinstance(obj, OrderedDict) or isinstance(obj, dict)) + _check_helper(isinstance(obj, (OrderedDict, dict))) return None if schema_type == TYPE_STRING: _check_helper(isinstance(obj, str)) @@ -320,10 +320,10 @@ def _check(schema, obj): # self._with_prefix(name, _check, prop, obj_child) _check(prop, obj_child) - for child_name in obj.keys(): + for child_name in obj: # TODO: prefetch mixins not picked up by spectcular? if child_name != "prefetch": - if not properties or child_name not in properties.keys(): + if not properties or child_name not in properties: self._has_failed = True self._register_error(f'unexpected property "{child_name}" found') @@ -439,7 +439,7 @@ def test_detail_prefetch(self): @skipIfNotSubclass(RetrieveModelMixin) def test_detail_object_not_authorized(self): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") self.setUp_not_authorized() @@ -451,7 +451,7 @@ def test_detail_object_not_authorized(self): @skipIfNotSubclass(RetrieveModelMixin) def test_detail_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -533,7 +533,7 @@ def test_list_prefetch(self): @skipIfNotSubclass(ListModelMixin) def test_list_object_not_authorized(self): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") self.setUp_not_authorized() @@ -544,7 +544,7 @@ def test_list_object_not_authorized(self): @skipIfNotSubclass(ListModelMixin) def test_list_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -574,7 +574,7 @@ def test_create(self): @skipIfNotSubclass(CreateModelMixin) @patch("dojo.api_v2.permissions.user_has_permission") def test_create_object_not_authorized(self, mock): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") mock.return_value = False @@ -587,7 +587,7 @@ def test_create_object_not_authorized(self, mock): @skipIfNotSubclass(CreateModelMixin) def test_create_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -637,7 +637,7 @@ def test_update(self): @skipIfNotSubclass(UpdateModelMixin) @patch("dojo.api_v2.permissions.user_has_permission") def test_update_object_not_authorized(self, mock): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") 
mock.return_value = False @@ -666,7 +666,7 @@ def test_update_object_not_authorized(self, mock): @skipIfNotSubclass(UpdateModelMixin) def test_update_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -726,7 +726,7 @@ def test_delete_preview(self): @skipIfNotSubclass(DestroyModelMixin) @patch("dojo.api_v2.permissions.user_has_permission") def test_delete_object_not_authorized(self, mock): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") mock.return_value = False @@ -748,7 +748,7 @@ def test_delete_object_not_authorized(self, mock): @skipIfNotSubclass(DestroyModelMixin) def test_delete_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -785,7 +785,7 @@ def test_update(self): @skipIfNotSubclass(UpdateModelMixin) @patch("dojo.api_v2.permissions.user_has_permission") def test_update_object_not_authorized(self, mock): - if not self.test_type == TestType.OBJECT_PERMISSIONS: + if self.test_type != TestType.OBJECT_PERMISSIONS: self.skipTest("Authorization is not object based") mock.return_value = False @@ -802,7 +802,7 @@ def test_update_object_not_authorized(self, mock): class AuthenticatedViewTest(BaseClassTest): @skipIfNotSubclass(ListModelMixin) def test_list_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -812,7 +812,7 @@ def test_list_configuration_not_authorized(self): @skipIfNotSubclass(RetrieveModelMixin) def test_detail_configuration_not_authorized(self): - if not self.test_type == TestType.CONFIGURATION_PERMISSIONS: + if self.test_type != TestType.CONFIGURATION_PERMISSIONS: self.skipTest("Authorization is not configuration based") self.setUp_not_authorized() @@ -1118,7 +1118,7 @@ def setUp(self): def test_request_response_post_and_download(self): # Test the creation - for level in self.url_levels.keys(): + for level in self.url_levels: length = FileUpload.objects.count() with open(f"{str(self.path)}/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile: payload = { @@ -1140,7 +1140,7 @@ def test_request_response_post_and_download(self): self.assertEqual(file_data, downloaded_file) def test_request_response_get(self): - for level in self.url_levels.keys(): + for level in self.url_levels: response = self.client.get(f"/api/v2/{level}/files/") self.assertEqual(200, response.status_code) diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py index 97afb3e1f7d..211fe45f4e4 100644 --- a/unittests/test_risk_acceptance.py +++ b/unittests/test_risk_acceptance.py @@ -52,10 +52,7 @@ def setUp(self): def add_risk_acceptance(self, eid, data_risk_accceptance, fid=None): - if fid: - args = (eid, fid) - else: - args = (eid, ) + args = (eid, fid) if fid else (eid,) response = self.client.post(reverse("add_risk_acceptance", args=args), data_risk_accceptance) self.assertEqual(302, response.status_code, response.content[:1000]) @@ -65,19 +62,13 @@ def 
assert_all_active_not_risk_accepted(self, findings): if not all(finding.active for finding in findings): return False - if not any(finding.risk_accepted for finding in findings): - return True - - return False + return bool(not any(finding.risk_accepted for finding in findings)) def assert_all_inactive_risk_accepted(self, findings): if any(finding.active for finding in findings): return False - if all(finding.risk_accepted for finding in findings): - return True - - return False + return bool(all(finding.risk_accepted for finding in findings)) def test_add_risk_acceptance_single_findings_accepted(self): ra_data = copy.copy(self.data_risk_accceptance) diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py index 08233e00220..65e377496fe 100644 --- a/unittests/tools/test_cyclonedx_parser.py +++ b/unittests/tools/test_cyclonedx_parser.py @@ -238,7 +238,7 @@ def test_cyclonedx_1_4_jake_json(self): self.assertEqual(7, len(findings)) for finding in findings: finding.clean() - if "c7129ff8-08bc-4afe-82ec-7d97b9491741" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "c7129ff8-08bc-4afe-82ec-7d97b9491741": with self.subTest(i="CVE-2021-33203"): self.assertIn(finding.severity, Finding.SEVERITIES) self.assertEqual("Django:2.0 | c7129ff8-08bc-4afe-82ec-7d97b9491741", finding.title) @@ -254,7 +254,7 @@ def test_cyclonedx_1_4_jake_json(self): finding.description, ) self.assertEqual(datetime.date(2022, 1, 28), datetime.datetime.date(finding.date)) - elif "c9b6a6a5-01a4-4d4c-b480-b9d6825dc4d0" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "c9b6a6a5-01a4-4d4c-b480-b9d6825dc4d0": with self.subTest(i="CVE-2018-7536"): self.assertEqual("Django:2.0 | c9b6a6a5-01a4-4d4c-b480-b9d6825dc4d0", finding.title) self.assertEqual("Medium", finding.severity) @@ -269,7 +269,7 @@ def test_cyclonedx_1_4_jake_json(self): finding.description, ) self.assertEqual(datetime.date(2022, 1, 28), datetime.datetime.date(finding.date)) - elif "90cfba6a-ddc9-4708-b131-5d875e8c558d" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "90cfba6a-ddc9-4708-b131-5d875e8c558d": with self.subTest(i="CVE-2018-6188"): self.assertEqual("High", finding.severity) self.assertEqual("Django", finding.component_name) diff --git a/unittests/tools/test_mozilla_observatory_parser.py b/unittests/tools/test_mozilla_observatory_parser.py index d15a97a54f3..96c5d0719e2 100644 --- a/unittests/tools/test_mozilla_observatory_parser.py +++ b/unittests/tools/test_mozilla_observatory_parser.py @@ -12,7 +12,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self): # test that all findings are not active for finding in findings: self.assertFalse(finding.active) - if "strict-transport-security" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "strict-transport-security": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertEqual("Preloaded via the HTTP Strict Transport Security (HSTS) preloading process", finding.title) self.assertEqual("Info", finding.severity) @@ -37,7 +37,7 @@ def test_parse_file_cli_mozilla_org(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) for finding in findings: - if "content-security-policy" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "content-security-policy": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Content Security Policy (CSP) implemented unsafely. 
This includes 'unsafe-inline' or data: inside script-src, overly broad sources such as https: inside object-src or script-src, or not restricting the sources for object-src or script-src.", finding.title) @@ -54,20 +54,20 @@ def test_parse_file_cli_demo(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) for finding in findings: - if "content-security-policy" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "content-security-policy": with self.subTest(vuln_id_from_tool="content-security-policy"): self.assertTrue(finding.active) self.assertEqual("Content Security Policy (CSP) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Content Security Policy (CSP) header not implemented", finding.description) self.assertEqual("content-security-policy", finding.vuln_id_from_tool) - elif "cookies" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "cookies": with self.subTest(vuln_id_from_tool="cookies"): self.assertTrue(finding.active) self.assertEqual("Cookies set without using the Secure flag or set over HTTP", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Cookies set without using the Secure flag or set over HTTP", finding.description) - elif "strict-transport-security" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "strict-transport-security": with self.subTest(vuln_id_from_tool="strict-transport-security"): self.assertTrue(finding.active) self.assertEqual("HTTP Strict Transport Security (HSTS) header not implemented", finding.title) @@ -84,31 +84,31 @@ def test_parse_file_cli_juicy(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) for finding in findings: - if "content-security-policy" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "content-security-policy": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Content Security Policy (CSP) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Content Security Policy (CSP) header not implemented", finding.description) - elif "strict-transport-security" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "strict-transport-security": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("HTTP Strict Transport Security (HSTS) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("HTTP Strict Transport Security (HSTS) header not implemented", finding.description) - elif "x-xss-protection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-xss-protection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-XSS-Protection header not implemented", finding.title) self.assertEqual("Low", finding.severity) self.assertIn("X-XSS-Protection header not implemented", finding.description) - elif "subresource-integrity" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "subresource-integrity": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual('Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src="//..."', finding.title) self.assertEqual("High", finding.severity) self.assertIn("Subresource 
Integrity (SRI) not implemented", finding.description) - elif "redirection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "redirection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Does not redirect to an HTTPS site", finding.title) @@ -125,49 +125,49 @@ def test_parse_file_cli_nmap_scanme(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) for finding in findings: - if "content-security-policy" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "content-security-policy": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Content Security Policy (CSP) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Content Security Policy (CSP) header not implemented", finding.description) - elif "strict-transport-security" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "strict-transport-security": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("HTTP Strict Transport Security (HSTS) header cannot be set, as site contains an invalid certificate chain", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("HTTP Strict Transport Security (HSTS) header cannot be set, as site contains an invalid certificate chain", finding.description) - elif "x-xss-protection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-xss-protection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-XSS-Protection header not implemented", finding.title) self.assertEqual("Low", finding.severity) self.assertIn("X-XSS-Protection header not implemented", finding.description) - elif "x-frame-options" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-frame-options": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-Frame-Options (XFO) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("X-Frame-Options (XFO) header not implemented", finding.description) - elif "x-content-type-options" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-content-type-options": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-Content-Type-Options header not implemented", finding.title) self.assertEqual("Low", finding.severity) self.assertIn("X-Content-Type-Options header not implemented", finding.description) - elif "subresource-integrity" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "subresource-integrity": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual('Subresource Integrity (SRI) not implemented, and external scripts are loaded over HTTP or use protocol-relative URLs via src="//..."', finding.title) self.assertEqual("High", finding.severity) self.assertIn("Subresource Integrity (SRI) not implemented", finding.description) - elif "redirection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "redirection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Initial redirection from HTTP to HTTPS is to a different host, preventing HSTS", 
finding.title) self.assertEqual("Low", finding.severity) self.assertIn("Initial redirection from HTTP to HTTPS is to a different host, preventing HSTS", finding.description) - elif "referrer-policy-private" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "referrer-policy-private": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Referrer-Policy header not implemented", finding.title) @@ -184,49 +184,49 @@ def test_parse_file_cli_nmap_scanme_no_name_attribute(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) for finding in findings: - if "content-security-policy" == finding.vuln_id_from_tool: + if finding.vuln_id_from_tool == "content-security-policy": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Content Security Policy (CSP) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Content Security Policy (CSP) header not implemented", finding.description) - elif "strict-transport-security" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "strict-transport-security": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("HTTP Strict Transport Security (HSTS) header cannot be set for sites not available over HTTPS", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("HTTP Strict Transport Security (HSTS) header cannot be set for sites not available over HTTPS", finding.description) - elif "x-xss-protection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-xss-protection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-XSS-Protection header not implemented", finding.title) self.assertEqual("Low", finding.severity) self.assertIn("X-XSS-Protection header not implemented", finding.description) - elif "x-frame-options" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-frame-options": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-Frame-Options (XFO) header not implemented", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("X-Frame-Options (XFO) header not implemented", finding.description) - elif "x-content-type-options" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "x-content-type-options": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("X-Content-Type-Options header not implemented", finding.title) self.assertEqual("Low", finding.severity) self.assertIn("X-Content-Type-Options header not implemented", finding.description) - elif "subresource-integrity" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "subresource-integrity": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertFalse(finding.active) self.assertEqual("Subresource Integrity (SRI) not implemented, but all scripts are loaded from a similar origin", finding.title) self.assertEqual("Info", finding.severity) self.assertIn("Subresource Integrity (SRI) not implemented", finding.description) - elif "redirection" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "redirection": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) 
self.assertEqual("Does not redirect to an HTTPS site", finding.title) self.assertEqual("Medium", finding.severity) self.assertIn("Does not redirect to an HTTPS site", finding.description) - elif "referrer-policy-private" == finding.vuln_id_from_tool: + elif finding.vuln_id_from_tool == "referrer-policy-private": with self.subTest(vuln_id_from_tool=finding.vuln_id_from_tool): self.assertTrue(finding.active) self.assertEqual("Referrer-Policy header not implemented", finding.title) diff --git a/unittests/tools/test_nikto_parser.py b/unittests/tools/test_nikto_parser.py index ef24221b9ab..1c4da0b6a19 100644 --- a/unittests/tools/test_nikto_parser.py +++ b/unittests/tools/test_nikto_parser.py @@ -59,7 +59,7 @@ def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self): endpoint.clean() self.assertEqual(11, len(findings)) for finding in findings: - if "OSVDB-3092" == finding.unique_id_from_tool: + if finding.unique_id_from_tool == "OSVDB-3092": self.assertEqual("001811", finding.vuln_id_from_tool) self.assertEqual(1, finding.nb_occurences) self.assertEqual("Medium", finding.severity) @@ -68,9 +68,9 @@ def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self): self.assertEqual(443, endpoint.port) self.assertEqual("juice-shop.herokuapp.com", endpoint.host) self.assertEqual("public/", endpoint.path) - if ("Retrieved via header: 1.1 vegur" == finding.title and "Info" == finding.severity): + if (finding.title == "Retrieved via header: 1.1 vegur" and finding.severity == "Info"): self.assertEqual(1, len(finding.unsaved_endpoints)) - if ("Potentially Interesting Backup/Cert File Found. " == finding.title and "Info" == finding.severity): + if (finding.title == "Potentially Interesting Backup/Cert File Found. " and finding.severity == "Info"): self.assertEqual(140, len(finding.unsaved_endpoints)) def test_parse_file_json_with_uri_errors(self): @@ -82,7 +82,7 @@ def test_parse_file_json_with_uri_errors(self): endpoint.clean() self.assertEqual(13, len(findings)) for finding in findings: - if "favicon.ico file identifies this server as: Apache Tomcat" == finding.title: + if finding.title == "favicon.ico file identifies this server as: Apache Tomcat": self.assertEqual("500008", finding.vuln_id_from_tool) self.assertEqual(1, finding.nb_occurences) self.assertEqual("Medium", finding.severity) @@ -92,7 +92,7 @@ def test_parse_file_json_with_uri_errors(self): # self.assertEqual(443, endpoint.port) # self.assertEqual("juice-shop.herokuapp.com", endpoint.host) # self.assertEqual("public/", endpoint.path) - elif "/examples/servlets/index.html: Apache Tomcat default JSP pages present." == finding.title: + elif finding.title == "/examples/servlets/index.html: Apache Tomcat default JSP pages present.": self.assertEqual("000366", finding.vuln_id_from_tool) self.assertEqual(1, finding.nb_occurences) self.assertEqual("Info", finding.severity)