diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml index dd34b88d76f..f5ec107d83f 100644 --- a/.github/workflows/k8s-tests.yml +++ b/.github/workflows/k8s-tests.yml @@ -29,6 +29,14 @@ env: --set mysql.enabled=false \ --set createPostgresqlSecret=true \ " + HELM_PGHA_DATABASE_SETTINGS: " \ + --set database=postgresqlha \ + --set postgresql.enabled=false \ + --set mysql.enabled=false \ + --set postgresqlha.enabled=true \ + --set createPostgresqlHaSecret=true \ + --set createPostgresqlHaPgpoolSecret=true \ + " jobs: setting_minikube_cluster: name: Kubernetes Deployment @@ -56,6 +64,10 @@ jobs: brokers: redis k8s: 'v1.23.9' os: debian + - databases: pgsqlha + brokers: rabbit + k8s: 'v1.23.9' + os: debian - databases: pgsql brokers: rabbit k8s: 'v1.23.9' diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 545e4e3ef79..c7db1f1feef 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -1,7 +1,7 @@ # code: language=Dockerfile -FROM openapitools/openapi-generator-cli:v7.4.0@sha256:579832bed49ea6c275ce2fb5f2d515f5b03d2b6243f3c80fa8430e4f5a770e9a as openapitools +FROM openapitools/openapi-generator-cli:v7.3.0@sha256:74b9992692c836e42a02980db4b76bee94e17075e4487cd80f5c540dd57126b9 as openapitools FROM python:3.11.4-slim-bullseye@sha256:40319d0a897896e746edf877783ef39685d44e90e1e6de8d964d0382df0d4952 as build WORKDIR /app RUN \ diff --git a/components/yarn.lock b/components/yarn.lock index d3d65c363f5..ffe72a3aaf0 100644 --- a/components/yarn.lock +++ b/components/yarn.lock @@ -538,6 +538,10 @@ fast-levenshtein@~2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== +flot-axis@markrcote/flot-axislabels#*: + version "0.0.0" + resolved "https://codeload.github.com/markrcote/flot-axislabels/tar.gz/a181e09d04d120d05e5bc2baaa8738b5b3670428" + flot@flot/flot#~0.8.3: version "0.8.3" resolved "https://codeload.github.com/flot/flot/tar.gz/453b017cc5acfd75e252b93e8635f57f4196d45d" diff --git a/docs/content/en/integrations/parsers/file/checkmarx_one.md b/docs/content/en/integrations/parsers/file/checkmarx_one.md deleted file mode 100644 index 1d5a07f0ca2..00000000000 --- a/docs/content/en/integrations/parsers/file/checkmarx_one.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Checkmarx One Scan" -toc_hide: true ---- -Import JSON Checkmarx One scanner reports - -### Sample Scan Data -Sample Checkmarx One scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/checkmarx_one). \ No newline at end of file diff --git a/docs/content/en/integrations/parsers/file/crunch42.md b/docs/content/en/integrations/parsers/file/crunch42.md deleted file mode 100644 index e8aa1b1e556..00000000000 --- a/docs/content/en/integrations/parsers/file/crunch42.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Crunch42 Scan" -toc_hide: true ---- -Import JSON findings from Crunch42 vulnerability scan tool. - -### Sample Scan Data -Sample Crunch42 Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/crunch42). 
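The Crunch42 parser removed further down in this diff (dojo/tools/crunch42/parser.py) maps the report's criticality field onto DefectDojo severities. A minimal sketch of that mapping follows; the field names come from the deleted parser, while the concrete issue values are made up for illustration:

```python
# Severity mapping as implemented by the deleted Crunch42Parser.get_item();
# the sample issue below is illustrative, not taken from a real report.
def criticality_to_severity(criticality: int) -> str:
    if criticality == 1:
        return "Info"
    elif criticality == 2:
        return "Low"
    elif criticality == 3:
        return "Medium"
    elif criticality <= 4:
        return "High"
    return "Critical"


issue = {
    "fingerprint": "deadbeef",         # becomes unique_id_from_tool
    "pointer": "/paths/~1pets/get",    # location inside the scanned API file
    "specificDescription": "example",  # optional message
    "score": 3.5,
    "criticality": 3,
}
print(criticality_to_severity(issue["criticality"]))  # -> Medium
```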
\ No newline at end of file diff --git a/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md b/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md deleted file mode 100644 index a4b4a090b08..00000000000 --- a/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "NPM Audit Version 7+" -toc_hide: true ---- - -**Note: This parser only supports import from NPM Audit v7 or newer.** - -Node Package Manager (NPM) Audit plugin output file can be imported in -JSON format. Only imports the \'vulnerabilities\' subtree. - -### File Types -This parser expects a JSON file. Can only import NPM Audit files from NPM Audit v7 or newer. It aims to provide the same -information as the non-JSON formatted output. - -Attempting to import a file from a version less than 7 of NPM Audit will raise an error message. - -### Command Used To Generate Output -Either of these commands will work: -- \`npm audit --json\` -- \`npm audit fix --dry-run --json\` - -### Sample Scan Data -Sample NPM Audit scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/npm_audit_7_plus). - -### Link To Tool -See NPM-Audit-Report on GitHub: https://github.com/npm/npm-audit-report/ diff --git a/docs/content/en/integrations/parsers/file/pip_audit.md b/docs/content/en/integrations/parsers/file/pip_audit.md index 96b9b250d58..df24cdbe7a3 100644 --- a/docs/content/en/integrations/parsers/file/pip_audit.md +++ b/docs/content/en/integrations/parsers/file/pip_audit.md @@ -2,41 +2,7 @@ title: "pip-audit Scan" toc_hide: true --- - -Import pip-audit JSON scan report. - -### File Types -This parser expects a JSON file. - -The parser can handle legacy and current JSON format. - -The current format has added a `dependencies` element: - - { - "dependencies": [ - { - "name": "pyopenssl", - "version": "23.1.0", - "vulns": [] - }, - ... - ] - ... - } - -The legacy format does not include the `dependencies` key: - - [ - { - "name": "adal", - "version": "1.2.2", - "vulns": [] - }, - ... - ] +Import pip-audit JSON scan report ### Sample Scan Data -Sample pip-audit Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/pip_audit). - -### Link To Tool -[pip-audit](https://pypi.org/project/pip-audit/) +Sample pip-audit Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/pip_audit). 
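Note that the parser change further down in this diff reverts dojo/tools/pip_audit/parser.py to iterating the report as a top-level list, i.e. the legacy layout shown in the documentation deleted above. A minimal sketch of that iteration, assuming a legacy-format report with made-up values:

```python
import json

# Legacy pip-audit layout: a top-level list of dependencies, each carrying a
# "vulns" array. The concrete values here are illustrative only.
legacy_report = json.loads("""
[
    {"name": "adal", "version": "1.2.2",
     "vulns": [{"id": "PYSEC-EXAMPLE-1", "fix_versions": ["1.2.3"],
                "description": "example vulnerability"}]}
]
""")

for item in legacy_report:
    for vuln in item.get("vulns", []):
        # Mirrors the title the restored parser builds for each Finding.
        print(f"{vuln['id']} in {item['name']}:{item.get('version')}")
```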
\ No newline at end of file diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py index 0f5b7676c75..c2b491eb1a1 100644 --- a/dojo/endpoint/views.py +++ b/dojo/endpoint/views.py @@ -33,6 +33,12 @@ def process_endpoints_view(request, host_view=False, vulnerable=False): if vulnerable: endpoints = Endpoint.objects.filter( + finding__active=True, + finding__verified=True, + finding__out_of_scope=False, + finding__mitigated__isnull=True, + finding__false_p=False, + finding__duplicate=False, status_endpoint__mitigated=False, status_endpoint__false_positive=False, status_endpoint__out_of_scope=False, @@ -118,12 +124,12 @@ def process_endpoint_view(request, eid, host_view=False): endpoints = endpoint.host_endpoints() endpoint_metadata = None all_findings = endpoint.host_findings() - active_findings = endpoint.host_active_findings() + active_verified_findings = endpoint.host_active_verified_findings() else: endpoints = None endpoint_metadata = dict(endpoint.endpoint_meta.values_list('name', 'value')) all_findings = endpoint.findings.all() - active_findings = endpoint.active_findings() + active_verified_findings = endpoint.active_verified_findings() if all_findings: start_date = timezone.make_aware(datetime.combine(all_findings.last().date, datetime.min.time())) @@ -142,8 +148,12 @@ def process_endpoint_view(request, eid, host_view=False): monthly_counts = get_period_counts(all_findings, closed_findings, None, months_between, start_date, relative_delta='months') - paged_findings = get_page_items(request, active_findings, 25) - vulnerable = active_findings.count() != 0 + paged_findings = get_page_items(request, active_verified_findings, 25) + + vulnerable = False + + if active_verified_findings.count() != 0: + vulnerable = True product_tab = Product_Tab(endpoint.product, "Host" if host_view else "Endpoint", tab="endpoints") return render(request, diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index 7b20a8cc10b..4f7360fc465 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -1,5 +1,4 @@ import logging -from typing import Any from dojo.utils import add_error_message_to_response, get_system_setting, to_str_typed import os import io @@ -696,13 +695,6 @@ def prepare_jira_issue_fields( def add_jira_issue(obj, *args, **kwargs): - def failure_to_add_message(message: str, exception: Exception, object: Any) -> bool: - if exception: - logger.exception(exception) - logger.error(message) - log_jira_alert(message, obj) - return False - logger.info('trying to create a new jira issue for %d:%s', obj.id, to_str_typed(obj)) if not is_jira_enabled(): @@ -710,7 +702,9 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b if not is_jira_configured_and_enabled(obj): message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' 
% (obj.id, to_str_typed(obj)) - return failure_to_add_message(message, None, obj) + logger.error(message) + log_jira_alert(message, obj) + return False jira_project = get_jira_project(obj) jira_instance = get_jira_instance(obj) @@ -725,23 +719,19 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b logger.warning("The JIRA issue will NOT be created.") return False logger.debug('Trying to create a new JIRA issue for %s...', to_str_typed(obj)) - # Attempt to get the jira connection + meta = None try: JIRAError.log_to_tempfile = False jira = get_jira_connection(jira_instance) - except Exception as e: - message = f"The following jira instance could not be connected: {jira_instance} - {e.text}" - return failure_to_add_message(message, e, obj) - # Set the list of labels to set on the jira issue - labels = get_labels(obj) + get_tags(obj) - if labels: - labels = list(dict.fromkeys(labels)) # de-dup - # Determine what due date to set on the jira issue - duedate = None - if System_Settings.objects.get().enable_finding_sla: - duedate = obj.sla_deadline() - # Set the fields that will compose the jira issue - try: + + labels = get_labels(obj) + get_tags(obj) + if labels: + labels = list(dict.fromkeys(labels)) # de-dup + + duedate = None + if System_Settings.objects.get().enable_finding_sla: + duedate = obj.sla_deadline() + issuetype_fields = get_issuetype_fields(jira, jira_project.project_key, jira_instance.default_issue_type) fields = prepare_jira_issue_fields( project_key=jira_project.project_key, @@ -757,40 +747,16 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b duedate=duedate, issuetype_fields=issuetype_fields, default_assignee=jira_project.default_assignee) - except TemplateDoesNotExist as e: - message = f"Failed to find a jira issue template to be used - {e}" - return failure_to_add_message(message, e, obj) - except Exception as e: - message = f"Failed to fetch fields for {jira_instance.default_issue_type} under project {jira_project.project_key} - {e}" - return failure_to_add_message(message, e, obj) - # Create a new issue in Jira with the fields set in the last step - try: + logger.debug('sending fields to JIRA: %s', fields) new_issue = jira.create_issue(fields) - logger.debug('saving JIRA_Issue for %s finding %s', new_issue.key, obj.id) - j_issue = JIRA_Issue(jira_id=new_issue.id, jira_key=new_issue.key, jira_project=jira_project) - j_issue.set_obj(obj) - j_issue.jira_creation = timezone.now() - j_issue.jira_change = timezone.now() - j_issue.save() - jira.issue(new_issue.id) - logger.info('Created the following jira issue for %d:%s', obj.id, to_str_typed(obj)) - except Exception as e: - message = f"Failed to create jira issue with the following payload: {fields} - {e}" - return failure_to_add_message(message, e, obj) - # Attempt to set a default assignee - try: if jira_project.default_assignee: created_assignee = str(new_issue.get_field('assignee')) logger.debug("new issue created with assignee %s", created_assignee) if created_assignee != jira_project.default_assignee: jira.assign_issue(new_issue.key, jira_project.default_assignee) - except Exception as e: - message = f"Failed to assign the default user: {jira_project.default_assignee} - {e}" - # Do not return here as this should be a soft failure that should be logged - failure_to_add_message(message, e, obj) - # Upload dojo finding screenshots to Jira - try: + + # Upload dojo finding screenshots to Jira findings = [obj] if isinstance(obj, Finding_Group): findings = 
obj.findings.all() @@ -805,22 +771,7 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b settings.MEDIA_ROOT + '/' + pic) except FileNotFoundError as e: logger.info(e) - except Exception as e: - message = f"Failed to attach attachments to the jira issue: {e}" - # Do not return here as this should be a soft failure that should be logged - failure_to_add_message(message, e, obj) - # Add any notes that already exist in the finding to the JIRA - try: - for find in findings: - if find.notes.all(): - for note in find.notes.all().reverse(): - add_comment(obj, note) - except Exception as e: - message = f"Failed to add notes to the jira ticket: {e}" - # Do not return here as this should be a soft failure that should be logged - failure_to_add_message(message, e, obj) - # Determine whether to assign this new jira issue to a mapped epic - try: + if jira_project.enable_engagement_epic_mapping: eng = obj.test.engagement logger.debug('Adding to EPIC Map: %s', eng.name) @@ -829,11 +780,36 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(new_issue.id)], ignore_epics=True) else: logger.info('The following EPIC does not exist: %s', eng.name) - except Exception as e: - message = f"Failed to assign jira issue to existing epic: {e}" - return failure_to_add_message(message, e, obj) - return True + # only link the new issue if it was successfully created, incl attachments and epic link + logger.debug('saving JIRA_Issue for %s finding %s', new_issue.key, obj.id) + j_issue = JIRA_Issue( + jira_id=new_issue.id, jira_key=new_issue.key, jira_project=jira_project) + j_issue.set_obj(obj) + + j_issue.jira_creation = timezone.now() + j_issue.jira_change = timezone.now() + j_issue.save() + jira.issue(new_issue.id) + + logger.info('Created the following jira issue for %d:%s', obj.id, to_str_typed(obj)) + + # Add any notes that already exist in the finding to the JIRA + for find in findings: + if find.notes.all(): + for note in find.notes.all().reverse(): + add_comment(obj, note) + + return True + except TemplateDoesNotExist as e: + logger.exception(e) + log_jira_alert(str(e), obj) + return False + except JIRAError as e: + logger.exception(e) + logger.error("jira_meta for project: %s and url: %s meta: %s", jira_project.project_key, jira_project.jira_instance.url, json.dumps(meta, indent=4)) # this is None safe + log_jira_alert(e.text, obj) + return False # we need two separate celery tasks due to the decorators we're using to map to/from ids @@ -855,13 +831,6 @@ def update_jira_issue_for_finding_group(finding_group, *args, **kwargs): def update_jira_issue(obj, *args, **kwargs): - def failure_to_update_message(message: str, exception: Exception, obj: Any) -> bool: - if exception: - logger.exception(exception) - logger.error(message) - log_jira_alert(message, obj) - return False - logger.debug('trying to update a linked jira issue for %d:%s', obj.id, to_str_typed(obj)) if not is_jira_enabled(): @@ -872,22 +841,21 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b if not is_jira_configured_and_enabled(obj): message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' 
% (obj.id, to_str_typed(obj)) - return failure_to_update_message(message, None, obj) + logger.error(message) + log_jira_alert(message, obj) + return False j_issue = obj.jira_issue + meta = None try: JIRAError.log_to_tempfile = False jira = get_jira_connection(jira_instance) issue = jira.issue(j_issue.jira_id) - except Exception as e: - message = f"The following jira instance could not be connected: {jira_instance} - {e}" - return failure_to_update_message(message, e, obj) - # Set the list of labels to set on the jira issue - labels = get_labels(obj) + get_tags(obj) - if labels: - labels = list(dict.fromkeys(labels)) # de-dup - # Set the fields that will compose the jira issue - try: + + labels = get_labels(obj) + get_tags(obj) + if labels: + labels = list(dict.fromkeys(labels)) # de-dup + issuetype_fields = get_issuetype_fields(jira, jira_project.project_key, jira_instance.default_issue_type) fields = prepare_jira_issue_fields( project_key=jira_project.project_key, @@ -900,38 +868,26 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b # Do not update the priority in jira after creation as this could have changed in jira, but should not change in dojo # priority_name=jira_priority(obj), issuetype_fields=issuetype_fields) - except Exception as e: - message = f"Failed to fetch fields for {jira_instance.default_issue_type} under project {jira_project.project_key} - {e}" - return failure_to_update_message(message, e, obj) - # Update the issue in jira - try: + logger.debug('sending fields to JIRA: %s', fields) + issue.update( summary=fields['summary'], description=fields['description'], # Do not update the priority in jira after creation as this could have changed in jira, but should not change in dojo # priority=fields['priority'], fields=fields) - j_issue.jira_change = timezone.now() - j_issue.save() - except Exception as e: - message = f"Failed to update the jira issue with the following payload: {fields} - {e}" - return failure_to_update_message(message, e, obj) - # Update the status in jira - try: + push_status_to_jira(obj, jira_instance, jira, issue) - except Exception as e: - message = f"Failed to update the jira issue status - {e}" - return failure_to_update_message(message, e, obj) - # Upload dojo finding screenshots to Jira - try: + + # Upload dojo finding screenshots to Jira findings = [obj] if isinstance(obj, Finding_Group): findings = obj.findings.all() for find in findings: for pic in get_file_images(find): - # It doesn't look like the celery container has anything in the media + # It doesn't look like the celery container has anything in the media # folder. Has this feature ever worked? 
try: jira_attachment( @@ -939,12 +895,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b settings.MEDIA_ROOT + '/' + pic) except FileNotFoundError as e: logger.info(e) - except Exception as e: - message = f"Failed to attach attachments to the jira issue: {e}" - # Do not return here as this should be a soft failure that should be logged - failure_to_update_message(message, e, obj) - # Determine whether to assign this new jira issue to a mapped epic - try: + if jira_project.enable_engagement_epic_mapping: eng = find.test.engagement logger.debug('Adding to EPIC Map: %s', eng.name) @@ -953,11 +904,20 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(j_issue.jira_id)], ignore_epics=True) else: logger.info('The following EPIC does not exist: %s', eng.name) - except Exception as e: - message = f"Failed to assign jira issue to existing epic: {e}" - return failure_to_update_message(message, e, obj) - return True + j_issue.jira_change = timezone.now() + j_issue.save() + + logger.debug('Updated the following linked jira issue for %d:%s', find.id, find.title) + return True + + except JIRAError as e: + logger.exception(e) + logger.error("jira_meta for project: %s and url: %s meta: %s", jira_project.project_key, jira_project.jira_instance.url, json.dumps(meta, indent=4)) # this is None safe + if issue_from_jira_is_active(issue): + # Only alert if the upstream JIRA is active, we don't care about closed issues + log_jira_alert(e.text, obj) + return False def get_jira_issue_from_jira(find): diff --git a/dojo/locale/en/LC_MESSAGES/django.po b/dojo/locale/en/LC_MESSAGES/django.po index 92e365e334b..ab26c8cbdb4 100644 --- a/dojo/locale/en/LC_MESSAGES/django.po +++ b/dojo/locale/en/LC_MESSAGES/django.po @@ -3748,7 +3748,7 @@ msgid "" "tags, references, languages or technologies contain the search query and " "products whose\n" " name, tags or description contain the " -"search query.
Advanced search operators: (Restrict results to a certain " +"search query.
Advanced search operators: (Restrict results to a certain " "type) product:,\n" " engagement:, finding:, endpoint:, tag:, " "language:, technology: or vulnerability_id:.\n" diff --git a/dojo/models.py b/dojo/models.py index 36a7d2e5200..362ec399b69 100755 --- a/dojo/models.py +++ b/dojo/models.py @@ -1124,7 +1124,7 @@ def endpoint_count(self): endpoints = getattr(self, 'active_endpoints', None) if endpoints: return len(self.active_endpoints) - return 0 + return None def open_findings(self, start_date=None, end_date=None): if start_date is None or end_date is None: diff --git a/dojo/product/views.py b/dojo/product/views.py index 6291540342e..ee7c3b35e80 100755 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -112,11 +112,8 @@ def prefetch_for_product(prods): prefetched_prods = prefetched_prods.prefetch_related('members') prefetched_prods = prefetched_prods.prefetch_related('prod_type__members') active_endpoint_query = Endpoint.objects.filter( - status_endpoint__mitigated=False, - status_endpoint__false_positive=False, - status_endpoint__out_of_scope=False, - status_endpoint__risk_accepted=False, - ).distinct() + finding__active=True, + finding__mitigated__isnull=True).distinct() prefetched_prods = prefetched_prods.prefetch_related( Prefetch('endpoint_set', queryset=active_endpoint_query, to_attr='active_endpoints')) prefetched_prods = prefetched_prods.prefetch_related('tags') @@ -326,15 +323,15 @@ def finding_querys(request, prod): end_date = timezone.now() week = end_date - timedelta(days=7) # seven days and /newer are considered "new" - filters['accepted'] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") + filters['accepted'] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) filters['verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") filters['new_verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['open'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['inactive'] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['closed'] = findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['false_positive'] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['out_of_scope'] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date") - filters['all'] = findings_qs.order_by("date") + filters['open'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) + filters['inactive'] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) + filters['closed'] = findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) + filters['false_positive'] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) + filters['out_of_scope'] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]) + filters['all'] = 
findings_qs filters['open_vulns'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter( cwe__isnull=False, ).order_by('cwe').values( @@ -479,7 +476,6 @@ def view_product_metrics(request, pid): add_breadcrumb(parent=prod, top_level=False, request=request) - # An ordered dict does not make sense here. open_close_weekly = OrderedDict() severity_weekly = OrderedDict() critical_weekly = OrderedDict() @@ -487,83 +483,81 @@ medium_weekly = OrderedDict() open_objs_by_severity = get_zero_severity_level() - closed_objs_by_severity = get_zero_severity_level() accepted_objs_by_severity = get_zero_severity_level() - for finding in filters.get("all", []): - iso_cal = finding.date.isocalendar() - date = iso_to_gregorian(iso_cal[0], iso_cal[1], 1) - html_date = date.strftime("<span class='small'>%m/%d<br/>%Y</span>") - unix_timestamp = (tcalendar.timegm(date.timetuple()) * 1000) - - # Open findings - if finding in filters.get("open", []): - if unix_timestamp not in critical_weekly: - critical_weekly[unix_timestamp] = {'count': 0, 'week': html_date} - if unix_timestamp not in high_weekly: - high_weekly[unix_timestamp] = {'count': 0, 'week': html_date} - if unix_timestamp not in medium_weekly: - medium_weekly[unix_timestamp] = {'count': 0, 'week': html_date} - - if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['open'] += 1 + for v in filters.get('open', None): + iso_cal = v.date.isocalendar() + x = iso_to_gregorian(iso_cal[0], iso_cal[1], 1) + y = x.strftime("<span class='small'>%m/%d<br/>%Y</span>
") + x = (tcalendar.timegm(x.timetuple()) * 1000) + if x not in critical_weekly: + critical_weekly[x] = {'count': 0, 'week': y} + if x not in high_weekly: + high_weekly[x] = {'count': 0, 'week': y} + if x not in medium_weekly: + medium_weekly[x] = {'count': 0, 'week': y} + + if x in open_close_weekly: + if v.mitigated: + open_close_weekly[x]['closed'] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 1, 'accepted': 0} - open_close_weekly[unix_timestamp]['week'] = html_date + open_close_weekly[x]['open'] += 1 + else: + if v.mitigated: + open_close_weekly[x] = {'closed': 1, 'open': 0, 'accepted': 0} + else: + open_close_weekly[x] = {'closed': 0, 'open': 1, 'accepted': 0} + open_close_weekly[x]['week'] = y - if view == 'Finding': - severity = finding.severity - elif view == 'Endpoint': - severity = finding.finding.severity + if view == 'Finding': + severity = v.severity + elif view == 'Endpoint': + severity = v.finding.severity - if unix_timestamp in severity_weekly: - if severity in severity_weekly[unix_timestamp]: - severity_weekly[unix_timestamp][severity] += 1 - else: - severity_weekly[unix_timestamp][severity] = 1 + if x in severity_weekly: + if severity in severity_weekly[x]: + severity_weekly[x][severity] += 1 else: - severity_weekly[unix_timestamp] = get_zero_severity_level() - severity_weekly[unix_timestamp][severity] = 1 - severity_weekly[unix_timestamp]['week'] = html_date + severity_weekly[x][severity] = 1 + else: + severity_weekly[x] = get_zero_severity_level() + severity_weekly[x][severity] = 1 + severity_weekly[x]['week'] = y - if severity == 'Critical': - if unix_timestamp in critical_weekly: - critical_weekly[unix_timestamp]['count'] += 1 - else: - critical_weekly[unix_timestamp] = {'count': 1, 'week': html_date} - elif severity == 'High': - if unix_timestamp in high_weekly: - high_weekly[unix_timestamp]['count'] += 1 - else: - high_weekly[unix_timestamp] = {'count': 1, 'week': html_date} - elif severity == 'Medium': - if unix_timestamp in medium_weekly: - medium_weekly[unix_timestamp]['count'] += 1 - else: - medium_weekly[unix_timestamp] = {'count': 1, 'week': html_date} - # Optimization: count severity level on server side - if open_objs_by_severity.get(finding.severity) is not None: - open_objs_by_severity[finding.severity] += 1 - # Close findings - if finding in filters.get("closed", []): - if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['closed'] += 1 + if severity == 'Critical': + if x in critical_weekly: + critical_weekly[x]['count'] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 1, 'open': 0, 'accepted': 0} - open_close_weekly[unix_timestamp]['week'] = html_date - # Optimization: count severity level on server side - if closed_objs_by_severity.get(finding.severity) is not None: - closed_objs_by_severity[finding.severity] += 1 - # Risk Accepted findings - if finding in filters.get("accepted", []): - if unix_timestamp in open_close_weekly: - open_close_weekly[unix_timestamp]['accepted'] += 1 + critical_weekly[x] = {'count': 1, 'week': y} + elif severity == 'High': + if x in high_weekly: + high_weekly[x]['count'] += 1 else: - open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 0, 'accepted': 1} - open_close_weekly[unix_timestamp]['week'] = html_date - # Optimization: count severity level on server side - if accepted_objs_by_severity.get(finding.severity) is not None: - accepted_objs_by_severity[finding.severity] += 1 + high_weekly[x] = {'count': 1, 'week': y} + elif severity == 'Medium': + if x in 
medium_weekly: + medium_weekly[x]['count'] += 1 + else: + medium_weekly[x] = {'count': 1, 'week': y} + + # Optimization: count severity level on server side + if open_objs_by_severity.get(v.severity) is not None: + open_objs_by_severity[v.severity] += 1 + + for a in filters.get('accepted', None): + iso_cal = a.date.isocalendar() + x = iso_to_gregorian(iso_cal[0], iso_cal[1], 1) + y = x.strftime("<span class='small'>%m/%d<br/>%Y</span>
") + x = (tcalendar.timegm(x.timetuple()) * 1000) + + if x in open_close_weekly: + open_close_weekly[x]['accepted'] += 1 + else: + open_close_weekly[x] = {'closed': 0, 'open': 0, 'accepted': 1} + open_close_weekly[x]['week'] = y + + if accepted_objs_by_severity.get(a.severity) is not None: + accepted_objs_by_severity[a.severity] += 1 test_data = {} for t in tests: @@ -590,7 +584,7 @@ def view_product_metrics(request, pid): 'inactive_objs': filters.get('inactive', None), 'inactive_objs_by_severity': sum_by_severity_level(filters.get('inactive')), 'closed_objs': filters.get('closed', None), - 'closed_objs_by_severity': closed_objs_by_severity, + 'closed_objs_by_severity': sum_by_severity_level(filters.get('closed')), 'false_positive_objs': filters.get('false_positive', None), 'false_positive_objs_by_severity': sum_by_severity_level(filters.get('false_positive')), 'out_of_scope_objs': filters.get('out_of_scope', None), diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 54e83542eba..c2d85ec3975 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1187,7 +1187,6 @@ def saml2_attrib_map_format(dict): 'Nexpose Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'], # possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity 'NPM Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'], - 'NPM Audit v7+ Scan': ['title', 'severity', 'cwe', 'vuln_id_from_tool'], # possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity 'Yarn Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'], # possible improvement: in the scanner put the library name into file_path, then dedup on vulnerability_ids + file_path + severity @@ -1281,7 +1280,6 @@ def saml2_attrib_map_format(dict): 'Tenable Scan': True, 'Nexpose Scan': True, 'NPM Audit Scan': True, - 'NPM Audit v7+ Scan': True, 'Yarn Audit Scan': True, 'Mend Scan': True, 'ZAP Scan': False, @@ -1364,12 +1362,10 @@ def saml2_attrib_map_format(dict): 'CargoAudit Scan': DEDUPE_ALGO_HASH_CODE, 'Checkmarx Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, 'Checkmarx Scan': DEDUPE_ALGO_HASH_CODE, - 'Checkmarx One Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, 'Checkmarx OSA': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, 'Codechecker Report native': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, 'Coverity API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, 'Cobalt.io API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, - 'Crunch42 Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, 'Dependency Track Finding Packaging Format (FPF) Export': DEDUPE_ALGO_HASH_CODE, 'Mobsfscan Scan': DEDUPE_ALGO_HASH_CODE, 'SonarQube Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, @@ -1381,7 +1377,6 @@ def saml2_attrib_map_format(dict): 'Tenable Scan': DEDUPE_ALGO_HASH_CODE, 'Nexpose Scan': DEDUPE_ALGO_HASH_CODE, 'NPM Audit Scan': DEDUPE_ALGO_HASH_CODE, - 'NPM Audit v7+ Scan': DEDUPE_ALGO_HASH_CODE, 'Yarn Audit Scan': DEDUPE_ALGO_HASH_CODE, 'Mend Scan': DEDUPE_ALGO_HASH_CODE, 'ZAP Scan': DEDUPE_ALGO_HASH_CODE, diff --git a/dojo/static/dojo/js/metrics.js b/dojo/static/dojo/js/metrics.js index 2e95555d379..392ad2ac6f8 100644 --- a/dojo/static/dojo/js/metrics.js +++ b/dojo/static/dojo/js/metrics.js @@ -1618,6 +1618,8 @@ function open_close_weekly(opened, closed, accepted, ticks) { var options = { xaxes: [{ ticks: ticks, + transform: function(v) { return -v; }, + inverseTransform: function(v) { return -v; } }], yaxes: [{ min: 0 @@ -1659,6 
+1661,8 @@ function severity_weekly(critical, high, medium, low, info, ticks) { var options = { xaxes: [{ ticks: ticks, + transform: function(v) { return -v; }, + inverseTransform: function(v) { return -v; } }], yaxes: [{ min: 0 @@ -1709,6 +1713,8 @@ function severity_counts_weekly(critical, high, medium, ticks) { var options = { xaxes: [{ ticks: ticks, + transform: function(v) { return -v; }, + inverseTransform: function(v) { return -v; } }], yaxes: [{ min: 0 diff --git a/dojo/templates/base.html b/dojo/templates/base.html index 2f1cace966c..f4043d42e3c 100644 --- a/dojo/templates/base.html +++ b/dojo/templates/base.html @@ -765,8 +765,10 @@

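The template diffs that follow render endpoint counts whose definition this diff also changes: dojo/endpoint/views.py, dojo/product/views.py and dojo/utils.py swap an Endpoint_Status-based notion of an "active endpoint" for a Finding-based one. Both querysets side by side, as a sketch that assumes the DefectDojo code base and a configured Django environment:

```python
# Both querysets are taken from hunks in this diff; the import assumes
# the DefectDojo code base.
from dojo.models import Endpoint

# Status-based variant (removed by this diff): an endpoint counts as active
# while none of its Endpoint_Status records are mitigated, false positive,
# out of scope, or risk accepted.
status_based = Endpoint.objects.filter(
    status_endpoint__mitigated=False,
    status_endpoint__false_positive=False,
    status_endpoint__out_of_scope=False,
    status_endpoint__risk_accepted=False,
).distinct()

# Finding-based variant (restored by this diff): an endpoint counts as active
# while it has at least one active, unmitigated finding.
finding_based = Endpoint.objects.filter(
    finding__active=True,
    finding__mitigated__isnull=True,
).distinct()
```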
diff --git a/dojo/templates/dojo/endpoints.html b/dojo/templates/dojo/endpoints.html index 6597e1f7474..ecaaef6d528 100644 --- a/dojo/templates/dojo/endpoints.html +++ b/dojo/templates/dojo/endpoints.html @@ -87,7 +87,7 @@

{% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} {% dojo_sort request 'Product' 'product' 'asc' %} {% endif %} - Active (Verified) Findings + Active Verified Findings Status @@ -117,10 +117,13 @@

{% endif %} {% if host_view %} - {{ e.host_active_findings_count }} ({{ e.host_active_verified_findings_count }}) + {{ e.host_active_verified_findings_count }} {% else %} - {{ e.active_findings_count }} - ({{ e.active_verified_findings_count }}) + {% if e.active_verified_findings_count > 0 %} + {{ e.active_verified_findings_count }} + {% else %} + 0 + {% endif %} {% endif %} @@ -130,10 +133,10 @@

{% if e.mitigated %} Mitigated {% else %} - {% if e.active_findings_count > 0 %} + {% if e.active_verified_findings_count > 0 %} Vulnerable {% else %} - No active findings + No active verified findings {% endif %} {% endif %} {% endif %} diff --git a/dojo/templates/dojo/product.html b/dojo/templates/dojo/product.html index d022812de8e..e328557c87d 100644 --- a/dojo/templates/dojo/product.html +++ b/dojo/templates/dojo/product.html @@ -248,8 +248,12 @@

{% endif %} + {% if prod.endpoint_count %} {{ prod.endpoint_host_count }} / {{ prod.endpoint_count }} + {% else %} + 0 + {% endif %} {% if prod.product_manager %} diff --git a/dojo/templates/dojo/view_endpoint.html b/dojo/templates/dojo/view_endpoint.html index d09261e5ecd..30d974b8a67 100644 --- a/dojo/templates/dojo/view_endpoint.html +++ b/dojo/templates/dojo/view_endpoint.html @@ -103,7 +103,7 @@

  - Finding Age ({{ all_findings|length|apnumber }} + Finding Age ({{ all_findings|length|apnumber }} verified finding{{ all_findings|length|pluralize }})
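As a standalone illustration of the heading above (which this diff changes from counting all findings to counting verified ones), the apnumber and pluralize filters come from django.contrib.humanize; a runnable sketch with made-up findings:

```python
import django
from django.conf import settings

# Minimal standalone setup; assumes only Django itself is installed.
settings.configure(INSTALLED_APPS=["django.contrib.humanize"], USE_I18N=False)
django.setup()

from django.template import Context, Engine

engine = Engine(libraries={"humanize": "django.contrib.humanize.templatetags.humanize"})
template = engine.from_string(
    "{% load humanize %}"
    "Finding Age ({{ all_findings|length|apnumber }} finding{{ all_findings|length|pluralize }})"
)
print(template.render(Context({"all_findings": ["a", "b", "c"]})))
# -> Finding Age (three findings)
```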
@@ -178,9 +178,9 @@

{% if item %} {% if item.vulnerable %} - + {% else %} - + {% endif %}  {{ item|url_shortner }}{% if endpoint.is_broken %} 🚩{% endif %} {% endif %} @@ -248,7 +248,7 @@

Additional Information
- Open Findings
+ Active Verified Findings
{% if findings %}
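Further down, the Checkmarx parser hunk reverts _parse_date (and the XML ScanStart handling) to returning full datetime objects rather than calling .date() on them. The behavioural difference in miniature, using the same dateutil parser the file imports; the timestamp values are illustrative:

```python
import datetime
from dateutil import parser

ts = "2024-01-18T09:12:43"          # same shape as the ScanStart values in this diff
print(parser.parse(ts))             # 2024-01-18 09:12:43  (restored behaviour)
print(parser.parse(ts).date())      # 2024-01-18           (behaviour being reverted)

# The dict branch of _parse_date consumes epoch seconds the same way:
print(datetime.datetime.utcfromtimestamp(1705568000))  # 2024-01-18 08:53:20
```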
diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py index 4f1f07d7256..d8be5b8b680 100755 --- a/dojo/tools/checkmarx/parser.py +++ b/dojo/tools/checkmarx/parser.py @@ -58,7 +58,7 @@ def _get_findings_xml(self, filename, test): language = "" findingdetail = "" group = "" - find_date = parser.parse(root.get("ScanStart")).date() + find_date = parser.parse(root.get("ScanStart")) if query.get("Language") is not None: language = query.get("Language") @@ -389,9 +389,9 @@ def get_findings(self, file, test): def _parse_date(self, value): if isinstance(value, str): - return parser.parse(value).date() + return parser.parse(value) elif isinstance(value, dict) and isinstance(value.get("seconds"), int): - return datetime.datetime.utcfromtimestamp(value.get("seconds")).date() + return datetime.datetime.utcfromtimestamp(value.get("seconds")) else: return None diff --git a/dojo/tools/checkmarx_one/__init__.py b/dojo/tools/checkmarx_one/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py deleted file mode 100644 index 699ac64e42a..00000000000 --- a/dojo/tools/checkmarx_one/parser.py +++ /dev/null @@ -1,110 +0,0 @@ -import datetime -import json -from dateutil import parser -from dojo.models import Finding - - -class CheckmarxOneParser(object): - def get_scan_types(self): - return ["Checkmarx One Scan"] - - def get_label_for_scan_types(self, scan_type): - return scan_type - - def get_description_for_scan_types(self, scan_type): - return "Checkmarx One Scan" - - def _parse_date(self, value): - if isinstance(value, str): - return parser.parse(value) - elif isinstance(value, dict) and isinstance(value.get("seconds"), int): - return datetime.datetime.utcfromtimestamp(value.get("seconds")) - else: - return None - - def get_findings(self, file, test): - data = json.load(file) - findings = [] - if "vulnerabilities" in data: - results = data.get("vulnerabilities", []) - for result in results: - id = result.get("identifiers")[0].get("value") - cwe = None - if 'vulnerabilityDetails' in result: - cwe = result.get("vulnerabilites").get("cweId") - severity = result.get("severity") - locations_uri = result.get("location").get("file") - locations_startLine = result.get("location").get("start_line") - locations_endLine = result.get("location").get("end_line") - finding = Finding( - unique_id_from_tool=id, - file_path=locations_uri, - title=id + "_" + locations_uri, - test=test, - cwe=cwe, - severity=severity, - description="**id**: " + str(id) + "\n" - + "**uri**: " + locations_uri + "\n" - + "**startLine**: " + str(locations_startLine) + "\n" - + "**endLine**: " + str(locations_endLine) + "\n", - false_p=False, - duplicate=False, - out_of_scope=False, - static_finding=True, - dynamic_finding=False, - ) - findings.append(finding) - elif "results" in data: - results = data.get("results", []) - for vulnerability in results: - result_type = vulnerability.get("type") - date = self._parse_date(vulnerability.get("firstFoundAt")) - cwe = None - if 'vulnerabilityDetails' in vulnerability: - cwe = vulnerability.get("vulnerabilites", {}).get("cweId") - if result_type == "sast": - descriptionDetails = vulnerability.get("description") - file_path = vulnerability.get("data").get("nodes")[0].get("fileName") - finding = Finding( - description=descriptionDetails, - title=descriptionDetails, - file_path=file_path, - date=date, - cwe=cwe, - severity=vulnerability.get("severity").title(), - test=test, - 
static_finding=True, - ) - if vulnerability.get("id"): - finding.unique_id_from_tool = ( - vulnerability.get("id") - ) - else: - finding.unique_id_from_tool = str( - vulnerability.get("similarityId") - ) - findings.append(finding) - if result_type == "kics": - description = vulnerability.get("description") - file_path = vulnerability.get("data").get("filename") - finding = Finding( - title=f'{description}', - description=description, - date=date, - cwe=cwe, - severity=vulnerability.get("severity").title(), - verified=vulnerability.get("state") != "TO_VERIFY", - file_path=file_path, - test=test, - static_finding=True, - ) - if vulnerability.get("id"): - finding.unique_id_from_tool = vulnerability.get( - "id" - ) - else: - finding.unique_id_from_tool = str( - vulnerability.get("similarityId") - ) - findings.append(finding) - return findings diff --git a/dojo/tools/crunch42/__init__.py b/dojo/tools/crunch42/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py deleted file mode 100644 index e1a841e29a3..00000000000 --- a/dojo/tools/crunch42/parser.py +++ /dev/null @@ -1,88 +0,0 @@ -import json -from dojo.models import Finding - - -class Crunch42Parser(object): - - def get_scan_types(self): - return ["Crunch42 Scan"] - - def get_label_for_scan_types(self, scan_type): - return "Crunch42 Scan" - - def get_description_for_scan_types(self, scan_type): - return "Import JSON output of Crunch42 scan report." - - def parse_json(self, json_output): - try: - data = json_output.read() - try: - tree = json.loads(str(data, "utf-8")) - except Exception: - tree = json.loads(data) - except Exception: - raise ValueError("Invalid format") - - return tree - - def process_tree(self, tree, test): - return list(self.get_items(tree, test)) if tree else [] - - def get_findings(self, filename, test): - reportTree = self.parse_json(filename) - - if isinstance(reportTree, list): - temp = [] - for moduleTree in reportTree: - temp += self.process_tree(moduleTree, test) - return temp - else: - return self.process_tree(reportTree, test) - - def get_items(self, tree, test): - items = {} - iterator = 0 - if "report" in tree and tree["report"].get("security"): - results = tree["report"].get("security").get("issues") - for key, node in results.items(): - for issue in node["issues"]: - item = self.get_item( - issue, key, test - ) - items[iterator] = item - iterator += 1 - return list(items.values()) - - def get_item(self, issue, title, test): - fingerprint = issue["fingerprint"] - pointer = issue["pointer"] - message = issue["specificDescription"] if 'specificDescription' in issue else title - score = issue["score"] - criticality = issue["criticality"] - if criticality == 1: - severity = "Info" - elif criticality == 2: - severity = "Low" - elif criticality == 3: - severity = "Medium" - elif criticality <= 4: - severity = "High" - else: - severity = "Critical" - # create the finding object - finding = Finding( - unique_id_from_tool=fingerprint, - title=title, - test=test, - severity=severity, - description="**fingerprint**: " + str(fingerprint) + "\n" - + "**pointer**: " + str(pointer) + "\n" - + "**message**: " + str(message) + "\n" - + "**score**: " + str(score) + "\n", - false_p=False, - duplicate=False, - out_of_scope=False, - static_finding=True, - dynamic_finding=False, - ) - return finding diff --git a/dojo/tools/npm_audit_7_plus/__init__.py b/dojo/tools/npm_audit_7_plus/__init__.py deleted file mode 100644 index 
e69de29bb2d..00000000000 diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py deleted file mode 100644 index c72c01cfad4..00000000000 --- a/dojo/tools/npm_audit_7_plus/parser.py +++ /dev/null @@ -1,225 +0,0 @@ -"""Parser for NPM Audit v7+ Scan.""" -import json -import logging -from dojo.models import Finding - -logger = logging.getLogger(__name__) - -''' -the npm audit json output depends on the params used. this parser -accepts the formats for any of: - -npm audit --json -npm audit fix --dry-run --json -npm audit --dry-run --json - -In order for this parser to import the same number of findings -as the report's meta block indicates, all top level keys -are consiered a vulnerability and as much information as provided -is added to each -''' - - -class NpmAudit7PlusParser(object): - """Represents the parser class.""" - - def get_scan_types(self): - """Return the scan type.""" - return ["NPM Audit v7+ Scan"] - - def get_label_for_scan_types(self, scan_type): - """Return the scan label.""" - return scan_type # no custom label for now - - def get_description_for_scan_types(self, scan_type): - """Return the scan description.""" - return "NPM Audit Scan json output from v7 and above." - - def get_findings(self, json_output, test): - """Return the findings gathered from file upload.""" - tree = self.parse_json(json_output) - return self.get_items(tree, test) - - def parse_json(self, json_output): - """Parse the json format to get findings.""" - if json_output is None: - return - try: - data = json_output.read() - try: - tree = json.loads(str(data, "utf-8")) - except Exception: - tree = json.loads(data) - except Exception: - raise ValueError("Invalid format, unable to parse json.") - - # output from npm audit fix --dry-run --json - if tree.get("audit"): - if not tree.get("audit").get("auditReportVersion"): - raise ValueError( - ("This parser only supports output from npm audit version" - " 7 and above.") - ) - subtree = tree.get("audit").get("vulnerabilities") - # output from npm audit --dry-run --json - # or - # output from npm audit --json - else: - if not tree.get("auditReportVersion"): - raise ValueError( - ("This parser only supports output from npm audit version" - " 7 and above.") - ) - subtree = tree.get("vulnerabilities") - - return subtree - - def get_items(self, tree, test): - """Return the individual items found in report.""" - items = {} - - for key, node in tree.items(): - item = get_item(node, tree, test) - unique_key = item.title + item.severity - items[unique_key] = item - - return list(items.values()) - - -def get_item(item_node, tree, test): - """Return the individual Findigns from items found in report.""" - references = [] - mitigation = "" - test = test - static_finding = True - title = "" - unique_id_from_tool = "" - cvssv3 = "" - cwe = "" - - if item_node["severity"] == "low": - severity = "Low" - elif item_node["severity"] == "moderate": - severity = "Medium" - elif item_node["severity"] == "high": - severity = "High" - elif item_node["severity"] == "critical": - severity = "Critical" - else: - severity = "Info" - - if item_node["via"] and isinstance(item_node["via"][0], str): - # this is a top level key (a vulnerability) - title = item_node["name"] - cwe = "CWE-1035" # default - component_name = title - - elif item_node["via"] and isinstance(item_node["via"][0], dict): - title = item_node["via"][0]["title"] - component_name = item_node["nodes"][0] - cwe = item_node["via"][0]["cwe"][0] - references.append(item_node["via"][0]["url"]) - 
unique_id_from_tool = str(item_node["via"][0]["source"]) - cvssv3 = item_node["via"][0]["cvss"]["vectorString"] - - if isinstance(item_node["fixAvailable"], dict): - fix_name = item_node["fixAvailable"]["name"] - fix_version = item_node["fixAvailable"]["version"] - mitigation = "Update {0} to version {1}".format(fix_name, fix_version) - else: - mitigation = "No specific mitigation provided by tool." - - description = get_vuln_description(item_node, tree) - - if (item_node["via"] and - isinstance(item_node["via"][0], dict) and - len(item_node["via"]) > 1): - # we have a multiple CWE vuln which we will capture in the - # vulnerability_ids and references - for vuln in item_node["via"][1:]: # have to decide if str or object - if isinstance(vuln, dict): - references.append(vuln["url"]) - - if len(cwe): - cwe = int(cwe.split("-")[1]) - - dojo_finding = Finding( - title=title, - test=test, - severity=severity, - description=description, - cwe=cwe, - mitigation=mitigation, - references=", ".join(references), - component_name=component_name, - false_p=False, - duplicate=False, - out_of_scope=False, - mitigated=None, - impact="No impact provided", - static_finding=static_finding, - dynamic_finding=False, - vuln_id_from_tool=unique_id_from_tool, - ) - - if (cvssv3 is not None) and (len(cvssv3) > 0): - dojo_finding.cvssv3 = cvssv3 - - return dojo_finding - - -def get_vuln_description(item_node, tree): - """Make output pretty of details.""" - effects_handled = [] - description = "" - - description += (item_node["name"] + " " + - item_node["range"] + "\n") - description += "Severity: " + item_node["severity"] + "\n" - - for via in item_node["via"]: - if isinstance(via, str): - description += ("Depends on vulnerable versions of " + - via + "\n") - else: - description += (via["title"] + " - " + via["url"] + "\n") - - if isinstance(item_node["fixAvailable"], dict): - fix_name = item_node["fixAvailable"]["name"] - fix_version = item_node["fixAvailable"]["version"] - mitigation = "Fix Available: Update {0} to version {1}".format( - fix_name, fix_version) - else: - mitigation = "No specific mitigation provided by tool." 
- - description += mitigation + "\n" - - for node in item_node["nodes"]: - description += node + "\n" - - for effect in item_node["effects"]: - # look up info in the main tree - description += (" " + tree[effect]["name"] + " " + - tree[effect]["range"] + "\n") - effects_handled.append(tree[effect]["name"]) - for ev in tree[effect]["via"]: - if isinstance(ev, dict): - if tree[effect]["name"] != ev["name"]: - description += (" Depends on vulnerable versions of " + - ev["name"] + "\n") - else: - if tree[effect]["name"] != ev: - description += (" Depends on vulnerable versions of " + - ev + "\n") - for en in tree[effect]["nodes"]: - description += " " + en + "\n" - - for ee in tree[effect]["effects"]: - if ee in effects_handled: - continue # already added to description - description += (" " + tree[ee]["name"] + " " + - tree[ee]["range"] + "\n") - for en in tree[effect]["nodes"]: - description += " " + en + "\n" - - return description diff --git a/dojo/tools/pip_audit/parser.py b/dojo/tools/pip_audit/parser.py index 4b3ffba9b1a..726667987fb 100644 --- a/dojo/tools/pip_audit/parser.py +++ b/dojo/tools/pip_audit/parser.py @@ -1,110 +1,70 @@ -"""Parser for pip-audit.""" import json from dojo.models import Finding class PipAuditParser: - """Represents a file parser capable of ingesting pip-audit results.""" - def get_scan_types(self): - """Return the type of scan this parser ingests.""" return ["pip-audit Scan"] def get_label_for_scan_types(self, scan_type): - """Return the friendly name for this parser.""" return "pip-audit Scan" def get_description_for_scan_types(self, scan_type): - """Return the description for this parser.""" return "Import pip-audit JSON scan report." def requires_file(self, scan_type): - """Return boolean indicating if parser requires a file to process.""" return True def get_findings(self, scan_file, test): - """Return the collection of Findings ingested.""" data = json.load(scan_file) - findings = None - # this parser can handle two distinct formats see sample scan files - if "dependencies" in data: - # new format of report - findings = get_file_findings(data, test) - else: - # legacy format of report - findings = get_legacy_findings(data, test) - - return findings - - -def get_file_findings(data, test): - """Return the findings in the vluns array inside the dependencies key.""" - findings = list() - for dependency in data["dependencies"]: - item_findings = get_item_findings(dependency, test) - if item_findings is not None: - findings.extend(item_findings) - return findings - - -def get_legacy_findings(data, test): - """Return the findings gathered from the vulns element.""" - findings = list() - for item in data: - item_findings = get_item_findings(item, test) - if item_findings is not None: - findings.extend(item_findings) - return findings + findings = list() + for item in data: + vulnerabilities = item.get("vulns", []) + if vulnerabilities: + component_name = item["name"] + component_version = item.get("version") + for vulnerability in vulnerabilities: + vuln_id = vulnerability.get("id") + vuln_fix_versions = vulnerability.get("fix_versions") + vuln_description = vulnerability.get("description") + + title = ( + f"{vuln_id} in {component_name}:{component_version}" + ) + + description = "" + description += vuln_description + + mitigation = None + if vuln_fix_versions: + mitigation = "Upgrade to version:" + if len(vuln_fix_versions) == 1: + mitigation += f" {vuln_fix_versions[0]}" + else: + for fix_version in vuln_fix_versions: + mitigation += f"\n- {fix_version}" + + 
finding = Finding( + test=test, + title=title, + cwe=1352, + severity="Medium", + description=description, + mitigation=mitigation, + component_name=component_name, + component_version=component_version, + vuln_id_from_tool=vuln_id, + static_finding=True, + dynamic_finding=False, + ) + vulnerability_ids = list() + if vuln_id: + vulnerability_ids.append(vuln_id) + if vulnerability_ids: + finding.unsaved_vulnerability_ids = vulnerability_ids + + findings.append(finding) -def get_item_findings(item, test): - """Return list of Findings.""" - findings = list() - vulnerabilities = item.get("vulns", []) - if vulnerabilities: - component_name = item["name"] - component_version = item.get("version") - for vulnerability in vulnerabilities: - vuln_id = vulnerability.get("id") - vuln_fix_versions = vulnerability.get("fix_versions") - vuln_description = vulnerability.get("description") - - title = ( - f"{vuln_id} in {component_name}:{component_version}" - ) - - description = "" - description += vuln_description - - mitigation = None - if vuln_fix_versions: - mitigation = "Upgrade to version:" - if len(vuln_fix_versions) == 1: - mitigation += f" {vuln_fix_versions[0]}" - else: - for fix_version in vuln_fix_versions: - mitigation += f"\n- {fix_version}" - - finding = Finding( - test=test, - title=title, - cwe=1395, - severity="Medium", - description=description, - mitigation=mitigation, - component_name=component_name, - component_version=component_version, - vuln_id_from_tool=vuln_id, - static_finding=True, - dynamic_finding=False, - ) - vulnerability_ids = list() - if vuln_id: - vulnerability_ids.append(vuln_id) - if vulnerability_ids: - finding.unsaved_vulnerability_ids = vulnerability_ids - - findings.append(finding) - - return findings + return findings diff --git a/dojo/utils.py b/dojo/utils.py index b41c82966a4..25cf46d2af1 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -1575,12 +1575,7 @@ def __init__(self, product, title=None, tab=None): active=True, mitigated__isnull=True).count() active_endpoints = Endpoint.objects.filter( - product=self.product, - status_endpoint__mitigated=False, - status_endpoint__false_positive=False, - status_endpoint__out_of_scope=False, - status_endpoint__risk_accepted=False, - ) + product=self.product, finding__active=True, finding__mitigated__isnull=True) self.endpoints_count = active_endpoints.distinct().count() self.endpoint_hosts_count = active_endpoints.values('host').distinct().count() self.benchmark_type = Benchmark_Type.objects.filter( diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 2ce3da74735..e5eb9b0e926 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "2.33.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.116-dev +version: 1.6.115-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap diff --git a/unittests/scans/checkmarx_one/checkmarx_one.json b/unittests/scans/checkmarx_one/checkmarx_one.json deleted file mode 100644 index a9e432abf60..00000000000 --- a/unittests/scans/checkmarx_one/checkmarx_one.json +++ /dev/null @@ -1,284 +0,0 @@ -{ - "scan": { - "end_time": "2024-01-18T09:12:43", - "analyzer": { - "id": "CxOne-SAST", - "name": "Checkmarx", - "url": "https://checkmarx.com/", - "vendor": { - "name": "Checkmarx" - }, - "version": "2.0.63" - }, - "scanner": { - "id": "CxOne-SAST", - "name": "Checkmarx", - "vendor": { - "name": "Checkmarx" - }, - "version": "2.0.63" - }, - 
"start_time": "2024-01-18T09:12:43", - "status": "success", - "type": "sast" - }, - "schema": "https://gitlab.com/gitlab-org/gitlab/-/raw/master/lib/gitlab/ci/parsers/security/validators/schemas/15.0.0/sast-report-format.json", - "version": "15.0.0", - "vulnerabilities": [ - { - "id": "Client_HTML5_Store_Sensitive_data_In_Web_Storage:/src/helpers/Constants.ts:450", - "category": "Checkmarx-sast", - "name": "Client_HTML5_Store_Sensitive_data_In_Web_Storage", - "message": "Client_HTML5_Store_Sensitive_data_In_Web_Storage@/src/helpers/Constants.ts:450", - "description": "The application stores data makeKey on the client, in an insecure manner, at line 115 of /src/helpers/Utility.ts.", - "cve": "Client_HTML5_Store_Sensitive_data_In_Web_Storage:/src/helpers/Constants.ts:450", - "severity": "Medium", - "confidence": "Medium", - "solution": "", - "scanner": { - "id": "Checkmarx-sast", - "name": "Checkmarx-sast", - "vendor": { - "name": "" - }, - "version": "" - }, - "identifiers": [ - { - "type": "cxOneScan", - "name": "CxOne Scan", - "url": "https://ast.checkmarx.net/projects/4c5703d8-dddf-11ee-8275-bb5b871f4ca1/scans?id=56efc3de-dddf-11ee-91f7-17d54222fb10\u0026branch=release%2FRC-6", - "value": "511341974" - } - ], - "links": [], - "tracking": { - "type": "source", - "items": [ - { - "signatures": [ - { - "algorithm": "sast-Algorithm ", - "value": "NA" - } - ], - "file": "/src/helpers/Constants.ts", - "end_line": 451, - "start_line": 450 - } - ] - }, - "flags": [], - "location": { - "file": "/src/helpers/Constants.ts", - "start_line": 450, - "end_line": 451, - "class": "" - } - }, - { - "id": "Client_HTML5_Store_Sensitive_data_In_Web_Storage:/src/helpers/Helper.ts:349", - "category": "Checkmarx-sast", - "name": "Client_HTML5_Store_Sensitive_data_In_Web_Storage", - "message": "Client_HTML5_Store_Sensitive_data_In_Web_Storage@/src/helpers/Helper.ts:349", - "description": "The application stores data Key on the client, in an insecure manner, at line 349 of /src/helpers/Helper.ts.", - "cve": "Client_HTML5_Store_Sensitive_data_In_Web_Storage:/src/helpers/Helper.ts:349", - "severity": "Medium", - "confidence": "Medium", - "solution": "", - "scanner": { - "id": "Checkmarx-sast", - "name": "Checkmarx-sast", - "vendor": { - "name": "" - }, - "version": "" - }, - "identifiers": [ - { - "type": "cxOneScan", - "name": "CxOne Scan", - "url": "https://ast.checkmarx.net/projects/7c649cf6-dde0-11ee-a703-43244b0a9879/scans?id=86fc33ea-dde0-11ee-ba5f-3beb4c589dd3\u0026branch=release%2FRC-6", - "value": "832413795" - } - ], - "links": [], - "tracking": { - "type": "source", - "items": [ - { - "signatures": [ - { - "algorithm": "sast-Algorithm ", - "value": "NA" - } - ], - "file": "/src/helpers/Helper.ts", - "end_line": 350, - "start_line": 339 - } - ] - }, - "flags": [], - "location": { - "file": "/src/helpers/Helper.ts", - "start_line": 349, - "end_line": 350, - "class": "" - } - }, - { - "id": "Use_Of_Hardcoded_Password:/src/pages/UserError_test.tsx:71", - "category": "Checkmarx-sast", - "name": "Use_Of_Hardcoded_Password", - "message": "Use_Of_Hardcoded_Password@/src/pages/UserError_test.tsx:71", - "description": "The application uses the hard-coded password \u0026#34;testPassword\u0026#34; for authentication purposes, either using it to verify users\u0026#39; identities, or to access another remote system. 
This password at line 71 of /src/pages/UserError_test.tsx appears in the code, implying it is accessible to anyone with source code access, and cannot be changed without rebuilding the application.\n\n", - "cve": "Use_Of_Hardcoded_Password:/src/pages/UserError_test.tsx:71", - "severity": "Low", - "confidence": "Low", - "solution": "", - "scanner": { - "id": "Checkmarx-sast", - "name": "Checkmarx-sast", - "vendor": { - "name": "" - }, - "version": "" - }, - "identifiers": [ - { - "type": "cxOneScan", - "name": "CxOne Scan", - "url": "https://ast.checkmarx.net/projects/53d5b99a-dde1-11ee-ab71-9be9755a4da6/scans?id=5e592014-dde1-11ee-8985-f37d989e23db\u0026branch=release%2FRC-6", - "value": "143486243" - } - ], - "links": [], - "tracking": { - "type": "source", - "items": [ - { - "signatures": [ - { - "algorithm": "sast-Algorithm ", - "value": "NA" - } - ], - "file": "/src/pages/UserError_test.tsx", - "end_line": 72, - "start_line": 71 - } - ] - }, - "flags": [], - "location": { - "file": "/src/pages/UserError_test.tsx", - "start_line": 71, - "end_line": 72, - "class": "" - } - }, - { - "id": "Client_Hardcoded_Domain:/public/index.html:32", - "category": "Checkmarx-sast", - "name": "Client_Hardcoded_Domain", - "message": "Client_Hardcoded_Domain@/public/index.html:32", - "description": "The JavaScript file imported in https://fonts.googleapis.com/icon?family=Material+Icons in /public/index.html at line 32 is from a remote domain, which may allow attackers to replace its contents with malicious code.", - "cve": "Client_Hardcoded_Domain:/public/index.html:32", - "severity": "Info", - "confidence": "Info", - "solution": "", - "scanner": { - "id": "Checkmarx-sast", - "name": "Checkmarx-sast", - "vendor": { - "name": "" - }, - "version": "" - }, - "identifiers": [ - { - "type": "cxOneScan", - "name": "CxOne Scan", - "url": "https://ast.checkmarx.net/projects/34480339-8f8c-4b68-b8fb-4eea09a2045d/scans?id=78adc5f1-0864-411e-b8d6-bfa134458bd8\u0026branch=release%2Fpilot-1", - "value": "2595392" - } - ], - "links": [], - "tracking": { - "type": "source", - "items": [ - { - "signatures": [ - { - "algorithm": "sast-Algorithm ", - "value": "NA" - } - ], - "file": "/public/index.html", - "end_line": 87, - "start_line": 32 - } - ] - }, - "flags": [], - "location": { - "file": "/public/index.html", - "start_line": 32, - "end_line": 87, - "class": "" - } - }, - { - "id": "Client_DOM_XSS:/src/app/App_test.tsx:744", - "category": "Checkmarx-sast", - "name": "Client_DOM_XSS", - "message": "Client_DOM_XSS@/src/app/App_test.tsx:744", - "description": "The method TrustMe embeds untrusted data in generated output with location, at line 298 of /src/app/App_test.tsx. 
This untrusted data is embedded into the output without proper sanitization or encoding, enabling an attacker to inject malicious code into the generated web-page.\n\n", - "cve": "Client_DOM_XSS:/src/app/App_test.tsx:744", - "severity": "Info", - "confidence": "Info", - "solution": "", - "scanner": { - "id": "Checkmarx-sast", - "name": "Checkmarx-sast", - "vendor": { - "name": "" - }, - "version": "" - }, - "identifiers": [ - { - "type": "cxOneScan", - "name": "CxOne Scan", - "url": "https://ast.checkmarx.net/projects/38ebbafc-dde2-11ee-ae0c-b72e7e0d42ae/scans?id=42ff549a-dde2-11ee-8c8c-83e0db45059d\u0026branch=release%2FRC-6", - "value": "836714351" - } - ], - "links": [], - "tracking": { - "type": "source", - "items": [ - { - "signatures": [ - { - "algorithm": "sast-Algorithm ", - "value": "NA" - } - ], - "file": "/src/app/App_test.tsx", - "end_line": 746, - "start_line": 744 - } - ] - }, - "flags": [], - "location": { - "file": "/src/app/App_test.tsx", - "start_line": 744, - "end_line": 746, - "class": "" - } - } - ] -} \ No newline at end of file diff --git a/unittests/scans/checkmarx_one/many_findings.json b/unittests/scans/checkmarx_one/many_findings.json deleted file mode 100644 index 13a030e2e3d..00000000000 --- a/unittests/scans/checkmarx_one/many_findings.json +++ /dev/null @@ -1,258 +0,0 @@ -{ - "results": [ - { - "type": "kics", - "label": "IaC Security", - "id": "98727183", - "similarityId": "fbed62efe2786d647806451d0480f57b4bc08786633fb73c29579faee8f9d252", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "HIGH", - "created": "2023-11-21T10:07:38Z", - "firstFoundAt": "2022-12-26T09:31:48Z", - "foundAt": "2023-11-21T10:07:38Z", - "firstScanId": "79cd6248-ddcc-11ee-80c3-c34e822ea27f", - "description": "A user should be specified in the dockerfile, otherwise the image will run as root", - "descriptionHTML": "\u003cp\u003eA user should be specified in the dockerfile, otherwise the image will run as root\u003c/p\u003e\n", - "data": { - "queryId": "94d39580-ddcc-11ee-b570-27d2d85c4cb8 [Taken from query_id]", - "queryName": "Missing User Instruction", - "group": "Build Process [Taken from category]", - "line": 1, - "platform": "Dockerfile", - "issueType": "MissingAttribute", - "expectedValue": "The 'Dockerfile' should contain the 'USER' instruction", - "value": "The 'Dockerfile' does not contain any 'USER' instruction", - "filename": "/qe/testharness/Dockerfile" - }, - "comments": {}, - "vulnerabilityDetails": { - "cvss": {} - } - }, - { - "type": "kics", - "label": "IaC Security", - "id": "28307228", - "similarityId": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "HIGH", - "created": "2023-11-21T10:07:38Z", - "firstFoundAt": "2022-12-26T09:31:48Z", - "foundAt": "2023-11-21T10:07:38Z", - "firstScanId": "811759c2-ddd7-11ee-9b56-d34cc93fb257", - "description": "A user should be specified in the dockerfile, otherwise the image will run as root", - "descriptionHTML": "\u003cp\u003eA user should be specified in the dockerfile, otherwise the image will run as root\u003c/p\u003e\n", - "data": { - "queryId": "5d2efac8-ddd8-11ee-9117-b34a238abecc [Taken from query_id]", - "queryName": "Missing User Instruction", - "group": "Build Process [Taken from category]", - "line": 1, - "platform": "Dockerfile", - "issueType": "MissingAttribute", - "expectedValue": "The 'Dockerfile' should contain the 'USER' instruction", - "value": "The 'Dockerfile' does not contain any 'USER' instruction", - "filename": 
"/qe/testharness/Dockerfile" - }, - "comments": {}, - "vulnerabilityDetails": { - "cvss": {} - } - }, - { - "type": "sast", - "label": "sast", - "id": "04894977", - "similarityId": "697307927", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "MEDIUM", - "created": "2023-11-21T09:16:10Z", - "firstFoundAt": "2022-03-17T14:45:41Z", - "foundAt": "2023-11-21T09:16:10Z", - "firstScanId": "9d120bda-ddd8-11ee-bd4c-8b5b82bf6c89", - "description": "Method getObject at line 96 of /shared/src/main/java/com/example/api/clients/ObjectsManagerUtil.java sends user information outside the application. This may constitute a Privacy Violation.\n\n", - "descriptionHTML": "\u003cp\u003eMethod getObject at line 96 of /shared/src/main/java/com/example/api/clients/ObjectsManagerUtil.java sends user information outside the application. This may constitute a Privacy Violation.\u003c/p\u003e\n", - "data": { - "queryId": 12956636075206043460, - "queryName": "Privacy_Violation", - "group": "Java_Medium_Threat", - "resultHash": "2417044825981779912395719508", - "languageName": "Java", - "nodes": [ - { - "id": "9823731082518796021644390089", - "line": 96, - "name": "secret", - "column": 48, - "length": 12, - "method": "getObject", - "nodeID": 55222, - "domType": "ParamDecl", - "fileName": "/shared/src/main/java/com/example/api/clients/ObjectsManagerUtil.java", - "fullName": "com.example.api.clients.ObjectsManagerUtil.getObject.secret", - "typeName": "String", - "methodLine": 96, - "definitions": "1" - }, - { - "id": "ahpeiL2gaeboi8aqueiv8liekah=", - "line": 48, - "name": "secret", - "column": 71, - "length": 12, - "method": "getObject", - "nodeID": 55222, - "domType": "UnknownReference", - "fileName": "/shared/src/main/java/com/example/api/clients/ObjectsManagerUtil.java", - "fullName": "com.example.api.clients.ObjectsManagerUtil.getObject.secret", - "typeName": "String", - "methodLine": 76, - "definitions": "1" - }, - { - "id": "Aewo6hui2ek5guNgaesie4ioPha=", - "line": 56, - "name": "error", - "column": 27, - "length": 12, - "method": "getObject", - "nodeID": 55222, - "domType": "MethodInvokeExpr", - "fileName": "/shared/src/main/java/com/example/api/clients/ObjectsManagerUtil.java", - "fullName": "com.example.api.clients.ObjectsManagerUtil.log.error", - "typeName": "error", - "methodLine": 96, - "definitions": "0" - } - ] - }, - "comments": {}, - "vulnerabilityDetails": { - "cweId": 359, - "cvss": {}, - "compliances": [ - "FISMA 2014", - "NIST SP 800-53", - "OWASP Top 10 2013", - "OWASP Top 10 2017", - "OWASP Top 10 2021", - "PCI DSS v3.2.1", - "ASD STIG 4.10" - ] - } - }, - { - "type": "kics", - "label": "IaC Security", - "id": "9930754", - "similarityId": "df0b5ce1f88f1af07e63731e0a9628920a008ea0ca4bbd117d75a3cdbdd283ff", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "MEDIUM", - "created": "2023-11-21T10:07:38Z", - "firstFoundAt": "2022-08-01T08:30:25Z", - "foundAt": "2023-11-21T10:07:38Z", - "firstScanId": "eff24b42-ddda-11ee-9e73-83b44de11797", - "description": "Incoming container traffic should be bound to a specific host interface", - "descriptionHTML": "\u003cp\u003eIncoming container traffic should be bound to a specific host interface\u003c/p\u003e\n", - "data": { - "queryId": "fd070ec6-ddda-11ee-a521-73cad7abf17a [Taken from query_id]", - "queryName": "Container Traffic Not Bound To Host Interface", - "group": "Networking and Firewall [Taken from category]", - "line": 16, - "platform": "DockerCompose", - "issueType": "IncorrectValue", - "expectedValue": "Docker compose file to 
have 'ports' attribute bound to a specific host interface.", - "value": "Docker compose file doesn't have 'ports' attribute bound to a specific host interface", - "filename": "/qe/integration/docker-compose.yml" - }, - "comments": {}, - "vulnerabilityDetails": { - "cvss": {} - } - }, - { - "type": "sast", - "label": "sast", - "id": "47966330", - "similarityId": "2994069268", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "LOW", - "created": "2023-11-21T09:16:10Z", - "firstFoundAt": "2023-02-09T09:32:55Z", - "foundAt": "2023-11-21T09:16:10Z", - "firstScanId": "4f9f7b28-dddb-11ee-b736-53a846e9935e", - "description": "Method getClient at line 43 of /qe/integration-tests/src/java/com/example/api/integrationtests/utils/IntegratHelper.java defines testPassword, which is designated to contain user passwords. However, while plaintext passwords are later assigned to testPassword, this variable is never cleared from memory.\n\n", - "descriptionHTML": "\u003cp\u003eMethod getClient at line 43 of /qe/integration-tests/src/java/com/example/api/integrationtests/utils/IntegratHelper.java defines testPassword, which is designated to contain user passwords. However, while plaintext passwords are later assigned to testPassword, this variable is never cleared from memory.\u003c/p\u003e\n", - "data": { - "queryId": 7846472296093057013, - "queryName": "Heap_Inspection", - "group": "Java_Low_Visibility", - "resultHash": "oochiuquiede0IeVeijaWooTieh=", - "languageName": "Java", - "nodes": [ - { - "id": "Oec6Nie9ool0too4chieNoh5zoo=", - "line": 84, - "name": "testPassword", - "column": 18, - "length": 12, - "method": "getClient", - "nodeID": 6459, - "domType": "Declarator", - "fileName": "/qe/integration-tests/src/java/com/example/api/integrationtests/utils/IntegratHelper.java", - "fullName": "com.example.api.integrationtests.utils.IntegratHelper.getClient.testPassword", - "typeName": "char", - "methodLine": 35, - "definitions": "1" - } - ] - }, - "comments": {}, - "vulnerabilityDetails": { - "cweId": 244, - "cvss": {}, - "compliances": [ - "OWASP Top 10 2013", - "OWASP Top 10 2021", - "ASD STIG 4.10" - ] - } - }, - { - "type": "kics", - "label": "IaC Security", - "id": "87775678", - "similarityId": "d2b3d5c205f6e52f7588c4ecab08caec2a9d53dc2ded74e1fffd9f2ebf3fa203", - "status": "RECURRENT", - "state": "TO_VERIFY", - "severity": "LOW", - "created": "2023-11-21T10:07:38Z", - "firstFoundAt": "2023-01-05T09:31:43Z", - "foundAt": "2023-11-21T10:07:38Z", - "firstScanId": "82a21764-dddc-11ee-9364-1f3a853093bf", - "description": "Ensure that HEALTHCHECK is being used. The HEALTHCHECK instruction tells Docker how to test a container to check that it is still working", - "descriptionHTML": "\u003cp\u003eEnsure that HEALTHCHECK is being used. 
The HEALTHCHECK instruction tells Docker how to test a container to check that it is still working\u003c/p\u003e\n", - "data": { - "queryId": "90b50eba-dddc-11ee-acec-cf20c0abdb94 [Taken from query_id]", - "queryName": "Healthcheck Instruction Missing", - "group": "Insecure Configurations [Taken from category]", - "line": 1, - "platform": "Dockerfile", - "issueType": "MissingAttribute", - "expectedValue": "Dockerfile should contain instruction 'HEALTHCHECK'", - "value": "Dockerfile doesn't contain instruction 'HEALTHCHECK'", - "filename": "/qe/unitests/Dockerfile" - }, - "comments": {}, - "vulnerabilityDetails": { - "cvss": {} - } - } - ], - "totalCount": 6, - "scanID": "fc1ab89e-ddc8-11ee-96d4-97cff7d4e776" -} \ No newline at end of file diff --git a/unittests/scans/checkmarx_one/no_findings.json b/unittests/scans/checkmarx_one/no_findings.json deleted file mode 100644 index c526fa4dc0f..00000000000 --- a/unittests/scans/checkmarx_one/no_findings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "results": [ - ], - "totalCount": 0, - "scanID": "4fc677bc-dddd-11ee-8004-6fd4f0411f73" -} \ No newline at end of file diff --git a/unittests/scans/crunch42/crunch42_many_findings.json b/unittests/scans/crunch42/crunch42_many_findings.json deleted file mode 100644 index 1ea3aca89fd..00000000000 --- a/unittests/scans/crunch42/crunch42_many_findings.json +++ /dev/null @@ -1,251 +0,0 @@ -{ - "end": "1709535630", - "report": { - "index": [ - "/components/security/ApiKey", - "/paths/~1integration-test~1generate/post/security/0/ApiKeyAuth", - "/paths/~1integration-test~1health/get/security", - "/paths/~1integration-test~1invalidate/delete/security/0/ApiKeyAuth", - "/paths/~1integration-test~1ping/get/security", - "/paths/~1integration-test~1refresh/get/security/0/ApiKeyAuth", - "/paths/~1integration-test~1refresh/put/security/0/ApiKeyAuth", - "/paths/~1integration-test~1verify/get/security/0/ApiKeyAuth" - ], - "assessmentVersion": "3.1.6", - "assessmentReportVersion": "1.0.1", - "commit": "ahso2mom3neiviungoh4ENgahXie2Aer4ain5oba-E", - "oasVersion": "3.0.0", - "apiVersion": "1.0.0", - "fileId": "c65d4166-ddf7-11ee-a7f6-bf9763730afb", - "apiId": "", - "openapiState": "valid", - "score": 82.86, - "valid": true, - "criticality": 4, - "issueCounter": 8, - "minimalReport": false, - "maxEntriesPerIssue": 30, - "maxImpactedPerEntry": 30, - "security": { - "issueCounter": 8, - "score": 12.86, - "criticality": 4, - "issues": { - "v3-global-securityscheme-apikey-inheader": { - "description": "Transporting API keys in a header over network allowed", - "issues": [ - { - "score": 0, - "pointer": 0, - "tooManyImpacted": false, - "criticality": 1, - "request": true, - "fingerprint": "teephei0aes4ohxur7Atie6zuiCh9weeshue0kai" - } - ], - "issueCounter": 1, - "score": 0, - "criticality": 1, - "tooManyError": false - }, - "v3-operation-securityrequirement-apikey-inheader": { - "description": "Operation accepts API keys transported in a header over network", - "issues": [ - { - "score": -2.14, - "pointer": 1, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "Iibooquavie0hah0quoh7thooghiith7utoow6th" - }, - { - "score": -2.14, - "pointer": 3, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "roz6Iph0eiPaih1shooPi1geiyuziitei0aiGhed" - }, - { - "score": -2.14, - "pointer": 5, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "lae4iet6XeiyiSheeZof3sheik9lahdaiph7edah" - }, - { - "score": -2.14, - "pointer": 6, - "tooManyImpacted": 
false, - "criticality": 3, - "request": true, - "fingerprint": "oNgie5Ieke9fiep6yochaT2ain8oona4xeiphiCh" - }, - { - "score": -2.14, - "pointer": 7, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "aiShievooyi1Gohn1aeque5Mae3aiBoh8oquaphe" - } - ], - "issueCounter": 5, - "score": -10.71, - "criticality": 3, - "tooManyError": false - }, - "v3-operation-securityrequirement-emptyarray": { - "description": "The security section contains an empty array", - "issues": [ - { - "specificDescription": "The security section of the operation 'get' contains an empty array", - "score": -3.21, - "pointer": 2, - "tooManyImpacted": false, - "criticality": 4, - "request": true, - "fingerprint": "oofushaeQuiev6Shegai2roh0ceighae5Daij7pi" - }, - { - "specificDescription": "The security section of the operation 'get' contains an empty array", - "score": -3.21, - "pointer": 4, - "tooManyImpacted": false, - "criticality": 4, - "request": true, - "fingerprint": "Eife6Tu5liequiec8AhZ6booGheegh5oShues2bi" - } - ], - "issueCounter": 2, - "score": -6.43, - "criticality": 4, - "tooManyError": false - } - }, - "subgroupIssueCounter": { - "authentication": { - "none": 0, - "info": 1, - "low": 0, - "medium": 5, - "high": 2, - "critical": 0 - }, - "authorization": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "transport": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - } - } - }, - "data": { - "issueCounter": 0, - "score": 70, - "criticality": 0, - "issues": {}, - "subgroupIssueCounter": { - "parameters": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "responseHeader": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "responseDefinition": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "schema": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "paths": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - } - } - }, - "issuesKey": [ - "v3-operation-securityrequirement-emptyarray", - "v3-global-securityscheme-apikey-inheader", - "v3-operation-securityrequirement-apikey-inheader" - ], - "summary": { - "oasVersion": "3.0.0", - "apiVersion": "1.0.0", - "basepath": "", - "apiName": "Example Authentication Service", - "description": "Authentication Service", - "endpoints": [ - "https://auth-dev-internal.example.com/", - "https://auth-dev-internal.example.com/" - ], - "pathCounter": 1, - "operationCounter": 7, - "parameterCounter": 4, - "requestBodyCounter": 0, - "schemesCounter": { - "https": 7 - }, - "requestContentType": {}, - "responseContentType": { - "application/json": 19 - }, - "securitySchemes": { - "ApiKeyAuth": { - "counterInsecure": 0, - "counterSecure": 5, - "type": "apiKey", - "apiKeyIn": "header", - "apiKeyName": "X-API-Key" - } - }, - "componentsSchemasCounter": 6, - "componentsResponsesCounter": 0, - "componentsParametersCounter": 2, - "componentsExamplesCounter": 0, - "componentsRequestBodiesCounter": 0, - "componentsHeadersCounter": 0, - "componentsSecuritySchemesCounter": 1, - "componentsLinksCounter": 0, - "componentsCallbacksCounter": 0 - } - }, - "start": "1702028474", - "taskId": "0ccd5572-ddf9-11ee-935d-d7d416afd73f" -} \ No newline at end of file diff --git a/unittests/scans/crunch42/crunch42_many_findings2.json b/unittests/scans/crunch42/crunch42_many_findings2.json deleted file mode 
100644 index b9aa1f75fa1..00000000000 --- a/unittests/scans/crunch42/crunch42_many_findings2.json +++ /dev/null @@ -1,442 +0,0 @@ -{ - "end": "2131451849", - "report": { - "index": [ - "/definitions/Objects/additionalProperties", - "/definitions/Objects/properties/all_objects/items", - "/definitions/ObjectsList/additionalProperties", - "/definitions/auth_claims", - "/definitions/auth_claims/additionalProperties", - "/definitions/auth_claims/properties/level/format", - "/paths/~1admin~1all_objects/get/parameters/0", - "/paths/~1admin~1all_objects/get/responses/403", - "/paths/~1admin~1all_objects/get/security/0/access-token", - "/paths/~1admin~1objects~1search/get/parameters/0", - "/paths/~1admin~1objects~1search/get/parameters/1", - "/paths/~1admin~1objects~1search/get/responses/403", - "/paths/~1admin~1objects~1search/get/security/0/access-token", - "/paths/~1login/post", - "/paths/~1login/post/parameters/0", - "/paths/~1login/post/parameters/1", - "/paths/~1register/post", - "/paths/~1object~1edit_info/put/parameters/1", - "/paths/~1object~1edit_info/put/responses/403", - "/paths/~1object~1edit_info/put/security/0/access-token", - "/paths/~1object~1info/get/security/0/access-token", - "/securityDefinitions/access-token" - ], - "assessmentVersion": "3.1.6", - "assessmentReportVersion": "1.0.1", - "commit": "theePhohphooQuoh6ii3naiS1Goalee9Chooghei-N", - "oasVersion": "2.0", - "apiVersion": "UAT-JWT-Validation", - "fileId": "2eeb479e-ddfa-11ee-9768-bb6e68d5b5fa", - "apiId": "", - "openapiState": "valid", - "score": 79.94, - "valid": true, - "criticality": 3, - "issueCounter": 13, - "warnings": { - "issues": { - "warning-global-schema-unused": { - "description": "Reusable schema definition is not used in the OpenAPI definition", - "totalIssues": 1, - "issues": [ - { - "pointer": 3, - "specificDescription": "The reusable schema definition 'acme_claims' is not used in the OpenAPI definition", - "fingerprint": "ahthi2Ahshaeghah2iewoo0aiF4quoath5Iej0ku" - } - ], - "tooManyError": false - }, - "warning-sample-undefined": { - "description": "No sample values or examples were provided for API Conformance Scan", - "totalIssues": 5, - "issues": [ - { - "pointer": 17, - "specificDescription": "No sample defined in the 'Parameter' object", - "fingerprint": "aereePheeb0puh5tahwoshi8Yei9woophahr7koh" - }, - { - "pointer": 9, - "specificDescription": "No sample defined in the 'Parameter' object", - "fingerprint": "aiseiquohNaik9aThae9oshu8te8ree9Yayie7Ha" - }, - { - "pointer": 10, - "specificDescription": "No sample defined in the 'Parameter' object", - "fingerprint": "thuf5Imiefe3aeTee4soh8quae8ahtho0ap8wen4" - }, - { - "pointer": 6, - "specificDescription": "No sample defined in the 'Parameter' object", - "fingerprint": "faeti4aide0ahTho0shiixo5cheipha9Eigahr3s" - }, - { - "pointer": 14, - "specificDescription": "No sample defined in the 'Parameter' object", - "fingerprint": "Dei9Ahraer7iech8iuk6eeyeero8quea3nahc8ah" - } - ], - "tooManyError": false - }, - "warning-schema-additionalproperties-boolean": { - "description": "Schema defines additionalProperties as a boolean value", - "totalIssues": 3, - "issues": [ - { - "pointer": 2, - "specificDescription": "", - "fingerprint": "shoo1diedoh2aex6mivi9geab9saeyoo7Dae6oth" - }, - { - "pointer": 4, - "specificDescription": "", - "fingerprint": "ooreiz0gepaeSephah6ToN8eC7tioseez4auQu3U" - }, - { - "pointer": 0, - "specificDescription": "", - "fingerprint": "aedaal8uu5aabuohuoSheidoonohSheef2iquee6" - } - ], - "tooManyError": false - }, - 
"warning-schema-format-improper": { - "description": "Schema format is not applicable to the schema's type", - "totalIssues": 1, - "issues": [ - { - "pointer": 5, - "specificDescription": "The format 'int32' of the schema is not applicable to the schema's type 'number'", - "fingerprint": "va8Lieweu5SieTh1ahcoole0Nahhai5ivaechith" - } - ], - "tooManyError": false - } - } - }, - "operationsNoAuthentication": [ - 13, - 16 - ], - "minimalReport": false, - "maxEntriesPerIssue": 30, - "maxImpactedPerEntry": 30, - "security": { - "issueCounter": 5, - "score": 20, - "criticality": 3, - "issues": { - "global-securityscheme-apikey-inheader": { - "description": "Transporting API keys in a header over network allowed", - "issues": [ - { - "score": 0, - "pointer": 21, - "tooManyImpacted": false, - "criticality": 1, - "request": true, - "fingerprint": "auCh0yi8sheumohruegh7of4EiT0ahngooK1aeje" - } - ], - "issueCounter": 1, - "score": 0, - "criticality": 1, - "tooManyError": false - }, - "operation-securityrequirement-apikey-inheader": { - "description": "Operation accepts API keys transported in a header over network", - "issues": [ - { - "score": -2.5, - "pointer": 8, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "Eima0iu4xaatoh1lohboophohpheiBai1iR0opei" - }, - { - "score": -2.5, - "pointer": 12, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "Ud1ohcetah5iongai8yee0veishogai2vuQuu7me" - }, - { - "score": -2.5, - "pointer": 19, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "wooN7xoof5bieChie9Aech5ohm4eerae1enu6ohr" - }, - { - "score": -2.5, - "pointer": 20, - "tooManyImpacted": false, - "criticality": 3, - "request": true, - "fingerprint": "eeliequooliexohfookosang7hooruR4pae9Aiph" - } - ], - "issueCounter": 4, - "score": -10, - "criticality": 3, - "tooManyError": false - } - }, - "subgroupIssueCounter": { - "authentication": { - "none": 0, - "info": 1, - "low": 0, - "medium": 4, - "high": 0, - "critical": 0 - }, - "authorization": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "transport": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - } - } - }, - "data": { - "issueCounter": 8, - "score": 59.94, - "criticality": 3, - "issues": { - "parameter-string-maxlength": { - "description": "String parameter has no maximum length defined", - "issues": [ - { - "specificDescription": "String parameter 'user' has no maximum length defined", - "score": -1.87, - "pointer": 14, - "tooManyImpacted": false, - "pointersAffected": [ - 13 - ], - "criticality": 3, - "request": true, - "fingerprint": "eeT0queiSahchohc5meik9Zoomoolah6Weo3phes" - }, - { - "specificDescription": "String parameter 'pass' has no maximum length defined", - "score": -1.87, - "pointer": 15, - "tooManyImpacted": false, - "pointersAffected": [ - 13 - ], - "criticality": 3, - "request": true, - "fingerprint": "ohvieX1AhzuphoocheeVoi0echoGh9coo7thai1o" - } - ], - "issueCounter": 2, - "score": -3.73, - "criticality": 3, - "tooManyError": false - }, - "parameter-string-pattern": { - "description": "String parameter has no pattern defined", - "issues": [ - { - "specificDescription": "String parameter 'user' has no pattern defined", - "score": -2.8, - "pointer": 14, - "tooManyImpacted": false, - "pointersAffected": [ - 13 - ], - "criticality": 3, - "request": true, - "fingerprint": "oveedeisohwahThae4Ier5oghaebaingai5iqueS" - }, - { - "specificDescription": "String 
parameter 'pass' has no pattern defined", - "score": -2.8, - "pointer": 15, - "tooManyImpacted": false, - "pointersAffected": [ - 13 - ], - "criticality": 3, - "request": true, - "fingerprint": "Iyung2laiGaish6kos6quiedeiX5uob3Bozee3mu" - } - ], - "issueCounter": 2, - "score": -5.6, - "criticality": 3, - "tooManyError": false - }, - "response-schema-undefined": { - "description": "Response that should contain a body has no schema defined", - "issues": [ - { - "score": -0.18, - "pointer": 7, - "tooManyImpacted": false, - "criticality": 3, - "response": true, - "fingerprint": "aeVahquu6chai1beaf9neithu8epha0Ohsh6echi" - }, - { - "score": -0.18, - "pointer": 11, - "tooManyImpacted": false, - "criticality": 3, - "response": true, - "fingerprint": "ai8Meishei0oHixuSucaiceL0aqu8uocahyahG6l" - }, - { - "score": -0.18, - "pointer": 18, - "tooManyImpacted": false, - "criticality": 3, - "response": true, - "fingerprint": "euN9zohhohPeesoY8ahbaichae6Ood0nohbio5ke" - } - ], - "issueCounter": 3, - "score": -0.53, - "criticality": 3, - "tooManyError": false - }, - "schema-response-object-without-properties": { - "description": "Schema of a JSON object in a response has no properties defined", - "issues": [ - { - "score": -0.2, - "pointer": 1, - "tooManyImpacted": false, - "criticality": 3, - "response": true, - "fingerprint": "ufuPheiyaelaePood3AeW8ooc3pooj2AiwaiCeil" - } - ], - "issueCounter": 1, - "score": -0.2, - "criticality": 3, - "tooManyError": false - } - }, - "subgroupIssueCounter": { - "parameters": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "responseHeader": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - }, - "responseDefinition": { - "none": 0, - "info": 0, - "low": 0, - "medium": 3, - "high": 0, - "critical": 0 - }, - "schema": { - "none": 0, - "info": 0, - "low": 0, - "medium": 1, - "high": 0, - "critical": 0 - }, - "paths": { - "none": 0, - "info": 0, - "low": 0, - "medium": 0, - "high": 0, - "critical": 0 - } - } - }, - "issuesKey": [ - "schema-response-object-without-properties", - "warning-schema-additionalproperties-boolean", - "parameter-string-pattern", - "parameter-string-maxlength", - "global-securityscheme-apikey-inheader", - "operation-securityrequirement-apikey-inheader", - "response-schema-undefined", - "warning-schema-format-improper", - "warning-sample-undefined", - "warning-global-schema-unused" - ], - "summary": { - "oasVersion": "2.0", - "apiVersion": "UAT-JWT-Validation", - "basepath": "", - "apiName": "Example App API", - "description": "Example Sharing API", - "endpoints": [ - "https//example.asia-1.cloud.provider.com/api" - ], - "pathCounter": 6, - "operationCounter": 6, - "parameterCounter": 4, - "requestBodyCounter": 3, - "schemesCounter": { - "https": 6 - }, - "requestContentType": { - "application/json": 2, - "application/x-www-form-urlencoded": 1 - }, - "responseContentType": { - "application/json": 16 - }, - "securitySchemes": { - "access-token": { - "counterInsecure": 0, - "counterSecure": 4, - "type": "apiKey", - "apiKeyIn": "header", - "apiKeyName": "x-access-token" - } - }, - "componentsSchemasCounter": 6, - "componentsResponsesCounter": 0, - "componentsParametersCounter": 0, - "componentsExamplesCounter": 0, - "componentsRequestBodiesCounter": 0, - "componentsHeadersCounter": 0, - "componentsSecuritySchemesCounter": 0, - "componentsLinksCounter": 0, - "componentsCallbacksCounter": 0 - } - }, - "start": "1693265564", - "taskId": "970e33ac-ddfc-11ee-a42e-af596b69b8f4" -} \ 
No newline at end of file diff --git a/unittests/scans/npm_audit_7_plus/many_vulns.json b/unittests/scans/npm_audit_7_plus/many_vulns.json deleted file mode 100644 index 2831c8be155..00000000000 --- a/unittests/scans/npm_audit_7_plus/many_vulns.json +++ /dev/null @@ -1,188 +0,0 @@ -{ - "auditReportVersion": 2, - "vulnerabilities": { - "@vercel/fun": { - "name": "@vercel/fun", - "severity": "moderate", - "isDirect": false, - "via": [ - "debug", - "semver" - ], - "effects": [ - "vercel" - ], - "range": "<=1.1.0", - "nodes": [ - "node_modules/@vercel/fun" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - }, - "@vercel/node": { - "name": "@vercel/node", - "severity": "low", - "isDirect": false, - "via": [ - "undici" - ], - "effects": [ - "vercel" - ], - "range": "2.14.0 || >=3.0.2", - "nodes": [ - "node_modules/@vercel/node" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - }, - "debug": { - "name": "debug", - "severity": "moderate", - "isDirect": false, - "via": [ - { - "source": 1094219, - "name": "debug", - "dependency": "debug", - "title": "Regular Expression Denial of Service in debug", - "url": "https://github.com/advisories/GHSA-gxpj-cx7g-858c", - "severity": "moderate", - "cwe": [ - "CWE-400" - ], - "cvss": { - "score": 5.3, - "vectorString": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L" - }, - "range": ">=4.0.0 <4.3.1" - } - ], - "effects": [ - "@vercel/fun" - ], - "range": "4.0.0 - 4.3.0", - "nodes": [ - "node_modules/@vercel/fun/node_modules/debug" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - }, - "semver": { - "name": "semver", - "severity": "moderate", - "isDirect": false, - "via": [ - { - "source": 1096482, - "name": "semver", - "dependency": "semver", - "title": "semver vulnerable to Regular Expression Denial of Service", - "url": "https://github.com/advisories/GHSA-c2qf-rxjj-qqgw", - "severity": "moderate", - "cwe": [ - "CWE-1333" - ], - "cvss": { - "score": 5.3, - "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L" - }, - "range": ">=7.0.0 <7.5.2" - } - ], - "effects": [ - "@vercel/fun" - ], - "range": "7.0.0 - 7.5.1", - "nodes": [ - "node_modules/@vercel/fun/node_modules/semver" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - }, - "undici": { - "name": "undici", - "severity": "low", - "isDirect": false, - "via": [ - { - "source": 1096586, - "name": "undici", - "dependency": "undici", - "title": "Undici proxy-authorization header not cleared on cross-origin redirect in fetch", - "url": "https://github.com/advisories/GHSA-3787-6prv-h9w3", - "severity": "low", - "cwe": [ - "CWE-200" - ], - "cvss": { - "score": 3.9, - "vectorString": "CVSS:3.1/AV:N/AC:H/PR:H/UI:R/S:U/C:L/I:L/A:L" - }, - "range": "<=5.28.2" - } - ], - "effects": [ - "@vercel/node" - ], - "range": "<=5.28.2", - "nodes": [ - "node_modules/undici" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - }, - "vercel": { - "name": "vercel", - "severity": "moderate", - "isDirect": true, - "via": [ - "@vercel/fun", - "@vercel/node" - ], - "effects": [], - "range": "28.12.3 || 29.0.1 - 29.0.3 || >=32.0.2", - "nodes": [ - "node_modules/vercel" - ], - "fixAvailable": { - "name": "vercel", - "version": "32.3.0", - "isSemVerMajor": true - } - } - }, - "metadata": { - "vulnerabilities": { - "info": 0, - "low": 2, - "moderate": 4, - "high": 0, - "critical": 0, - "total": 6 - }, - 
"dependencies": { - "prod": 737, - "dev": 306, - "optional": 153, - "peer": 50, - "peerOptional": 0, - "total": 1180 - } - } -} diff --git a/unittests/scans/npm_audit_7_plus/no_vuln.json b/unittests/scans/npm_audit_7_plus/no_vuln.json deleted file mode 100644 index 9f407cae89c..00000000000 --- a/unittests/scans/npm_audit_7_plus/no_vuln.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "auditReportVersion": 2, - "vulnerabilities": { - }, - "metadata": { - "vulnerabilities": { - "info": 0, - "low": 0, - "moderate": 0, - "high": 0, - "critical": 0, - "total": 0 - }, - "dependencies": { - "prod": 98, - "dev": 0, - "optional": 0, - "peer": 0, - "peerOptional": 0, - "total": 97 - } - } -} diff --git a/unittests/scans/npm_audit_7_plus/one_vuln.json b/unittests/scans/npm_audit_7_plus/one_vuln.json deleted file mode 100644 index 89b48e280f1..00000000000 --- a/unittests/scans/npm_audit_7_plus/one_vuln.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "auditReportVersion": 2, - "vulnerabilities": { - "debug": { - "name": "debug", - "severity": "high", - "isDirect": true, - "via": [ - { - "source": 1094222, - "name": "debug", - "dependency": "debug", - "title": "Regular Expression Denial of Service in debug", - "url": "https://github.com/advisories/GHSA-gxpj-cx7g-858c", - "severity": "moderate", - "cwe": [ - "CWE-400" - ], - "cvss": { - "score": 5.3, - "vectorString": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L" - }, - "range": "<2.6.9" - }, - { - "source": 1094457, - "name": "debug", - "dependency": "debug", - "title": "debug Inefficient Regular Expression Complexity vulnerability", - "url": "https://github.com/advisories/GHSA-9vvw-cc9w-f27h", - "severity": "high", - "cwe": [ - "CWE-1333" - ], - "cvss": { - "score": 7.5, - "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" - }, - "range": "<2.6.9" - }, - "ms" - ], - "effects": [ - ], - "range": "<=2.6.8", - "nodes": [ - "node_modules/debug" - ], - "fixAvailable": { - "name": "express", - "version": "4.18.3", - "isSemVerMajor": false - } - } - }, - "metadata": { - "vulnerabilities": { - "info": 0, - "low": 0, - "moderate": 0, - "high": 1, - "critical": 0, - "total": 1 - }, - "dependencies": { - "prod": 98, - "dev": 0, - "optional": 0, - "peer": 0, - "peerOptional": 0, - "total": 97 - } - } -} - diff --git a/unittests/scans/pip_audit/empty_new.json b/unittests/scans/pip_audit/empty_new.json deleted file mode 100644 index 45f00a3dece..00000000000 --- a/unittests/scans/pip_audit/empty_new.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "dependencies":[] -} diff --git a/unittests/scans/pip_audit/many_vulns_new.json b/unittests/scans/pip_audit/many_vulns_new.json deleted file mode 100644 index 877ebf78ed8..00000000000 --- a/unittests/scans/pip_audit/many_vulns_new.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "dependencies":[ - { - "name": "adal", - "version": "1.2.2", - "vulns": [] - }, - { - "name": "aiohttp", - "version": "3.6.2", - "vulns": [ - { - "id": "PYSEC-2021-76", - "fix_versions": [ - "3.7.4" - ], - "description": "aiohttp is an asynchronous HTTP client/server framework for asyncio and Python. In aiohttp before version 3.7.4 there is an open redirect vulnerability. A maliciously crafted link to an aiohttp-based web-server could redirect the browser to a different website. It is caused by a bug in the `aiohttp.web_middlewares.normalize_path_middleware` middleware. This security problem has been fixed in 3.7.4. Upgrade your dependency using pip as follows \"pip install aiohttp >= 3.7.4\". 
If upgrading is not an option for you, a workaround can be to avoid using `aiohttp.web_middlewares.normalize_path_middleware` in your applications." - } - ] - }, - { - "name": "alabaster", - "version": "0.7.12", - "vulns": [] - }, - { - "name": "azure-devops", - "skip_reason": "Dependency not found on PyPI and could not be audited: azure-devops (0.17.0)" - }, - { - "name": "django", - "version": "3.2.9", - "vulns": [ - { - "id": "PYSEC-2021-439", - "fix_versions": [ - "2.2.25", - "3.1.14", - "3.2.10" - ], - "description": "In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths." - } - ] - }, - { - "name": "lxml", - "version": "4.6.4", - "vulns": [ - { - "id": "PYSEC-2021-852", - "fix_versions": [], - "description": "lxml is a library for processing XML and HTML in the Python language. Prior to version 4.6.5, the HTML Cleaner in lxml.html lets certain crafted script content pass through, as well as script content in SVG files embedded using data URIs. Users that employ the HTML cleaner in a security relevant context should upgrade to lxml 4.6.5 to receive a patch. There are no known workarounds available." - } - ] - }, - { - "name": "twisted", - "version": "18.9.0", - "vulns": [ - { - "id": "PYSEC-2019-128", - "fix_versions": [ - "19.2.1" - ], - "description": "In Twisted before 19.2.1, twisted.web did not validate or sanitize URIs or HTTP methods, allowing an attacker to inject invalid characters such as CRLF." - }, - { - "id": "PYSEC-2020-260", - "fix_versions": [ - "20.3.0rc1" - ], - "description": "In Twisted Web through 19.10.0, there was an HTTP request splitting vulnerability. When presented with a content-length and a chunked encoding header, the content-length took precedence and the remainder of the request body was interpreted as a pipelined request." - }, - { - "id": "PYSEC-2019-129", - "fix_versions": [ - "19.7.0rc1" - ], - "description": "In words.protocols.jabber.xmlstream in Twisted through 19.2.1, XMPP support did not verify certificates when used with TLS, allowing an attacker to MITM connections." - }, - { - "id": "PYSEC-2020-259", - "fix_versions": [ - "20.3.0rc1" - ], - "description": "In Twisted Web through 19.10.0, there was an HTTP request splitting vulnerability. When presented with two content-length headers, it ignored the first header. When the second content-length value was set to zero, the request body was interpreted as a pipelined request." 
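For reference, the fixture being deleted here exercises the current pip-audit output shape, a top-level "dependencies" list, while the fixtures this diff keeps use the legacy bare-list shape. A minimal sketch, assuming a saved report file and an illustrative helper name (this is not the DefectDojo parser itself), of telling the two shapes apart:

import json

def iter_packages(path):
    with open(path) as f:
        data = json.load(f)
    # Current format: {"dependencies": [...]}; legacy format: [...]
    packages = data["dependencies"] if isinstance(data, dict) else data
    for pkg in packages:
        # Entries carrying only a "skip_reason" were never audited and have
        # no "vulns" list, so they produce no findings.
        if "skip_reason" in pkg:
            continue
        yield pkg["name"], pkg.get("version"), pkg.get("vulns", [])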
- } - ] - } - ] -} diff --git a/unittests/scans/pip_audit/zero_vulns_new.json b/unittests/scans/pip_audit/zero_vulns_new.json deleted file mode 100644 index f32e9b1b25e..00000000000 --- a/unittests/scans/pip_audit/zero_vulns_new.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "dependencies":[ - { - "name": "adal", - "version": "1.2.2", - "vulns": [] - }, - { - "name": "alabaster", - "version": "0.7.12", - "vulns": [] - }, - { - "name": "azure-devops", - "skip_reason": "Dependency not found on PyPI and could not be audited: azure-devops (0.17.0)" - } - ] -} diff --git a/unittests/tools/test_checkmarx_one_parser.py b/unittests/tools/test_checkmarx_one_parser.py deleted file mode 100644 index 31d6fdbed55..00000000000 --- a/unittests/tools/test_checkmarx_one_parser.py +++ /dev/null @@ -1,47 +0,0 @@ -from dojo.models import Test -from dojo.tools.checkmarx_one.parser import CheckmarxOneParser -from ..dojo_test_case import DojoTestCase - - -class TestCheckmarxOneParser(DojoTestCase): - - def test_checkmarx_one_many_vulns(self): - with open("unittests/scans/checkmarx_one/checkmarx_one.json") as testfile: - parser = CheckmarxOneParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(5, len(findings)) - with self.subTest(i=0): - for finding in findings: - self.assertIsNotNone(finding.unique_id_from_tool) - self.assertIsNotNone(finding.title) - self.assertIsNotNone(finding.test) - self.assertIsNotNone(finding.date) - self.assertIsNotNone(finding.severity) - self.assertIsNotNone(finding.description) - finding_test = findings[0] - self.assertEqual("Medium", finding_test.severity) - self.assertEqual("/src/helpers/Constants.ts", finding_test.file_path) - - def test_checkmarx_one_many_findings(self): - with open("unittests/scans/checkmarx_one/many_findings.json") as testfile: - parser = CheckmarxOneParser() - findings = parser.get_findings(testfile, Test()) - self.assertEqual(6, len(findings)) - with self.subTest(i=0): - for finding in findings: - self.assertIsNotNone(finding.unique_id_from_tool) - self.assertIsNotNone(finding.title) - self.assertIsNotNone(finding.test) - self.assertIsNotNone(finding.date) - self.assertIsNotNone(finding.severity) - self.assertIsNotNone(finding.description) - finding_test = findings[0] - self.assertEqual("High", finding_test.severity) - self.assertEqual("/qe/testharness/Dockerfile", finding_test.file_path) - - def test_checkmarx_one_no_findings(self): - with open("unittests/scans/checkmarx_one/no_findings.json") as testfile: - parser = CheckmarxOneParser() - findings = parser.get_findings(testfile, Test()) - self.assertEqual(0, len(findings)) diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py index f09e7d7da13..c43e24fb572 100644 --- a/unittests/tools/test_checkmarx_parser.py +++ b/unittests/tools/test_checkmarx_parser.py @@ -203,8 +203,8 @@ def check_parse_file_with_single_vulnerability_has_single_finding(self, findings item.file_path, ) # ScanStart - self.assertEqual(datetime.date, type(item.date)) - self.assertEqual(datetime.date(2018, 2, 25), item.date) + self.assertEqual(datetime.datetime, type(item.date)) + self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), item.date) self.assertEqual(bool, type(item.static_finding)) self.assertEqual(True, item.static_finding) @@ -293,7 +293,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi finding = findings[0] self.assertEqual("SQL Injection (Assignment5.java)", finding.title) self.assertEqual("High", 
finding.severity) - self.assertEqual(datetime.date(2018, 2, 25), finding.date) + self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), finding.date) self.assertEqual(True, finding.static_finding) self.assertEqual("WebGoat/webgoat-lessons/challenge/src/main/java/org/owasp/webgoat/plugin/challenge5/challenge6/Assignment5.java", finding.file_path) @@ -312,7 +312,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings finding = findings[0] self.assertEqual("SQL Injection (Assignment5.java)", finding.title) self.assertEqual("High", finding.severity) - self.assertEqual(datetime.date(2018, 2, 25), finding.date) + self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), finding.date) self.assertEqual(True, finding.static_finding) self.assertEqual("WebGoat/webgoat-lessons/challenge/src/main/java/org/owasp/webgoat/plugin/challenge5/challenge6/Assignment5.java", finding.file_path) self.assertEqual(50, finding.line) @@ -516,8 +516,8 @@ def check_parse_file_with_utf8_replacement_char(self, findings): item.file_path, ) # ScanStart - self.assertEqual(datetime.date, type(item.date)) - self.assertEqual(datetime.date(2018, 2, 25), item.date) + self.assertEqual(datetime.datetime, type(item.date)) + self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), item.date) self.assertEqual(bool, type(item.static_finding)) self.assertEqual(True, item.static_finding) @@ -665,8 +665,8 @@ def check_parse_file_with_utf8_various_non_ascii_char(self, findings): item.file_path, ) # ScanStart - self.assertEqual(datetime.date, type(item.date)) - self.assertEqual(datetime.date(2018, 2, 25), item.date) + self.assertEqual(datetime.datetime, type(item.date)) + self.assertEqual(datetime.datetime(2018, 2, 25, 11, 35, 52), item.date) self.assertEqual(bool, type(item.static_finding)) self.assertEqual(True, item.static_finding) @@ -685,8 +685,8 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock): # ScanStart self.assertEqual("Client Potential ReDoS In Match (prettify.js)", finding.title) self.assertEqual("Low", finding.severity) - self.assertEqual(datetime.date, type(finding.date)) - self.assertEqual(datetime.date(2021, 11, 17), finding.date) + self.assertEqual(datetime.datetime, type(finding.date)) + self.assertEqual(datetime.datetime(2021, 11, 17, 13, 50, 45), finding.date) self.assertEqual(bool, type(finding.static_finding)) self.assertEqual(True, finding.static_finding) @@ -705,8 +705,8 @@ def test_file_with_empty_filename(self, mock): # ScanStart self.assertEqual("Missing HSTS Header", finding.title) self.assertEqual("Medium", finding.severity) - self.assertEqual(datetime.date, type(finding.date)) - self.assertEqual(datetime.date(2021, 12, 24), finding.date) + self.assertEqual(datetime.datetime, type(finding.date)) + self.assertEqual(datetime.datetime(2021, 12, 24, 9, 12, 14), finding.date) self.assertEqual(bool, type(finding.static_finding)) self.assertEqual(True, finding.static_finding) @@ -791,7 +791,7 @@ def test_file_issue6956(self, mock): self.assertEqual(89, finding.cwe) self.assertEqual("/webgoat-lessons/challenge/src/main/java/org/owasp/webgoat/challenges/challenge5/Assignment5.java", finding.file_path) self.assertEqual(61, finding.line) - self.assertEqual(datetime.date(2022, 5, 6), finding.date) + self.assertEqual(datetime.date(2022, 5, 6), finding.date.date()) if finding.unique_id_from_tool == "SYlu22e7ZQydKJFOlC/o1EsyixQ=": with self.subTest(i="SYlu22e7ZQydKJFOlC/o1EsyixQ="): self.assertEqual("SQL Injection", finding.title) @@ -799,7 +799,7 @@ 
def test_file_issue6956(self, mock): self.assertEqual(89, finding.cwe) self.assertEqual("/webgoat-lessons/sql-injection/src/main/java/org/owasp/webgoat/sql_injection/introduction/SqlInjectionLesson5.java", finding.file_path) self.assertEqual(72, finding.line) - self.assertEqual(datetime.date(2022, 5, 6), finding.date) + self.assertEqual(datetime.date(2022, 5, 6), finding.date.date()) # test one in SCA part if finding.unique_id_from_tool == "GkVx1zoIKcd1EF72zqWrGzeVTmo=": with self.subTest(i="GkVx1zoIKcd1EF72zqWrGzeVTmo="): @@ -812,7 +812,7 @@ def test_file_issue6956(self, mock): self.assertTrue(finding.active) self.assertFalse(finding.verified) self.assertIsNone(finding.line) - self.assertEqual(datetime.date(2022, 5, 6), finding.date) + self.assertEqual(datetime.date(2022, 5, 6), finding.date.date()) # test one in KICS part if finding.unique_id_from_tool == "eZrh18HAPbe2LbDAprSPrwncAC0=": with self.subTest(i="eZrh18HAPbe2LbDAprSPrwncAC0="): @@ -822,26 +822,4 @@ def test_file_issue6956(self, mock): self.assertTrue(finding.active) self.assertFalse(finding.verified) self.assertEqual("/webgoat-server/Dockerfile", finding.file_path) - self.assertEqual(datetime.date(2022, 5, 6), finding.date) - - @patch('dojo.tools.checkmarx.parser.add_language') - def test_finding_date_should_be_date_xml(self, mock): - my_file_handle, product, engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding.xml" - ) - parser = CheckmarxParser() - parser.set_mode('detailed') - findings = parser.get_findings(my_file_handle, test) - self.teardown(my_file_handle) - self.assertEqual(findings[0].date, datetime.date(2018, 2, 25)) - - @patch('dojo.tools.checkmarx.parser.add_language') - def test_finding_date_should_be_date_json(self, mock): - my_file_handle, product, engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json" - ) - parser = CheckmarxParser() - parser.set_mode('detailed') - findings = parser.get_findings(my_file_handle, test) - self.teardown(my_file_handle) - self.assertEqual(findings[0].date, datetime.date(2022, 2, 25)) + self.assertEqual(datetime.date(2022, 5, 6), finding.date.date()) diff --git a/unittests/tools/test_crunch42_parser.py b/unittests/tools/test_crunch42_parser.py deleted file mode 100644 index ea5188d3034..00000000000 --- a/unittests/tools/test_crunch42_parser.py +++ /dev/null @@ -1,32 +0,0 @@ -from ..dojo_test_case import DojoTestCase -from dojo.models import Test -from dojo.tools.crunch42.parser import Crunch42Parser - - -class TestCrunch42Parser(DojoTestCase): - - def test_crunch42parser_single_has_many_findings(self): - testfile = open("unittests/scans/crunch42/crunch42_many_findings.json") - parser = Crunch42Parser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(8, len(findings)) - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("teephei0aes4ohxur7Atie6zuiCh9weeshue0kai", finding.unique_id_from_tool) - self.assertEqual("Info", finding.severity) - self.assertIsNotNone(finding.description) - self.assertGreater(len(finding.description), 0) - - def test_crunch42parser_single_has_many_findings2(self): - testfile = open("unittests/scans/crunch42/crunch42_many_findings2.json") - parser = Crunch42Parser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(5, len(findings)) - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("auCh0yi8sheumohruegh7of4EiT0ahngooK1aeje", finding.unique_id_from_tool) - 
self.assertEqual("Info", finding.severity) - self.assertIsNotNone(finding.description) - self.assertGreater(len(finding.description), 0) diff --git a/unittests/tools/test_npm_audit_7_plus_parser.py b/unittests/tools/test_npm_audit_7_plus_parser.py deleted file mode 100644 index cf1cb339e73..00000000000 --- a/unittests/tools/test_npm_audit_7_plus_parser.py +++ /dev/null @@ -1,41 +0,0 @@ -from os import path -from ..dojo_test_case import DojoTestCase -from dojo.tools.npm_audit_7_plus.parser import NpmAudit7PlusParser -from dojo.models import Test - - -class TestNpmAudit7PlusParser(DojoTestCase): - def test_npm_audit_7_plus_parser_with_no_vuln_has_no_findings(self): - testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/no_vuln.json")) - parser = NpmAudit7PlusParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(0, len(findings)) - - def test_npm_audit_7_plus_parser_with_one_vuln_has_one_findings(self): - testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/one_vuln.json")) - parser = NpmAudit7PlusParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(1, len(findings)) - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("High", finding.severity) - self.assertEqual(400, finding.cwe) - self.assertIsNotNone(finding.description) - self.assertGreater(len(finding.description), 0) - self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3) - - def test_npm_audit_7_plus_parser_with_many_vuln_has_many_findings(self): - testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/many_vulns.json")) - parser = NpmAudit7PlusParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(6, len(findings)) - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("Medium", finding.severity) - self.assertEqual(1035, finding.cwe) - self.assertIsNotNone(finding.description) - self.assertGreater(len(finding.description), 0) - self.assertEqual("@vercel/fun", finding.title) diff --git a/unittests/tools/test_pip_audit_parser.py b/unittests/tools/test_pip_audit_parser.py index 237945cfc67..eb421f761a0 100644 --- a/unittests/tools/test_pip_audit_parser.py +++ b/unittests/tools/test_pip_audit_parser.py @@ -7,83 +7,80 @@ class TestPipAuditParser(DojoTestCase): def test_parser_empty(self): - testfiles = ["unittests/scans/pip_audit/empty.json", - "unittests/scans/pip_audit/empty_new.json"] - for path in testfiles: - testfile = open(path) - parser = PipAuditParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(0, len(findings)) + testfile = open("unittests/scans/pip_audit/empty.json") + parser = PipAuditParser() + findings = parser.get_findings(testfile, Test()) + testfile.close() + self.assertEqual(0, len(findings)) def test_parser_zero_findings(self): - testfiles = ["unittests/scans/pip_audit/zero_vulns.json", - "unittests/scans/pip_audit/zero_vulns_new.json"] - for path in testfiles: - testfile = open(path) - parser = PipAuditParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(0, len(findings)) + testfile = open("unittests/scans/pip_audit/zero_vulns.json") + parser = PipAuditParser() + findings = parser.get_findings(testfile, Test()) + testfile.close() + self.assertEqual(0, len(findings)) def test_parser_many_vulns(self): - testfiles = ["unittests/scans/pip_audit/many_vulns.json", - 
"unittests/scans/pip_audit/many_vulns_new.json"] - for path in testfiles: - testfile = open(path) - parser = PipAuditParser() - findings = parser.get_findings(testfile, Test()) - testfile.close() - self.assertEqual(7, len(findings)) + testfile = open("unittests/scans/pip_audit/many_vulns.json") + parser = PipAuditParser() + findings = parser.get_findings(testfile, Test()) + testfile.close() + self.assertEqual(7, len(findings)) - finding = findings[0] - self.assertEqual('PYSEC-2021-76 in aiohttp:3.6.2', finding.title) - description = 'aiohttp is an asynchronous HTTP client/server framework for asyncio and Python. In aiohttp before version 3.7.4 there is an open redirect vulnerability. A maliciously crafted link to an aiohttp-based web-server could redirect the browser to a different website. It is caused by a bug in the `aiohttp.web_middlewares.normalize_path_middleware` middleware. This security problem has been fixed in 3.7.4. Upgrade your dependency using pip as follows "pip install aiohttp >= 3.7.4". If upgrading is not an option for you, a workaround can be to avoid using `aiohttp.web_middlewares.normalize_path_middleware` in your applications.' - self.assertEqual(description, finding.description) - self.assertEqual(1395, finding.cwe) - vulnerability_ids = finding.unsaved_vulnerability_ids - self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('PYSEC-2021-76', vulnerability_ids[0]) - self.assertEqual('Medium', finding.severity) - self.assertEqual('Upgrade to version: 3.7.4', finding.mitigation) - self.assertEqual('aiohttp', finding.component_name) - self.assertEqual('3.6.2', finding.component_version) - self.assertEqual('PYSEC-2021-76', finding.vuln_id_from_tool) + finding = findings[0] + self.assertEqual('PYSEC-2021-76 in aiohttp:3.6.2', finding.title) + description = 'aiohttp is an asynchronous HTTP client/server framework for asyncio and Python. In aiohttp before version 3.7.4 there is an open redirect vulnerability. A maliciously crafted link to an aiohttp-based web-server could redirect the browser to a different website. It is caused by a bug in the `aiohttp.web_middlewares.normalize_path_middleware` middleware. This security problem has been fixed in 3.7.4. Upgrade your dependency using pip as follows "pip install aiohttp >= 3.7.4". If upgrading is not an option for you, a workaround can be to avoid using `aiohttp.web_middlewares.normalize_path_middleware` in your applications.' + self.assertEqual(description, finding.description) + self.assertEqual(1352, finding.cwe) + vulnerability_ids = finding.unsaved_vulnerability_ids + self.assertEqual(1, len(vulnerability_ids)) + self.assertEqual('PYSEC-2021-76', vulnerability_ids[0]) + self.assertEqual('Medium', finding.severity) + self.assertEqual('Upgrade to version: 3.7.4', finding.mitigation) + self.assertEqual('aiohttp', finding.component_name) + self.assertEqual('3.6.2', finding.component_version) + self.assertEqual('PYSEC-2021-76', finding.vuln_id_from_tool) - finding = findings[1] - self.assertEqual('PYSEC-2021-439 in django:3.2.9', finding.title) - description = 'In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths.' 
- self.assertEqual(description, finding.description) - vulnerability_ids = finding.unsaved_vulnerability_ids - self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('PYSEC-2021-439', vulnerability_ids[0]) - self.assertEqual(1395, finding.cwe) - self.assertEqual('Medium', finding.severity) - self.assertEqual('django', finding.component_name) - self.assertEqual('3.2.9', finding.component_version) - self.assertEqual('PYSEC-2021-439', finding.vuln_id_from_tool) + finding = findings[1] + self.assertEqual('PYSEC-2021-439 in django:3.2.9', finding.title) + description = 'In Django 2.2 before 2.2.25, 3.1 before 3.1.14, and 3.2 before 3.2.10, HTTP requests for URLs with trailing newlines could bypass upstream access control based on URL paths.' + self.assertEqual(description, finding.description) + vulnerability_ids = finding.unsaved_vulnerability_ids + self.assertEqual(1, len(vulnerability_ids)) + self.assertEqual('PYSEC-2021-439', vulnerability_ids[0]) + self.assertEqual(1352, finding.cwe) + self.assertEqual('Medium', finding.severity) + mitigation = '''Upgrade to version: +- 2.2.25 +- 3.1.14 +- 3.2.10''' + self.assertEqual(mitigation, finding.mitigation) + self.assertEqual('django', finding.component_name) + self.assertEqual('3.2.9', finding.component_version) + self.assertEqual('PYSEC-2021-439', finding.vuln_id_from_tool) - finding = findings[2] - self.assertEqual('PYSEC-2021-852 in lxml:4.6.4', finding.title) - description = 'lxml is a library for processing XML and HTML in the Python language. Prior to version 4.6.5, the HTML Cleaner in lxml.html lets certain crafted script content pass through, as well as script content in SVG files embedded using data URIs. Users that employ the HTML cleaner in a security relevant context should upgrade to lxml 4.6.5 to receive a patch. There are no known workarounds available.' - self.assertEqual(description, finding.description) - vulnerability_ids = finding.unsaved_vulnerability_ids - self.assertEqual(1, len(vulnerability_ids)) - self.assertEqual('PYSEC-2021-852', vulnerability_ids[0]) - self.assertEqual(1395, finding.cwe) - self.assertEqual('Medium', finding.severity) - self.assertEqual('lxml', finding.component_name) - self.assertEqual('4.6.4', finding.component_version) - self.assertEqual('PYSEC-2021-852', finding.vuln_id_from_tool) + finding = findings[2] + self.assertEqual('PYSEC-2021-852 in lxml:4.6.4', finding.title) + description = 'lxml is a library for processing XML and HTML in the Python language. Prior to version 4.6.5, the HTML Cleaner in lxml.html lets certain crafted script content pass through, as well as script content in SVG files embedded using data URIs. Users that employ the HTML cleaner in a security relevant context should upgrade to lxml 4.6.5 to receive a patch. There are no known workarounds available.' 
+ self.assertEqual(description, finding.description) + vulnerability_ids = finding.unsaved_vulnerability_ids + self.assertEqual(1, len(vulnerability_ids)) + self.assertEqual('PYSEC-2021-852', vulnerability_ids[0]) + self.assertEqual(1352, finding.cwe) + self.assertEqual('Medium', finding.severity) + self.assertIsNone(finding.mitigation) + self.assertEqual('lxml', finding.component_name) + self.assertEqual('4.6.4', finding.component_version) + self.assertEqual('PYSEC-2021-852', finding.vuln_id_from_tool) - finding = findings[3] - self.assertEqual('PYSEC-2019-128 in twisted:18.9.0', finding.title) + finding = findings[3] + self.assertEqual('PYSEC-2019-128 in twisted:18.9.0', finding.title) - finding = findings[4] - self.assertEqual('PYSEC-2020-260 in twisted:18.9.0', finding.title) + finding = findings[4] + self.assertEqual('PYSEC-2020-260 in twisted:18.9.0', finding.title) - finding = findings[5] - self.assertEqual('PYSEC-2019-129 in twisted:18.9.0', finding.title) + finding = findings[5] + self.assertEqual('PYSEC-2019-129 in twisted:18.9.0', finding.title) - finding = findings[6] - self.assertEqual('PYSEC-2020-259 in twisted:18.9.0', finding.title) + finding = findings[6] + self.assertEqual('PYSEC-2020-259 in twisted:18.9.0', finding.title)