[... binary image response body omitted ...]",
+ "HTTP2 Headers": "[[markup]]The following HTTP/2 headers were set within the request:\n\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| Header | Value |\n+===================+===========================================================================================================+\n| :method | POST |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| :scheme | https |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| :authority | www.xzzvwy.com |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| :path | /media/mmzzvwy/pingpong2.jpg?rmode=max&height=500 |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| accept | text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| referer | https://www.xzzvwy.com/vidyas/lowkey-fruit-picking/ |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| user-agent | Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| accept-encoding | identity |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n| transfer-encoding | chunked |\n+-------------------+-----------------------------------------------------------------------------------------------------------+\n\n",
+ "_meta": {
+ "ordered_keys": [
+ "Technical Details",
+ "Messages",
+ "HTTP2 Headers"
+ ]
+ }
+ },
+ "cvss_score": 0.0,
+ "type": "WEB_APP",
+ "web_app": "https://www.xzzvwy.com",
+ "cvss_v4_vector": "CVSS:4.0/AV:N/AC:L/AT:N/PR:N/UI:N/VC:N/VI:N/VA:N/SC:N/SI:N/SA:N",
+ "mss_confirmed": false,
+ "category": "web_app",
+ "description": "[[markup]]The remote server accepted a malformed HTTP/2 message. In some cases this tolerance to malformed requests\ncan introduce security flaws.\n\nThis finding is reported to aid in the research of HTTP/2 and H2 to H1 translation vulnerabilities and may not\nbe readily exploitable.",
+ "tags": [
+ "web_app"
+ ],
+ "cvss_v4_base_score": 0.0,
+ "ipv4_address": "127.0.0.1",
+ "host": "www.xzzvwy.com",
+ "cvss_v3_vector": "CVSS:3.0/AV:L/AC:H/PR:H/UI:R/S:U/C:N/I:N/A:N",
+ "internal": false,
+ "OWASP": [],
+ "_meta": {
+ "ordered_keys": [
+ "mss_confirmed",
+ "domain",
+ "manually_altered",
+ "probability",
+ "cvss_base_score",
+ "results_set_id",
+ "cvss_v3_base_score",
+ "solution",
+ "cvss_v3_impact",
+ "meta",
+ "first_detected_at",
+ "cvss_v4_impact",
+ "cves",
+ "impact",
+ "last_detected_at",
+ "authenticated",
+ "title",
+ "epss_base_score",
+ "signature",
+ "integration",
+ "port",
+ "priority",
+ "internal",
+ "details",
+ "cvss_score",
+ "type",
+ "web_app",
+ "cvss_v4_vector",
+ "status",
+ "category",
+ "description",
+ "tags",
+ "cpe",
+ "ipv4_address",
+ "host",
+ "cvss_v3_vector",
+ "OWASP",
+ "appcheck",
+ "disabled",
+ "cvss_vector",
+ "fixed",
+ "best_practice",
+ "target",
+ "created",
+ "url",
+ "latest_detected_result",
+ "plugin",
+ "modified",
+ "patch",
+ "attachments",
+ "suppression_days",
+ "synopsis",
+ "meta_migration",
+ "trashed",
+ "cvss_v4_base_score",
+ "_id"
+ ]
+ },
+ "appcheck": {
+ "_meta": {
+ "ordered_keys": [
+ "cls"
+ ]
+ },
+ "cls": "HTTP2_ProtocolViolation"
+ },
+ "disabled": false,
+ "cvss_vector": "AV:L/AC:H/Au:M/C:N/I:N/A:N",
+ "_id": "4e7c0b570ff6083376b99e1897102a87907effe2199dc8d4",
+ "best_practice": "2024-08-06T13:59:27.680000",
+ "target": "/media/mmzzvwy/pingpong2.jpg",
+ "created": "2024-08-06T13:59:28.656000",
+ "url": "https://www.xzzvwy.com/media/mmzzvwy/pingpong2.jpg?rmode=max&height=500",
+ "latest_detected_result": true,
+ "plugin": "HTTP2ProtocolAnalysis",
+ "solution": "[[markup]]Ensure that the system accepting HTTP/2 connections is kept up-to-date. Several vulnerabilities and attack\ntechniques were published in August and September 2021 that leveraged discrepancies in processing HTTP/2\nrequests; updates may therefore follow to harden the affected device.\n\nThis finding is reported as part of the HTTP/2 assessment process and may not constitute a specific\nvulnerability.",
+ "patch": [],
+ "attachments": [],
+ "suppression_days": 14,
+ "synopsis": "[[markup]]The remote server accepted a malformed HTTP/2 message. In some cases this tolerance to malformed requests\ncan introduce security flaws.",
+ "meta_migration": [],
+ "trashed": false,
+ "signature": -7541453225234650774,
+ "fixed": false
+ },
+ {
+ "status": "unfixed",
+ "domain": "d10b91e0ff3947b7",
+ "manually_altered": false,
+ "probability": "1.0-info",
+ "cvss_base_score": -0.0,
+ "cvss_v3_base_score": 0.0,
+ "cpe": [],
+ "cvss_v3_impact": "1.0-info",
+ "meta": {
+ "discovered_after": 32062.534193992615,
+ "scanning_engine": {
+ "type": "web_app_scanner",
+ "name": "NewAppCheckScannerMultiple",
+ "_meta": {
+ "ordered_keys": [
+ "type",
+ "name"
+ ]
+ }
+ },
+ "_meta": {
+ "ordered_keys": [
+ "discovered_after",
+ "scanning_engine"
+ ]
+ }
+ },
+ "modified": "2024-08-06T13:59:31.939000",
+ "first_detected_at": "2024-08-06T13:59:31.876000",
+ "cvss_v4_impact": "1.0-info",
+ "cves": [],
+ "impact": "1.0-info",
+ "last_detected_at": "2024-08-06T13:59:31.876000",
+ "authenticated": false,
+ "title": "HTTP/2 Protocol: Transfer-Encoding Header Accepted",
+ "epss_base_score": 0.0,
+ "integration": [],
+ "port": 443,
+ "priority": "2.0-low",
+ "results_set_id": [
+ "004c9847d35244cf"
+ ],
+ "details": {
+ "Technical Details": "[[markup]]The target system accepts a **Transfer-Encoding: Chunked** header and body, which is not permitted by the HTTP/2 specification. This can allow HTTP request smuggling in some configurations.",
+ "Messages": "HTTP/2 Request Headers:\n\n:method = POST\r\n:scheme = https\r\n:authority = www.xzzvwy.com\r\n:path = /media/bnhfz2s2/transport-hubs.jpeg?width=768&height=505&mode=crop&format=webp&quality=60\r\naccept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nreferer: https://www.xzzvwy.com/vidyas\r\nuser-agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36\r\naccept-encoding: identity\r\ntransfer-encoding: chunked\r\n\r\n0\r\n\r\n\r\nHTTP/2 Response Headers:\n\n:status: 200\r\ndate: Tue, 06 Aug 2024 13:59:27 GMT\r\ncontent-type: image/webp\r\ncontent-length: 54870\r\ncache-control: public, must-revalidate, max-age=604800\r\netag: \"1da2925345e78d6\"\r\nlast-modified: Thu, 07 Dec 2023 15:51:13 GMT\r\nstrict-transport-security: max-age=31536000; includeSubDomains\r\nx-frame-options: SAMEORIGIN\r\nx-content-type-options: nosniff\r\nx-xss-protection: 1; mode=block\r\ncontent-security-policy: default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' *.facebook.net unpkg.com *.doubleclick.net *.google.com *.google-analytics.com *.googleadservices.com *.googletagmanager.com *.gstatic.com *.recaptcha.net; style-src 'self' 'unsafe-inline' unpkg.com; object-src 'none'; base-uri 'self'; connect-src 'self' https://api.postguys.io *.example.com *.bugsnag.com *.google-analytics.com; font-src 'self' data:; frame-src 'self' *.facebook.com *.google.com *.recaptcha.net; img-src 'self' data: *.umbraco.com *.openstreetmap.org *.doubleclick.net *.facebook.com *.google-analytics.com *.google.co.uk *.google.com *.amazonaws.com; manifest-src 'self'; media-src 'self'; worker-src 'none';\r\nx-powered-by: ASP.NET\r\nx-azure-ref: 20240806T135927Z-164ff454849d559tzvqg7w2rmw00000002pg000000001hn2\r\nx-cache: CONFIG_NOCACHE\r\naccept-ranges: bytes\r\ncf-cache-status: DYNAMIC\r\nserver: cloudflare\r\ncf-ray: 8aef8c2b08c8b10a-MAN\r\n\r\n[... binary WebP response body omitted ...]",
+ "description": "The application's resolveSigningKeyBytes method executes an SQL query with executeQuery, at line 91 of /src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java. The application constructs this SQL query by embedding an untrusted string into the query without proper sanitization. The concatenated string is submitted to the database, where it is parsed and executed accordingly.
\n\nAn attacker would be able to inject arbitrary syntax and data into the SQL query, by crafting a malicious payload and providing it via the input get; this input is then read by the resolveSigningKeyBytes method at line 86 of /src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java. This input then flows through the code, into a query and to the database server - without sanitization.
\n\nThis may enable an SQL Injection attack.
\n",
+ "data":
+ {
+ "queryId": 14517067005933136034,
+ "queryName": "SQL_Injection",
+ "group": "Java_High_Risk",
+ "resultHash": "DAGel891N7XdIZndJNSGRnK62UE=",
+ "languageName": "Java",
+ "nodes":
+ [
+ {
+ "id": "1I4S2FV4tiQgJgms1KwOz2Aw5Ck=",
+ "line": 86,
+ "name": "get",
+ "column": 63,
+ "length": 1,
+ "method": "resolveSigningKeyBytes",
+ "nodeID": 60647,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java",
+ "fullName": "org.owasp.webgoat.lessons.jwt.claimmisuse.JWTHeaderKIDEndpoint.anonymous_class_85_37.resolveSigningKeyBytes.header.get",
+ "typeName": "get",
+ "methodLine": 85,
+ "definitions": "0"
+ },
+ {
+ "id": "Dc53B7ZoeVHJC/5yqEC2RyzVOF8=",
+ "line": 86,
+ "name": "kid",
+ "column": 38,
+ "length": 3,
+ "method": "resolveSigningKeyBytes",
+ "nodeID": 60642,
+ "domType": "Declarator",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java",
+ "fullName": "org.owasp.webgoat.lessons.jwt.claimmisuse.JWTHeaderKIDEndpoint.anonymous_class_85_37.resolveSigningKeyBytes.kid",
+ "typeName": "String",
+ "methodLine": 85,
+ "definitions": "1"
+ },
+ {
+ "id": "JVAWf7QUjJd9tNVdBS/3wNG/8Ak=",
+ "line": 92,
+ "name": "kid",
+ "column": 81,
+ "length": 3,
+ "method": "resolveSigningKeyBytes",
+ "nodeID": 60699,
+ "domType": "UnknownReference",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java",
+ "fullName": "org.owasp.webgoat.lessons.jwt.claimmisuse.JWTHeaderKIDEndpoint.anonymous_class_85_37.resolveSigningKeyBytes.kid",
+ "typeName": "String",
+ "methodLine": 85,
+ "definitions": "1"
+ },
+ {
+ "id": "R/lvRgYEd8lM5F0e7mLmGByWzO8=",
+ "line": 91,
+ "name": "executeQuery",
+ "column": 48,
+ "length": 1,
+ "method": "resolveSigningKeyBytes",
+ "nodeID": 60692,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/jwt/claimmisuse/JWTHeaderKIDEndpoint.java",
+ "fullName": "org.owasp.webgoat.lessons.jwt.claimmisuse.JWTHeaderKIDEndpoint.anonymous_class_85_37.resolveSigningKeyBytes.executeQuery",
+ "typeName": "executeQuery",
+ "methodLine": 85,
+ "definitions": "0"
+ }
+ ]
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": 89,
+ "cvss":
+ {},
+ "compliances":
+ [
+ "OWASP Top 10 API",
+ "OWASP Mobile Top 10 2016",
+ "ASA Mobile Premium",
+ "MOIS(KISA) Secure Coding 2021",
+ "OWASP Top 10 2013",
+ "FISMA 2014",
+ "CWE top 25",
+ "NIST SP 800-53",
+ "Top Tier",
+ "ASA Premium",
+ "ASD STIG 5.3",
+ "SANS top 25",
+ "PCI DSS v4.0",
+ "OWASP Top 10 2021",
+ "OWASP ASVS",
+ "Base Preset",
+ "PCI DSS v3.2.1"
+ ]
+ }
+ },
+ {
+ "type": "sast",
+ "label": "sast",
+ "id": "111914462",
+ "similarityId": "1978850937",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-15T10:28:30Z",
+ "firstFoundAt": "2023-04-24T09:17:25Z",
+ "foundAt": "2024-08-15T10:28:30Z",
+ "firstScanId": "e6b8bb67-65a1-4d0e-b952-b0d5477fb8c3",
+ "description": "The application's login method executes an SQL query with executeQuery, at line 65 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. The application constructs this SQL query by embedding an untrusted string into the query without proper sanitization. The concatenated string is submitted to the database, where it is parsed and executed accordingly.\n\nAn attacker would be able to inject arbitrary syntax and data into the SQL query, by crafting a malicious payload and providing it via the input username_login; this input is then read by the login method at line 50 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. This input then flows through the code, into a query and to the database server - without sanitization.\r\n\r\nThis may enable an SQL Injection attack.\n\n",
+ "descriptionHTML": "The application‘s login method executes an SQL query with executeQuery, at line 65 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. The application constructs this SQL query by embedding an untrusted string into the query without proper sanitization. The concatenated string is submitted to the database, where it is parsed and executed accordingly.
\n\nAn attacker would be able to inject arbitrary syntax and data into the SQL query, by crafting a malicious payload and providing it via the input username_login; this input is then read by the login method at line 50 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. This input then flows through the code, into a query and to the database server - without sanitization.
\n\nThis may enable an SQL Injection attack.
\n",
+ "data":
+ {
+ "queryId": 14517067005933136034,
+ "queryName": "SQL_Injection",
+ "group": "Java_High_Risk",
+ "resultHash": "axoLDacn79rEUgFJS9/1KMVM+4E=",
+ "languageName": "Java",
+ "nodes":
+ [
+ {
+ "id": "JFB0AWnxYt6IvoFGiyulVcIQnvI=",
+ "line": 50,
+ "name": "username_login",
+ "column": 28,
+ "length": 14,
+ "method": "login",
+ "nodeID": 39508,
+ "domType": "ParamDecl",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.username_login",
+ "typeName": "String",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "XqtJe9wzm2bW0OW7dp0lSk43pJ8=",
+ "line": 61,
+ "name": "username_login",
+ "column": 21,
+ "length": 14,
+ "method": "login",
+ "nodeID": 39398,
+ "domType": "UnknownReference",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.username_login",
+ "typeName": "String",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "ogE7PvqIghqjQ+gIjwyUy8PXpZQ=",
+ "line": 59,
+ "name": "prepareStatement",
+ "column": 38,
+ "length": 1,
+ "method": "login",
+ "nodeID": 39389,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.connection.prepareStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "hgfFQ7qtYVL801yUa1A0OkzWg/s=",
+ "line": 58,
+ "name": "statement",
+ "column": 25,
+ "length": 9,
+ "method": "login",
+ "nodeID": 39385,
+ "domType": "Declarator",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement",
+ "typeName": "PreparedStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "7Nci6hR/VYUj+TB2XoyUncY/XnQ=",
+ "line": 65,
+ "name": "statement",
+ "column": 29,
+ "length": 9,
+ "method": "login",
+ "nodeID": 39409,
+ "domType": "UnknownReference",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement",
+ "typeName": "PreparedStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "pSrcWFrBQDSE5DjqElG2LhLTSeI=",
+ "line": 65,
+ "name": "executeQuery",
+ "column": 51,
+ "length": 1,
+ "method": "login",
+ "nodeID": 39412,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement.executeQuery",
+ "typeName": "executeQuery",
+ "methodLine": 49,
+ "definitions": "0"
+ }
+ ]
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": 89,
+ "cvss":
+ {},
+ "compliances":
+ [
+ "OWASP Top 10 API",
+ "OWASP Mobile Top 10 2016",
+ "ASA Mobile Premium",
+ "MOIS(KISA) Secure Coding 2021",
+ "OWASP Top 10 2013",
+ "FISMA 2014",
+ "CWE top 25",
+ "NIST SP 800-53",
+ "Top Tier",
+ "ASA Premium",
+ "ASD STIG 5.3",
+ "SANS top 25",
+ "PCI DSS v4.0",
+ "OWASP Top 10 2021",
+ "OWASP ASVS",
+ "Base Preset",
+ "PCI DSS v3.2.1"
+ ]
+ }
+ },
+ {
+ "type": "sast",
+ "label": "sast",
+ "id": "111914463",
+ "similarityId": "2009875372",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-15T10:28:30Z",
+ "firstFoundAt": "2023-04-24T09:17:25Z",
+ "foundAt": "2024-08-15T10:28:30Z",
+ "firstScanId": "e6b8bb67-65a1-4d0e-b952-b0d5477fb8c3",
+ "description": "The application's login method executes an SQL query with executeQuery, at line 65 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. The application constructs this SQL query by embedding an untrusted string into the query without proper sanitization. The concatenated string is submitted to the database, where it is parsed and executed accordingly.\n\nAn attacker would be able to inject arbitrary syntax and data into the SQL query, by crafting a malicious payload and providing it via the input password_login; this input is then read by the login method at line 50 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. This input then flows through the code, into a query and to the database server - without sanitization.\r\n\r\nThis may enable an SQL Injection attack.\n\n",
+ "descriptionHTML": "The application‘s login method executes an SQL query with executeQuery, at line 65 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. The application constructs this SQL query by embedding an untrusted string into the query without proper sanitization. The concatenated string is submitted to the database, where it is parsed and executed accordingly.
\n\nAn attacker would be able to inject arbitrary syntax and data into the SQL query, by crafting a malicious payload and providing it via the input password_login; this input is then read by the login method at line 50 of /src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java. This input then flows through the code, into a query and to the database server - without sanitization.
\n\nThis may enable an SQL Injection attack.
\n",
+ "data":
+ {
+ "queryId": 14517067005933136034,
+ "queryName": "SQL_Injection",
+ "group": "Java_High_Risk",
+ "resultHash": "srq19vKLsuPLGGpeNmQuYcO9MQs=",
+ "languageName": "Java",
+ "nodes":
+ [
+ {
+ "id": "ZBwJ2ZVWa5USjOoFeYkm20gq4xg=",
+ "line": 50,
+ "name": "password_login",
+ "column": 65,
+ "length": 14,
+ "method": "login",
+ "nodeID": 39517,
+ "domType": "ParamDecl",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.password_login",
+ "typeName": "String",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "MSmOGLj52rK6EKKANJTMDzTUzq8=",
+ "line": 63,
+ "name": "password_login",
+ "column": 21,
+ "length": 14,
+ "method": "login",
+ "nodeID": 39400,
+ "domType": "UnknownReference",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.password_login",
+ "typeName": "String",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "ogE7PvqIghqjQ+gIjwyUy8PXpZQ=",
+ "line": 59,
+ "name": "prepareStatement",
+ "column": 38,
+ "length": 1,
+ "method": "login",
+ "nodeID": 39389,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.connection.prepareStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "hgfFQ7qtYVL801yUa1A0OkzWg/s=",
+ "line": 58,
+ "name": "statement",
+ "column": 25,
+ "length": 9,
+ "method": "login",
+ "nodeID": 39385,
+ "domType": "Declarator",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement",
+ "typeName": "PreparedStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "7Nci6hR/VYUj+TB2XoyUncY/XnQ=",
+ "line": 65,
+ "name": "statement",
+ "column": 29,
+ "length": 9,
+ "method": "login",
+ "nodeID": 39409,
+ "domType": "UnknownReference",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement",
+ "typeName": "PreparedStatement",
+ "methodLine": 49,
+ "definitions": "1"
+ },
+ {
+ "id": "pSrcWFrBQDSE5DjqElG2LhLTSeI=",
+ "line": 65,
+ "name": "executeQuery",
+ "column": 51,
+ "length": 1,
+ "method": "login",
+ "nodeID": 39412,
+ "domType": "MethodInvokeExpr",
+ "fileName": "/src/main/java/org/owasp/webgoat/lessons/challenges/challenge5/Assignment5.java",
+ "fullName": "org.owasp.webgoat.lessons.challenges.challenge5.Assignment5.login.statement.executeQuery",
+ "typeName": "executeQuery",
+ "methodLine": 49,
+ "definitions": "0"
+ }
+ ]
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": 89,
+ "cvss":
+ {},
+ "compliances":
+ [
+ "OWASP Top 10 API",
+ "OWASP Mobile Top 10 2016",
+ "ASA Mobile Premium",
+ "MOIS(KISA) Secure Coding 2021",
+ "OWASP Top 10 2013",
+ "FISMA 2014",
+ "CWE top 25",
+ "NIST SP 800-53",
+ "Top Tier",
+ "ASA Premium",
+ "ASD STIG 5.3",
+ "SANS top 25",
+ "PCI DSS v4.0",
+ "OWASP Top 10 2021",
+ "OWASP ASVS",
+ "Base Preset",
+ "PCI DSS v3.2.1"
+ ]
+ }
+ },
+ {
+ "type": "kics",
+ "label": "IaC Security",
+ "id": "116516234",
+ "similarityId": "531bf8e9771fc9a38b866afcdc86e10dd80487262d98baf44f82522516f4db9e",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-15T10:25:09Z",
+ "firstFoundAt": "2023-04-25T09:32:08Z",
+ "foundAt": "2024-08-15T10:25:09Z",
+ "firstScanId": "a2a9edd0-5e9c-40dc-a2ba-c65a7b850f6f",
+ "description": "A user should be specified in the dockerfile, otherwise the image will run as root",
+ "descriptionHTML": "A user should be specified in the dockerfile, otherwise the image will run as root
\n",
+ "data":
+ {
+ "queryId": "fd54f200-402c-4333-a5a4-36ef6709af2f [Taken from query_id]",
+ "queryName": "Missing User Instruction",
+ "group": "Build Process [Taken from category]",
+ "line": 1,
+ "platform": "Dockerfile",
+ "issueType": "MissingAttribute",
+ "expectedValue": "The 'Dockerfile' should contain the 'USER' instruction",
+ "value": "The 'Dockerfile' does not contain any 'USER' instruction",
+ "filename": "/Dockerfile_desktop"
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cvss":
+ {}
+ }
+ },
+ {
+ "type": "sca",
+ "scaType": "Vulnerability",
+ "label": "sca",
+ "id": "CVE-2013-7285",
+ "similarityId": "CVE-2013-7285",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-15T10:30:12Z",
+ "firstFoundAt": "2024-08-15T10:30:12Z",
+ "foundAt": "2024-08-15T10:30:12Z",
+ "firstScanId": "e1838db6-4950-4f98-9850-cc0df3660a9b",
+ "description": "Xstream API versions up to 1.4.6, if the security framework has not been initialized, may allow a remote attacker to run arbitrary shell commands by manipulating the processed input stream when unmarshaling XML or any supported format. e.g. JSON.",
+ "descriptionHTML": "Xstream API versions up to 1.4.6, if the security framework has not been initialized, may allow a remote attacker to run arbitrary shell commands by manipulating the processed input stream when unmarshaling XML or any supported format. e.g. JSON.
\n",
+ "data":
+ {
+ "packageData":
+ [
+ {
+ "comment": "https://github.com/advisories/GHSA-f554-x222-wgf7",
+ "type": "Advisory",
+ "url": "https://github.com/advisories/GHSA-f554-x222-wgf7"
+ },
+ {
+ "comment": "https://x-stream.github.io/CVE-2013-7285.html",
+ "type": "Advisory",
+ "url": "https://x-stream.github.io/CVE-2013-7285.html"
+ },
+ {
+ "comment": "http://blog.diniscruz.com/2013/12/xstream-remote-code-execution-exploit.html",
+ "type": "Disclosure",
+ "url": "http://blog.diniscruz.com/2013/12/xstream-remote-code-execution-exploit.html"
+ },
+ {
+ "comment": "https://www.mail-archive.com/user@xstream.codehaus.org/msg00604.html",
+ "type": "Mail Thread",
+ "url": "https://www.mail-archive.com/user@xstream.codehaus.org/msg00604.html"
+ },
+ {
+ "comment": "https://www.mail-archive.com/user@xstream.codehaus.org/msg00607.html",
+ "type": "Mail Thread",
+ "url": "https://www.mail-archive.com/user@xstream.codehaus.org/msg00607.html"
+ },
+ {
+ "comment": "https://github.com/x-stream/xstream/commit/94666ae6dfe839410c73bdfeeb211374f04a2059",
+ "type": "Commit",
+ "url": "https://github.com/x-stream/xstream/commit/94666ae6dfe839410c73bdfeeb211374f04a2059"
+ }
+ ],
+ "packageIdentifier": "Maven-com.thoughtworks.xstream:xstream-1.4.5",
+ "scaPackageData":
+ {
+ "id": "Maven-com.thoughtworks.xstream:xstream-1.4.5",
+ "fixLink": "https://devhub.checkmarx.com/cve-details/CVE-2013-7285",
+ "locations":
+ [
+ "pom.xml"
+ ],
+ "dependencyPaths":
+ [
+ [
+ {
+ "id": "Maven-com.thoughtworks.xstream:xstream-1.4.5",
+ "name": "com.thoughtworks.xstream:xstream",
+ "locations":
+ [
+ "pom.xml"
+ ]
+ }
+ ]
+ ],
+ "outdated": true,
+ "supportsQuickFix": false,
+ "isDirectDependency": true,
+ "typeOfDependency": "Direct Dependency"
+ },
+ "recommendedVersion": "1.4.17.redhat-00001"
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": "CWE-78",
+ "cvssScore": 9.8,
+ "cveName": "CVE-2013-7285",
+ "cvss":
+ {
+ "version": 3,
+ "attackVector": "NETWORK",
+ "availability": "HIGH",
+ "confidentiality": "HIGH",
+ "attackComplexity": "LOW"
+ }
+ }
+ },
+ {
+ "type": "sca",
+ "scaType": "Vulnerability",
+ "label": "sca",
+ "id": "CVE-2016-10707",
+ "similarityId": "CVE-2016-10707",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-15T10:30:12Z",
+ "firstFoundAt": "2024-08-15T10:30:12Z",
+ "foundAt": "2024-08-15T10:30:12Z",
+ "firstScanId": "e1838db6-4950-4f98-9850-cc0df3660a9b",
+ "description": "Versions prior to 3.0.0 in jQuery are vulnerable to Denial of Service (DoS) due to removing a logic that lowercased attribute names. Any attribute getter using a mixed-cased name for boolean attributes goes into an infinite recursion, exceeding the stack call limit.",
+ "descriptionHTML": "Versions prior to 3.0.0 in jQuery are vulnerable to Denial of Service (DoS) due to removing a logic that lowercased attribute names. Any attribute getter using a mixed-cased name for boolean attributes goes into an infinite recursion, exceeding the stack call limit.
\n",
+ "data":
+ {
+ "packageData":
+ [
+ {
+ "comment": "https://www.npmjs.com/advisories/330",
+ "type": "Advisory",
+ "url": "https://www.npmjs.com/advisories/330"
+ },
+ {
+ "comment": "https://github.com/jquery/jquery/issues/3133",
+ "type": "Issue",
+ "url": "https://github.com/jquery/jquery/issues/3133"
+ },
+ {
+ "comment": "https://github.com/jquery/jquery/pull/3134",
+ "type": "Pull request",
+ "url": "https://github.com/jquery/jquery/pull/3134"
+ },
+ {
+ "comment": "https://github.com/jquery/jquery/pull/3134/commits/e06fda69f00082b44fd39ce8e851f72d29999011",
+ "type": "Commit",
+ "url": "https://github.com/jquery/jquery/pull/3134/commits/e06fda69f00082b44fd39ce8e851f72d29999011"
+ },
+ {
+ "comment": "https://github.com/advisories/GHSA-mhpp-875w-9cpv",
+ "type": "Advisory",
+ "url": "https://github.com/advisories/GHSA-mhpp-875w-9cpv"
+ }
+ ],
+ "packageIdentifier": "Npm-jquery-2.1.4",
+ "scaPackageData":
+ {
+ "id": "Npm-jquery-2.1.4",
+ "fixLink": "https://devhub.checkmarx.com/cve-details/CVE-2016-10707",
+ "locations":
+ [
+ "src/main/resources/webgoat/static/js/libs/jquery-2.1.4.min.js"
+ ],
+ "dependencyPaths":
+ [
+ [
+ {
+ "id": "Npm-jquery-2.1.4",
+ "name": "jquery",
+ "locations":
+ [
+ "src/main/resources/webgoat/static/js/libs/jquery-2.1.4.min.js"
+ ]
+ }
+ ]
+ ],
+ "outdated": true,
+ "supportsQuickFix": false,
+ "isDirectDependency": true,
+ "typeOfDependency": "Direct Dependency"
+ },
+ "recommendedVersion": "3.5.0"
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": "CWE-674",
+ "cvssScore": 7.5,
+ "cveName": "CVE-2016-10707",
+ "cvss":
+ {
+ "version": 3,
+ "attackVector": "NETWORK",
+ "availability": "HIGH",
+ "confidentiality": "NONE",
+ "attackComplexity": "LOW"
+ }
+ }
+ },
+ {
+ "type": "sca",
+ "scaType": "Vulnerability",
+ "label": "sca",
+ "id": "CVE-2005-2541",
+ "similarityId": "CVE-2005-2541",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-08T14:40:37Z",
+ "firstFoundAt": "2024-08-07T15:47:19Z",
+ "foundAt": "2024-08-08T14:40:37Z",
+ "firstScanId": "9589830b-3986-4082-a3ea-c1ac50dbf7bd",
+ "description": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.",
+ "descriptionHTML": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.
\n",
+ "data":
+ {
+ "packageName": "tar",
+ "packageVersion": "1.34+dfsg-1+deb11u1"
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cweId": "NVD-CWE-Other",
+ "cvss":
+ {
+ "attackVector": "NETWORK",
+ "availability": "COMPLETE",
+ "attackComplexity": "LOW",
+ "integrityImpact": "COMPLETE"
+ }
+ }
+ },
+ {
+ "type": "kics",
+ "label": "IaC Security",
+ "id": "sZzpSZBOgluFm7HfxuClfXrS4MI=",
+ "similarityId": "e914e3bbbca66ba2a33a24ba42f35ef927d29988f5ffd9300d624e3d091f7ae1",
+ "status": "RECURRENT",
+ "state": "TO_VERIFY",
+ "severity": "HIGH",
+ "created": "2024-08-08T14:33:42Z",
+ "firstFoundAt": "2024-08-07T17:10:54Z",
+ "foundAt": "2024-08-08T14:33:42Z",
+ "firstScanId": "22a8082f-c0df-4d2e-b719-56e72fa76e21",
+ "description": "A user should be specified in the dockerfile, otherwise the image will run as root",
+ "descriptionHTML": "A user should be specified in the dockerfile, otherwise the image will run as root
\n",
+ "data":
+ {
+ "queryId": "fd54f200-402c-4333-a5a4-36ef6709af2f [Taken from query_id]",
+ "queryName": "Missing User Instruction",
+ "group": "Build Process [Taken from category]",
+ "line": 1,
+ "platform": "Dockerfile",
+ "issueType": "MissingAttribute",
+ "expectedValue": "The 'Dockerfile' should contain the 'USER' instruction",
+ "value": "The 'Dockerfile' does not contain any 'USER' instruction",
+ "filename": "/src/Test.Test/Dockerfile"
+ },
+ "comments":
+ {},
+ "vulnerabilityDetails":
+ {
+ "cvss":
+ {}
+ }
+ }
+ ],
+ "totalCount": 69,
+ "scanID": "2bbbe034-7664-4117-8d1a-4b4eb01a569a"
+}
\ No newline at end of file
diff --git a/unittests/scans/legitify/legitify_many_findings.json b/unittests/scans/legitify/legitify_many_findings.json
new file mode 100644
index 00000000000..93bed4f7141
--- /dev/null
+++ b/unittests/scans/legitify/legitify_many_findings.json
@@ -0,0 +1,895 @@
+{
+ "type": "flattened",
+ "content": {
+ "data.repository.actions_can_approve_pull_requests": {
+ "policyInfo": {
+ "title": "Workflows Should Not Be Allowed To Approve Pull Requests",
+ "description": "The default GitHub Actions configuration allows for workflows to approve pull requests. This could allow users to bypass code-review restrictions.",
+ "policyName": "actions_can_approve_pull_requests",
+ "fullyQualifiedPolicyName": "data.repository.actions_can_approve_pull_requests",
+ "severity": "HIGH",
+ "threat": [
+ "Attackers can exploit this misconfiguration to bypass code-review restrictions by creating a workflow that approves their own pull request and then merging the pull request without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Uncheck 'Allow GitHub actions to create and approve pull requests.'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management system's built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_required",
+ "severity": "HIGH",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 1 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.repository_not_maintained": {
+ "policyInfo": {
+ "title": "Repository Should Be Updated At Least Quarterly",
+ "description": "A project which is not actively maintained may not be patched against security issues within its code and dependencies, and is therefore at higher risk of including known vulnerabilities.",
+ "policyName": "repository_not_maintained",
+ "fullyQualifiedPolicyName": "data.repository.repository_not_maintained",
+ "severity": "HIGH",
+ "threat": [
+ "As new vulnerabilities are found over time, unmaintained repositories are more likely to point to dependencies that have known vulnerabilities, exposing these repositories to 1-day attacks."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Either Delete or Archive the repository"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.code_review_by_two_members_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review By At Least Two Reviewers",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_by_two_members_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_by_two_members_not_required",
+ "severity": "MEDIUM",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production.",
+ "Requiring code review by at least two reviewers further decreases the risk of an insider threat (as merging code requires compromising at least 2 identities with write permissions), and decreases the likelihood of human error in the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 2 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.ghas_dependency_review_not_enabled": {
+ "policyInfo": {
+ "title": "GitHub Advanced Security – Dependency Review Should Be Enabled For A Repository",
+ "description": "Enable GitHub Advanced Security dependency review to avoid introducing new vulnerabilities and detect newly discovered vulnerabilities in existing packages.",
+ "policyName": "ghas_dependency_review_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.ghas_dependency_review_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "A contributor may add vulnerable third-party dependencies to the repository, introducing vulnerabilities to your application that will only be detected after merge."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependency graph' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection": {
+ "policyInfo": {
+ "title": "Default Branch Should Be Protected",
+ "description": "Branch protection is not enabled for this repository’s default branch. Protecting branches ensures new code changes must go through a controlled merge process and allows enforcement of code review as well as other security tests. This issue is raised if the default branch protection is turned off.",
+ "policyName": "missing_default_branch_protection",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection",
+ "severity": "MEDIUM",
+ "threat": [
+ "Any contributor with write access may push potentially dangerous code to this repository, making it easier to compromise and difficult to audit."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Add rule'",
+ "6. Set 'Branch name pattern' as the default branch name (usually 'main' or 'master')",
+ "7. Set desired protections",
+ "8. Click 'Create' and save the rule"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_deletion": {
+ "policyInfo": {
+ "title": "Default Branch Deletion Protection Should Be Enabled",
+ "description": "The history of the default branch is not protected against deletion for this repository.",
+ "policyName": "missing_default_branch_protection_deletion",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_deletion",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow deletions', Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_force_push": {
+ "policyInfo": {
+ "title": "Default Branch Should Not Allow Force Pushes",
+ "description": "The history of the default branch is not protected against changes for this repository. Protecting branch history ensures every change that was made to code can be retained and later examined. This issue is raised if the default branch history can be modified using force push.",
+ "policyName": "missing_default_branch_protection_force_push",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_force_push",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow force pushes'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.non_linear_history": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Linear History",
+ "description": "Prevent merge commits from being pushed to protected branches.",
+ "policyName": "non_linear_history",
+ "fullyQualifiedPolicyName": "data.repository.non_linear_history",
+ "severity": "MEDIUM",
+ "threat": [
+ "Having a non-linear history makes it harder to reverse changes, making recovery from bugs and security risks slower and more difficult."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require linear history'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.repository_secret_is_stale": {
+ "policyInfo": {
+ "title": "Repository Secrets Should Be Updated At Least Yearly",
+ "description": "Some of the repository secrets have not been updated for over a year. It is recommended to refresh secret values regularly in order to minimize the risk of breach in case of an information leak.",
+ "policyName": "repository_secret_is_stale",
+ "fullyQualifiedPolicyName": "data.repository.repository_secret_is_stale",
+ "severity": "MEDIUM",
+ "threat": [
+ "Sensitive data may have been inadvertently made public in the past, and an attacker who holds this data may gain access to your current CI and services. In addition, there may be old or unnecessary tokens that have not been inspected and can be used to access sensitive information."
+ ],
+ "remediationSteps": [
+ "1. Enter your repository's landing page",
+ "2. Go to the settings tab",
+ "3. Under the 'Security' title on the left, choose 'Secrets and variables'",
+ "4. Click 'Actions'",
+ "5. Sort secrets by 'Last Updated'",
+ "6. Regenerate every secret older than one year and add the new value to GitHub's secret manager"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "secretsList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.requires_branches_up_to_date_before_merge": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Branches To Be Up To Date Before Merge",
+ "description": "Status checks are required, but branches that are not up to date can be merged. This can result in previously remediated issues being merged in over fixes.",
+ "policyName": "requires_branches_up_to_date_before_merge",
+ "fullyQualifiedPolicyName": "data.repository.requires_branches_up_to_date_before_merge",
+ "severity": "MEDIUM",
+ "threat": [
+ "Required status checks may be failing on the latest version after passing on an earlier version of the code, making it easy to commit buggy or otherwise insecure code."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Check 'Require branches to be up to date before merging'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.requires_status_checks": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Checks To Pass Before Merge",
+ "description": "Branch protection is enabled. However, the checks that validate the quality and security of the code are not required to pass before submitting new changes. The default check ensures the code is up-to-date to prevent faulty merges and unexpected behaviors, as well as other custom checks that test security and quality. It is advised to turn this control on to ensure any existing or future check will be required to pass.",
+ "policyName": "requires_status_checks",
+ "fullyQualifiedPolicyName": "data.repository.requires_status_checks",
+ "severity": "MEDIUM",
+ "threat": [
+ "Not defining a set of required status checks can make it easy for contributors to introduce buggy or insecure code as manual review, whether mandated or optional, is the only line of defense."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Add the required checks that must pass before merging (tests, lint, etc...)",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.scorecard_score_too_low": {
+ "policyInfo": {
+ "title": "OSSF Scorecard Score Should Be Above 7",
+ "description": "Scorecard is an open-source tool from the OSSF that helps to assess the security posture of repositories. A low scorecard score means your repository may be at risk.",
+ "policyName": "scorecard_score_too_low",
+ "fullyQualifiedPolicyName": "data.repository.scorecard_score_too_low",
+ "severity": "MEDIUM",
+ "threat": [
+ "A low Scorecard score can indicate that the repository is more vulnerable to attack than others, making it a prime attack target."
+ ],
+ "remediationSteps": [
+ "2. - Run legitify with --scorecard verbose",
+ "3. - Run scorecard manually",
+ "4. Fix the failed checks"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.secret_scanning_not_enabled": {
+ "policyInfo": {
+ "title": "Secret Scanning should be enabled",
+ "description": "Repository should have secret scanning enabled. Secret scanning helps prevent the exposure of sensitive information and ensures compliance.",
+ "policyName": "secret_scanning_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.secret_scanning_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "Exposed secrets increases the risk of sensitive information such as API keys, passwords, and tokens being disclosed, leading to unauthorized access to systems and services, and data breaches."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under the 'Security' title on the left, select 'Code security and analysis'",
+ "3. Under 'Secret scanning', click 'Enable'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.token_default_permissions_is_read_write": {
+ "policyInfo": {
+ "title": "Default Workflow Token Permission Should Be Set To Read Only",
+ "description": "The default GitHub Action workflow token permission is set to read-write. When creating workflow tokens, it is highly recommended to follow the Principle of Least Privilege and force workflow authors to specify explicitly which permissions they need.",
+ "policyName": "token_default_permissions_is_read_write",
+ "fullyQualifiedPolicyName": "data.repository.token_default_permissions_is_read_write",
+ "severity": "MEDIUM",
+ "threat": [
+ "In case of token compromise (due to a vulnerability or malicious third-party GitHub actions), an attacker can use this token to sabotage various assets in your CI/CD pipeline, such as packages, pull-requests, deployments, and more."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Select 'Read repository contents permission'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.users_allowed_to_bypass_ruleset": {
+ "policyInfo": {
+ "title": "Users Are Allowed To Bypass Ruleset Rules",
+ "description": "Rulesets rules are not enforced for some users. When defining rulesets it is recommended to make sure that no one is allowed to bypass these rules in order to avoid inadvertent or intentional alterations to critical code which can lead to potential errors or vulnerabilities in the software.",
+ "policyName": "users_allowed_to_bypass_ruleset",
+ "fullyQualifiedPolicyName": "data.repository.users_allowed_to_bypass_ruleset",
+ "severity": "MEDIUM",
+ "threat": [
+ "Attackers that gain access to a user that can bypass the ruleset rules can compromise the codebase without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under 'Code and automation', select 'Rules -\u003e Rulesets'",
+ "3. Find the relevant ruleset",
+ "4. Empty the 'Bypass list'",
+ "5. Press 'Save Changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.vulnerability_alerts_not_enabled": {
+ "policyInfo": {
+ "title": "Vulnerability Alerts Should Be Enabled",
+ "description": "Enable GitHub Dependabot to regularly scan for open source vulnerabilities.",
+ "policyName": "vulnerability_alerts_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.vulnerability_alerts_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "An open source vulnerability may be affecting your code without your knowledge, making it vulnerable to exploitation."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependabot alerts' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_limited_to_code_owners": {
+ "policyInfo": {
+ "title": "Default Branch Should Limit Code Review to Code-Owners",
+ "description": "It is recommended to require code review only from designated individuals specified in CODEOWNERS file. Turning this option on enforces that only the allowed owners can approve a code change. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_limited_to_code_owners",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_limited_to_code_owners",
+ "severity": "LOW",
+ "threat": [
+ "A pull request may be approved by any contributor with write access. Specifying specific code owners can ensure review is only done by individuals with the correct expertise required for the review of the changed files, potentially preventing bugs and security risks."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require review from Code Owners'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.dismisses_stale_reviews": {
+ "policyInfo": {
+ "title": "Default Branch Should Require New Code Changes After Approval To Be Re-Approved",
+ "description": "This security control prevents merging code that was approved but later on changed. Turning it on ensures any new changes must be reviewed again. This setting is part of the branch protection and code-review settings, and hardens the review process. If turned off - a developer can change the code after approval, and push code that is different from the one that was previously allowed. This option is found in the branch protection setting for the repository.",
+ "policyName": "dismisses_stale_reviews",
+ "fullyQualifiedPolicyName": "data.repository.dismisses_stale_reviews",
+ "severity": "LOW",
+ "threat": [
+ "Buggy or insecure code may be committed after approval and will reach the main branch without review. Alternatively, an attacker can attempt a just-in-time attack to introduce dangerous code just before merge."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Dismiss stale pull request approvals when new commits are pushed'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.forking_allowed_for_repository": {
+ "policyInfo": {
+ "title": "Forking Should Not Be Allowed for Private/Internal Repositories",
+ "description": "Forking private or internal repositories can lead to unauthorized spread and potential exposure of sensitive source code. It is recommended to disable forking for private repositories in the repository or the organization configuration to maintain control over the source code. If forking is necessary, it should be enabled selectively by admins for specific collaboration needs on private repositories.",
+ "policyName": "forking_allowed_for_repository",
+ "fullyQualifiedPolicyName": "data.repository.forking_allowed_for_repository",
+ "severity": "LOW",
+ "threat": [
+ "Forked repositories cause more code and secret sprawl in the organization as forks are independent copies of the repository and need to be tracked separately, making it more difficult to keep track of sensitive assets and contain potential incidents."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'General' tab",
+ "4. Under 'Features', Toggle off 'Allow forking'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.no_conversation_resolution": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Conversations To Be Resolved Before Merge",
+ "description": "Require all Pull Request conversations to be resolved before merging. Check this to avoid bypassing/missing a Pull Request comment.",
+ "policyName": "no_conversation_resolution",
+ "fullyQualifiedPolicyName": "data.repository.no_conversation_resolution",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the merging of code without resolving all conversations can promote poor and vulnerable code, as important comments may be forgotten or deliberately ignored when the code is merged."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require conversation resolution before merging'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.no_signed_commits": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Commits To Be Signed",
+ "description": "Require all commits to be signed and verified",
+ "policyName": "no_signed_commits",
+ "fullyQualifiedPolicyName": "data.repository.no_signed_commits",
+ "severity": "LOW",
+ "threat": [
+ "A commit containing malicious code may be crafted by a malicious actor that has acquired write access to the repository to initiate a supply chain attack. Commit signing provides another layer of defense that can prevent this type of compromise."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require signed commits'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.pushes_are_not_restricted": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Push To It",
+ "description": "By default, commits can be pushed directly to protected branches without going through a Pull Request. Restrict who can push commits to protected branches so that commits can be added only via merges, which require Pull Request.",
+ "policyName": "pushes_are_not_restricted",
+ "fullyQualifiedPolicyName": "data.repository.pushes_are_not_restricted",
+ "severity": "LOW",
+ "threat": [
+ "An attacker with write credentials may introduce vulnerabilities to your code without your knowledge. Alternatively, contributors may commit unsafe code that is buggy or easy to exploit that could have been caught using a review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can push to matching branches'",
+ "7. Choose who should be allowed to push",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.repository_has_too_many_admins": {
+ "policyInfo": {
+ "title": "Repository Should Have A Low Admin Count",
+ "description": "Repository admins are highly privileged and could create great damage if they are compromised. It is recommended to limit the number of repository admins to the minimum required, and no more than 5% of the userbase (Up to 3 admins are always allowed).",
+ "policyName": "repository_has_too_many_admins",
+ "fullyQualifiedPolicyName": "data.repository.repository_has_too_many_admins",
+ "severity": "LOW",
+ "threat": [
+ "A compromised user with admin permissions can initiate a supply chain attack in a plethora of ways.",
+ "Having many admin users increases the overall risk of user compromise, and makes it more likely to lose track of unused admin permissions given to users in the past."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repository settings page",
+ "3. Press 'Collaborators and teams'",
+ "4. Select the unwanted admin users",
+ "5. Select 'Change Role'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_doesnt_require_ssl": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured To Use SSL",
+ "description": "Webhooks that are not configured with SSL enabled could expose your software to man-in-the-middle attacks (MITM).",
+ "policyName": "repository_webhook_doesnt_require_ssl",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_doesnt_require_ssl",
+ "severity": "LOW",
+ "threat": [
+ "If SSL verification is disabled, any party with access to the target DNS domain can masquerade as your designated payload URL, allowing it to freely read and affect the response of any webhook request.",
+ "In the case of GitHub Enterprise Server instances, it may be sufficient only to control the DNS configuration of the network where the instance is deployed, as an attacker can redirect traffic to the target domain in your internal network directly to them, and this is often much easier than compromising an internet-facing domain."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Verify URL starts with https",
+ "5. Press on the insecure webhook",
+ "6. Enable 'SSL verification'",
+ "7. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_no_secret": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured With A Secret",
+ "description": "Webhooks are not configured with a shared secret to validate the origin and content of the request. This could allow your webhook to be triggered by any bad actor with the URL.",
+ "policyName": "repository_webhook_no_secret",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_no_secret",
+ "severity": "LOW",
+ "threat": [
+ "Not using a webhook secret makes the service receiving the webhook unable to determine the authenticity of the request.",
+ "This allows attackers to masquerade as your repository, potentially creating an unstable or insecure state in other systems."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Press on the insecure webhook",
+ "5. Configure a secret",
+ "6. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.review_dismissal_allowed": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Dismiss Reviews",
+ "description": "Any user with write access to the repository can dismiss pull-request reviews. Pull-request review contains essential information on the work that needs to be done and helps keep track of the changes. Dismissing it might cause a loss of this information and should be restricted to a limited number of users.",
+ "policyName": "review_dismissal_allowed",
+ "fullyQualifiedPolicyName": "data.repository.review_dismissal_allowed",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the dismissal of reviews can promote poor and vulnerable code, as important comments may be forgotten and ignored during the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can dismiss pull request reviews'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ }
+ }
+ }
\ No newline at end of file
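
Taken together, these fixtures pin down the shape a parser has to handle: a flattened Legitify report is a single JSON object whose "content" maps each fully qualified policy name to a "policyInfo" block (title, description, severity, threat, remediation steps) plus a "violations" list whose entries carry a "status" of FAILED, PASSED, or SKIPPED. Below is a minimal sketch of that traversal, grounded only in the fields visible in these files; the function name, the returned dictionaries, and the severity capitalization are illustrative assumptions, not the actual DefectDojo parser API.

```python
import json

SEVERITIES = {"CRITICAL", "HIGH", "MEDIUM", "LOW"}

def parse_legitify(path):
    """Collect FAILED violations from a flattened Legitify report.

    Illustrative sketch only: field names mirror the fixtures in this
    patch; the output dictionaries are an assumption, not DefectDojo's
    actual finding model.
    """
    with open(path, encoding="utf-8") as f:
        report = json.load(f)
    if report.get("type") != "flattened":
        raise ValueError("unsupported Legitify report type")
    findings = []
    for policy_key, entry in report.get("content", {}).items():
        info = entry.get("policyInfo", {})
        severity = info.get("severity", "LOW")
        if severity not in SEVERITIES:
            severity = "LOW"
        for violation in entry.get("violations", []):
            # PASSED and SKIPPED rows carry no actionable finding.
            if violation.get("status") != "FAILED":
                continue
            findings.append({
                "title": info.get("title", policy_key),
                "severity": severity.capitalize(),  # e.g. "HIGH" -> "High"
                "description": info.get("description", ""),
                "mitigation": "\n".join(info.get("remediationSteps", [])),
                "references": violation.get("canonicalLink", ""),
                "component_name": violation.get("aux", {}).get("entityName"),
            })
    return findings
```

Against the many-findings file above, such a walk returns one entry per FAILED violation; the two fixtures that follow exercise the boundary cases their file names advertise.
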
diff --git a/unittests/scans/legitify/legitify_no_findings.json b/unittests/scans/legitify/legitify_no_findings.json
new file mode 100644
index 00000000000..590e91a37b8
--- /dev/null
+++ b/unittests/scans/legitify/legitify_no_findings.json
@@ -0,0 +1,896 @@
+{
+ "type": "flattened",
+ "content": {
+ "data.repository.actions_can_approve_pull_requests": {
+ "policyInfo": {
+ "title": "Workflows Should Not Be Allowed To Approve Pull Requests",
+ "description": "The default GitHub Actions configuration allows for workflows to approve pull requests. This could allow users to bypass code-review restrictions.",
+ "policyName": "actions_can_approve_pull_requests",
+ "fullyQualifiedPolicyName": "data.repository.actions_can_approve_pull_requests",
+ "severity": "HIGH",
+ "threat": [
+ "Attackers can exploit this misconfiguration to bypass code-review restrictions by creating a workflow that approves their own pull request and then merging the pull request without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Uncheck 'Allow GitHub actions to create and approve pull requests.'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management system's built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_required",
+ "severity": "HIGH",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 1 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_not_maintained": {
+ "policyInfo": {
+ "title": "Repository Should Be Updated At Least Quarterly",
+ "description": "A project which is not actively maintained may not be patched against security issues within its code and dependencies, and is therefore at higher risk of including known vulnerabilities.",
+ "policyName": "repository_not_maintained",
+ "fullyQualifiedPolicyName": "data.repository.repository_not_maintained",
+ "severity": "HIGH",
+ "threat": [
+ "As new vulnerabilities are found over time, unmaintained repositories are more likely to point to dependencies that have known vulnerabilities, exposing these repositories to 1-day attacks."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Either Delete or Archive the repository"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.code_review_by_two_members_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review By At Least Two Reviewers",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_by_two_members_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_by_two_members_not_required",
+ "severity": "MEDIUM",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production.",
+ "Requiring code review by at least two reviewers further decreases the risk of an insider threat (as merging code requires compromising at least 2 identities with write permissions), and decreases the likelihood of human error in the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 2 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.ghas_dependency_review_not_enabled": {
+ "policyInfo": {
+ "title": "GitHub Advanced Security – Dependency Review Should Be Enabled For A Repository",
+ "description": "Enable GitHub Advanced Security dependency review to avoid introducing new vulnerabilities and detect newly discovered vulnerabilities in existing packages.",
+ "policyName": "ghas_dependency_review_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.ghas_dependency_review_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "A contributor may add vulnerable third-party dependencies to the repository, introducing vulnerabilities to your application that will only be detected after merge."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependency graph' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection": {
+ "policyInfo": {
+ "title": "Default Branch Should Be Protected",
+ "description": "Branch protection is not enabled for this repository’s default branch. Protecting branches ensures new code changes must go through a controlled merge process and allows enforcement of code review as well as other security tests. This issue is raised if the default branch protection is turned off.",
+ "policyName": "missing_default_branch_protection",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection",
+ "severity": "MEDIUM",
+ "threat": [
+ "Any contributor with write access may push potentially dangerous code to this repository, making it easier to compromise and difficult to audit."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Add rule'",
+ "6. Set 'Branch name pattern' as the default branch name (usually 'main' or 'master')",
+ "7. Set desired protections",
+ "8. Click 'Create' and save the rule"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_deletion": {
+ "policyInfo": {
+ "title": "Default Branch Deletion Protection Should Be Enabled",
+ "description": "The history of the default branch is not protected against deletion for this repository.",
+ "policyName": "missing_default_branch_protection_deletion",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_deletion",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow deletions', Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_force_push": {
+ "policyInfo": {
+ "title": "Default Branch Should Not Allow Force Pushes",
+ "description": "The history of the default branch is not protected against changes for this repository. Protecting branch history ensures every change that was made to code can be retained and later examined. This issue is raised if the default branch history can be modified using force push.",
+ "policyName": "missing_default_branch_protection_force_push",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_force_push",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow force pushes'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.non_linear_history": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Linear History",
+ "description": "Prevent merge commits from being pushed to protected branches.",
+ "policyName": "non_linear_history",
+ "fullyQualifiedPolicyName": "data.repository.non_linear_history",
+ "severity": "MEDIUM",
+ "threat": [
+ "Having a non-linear history makes it harder to reverse changes, making recovery from bugs and security risks slower and more difficult."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require linear history'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_secret_is_stale": {
+ "policyInfo": {
+ "title": "Repository Secrets Should Be Updated At Least Yearly",
+ "description": "Some of the repository secrets have not been updated for over a year. It is recommended to refresh secret values regularly in order to minimize the risk of breach in case of an information leak.",
+ "policyName": "repository_secret_is_stale",
+ "fullyQualifiedPolicyName": "data.repository.repository_secret_is_stale",
+ "severity": "MEDIUM",
+ "threat": [
+ "Sensitive data may have been inadvertently made public in the past, and an attacker who holds this data may gain access to your current CI and services. In addition, there may be old or unnecessary tokens that have not been inspected and can be used to access sensitive information."
+ ],
+ "remediationSteps": [
+ "1. Enter your repository's landing page",
+ "2. Go to the settings tab",
+ "3. Under the 'Security' title on the left, choose 'Secrets and variables'",
+ "4. Click 'Actions'",
+ "5. Sort secrets by 'Last Updated'",
+ "6. Regenerate every secret older than one year and add the new value to GitHub's secret manager"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "secretsList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.requires_branches_up_to_date_before_merge": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Branches To Be Up To Date Before Merge",
+ "description": "Status checks are required, but branches that are not up to date can be merged. This can result in previously remediated issues being merged in over fixes.",
+ "policyName": "requires_branches_up_to_date_before_merge",
+ "fullyQualifiedPolicyName": "data.repository.requires_branches_up_to_date_before_merge",
+ "severity": "MEDIUM",
+ "threat": [
+ "Required status checks may be failing on the latest version after passing on an earlier version of the code, making it easy to commit buggy or otherwise insecure code."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Check 'Require branches to be up to date before merging'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.requires_status_checks": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Checks To Pass Before Merge",
+ "description": "Branch protection is enabled. However, the checks that validate the quality and security of the code are not required to pass before submitting new changes. The default check ensures the code is up-to-date to prevent faulty merges and unexpected behaviors, as well as other custom checks that test security and quality. It is advised to turn this control on to ensure any existing or future check will be required to pass.",
+ "policyName": "requires_status_checks",
+ "fullyQualifiedPolicyName": "data.repository.requires_status_checks",
+ "severity": "MEDIUM",
+ "threat": [
+ "Not defining a set of required status checks can make it easy for contributors to introduce buggy or insecure code as manual review, whether mandated or optional, is the only line of defense."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Add the required checks that must pass before merging (tests, lint, etc...)",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.scorecard_score_too_low": {
+ "policyInfo": {
+ "title": "OSSF Scorecard Score Should Be Above 7",
+ "description": "Scorecard is an open-source tool from the OSSF that helps to assess the security posture of repositories. A low scorecard score means your repository may be at risk.",
+ "policyName": "scorecard_score_too_low",
+ "fullyQualifiedPolicyName": "data.repository.scorecard_score_too_low",
+ "severity": "MEDIUM",
+ "threat": [
+ "A low Scorecard score can indicate that the repository is more vulnerable to attack than others, making it a prime attack target."
+ ],
+ "remediationSteps": [
+ "2. - Run legitify with --scorecard verbose",
+ "3. - Run scorecard manually",
+ "4. Fix the failed checks"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.secret_scanning_not_enabled": {
+ "policyInfo": {
+ "title": "Secret Scanning should be enabled",
+ "description": "Repository should have secret scanning enabled. Secret scanning helps prevent the exposure of sensitive information and ensures compliance.",
+ "policyName": "secret_scanning_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.secret_scanning_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "Exposed secrets increases the risk of sensitive information such as API keys, passwords, and tokens being disclosed, leading to unauthorized access to systems and services, and data breaches."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under the 'Security' title on the left, select 'Code security and analysis'",
+ "3. Under 'Secret scanning', click 'Enable'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.token_default_permissions_is_read_write": {
+ "policyInfo": {
+ "title": "Default Workflow Token Permission Should Be Set To Read Only",
+ "description": "The default GitHub Action workflow token permission is set to read-write. When creating workflow tokens, it is highly recommended to follow the Principle of Least Privilege and force workflow authors to specify explicitly which permissions they need.",
+ "policyName": "token_default_permissions_is_read_write",
+ "fullyQualifiedPolicyName": "data.repository.token_default_permissions_is_read_write",
+ "severity": "MEDIUM",
+ "threat": [
+ "In case of token compromise (due to a vulnerability or malicious third-party GitHub actions), an attacker can use this token to sabotage various assets in your CI/CD pipeline, such as packages, pull-requests, deployments, and more."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Select 'Read repository contents permission'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.users_allowed_to_bypass_ruleset": {
+ "policyInfo": {
+ "title": "Users Are Allowed To Bypass Ruleset Rules",
+ "description": "Rulesets rules are not enforced for some users. When defining rulesets it is recommended to make sure that no one is allowed to bypass these rules in order to avoid inadvertent or intentional alterations to critical code which can lead to potential errors or vulnerabilities in the software.",
+ "policyName": "users_allowed_to_bypass_ruleset",
+ "fullyQualifiedPolicyName": "data.repository.users_allowed_to_bypass_ruleset",
+ "severity": "MEDIUM",
+ "threat": [
+ "Attackers that gain access to a user that can bypass the ruleset rules can compromise the codebase without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under 'Code and automation', select 'Rules -\u003e Rulesets'",
+ "3. Find the relevant ruleset",
+ "4. Empty the 'Bypass list'",
+ "5. Press 'Save Changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.vulnerability_alerts_not_enabled": {
+ "policyInfo": {
+ "title": "Vulnerability Alerts Should Be Enabled",
+ "description": "Enable GitHub Dependabot to regularly scan for open source vulnerabilities.",
+ "policyName": "vulnerability_alerts_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.vulnerability_alerts_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "An open source vulnerability may be affecting your code without your knowledge, making it vulnerable to exploitation."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependabot alerts' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_limited_to_code_owners": {
+ "policyInfo": {
+ "title": "Default Branch Should Limit Code Review to Code-Owners",
+ "description": "It is recommended to require code review only from designated individuals specified in CODEOWNERS file. Turning this option on enforces that only the allowed owners can approve a code change. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_limited_to_code_owners",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_limited_to_code_owners",
+ "severity": "LOW",
+ "threat": [
+ "A pull request may be approved by any contributor with write access. Specifying specific code owners can ensure review is only done by individuals with the correct expertise required for the review of the changed files, potentially preventing bugs and security risks."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require review from Code Owners'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.dismisses_stale_reviews": {
+ "policyInfo": {
+ "title": "Default Branch Should Require New Code Changes After Approval To Be Re-Approved",
+ "description": "This security control prevents merging code that was approved but later on changed. Turning it on ensures any new changes must be reviewed again. This setting is part of the branch protection and code-review settings, and hardens the review process. If turned off - a developer can change the code after approval, and push code that is different from the one that was previously allowed. This option is found in the branch protection setting for the repository.",
+ "policyName": "dismisses_stale_reviews",
+ "fullyQualifiedPolicyName": "data.repository.dismisses_stale_reviews",
+ "severity": "LOW",
+ "threat": [
+ "Buggy or insecure code may be committed after approval and will reach the main branch without review. Alternatively, an attacker can attempt a just-in-time attack to introduce dangerous code just before merge."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Dismiss stale pull request approvals when new commits are pushed'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.forking_allowed_for_repository": {
+ "policyInfo": {
+ "title": "Forking Should Not Be Allowed for Private/Internal Repositories",
+ "description": "Forking private or internal repositories can lead to unauthorized spread and potential exposure of sensitive source code. It is recommended to disable forking for private repositories in the repository or the organization configuration to maintain control over the source code. If forking is necessary, it should be enabled selectively by admins for specific collaboration needs on private repositories.",
+ "policyName": "forking_allowed_for_repository",
+ "fullyQualifiedPolicyName": "data.repository.forking_allowed_for_repository",
+ "severity": "LOW",
+ "threat": [
+ "Forked repositories cause more code and secret sprawl in the organization as forks are independent copies of the repository and need to be tracked separately, making it more difficult to keep track of sensitive assets and contain potential incidents."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'General' tab",
+ "4. Under 'Features', Toggle off 'Allow forking'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.no_conversation_resolution": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Conversations To Be Resolved Before Merge",
+ "description": "Require all Pull Request conversations to be resolved before merging. Check this to avoid bypassing/missing a Pull Request comment.",
+ "policyName": "no_conversation_resolution",
+ "fullyQualifiedPolicyName": "data.repository.no_conversation_resolution",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the merging of code without resolving all conversations can promote poor and vulnerable code, as important comments may be forgotten or deliberately ignored when the code is merged."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require conversation resolution before merging'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.no_signed_commits": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Commits To Be Signed",
+ "description": "Require all commits to be signed and verified",
+ "policyName": "no_signed_commits",
+ "fullyQualifiedPolicyName": "data.repository.no_signed_commits",
+ "severity": "LOW",
+ "threat": [
+ "A commit containing malicious code may be crafted by a malicious actor that has acquired write access to the repository to initiate a supply chain attack. Commit signing provides another layer of defense that can prevent this type of compromise."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require signed commits'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.pushes_are_not_restricted": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Push To It",
+ "description": "By default, commits can be pushed directly to protected branches without going through a Pull Request. Restrict who can push commits to protected branches so that commits can be added only via merges, which require Pull Request.",
+ "policyName": "pushes_are_not_restricted",
+ "fullyQualifiedPolicyName": "data.repository.pushes_are_not_restricted",
+ "severity": "LOW",
+ "threat": [
+ "An attacker with write credentials may introduce vulnerabilities to your code without your knowledge. Alternatively, contributors may commit unsafe code that is buggy or easy to exploit that could have been caught using a review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can push to matching branches'",
+ "7. Choose who should be allowed to push",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_has_too_many_admins": {
+ "policyInfo": {
+ "title": "Repository Should Have A Low Admin Count",
+ "description": "Repository admins are highly privileged and could create great damage if they are compromised. It is recommended to limit the number of repository admins to the minimum required, and no more than 5% of the userbase (Up to 3 admins are always allowed).",
+ "policyName": "repository_has_too_many_admins",
+ "fullyQualifiedPolicyName": "data.repository.repository_has_too_many_admins",
+ "severity": "LOW",
+ "threat": [
+ "A compromised user with admin permissions can initiate a supply chain attack in a plethora of ways.",
+ "Having many admin users increases the overall risk of user compromise, and makes it more likely to lose track of unused admin permissions given to users in the past."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repository settings page",
+ "3. Press 'Collaborators and teams'",
+ "4. Select the unwanted admin users",
+ "5. Select 'Change Role'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_doesnt_require_ssl": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured To Use SSL",
+ "description": "Webhooks that are not configured with SSL enabled could expose your software to man-in-the-middle attacks (MITM).",
+ "policyName": "repository_webhook_doesnt_require_ssl",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_doesnt_require_ssl",
+ "severity": "LOW",
+ "threat": [
+ "If SSL verification is disabled, any party with access to the target DNS domain can masquerade as your designated payload URL, allowing it to freely read and affect the response of any webhook request.",
+ "In the case of GitHub Enterprise Server instances, it may be sufficient only to control the DNS configuration of the network where the instance is deployed, as an attacker can redirect traffic to the target domain in your internal network directly to them, and this is often much easier than compromising an internet-facing domain."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Verify URL starts with https",
+ "5. Press on the insecure webhook",
+ "6. Enable 'SSL verification'",
+ "7. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_no_secret": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured With A Secret",
+ "description": "Webhooks are not configured with a shared secret to validate the origin and content of the request. This could allow your webhook to be triggered by any bad actor with the URL.",
+ "policyName": "repository_webhook_no_secret",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_no_secret",
+ "severity": "LOW",
+ "threat": [
+ "Not using a webhook secret makes the service receiving the webhook unable to determine the authenticity of the request.",
+ "This allows attackers to masquerade as your repository, potentially creating an unstable or insecure state in other systems."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Press on the insecure webhook",
+ "5. Configure a secret",
+ "6. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.review_dismissal_allowed": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Dismiss Reviews",
+ "description": "Any user with write access to the repository can dismiss pull-request reviews. Pull-request review contains essential information on the work that needs to be done and helps keep track of the changes. Dismissing it might cause a loss of this information and should be restricted to a limited number of users.",
+ "policyName": "review_dismissal_allowed",
+ "fullyQualifiedPolicyName": "data.repository.review_dismissal_allowed",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the dismissal of reviews can promote poor and vulnerable code, as important comments may be forgotten and ignored during the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can dismiss pull request reviews'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ }
+ }
+ }
\ No newline at end of file
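
The no-findings fixture above and the one-finding fixture below are the boundary cases a unit test would typically pin down: an empty result for the first, and exactly one HIGH finding (code_review_not_required) for the second. A hypothetical test shape, reusing the parse_legitify sketch above; the class and method names are placeholders, not this repository's real test suite:

```python
import unittest

# Assumes the parse_legitify sketch shown earlier is in scope (or
# importable); the paths are the fixture files added by this patch.
class TestLegitifyFixtures(unittest.TestCase):
    def test_no_findings_fixture_is_empty(self):
        findings = parse_legitify(
            "unittests/scans/legitify/legitify_no_findings.json")
        self.assertEqual(0, len(findings))

    def test_one_finding_fixture_has_single_high(self):
        findings = parse_legitify(
            "unittests/scans/legitify/legitify_one_finding.json")
        self.assertEqual(1, len(findings))
        self.assertEqual("High", findings[0]["severity"])
        self.assertIn("Code Review", findings[0]["title"])

if __name__ == "__main__":
    unittest.main()
```
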
diff --git a/unittests/scans/legitify/legitify_one_finding.json b/unittests/scans/legitify/legitify_one_finding.json
new file mode 100644
index 00000000000..4f34bccff98
--- /dev/null
+++ b/unittests/scans/legitify/legitify_one_finding.json
@@ -0,0 +1,896 @@
+{
+ "type": "flattened",
+ "content": {
+ "data.repository.actions_can_approve_pull_requests": {
+ "policyInfo": {
+ "title": "Workflows Should Not Be Allowed To Approve Pull Requests",
+ "description": "The default GitHub Actions configuration allows for workflows to approve pull requests. This could allow users to bypass code-review restrictions.",
+ "policyName": "actions_can_approve_pull_requests",
+ "fullyQualifiedPolicyName": "data.repository.actions_can_approve_pull_requests",
+ "severity": "HIGH",
+ "threat": [
+ "Attackers can exploit this misconfiguration to bypass code-review restrictions by creating a workflow that approves their own pull request and then merging the pull request without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Uncheck 'Allow GitHub actions to create and approve pull requests.'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management system's built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_required",
+ "severity": "HIGH",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 1 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "FAILED"
+ }
+ ]
+ },
+ "data.repository.repository_not_maintained": {
+ "policyInfo": {
+ "title": "Repository Should Be Updated At Least Quarterly",
+ "description": "A project which is not actively maintained may not be patched against security issues within its code and dependencies, and is therefore at higher risk of including known vulnerabilities.",
+ "policyName": "repository_not_maintained",
+ "fullyQualifiedPolicyName": "data.repository.repository_not_maintained",
+ "severity": "HIGH",
+ "threat": [
+ "As new vulnerabilities are found over time, unmaintained repositories are more likely to point to dependencies that have known vulnerabilities, exposing these repositories to 1-day attacks."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Either Delete or Archive the repository"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.code_review_by_two_members_not_required": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Code Review By At Least Two Reviewers",
+ "description": "In order to comply with separation of duties principle and enforce secure code practices, a code review should be mandatory using the source-code-management built-in enforcement. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_by_two_members_not_required",
+ "fullyQualifiedPolicyName": "data.repository.code_review_by_two_members_not_required",
+ "severity": "MEDIUM",
+ "threat": [
+ "Users can merge code without being reviewed, which can lead to insecure code reaching the main branch and production.",
+ "Requiring code review by at least two reviewers further decreases the risk of an insider threat (as merging code requires compromising at least 2 identities with write permissions), and decreases the likelihood of human error in the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require approvals'",
+ "8. Set 'Required number of approvals before merging' to 2 or more",
+ "9. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.ghas_dependency_review_not_enabled": {
+ "policyInfo": {
+ "title": "GitHub Advanced Security – Dependency Review Should Be Enabled For A Repository",
+ "description": "Enable GitHub Advanced Security dependency review to avoid introducing new vulnerabilities and detect newly discovered vulnerabilities in existing packages.",
+ "policyName": "ghas_dependency_review_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.ghas_dependency_review_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "A contributor may add vulnerable third-party dependencies to the repository, introducing vulnerabilities to your application that will only be detected after merge."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependency graph' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection": {
+ "policyInfo": {
+ "title": "Default Branch Should Be Protected",
+ "description": "Branch protection is not enabled for this repository’s default branch. Protecting branches ensures new code changes must go through a controlled merge process and allows enforcement of code review as well as other security tests. This issue is raised if the default branch protection is turned off.",
+ "policyName": "missing_default_branch_protection",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection",
+ "severity": "MEDIUM",
+ "threat": [
+ "Any contributor with write access may push potentially dangerous code to this repository, making it easier to compromise and difficult to audit."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Add rule'",
+ "6. Set 'Branch name pattern' as the default branch name (usually 'main' or 'master')",
+ "7. Set desired protections",
+ "8. Click 'Create' and save the rule"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_deletion": {
+ "policyInfo": {
+ "title": "Default Branch Deletion Protection Should Be Enabled",
+ "description": "The history of the default branch is not protected against deletion for this repository.",
+ "policyName": "missing_default_branch_protection_deletion",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_deletion",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow deletions', Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.missing_default_branch_protection_force_push": {
+ "policyInfo": {
+ "title": "Default Branch Should Not Allow Force Pushes",
+ "description": "The history of the default branch is not protected against changes for this repository. Protecting branch history ensures every change that was made to code can be retained and later examined. This issue is raised if the default branch history can be modified using force push.",
+ "policyName": "missing_default_branch_protection_force_push",
+ "fullyQualifiedPolicyName": "data.repository.missing_default_branch_protection_force_push",
+ "severity": "MEDIUM",
+ "threat": [
+ "Rewriting project history can make it difficult to trace back when bugs or security issues were introduced, making them more difficult to remediate."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Uncheck 'Allow force pushes'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.non_linear_history": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Linear History",
+ "description": "Prevent merge commits from being pushed to protected branches.",
+ "policyName": "non_linear_history",
+ "fullyQualifiedPolicyName": "data.repository.non_linear_history",
+ "severity": "MEDIUM",
+ "threat": [
+ "Having a non-linear history makes it harder to reverse changes, making recovery from bugs and security risks slower and more difficult."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require linear history'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_secret_is_stale": {
+ "policyInfo": {
+ "title": "Repository Secrets Should Be Updated At Least Yearly",
+ "description": "Some of the repository secrets have not been updated for over a year. It is recommended to refresh secret values regularly in order to minimize the risk of breach in case of an information leak.",
+ "policyName": "repository_secret_is_stale",
+ "fullyQualifiedPolicyName": "data.repository.repository_secret_is_stale",
+ "severity": "MEDIUM",
+ "threat": [
+ "Sensitive data may have been inadvertently made public in the past, and an attacker who holds this data may gain access to your current CI and services. In addition, there may be old or unnecessary tokens that have not been inspected and can be used to access sensitive information."
+ ],
+ "remediationSteps": [
+ "1. Enter your repository's landing page",
+ "2. Go to the settings tab",
+ "3. Under the 'Security' title on the left, choose 'Secrets and variables'",
+ "4. Click 'Actions'",
+ "5. Sort secrets by 'Last Updated'",
+ "6. Regenerate every secret older than one year and add the new value to GitHub's secret manager"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "secretsList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.requires_branches_up_to_date_before_merge": {
+ "policyInfo": {
+ "title": "Default Branch Should Require Branches To Be Up To Date Before Merge",
+ "description": "Status checks are required, but branches that are not up to date can be merged. This can result in previously remediated issues being merged in over fixes.",
+ "policyName": "requires_branches_up_to_date_before_merge",
+ "fullyQualifiedPolicyName": "data.repository.requires_branches_up_to_date_before_merge",
+ "severity": "MEDIUM",
+ "threat": [
+ "Required status checks may be failing on the latest version after passing on an earlier version of the code, making it easy to commit buggy or otherwise insecure code."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Check 'Require branches to be up to date before merging'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.requires_status_checks": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Checks To Pass Before Merge",
+ "description": "Branch protection is enabled. However, the checks that validate the quality and security of the code are not required to pass before submitting new changes. The default check ensures the code is up-to-date to prevent faulty merges and unexpected behaviors, as well as other custom checks that test security and quality. It is advised to turn this control on to ensure any existing or future check will be required to pass.",
+ "policyName": "requires_status_checks",
+ "fullyQualifiedPolicyName": "data.repository.requires_status_checks",
+ "severity": "MEDIUM",
+ "threat": [
+ "Not defining a set of required status checks can make it easy for contributors to introduce buggy or insecure code as manual review, whether mandated or optional, is the only line of defense."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require status checks to pass before merging'",
+ "7. Add the required checks that must pass before merging (tests, lint, etc...)",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.scorecard_score_too_low": {
+ "policyInfo": {
+ "title": "OSSF Scorecard Score Should Be Above 7",
+ "description": "Scorecard is an open-source tool from the OSSF that helps to assess the security posture of repositories. A low scorecard score means your repository may be at risk.",
+ "policyName": "scorecard_score_too_low",
+ "fullyQualifiedPolicyName": "data.repository.scorecard_score_too_low",
+ "severity": "MEDIUM",
+ "threat": [
+ "A low Scorecard score can indicate that the repository is more vulnerable to attack than others, making it a prime attack target."
+ ],
+ "remediationSteps": [
+ "2. - Run legitify with --scorecard verbose",
+ "3. - Run scorecard manually",
+ "4. Fix the failed checks"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.secret_scanning_not_enabled": {
+ "policyInfo": {
+ "title": "Secret Scanning should be enabled",
+ "description": "Repository should have secret scanning enabled. Secret scanning helps prevent the exposure of sensitive information and ensures compliance.",
+ "policyName": "secret_scanning_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.secret_scanning_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "Exposed secrets increases the risk of sensitive information such as API keys, passwords, and tokens being disclosed, leading to unauthorized access to systems and services, and data breaches."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under the 'Security' title on the left, select 'Code security and analysis'",
+ "3. Under 'Secret scanning', click 'Enable'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.token_default_permissions_is_read_write": {
+ "policyInfo": {
+ "title": "Default Workflow Token Permission Should Be Set To Read Only",
+ "description": "The default GitHub Action workflow token permission is set to read-write. When creating workflow tokens, it is highly recommended to follow the Principle of Least Privilege and force workflow authors to specify explicitly which permissions they need.",
+ "policyName": "token_default_permissions_is_read_write",
+ "fullyQualifiedPolicyName": "data.repository.token_default_permissions_is_read_write",
+ "severity": "MEDIUM",
+ "threat": [
+ "In case of token compromise (due to a vulnerability or malicious third-party GitHub actions), an attacker can use this token to sabotage various assets in your CI/CD pipeline, such as packages, pull-requests, deployments, and more."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the org's settings page",
+ "3. Enter 'Actions - General' tab",
+ "4. Under 'Workflow permissions'",
+ "5. Select 'Read repository contents permission'",
+ "6. Click 'Save'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.users_allowed_to_bypass_ruleset": {
+ "policyInfo": {
+ "title": "Users Are Allowed To Bypass Ruleset Rules",
+ "description": "Rulesets rules are not enforced for some users. When defining rulesets it is recommended to make sure that no one is allowed to bypass these rules in order to avoid inadvertent or intentional alterations to critical code which can lead to potential errors or vulnerabilities in the software.",
+ "policyName": "users_allowed_to_bypass_ruleset",
+ "fullyQualifiedPolicyName": "data.repository.users_allowed_to_bypass_ruleset",
+ "severity": "MEDIUM",
+ "threat": [
+ "Attackers that gain access to a user that can bypass the ruleset rules can compromise the codebase without anyone noticing, introducing malicious code that would go straight ahead to production."
+ ],
+ "remediationSteps": [
+ "1. Go to the repository settings page",
+ "2. Under 'Code and automation', select 'Rules -\u003e Rulesets'",
+ "3. Find the relevant ruleset",
+ "4. Empty the 'Bypass list'",
+ "5. Press 'Save Changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.vulnerability_alerts_not_enabled": {
+ "policyInfo": {
+ "title": "Vulnerability Alerts Should Be Enabled",
+ "description": "Enable GitHub Dependabot to regularly scan for open source vulnerabilities.",
+ "policyName": "vulnerability_alerts_not_enabled",
+ "fullyQualifiedPolicyName": "data.repository.vulnerability_alerts_not_enabled",
+ "severity": "MEDIUM",
+ "threat": [
+ "An open source vulnerability may be affecting your code without your knowledge, making it vulnerable to exploitation."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Code security and analysis' tab",
+ "4. Set 'Dependabot alerts' as Enabled"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.code_review_not_limited_to_code_owners": {
+ "policyInfo": {
+ "title": "Default Branch Should Limit Code Review to Code-Owners",
+ "description": "It is recommended to require code review only from designated individuals specified in CODEOWNERS file. Turning this option on enforces that only the allowed owners can approve a code change. This option is found in the branch protection setting of the repository.",
+ "policyName": "code_review_not_limited_to_code_owners",
+ "fullyQualifiedPolicyName": "data.repository.code_review_not_limited_to_code_owners",
+ "severity": "LOW",
+ "threat": [
+ "A pull request may be approved by any contributor with write access. Specifying specific code owners can ensure review is only done by individuals with the correct expertise required for the review of the changed files, potentially preventing bugs and security risks."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Require review from Code Owners'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.dismisses_stale_reviews": {
+ "policyInfo": {
+ "title": "Default Branch Should Require New Code Changes After Approval To Be Re-Approved",
+ "description": "This security control prevents merging code that was approved but later on changed. Turning it on ensures any new changes must be reviewed again. This setting is part of the branch protection and code-review settings, and hardens the review process. If turned off - a developer can change the code after approval, and push code that is different from the one that was previously allowed. This option is found in the branch protection setting for the repository.",
+ "policyName": "dismisses_stale_reviews",
+ "fullyQualifiedPolicyName": "data.repository.dismisses_stale_reviews",
+ "severity": "LOW",
+ "threat": [
+ "Buggy or insecure code may be committed after approval and will reach the main branch without review. Alternatively, an attacker can attempt a just-in-time attack to introduce dangerous code just before merge."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require a pull request before merging'",
+ "7. Check 'Dismiss stale pull request approvals when new commits are pushed'",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.forking_allowed_for_repository": {
+ "policyInfo": {
+ "title": "Forking Should Not Be Allowed for Private/Internal Repositories",
+ "description": "Forking private or internal repositories can lead to unauthorized spread and potential exposure of sensitive source code. It is recommended to disable forking for private repositories in the repository or the organization configuration to maintain control over the source code. If forking is necessary, it should be enabled selectively by admins for specific collaboration needs on private repositories.",
+ "policyName": "forking_allowed_for_repository",
+ "fullyQualifiedPolicyName": "data.repository.forking_allowed_for_repository",
+ "severity": "LOW",
+ "threat": [
+ "Forked repositories cause more code and secret sprawl in the organization as forks are independent copies of the repository and need to be tracked separately, making it more difficult to keep track of sensitive assets and contain potential incidents."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'General' tab",
+ "4. Under 'Features', Toggle off 'Allow forking'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.no_conversation_resolution": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Conversations To Be Resolved Before Merge",
+ "description": "Require all Pull Request conversations to be resolved before merging. Check this to avoid bypassing/missing a Pull Request comment.",
+ "policyName": "no_conversation_resolution",
+ "fullyQualifiedPolicyName": "data.repository.no_conversation_resolution",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the merging of code without resolving all conversations can promote poor and vulnerable code, as important comments may be forgotten or deliberately ignored when the code is merged."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require conversation resolution before merging'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.no_signed_commits": {
+ "policyInfo": {
+ "title": "Default Branch Should Require All Commits To Be Signed",
+ "description": "Require all commits to be signed and verified",
+ "policyName": "no_signed_commits",
+ "fullyQualifiedPolicyName": "data.repository.no_signed_commits",
+ "severity": "LOW",
+ "threat": [
+ "A commit containing malicious code may be crafted by a malicious actor that has acquired write access to the repository to initiate a supply chain attack. Commit signing provides another layer of defense that can prevent this type of compromise."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Require signed commits'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.pushes_are_not_restricted": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Push To It",
+ "description": "By default, commits can be pushed directly to protected branches without going through a Pull Request. Restrict who can push commits to protected branches so that commits can be added only via merges, which require Pull Request.",
+ "policyName": "pushes_are_not_restricted",
+ "fullyQualifiedPolicyName": "data.repository.pushes_are_not_restricted",
+ "severity": "LOW",
+ "threat": [
+ "An attacker with write credentials may introduce vulnerabilities to your code without your knowledge. Alternatively, contributors may commit unsafe code that is buggy or easy to exploit that could have been caught using a review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can push to matching branches'",
+ "7. Choose who should be allowed to push",
+ "8. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_has_too_many_admins": {
+ "policyInfo": {
+ "title": "Repository Should Have A Low Admin Count",
+ "description": "Repository admins are highly privileged and could create great damage if they are compromised. It is recommended to limit the number of repository admins to the minimum required, and no more than 5% of the userbase (Up to 3 admins are always allowed).",
+ "policyName": "repository_has_too_many_admins",
+ "fullyQualifiedPolicyName": "data.repository.repository_has_too_many_admins",
+ "severity": "LOW",
+ "threat": [
+ "A compromised user with admin permissions can initiate a supply chain attack in a plethora of ways.",
+ "Having many admin users increases the overall risk of user compromise, and makes it more likely to lose track of unused admin permissions given to users in the past."
+ ],
+ "remediationSteps": [
+ "1. Make sure you have admin permissions",
+ "2. Go to the repository settings page",
+ "3. Press 'Collaborators and teams'",
+ "4. Select the unwanted admin users",
+ "5. Select 'Change Role'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "SKIPPED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_doesnt_require_ssl": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured To Use SSL",
+ "description": "Webhooks that are not configured with SSL enabled could expose your software to man-in-the-middle attacks (MITM).",
+ "policyName": "repository_webhook_doesnt_require_ssl",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_doesnt_require_ssl",
+ "severity": "LOW",
+ "threat": [
+ "If SSL verification is disabled, any party with access to the target DNS domain can masquerade as your designated payload URL, allowing it to freely read and affect the response of any webhook request.",
+ "In the case of GitHub Enterprise Server instances, it may be sufficient only to control the DNS configuration of the network where the instance is deployed, as an attacker can redirect traffic to the target domain in your internal network directly to them, and this is often much easier than compromising an internet-facing domain."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Verify URL starts with https",
+ "5. Press on the insecure webhook",
+ "6. Enable 'SSL verification'",
+ "7. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.repository_webhook_no_secret": {
+ "policyInfo": {
+ "title": "Webhooks Should Be Configured With A Secret",
+ "description": "Webhooks are not configured with a shared secret to validate the origin and content of the request. This could allow your webhook to be triggered by any bad actor with the URL.",
+ "policyName": "repository_webhook_no_secret",
+ "fullyQualifiedPolicyName": "data.repository.repository_webhook_no_secret",
+ "severity": "LOW",
+ "threat": [
+ "Not using a webhook secret makes the service receiving the webhook unable to determine the authenticity of the request.",
+ "This allows attackers to masquerade as your repository, potentially creating an unstable or insecure state in other systems."
+ ],
+ "remediationSteps": [
+ "1. Make sure you can manage webhooks for the repository",
+ "2. Go to the repository settings page",
+ "3. Select 'Webhooks'",
+ "4. Press on the insecure webhook",
+ "5. Configure a secret",
+ "6. Click 'Update webhook'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas",
+ "hooksList": []
+ },
+ "status": "PASSED"
+ }
+ ]
+ },
+ "data.repository.review_dismissal_allowed": {
+ "policyInfo": {
+ "title": "Default Branch Should Restrict Who Can Dismiss Reviews",
+ "description": "Any user with write access to the repository can dismiss pull-request reviews. Pull-request review contains essential information on the work that needs to be done and helps keep track of the changes. Dismissing it might cause a loss of this information and should be restricted to a limited number of users.",
+ "policyName": "review_dismissal_allowed",
+ "fullyQualifiedPolicyName": "data.repository.review_dismissal_allowed",
+ "severity": "LOW",
+ "threat": [
+ "Allowing the dismissal of reviews can promote poor and vulnerable code, as important comments may be forgotten and ignored during the review process."
+ ],
+ "remediationSteps": [
+ "Note: The remediation steps apply to legacy branch protections, rules set-based protection should be updated from the rules set page",
+ "1. Make sure you have admin permissions",
+ "2. Go to the repo's settings page",
+ "3. Enter 'Branches' tab",
+ "4. Under 'Branch protection rules'",
+ "5. Click 'Edit' on the default branch rule",
+ "6. Check 'Restrict who can dismiss pull request reviews'",
+ "7. Click 'Save changes'"
+ ],
+ "namespace": "repository"
+ },
+ "violations": [
+ {
+ "violationEntityType": "repository",
+ "canonicalLink": "https://github.com/damianpr/pruebas",
+ "aux": {
+ "entityId": "596502887",
+ "entityName": "pruebas"
+ },
+ "status": "PASSED"
+ }
+ ]
+ }
+ }
+ }
\ No newline at end of file
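For orientation, here is a minimal sketch of how a consumer might walk a legitify report like the fixture above: each entry pairs a policyInfo block with a violations list whose status is PASSED, FAILED, or SKIPPED. This is not DefectDojo's actual parser; the function names and the recursive traversal (which avoids assuming the report's top-level nesting, truncated above) are illustrative assumptions only.

import json

def iter_policies(node):
    # Yield (policyInfo, violations) pairs found anywhere in the report,
    # without assuming how deeply the policy map is nested.
    if isinstance(node, dict):
        if "policyInfo" in node and "violations" in node:
            yield node["policyInfo"], node["violations"]
        else:
            for value in node.values():
                yield from iter_policies(value)

def failed_findings(path):
    # Keep only FAILED violations, pairing each with its policy metadata.
    with open(path, encoding="utf-8") as handle:
        report = json.load(handle)
    findings = []
    for info, violations in iter_policies(report):
        for violation in violations:
            if violation.get("status") == "FAILED":
                findings.append({
                    "title": info["title"],
                    "severity": info["severity"].capitalize(),  # "MEDIUM" -> "Medium"
                    "url": violation.get("canonicalLink"),
                    "mitigation": "\n".join(info.get("remediationSteps", [])),
                })
    return findings

Every violation in this particular fixture is PASSED or SKIPPED, so failed_findings would return an empty list, which is what makes it a useful no-findings test case.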
diff --git a/unittests/scans/npm_audit_7_plus/issue_10801.json b/unittests/scans/npm_audit_7_plus/issue_10801.json
new file mode 100644
index 00000000000..6f8b987dfb3
--- /dev/null
+++ b/unittests/scans/npm_audit_7_plus/issue_10801.json
@@ -0,0 +1,56 @@
+{
+ "auditReportVersion": 2,
+ "vulnerabilities": {
+ "got": {
+ "name": "got",
+ "severity": "moderate",
+ "isDirect": false,
+ "via": [
+ {
+ "source": 1088948,
+ "name": "got",
+ "dependency": "got",
+ "title": "Got allows a redirect to a UNIX socket",
+ "url": "https://github.com/advisories/GHSA-pfrx-2q88-qq97",
+ "severity": "moderate",
+ "cwe": [],
+ "cvss": {
+ "score": 5.3,
+ "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:L/A:N"
+ },
+ "range": "<11.8.5"
+ }
+ ],
+ "effects": [
+ ],
+ "range": "<11.8.5",
+ "nodes": [
+ "node_modules/got"
+ ],
+ "fixAvailable": {
+ "name": "nodemon",
+ "version": "3.1.4",
+ "isSemVerMajor": true
+ }
+ }
+ },
+ "metadata": {
+ "vulnerabilities": {
+ "info": 0,
+ "low": 0,
+ "moderate": 0,
+ "high": 1,
+ "critical": 0,
+ "total": 1
+ },
+ "dependencies": {
+ "prod": 98,
+ "dev": 0,
+ "optional": 0,
+ "peer": 0,
+ "peerOptional": 0,
+ "total": 97
+ }
+ }
+}
+
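The npm audit file above uses the v7+ report format (auditReportVersion 2), which keys vulnerabilities by package name. Each "via" entry is either an advisory object or a plain string naming another vulnerable package, and "fixAvailable" can be a boolean or an object, as it is here. A minimal, hypothetical sketch of reading that shape (not the DefectDojo parser; direct_advisories is an invented name):

import json

def direct_advisories(path):
    with open(path, encoding="utf-8") as handle:
        report = json.load(handle)
    assert report.get("auditReportVersion") == 2  # npm 7+ format only
    rows = []
    for package, vuln in report.get("vulnerabilities", {}).items():
        for via in vuln.get("via", []):
            # Only dict entries carry advisory details; string entries
            # merely point at another vulnerable package.
            if isinstance(via, dict):
                rows.append({
                    "package": package,
                    "title": via.get("title"),
                    "url": via.get("url"),
                    "severity": vuln.get("severity"),
                    "fix_available": bool(vuln.get("fixAvailable")),  # bool or object
                })
    return rows

Against this fixture, the sketch yields a single row for "got", carrying the GHSA advisory shown above.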
diff --git a/unittests/scans/threat_composer/threat_composer_broken_assumptions.json b/unittests/scans/threat_composer/threat_composer_broken_assumptions.json
new file mode 100644
index 00000000000..69f554ae768
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_broken_assumptions.json
@@ -0,0 +1,119 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ },
+ {
+ "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "numericId": 20,
+ "content": "lorem ipsum",
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 20
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "bdef5b69-e690-4c9c-bfc1-960390779d3b"
+ }
+ ],
+ "threats": [
+ {
+ "id": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "numericId": 23,
+ "statement": "A lorem ipsum lorem ipsum can lorem ipsum, which leads to lorem ipsum, negatively impacting lorem ipsum",
+ "threatSource": "lorem ipsum",
+ "prerequisites": "lorem ipsum",
+ "threatAction": "lorem ipsum",
+ "threatImpact": "lorem ipsum",
+ "impactedAssets": [
+ "lorem ipsum"
+ ],
+ "displayOrder": 23,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "S",
+ "T",
+ "R",
+ "I",
+ "D",
+ "E"
+ ]
+ },
+ {
+ "key": "Comments",
+ "value": "lorem ipsum. lorem ipsum lorem ipsum"
+ }
+ ],
+ "tags": [
+ "CWE-156",
+ "CVE-45",
+ "lorem ipsum"
+ ]
+ }
+ ]
+ }
\ No newline at end of file
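Threat Composer models are relational: assumptionLinks and mitigationLinks attach assumptions and mitigations to threats by UUID via linkedId. The fixture above is deliberately "broken" in that its assumption link references an assumptionId (d8edcf30-5c76-49f7-a408-20e071bbea1c) that does not appear in the assumptions array, so a consumer has to resolve links defensively. A minimal sketch under that assumption (resolve_links is a hypothetical helper, not the DefectDojo parser; links of type "Mitigation", which tie assumptions to mitigations rather than to threats, are deliberately ignored here):

def resolve_links(model):
    # Index entities by UUID, then attach linked content to each threat,
    # silently skipping links whose ids are missing (the "broken" cases).
    assumptions = {a["id"]: a for a in model.get("assumptions", [])}
    mitigations = {m["id"]: m for m in model.get("mitigations", [])}
    threats = {
        t["id"]: dict(t, linked_assumptions=[], linked_mitigations=[])
        for t in model.get("threats", [])
    }
    for link in model.get("assumptionLinks", []):
        threat = threats.get(link.get("linkedId"))
        assumption = assumptions.get(link.get("assumptionId"))
        if threat and assumption and link.get("type") == "Threat":
            threat["linked_assumptions"].append(assumption["content"])
    for link in model.get("mitigationLinks", []):
        threat = threats.get(link.get("linkedId"))
        mitigation = mitigations.get(link.get("mitigationId"))
        if threat and mitigation:
            threat["linked_mitigations"].append(mitigation["content"])
    return list(threats.values())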
diff --git a/unittests/scans/threat_composer/threat_composer_broken_mitigations.json b/unittests/scans/threat_composer/threat_composer_broken_mitigations.json
new file mode 100644
index 00000000000..01c4778a3aa
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_broken_mitigations.json
@@ -0,0 +1,125 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ },
+ {
+ "id": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "numericId": 6,
+ "content": "lorem ipsum",
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ]
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Threat"
+ },
+ {
+ "type": "Mitigation",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "linkedId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ }
+ ],
+ "threats": [
+ {
+ "id": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "numericId": 23,
+ "statement": "A lorem ipsum lorem ipsum can lorem ipsum, which leads to lorem ipsum, negatively impacting lorem ipsum",
+ "threatSource": "lorem ipsum",
+ "prerequisites": "lorem ipsum",
+ "threatAction": "lorem ipsum",
+ "threatImpact": "lorem ipsum",
+ "impactedAssets": [
+ "lorem ipsum"
+ ],
+ "displayOrder": 23,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "S",
+ "T",
+ "R",
+ "I",
+ "D",
+ "E"
+ ]
+ },
+ {
+ "key": "Comments",
+ "value": "lorem ipsum. lorem ipsum lorem ipsum"
+ }
+ ],
+ "tags": [
+ "CWE-156",
+ "CVE-45",
+ "lorem ipsum"
+ ]
+ }
+ ]
+ }
\ No newline at end of file
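This second "broken" fixture inverts the previous one: the single mitigationLinks entry points at mitigationId 11fb1c71-42f0-4004-89a7-09d8bf6f8b11, which is absent from the mitigations array (and an assumptionLinks entry with type "Mitigation" references the same missing mitigation, a link type the sketch above skips by design). Hypothetical usage of the earlier resolve_links sketch against this file:

import json

with open("unittests/scans/threat_composer/threat_composer_broken_mitigations.json", encoding="utf-8") as handle:
    model = json.load(handle)

(threat,) = resolve_links(model)
# The dangling mitigation link is skipped rather than raising a KeyError,
# while both type-"Threat" assumption links resolve, since their ids exist here.
assert threat["linked_mitigations"] == []
assert len(threat["linked_assumptions"]) == 2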
diff --git a/unittests/scans/threat_composer/threat_composer_many_threats.json b/unittests/scans/threat_composer/threat_composer_many_threats.json
new file mode 100644
index 00000000000..cb61d880bde
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_many_threats.json
@@ -0,0 +1,1309 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ },
+ {
+ "id": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "numericId": 6,
+ "content": "lorem ipsum",
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ]
+ },
+ {
+ "id": "b7d98386-906f-40ca-a631-194bc3ac709d",
+ "numericId": 5,
+ "displayOrder": 5,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "A consuming customer should have a security awareness program to help to educate their users on how to reuce the likelihood of being socially engineered. See article [Amazon releases free cybersecurity awareness training](https://www.aboutamazon.com/news/community/amazon-releases-free-cybersecurity-awareness-training)"
+ }
+ ],
+ "content": "Security awareness training is the most effective mitigation against social engineering attacks, and one cannot rely solely on technical mitigations"
+ },
+ {
+ "id": "c9b4cc31-3ac4-40f8-82f6-131792f48949",
+ "numericId": 4,
+ "displayOrder": 4,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "[AWS Well-Architected](https://aws.amazon.com/architecture/well-architected/) documentation"
+ }
+ ],
+ "content": "Customer deploying solution will follow AWS Well-Architected best practices"
+ },
+ {
+ "id": "08265978-d2c3-4ced-a530-20e4c84692b8",
+ "numericId": 3,
+ "displayOrder": 3,
+ "metadata": [],
+ "content": "We cannot protect against threats on the client endpoint"
+ },
+ {
+ "id": "930797fb-0611-4f66-b2ab-370ec373c852",
+ "numericId": 2,
+ "displayOrder": 2,
+ "metadata": [],
+ "content": "For general users, modern browsers have adequate protection for access to local browser storage"
+ },
+ {
+ "id": "05ac2926-5464-4071-bb58-e1a56c7ba8f0",
+ "numericId": 1,
+ "displayOrder": 1,
+ "metadata": [],
+ "content": "TLS 1.2 is an adequate mitigation for threats related to tampering and information disclosure of data in transit over the network."
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ },
+ {
+ "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "numericId": 20,
+ "content": "lorem ipsum",
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 20
+ },
+ {
+ "id": "baa86fce-6e2a-406b-a10e-79c30cf94589",
+ "numericId": 19,
+ "displayOrder": 19,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [Import and data validation](https://github.com/awslabs/threat-composer/commit/68f6323ac8ada085b48dbe7bc344021fe4c97e13)"
+ }
+ ],
+ "content": "Schema validation on data import"
+ },
+ {
+ "id": "c00b1a9a-6d7a-41e2-9697-7d41244b5990",
+ "numericId": 18,
+ "displayOrder": 18,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [PDK Static Website construct - S3 access logging](https://github.com/aws/aws-prototyping-sdk/blob/mainline/packages/static-website/src/static-website.ts#L161)"
+ }
+ ],
+ "content": "Amazon S3 - Access logging"
+ },
+ {
+ "id": "3f93baae-0997-4fe0-9d9d-1b10a1ba7973",
+ "numericId": 17,
+ "displayOrder": 17,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [Custom security headers within CloudFront](https://github.com/awslabs/threat-composer/blob/6023777f95ede74d63bf95d0a0fac8f7787d8b35/packages/threat-composer-infra/src/application-stack.ts#L66)"
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "content": "Custom security headers (including HTTP Strict Transport Security)"
+ },
+ {
+ "id": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "numericId": 16,
+ "displayOrder": 16,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [Content Security Policy](https://github.com/awslabs/threat-composer/blob/d1c3848fafb129d19de17db1484cca7055e56a1b/packages/threat-composer-app/public/index.html#L14)"
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "content": "CSP (Content Security Policy)"
+ },
+ {
+ "id": "f4a2c3a8-f5d1-4302-aba3-4b1d715794be",
+ "numericId": 15,
+ "displayOrder": 15,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "HTTPS is configured as enforced - [documentation](https://docs.github.com/en/pages/getting-started-with-github-pages/securing-your-github-pages-site-with-https)"
+ }
+ ],
+ "tags": [
+ "MiTM"
+ ],
+ "content": "TLS provided by GitHub Pages"
+ },
+ {
+ "id": "0cdff113-4816-496d-aa0d-36466c7331c0",
+ "numericId": 14,
+ "displayOrder": 14,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [Default non-routable CIDR](https://github.com/awslabs/threat-composer/blob/main/packages/threat-composer-infra/cdk.context.json#L7)"
+ }
+ ],
+ "tags": [],
+ "content": "Restrictive default for WebACL associated with CloudFront distribution"
+ },
+ {
+ "id": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "numericId": 13,
+ "displayOrder": 13,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "See [README.md](https://github.com/awslabs/threat-composer#security-considerations)"
+ }
+ ],
+ "content": "README: Security considerations"
+ },
+ {
+ "id": "5c356be5-1c34-443c-abe2-8d7f7d3c210a",
+ "numericId": 12,
+ "displayOrder": 12,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "By using popular, well-known and industry recognised NPM packages it is believed that this increases the likelihood that any integrity concerns with the package would be discovered more quickly, and that there would be industry and community urgency in disclosing and remediating.\n\nSee [package.json](https://github.com/awslabs/threat-composer/blob/main/package.json) for NPN packages used by this project."
+ }
+ ],
+ "content": "Using well-known and industry recognised NPM packages"
+ },
+ {
+ "id": "e06394d3-7cc6-45a7-b9cc-d3ec621f8957",
+ "numericId": 11,
+ "displayOrder": 11,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "[GitHub DependaBot security updates](https://docs.github.com/en/code-security/dependabot/dependabot-security-updates/configuring-dependabot-security-updates]) documentation"
+ }
+ ],
+ "content": "GitHub Dependabot security updates are configured on the maintainers GitHub repository"
+ },
+ {
+ "id": "c745ffca-00d6-459b-8438-640ec7293e6d",
+ "numericId": 10,
+ "displayOrder": 10,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "GitHub branch protection rules of `main` to ensure that a manual code review by maintainers is required before merge"
+ }
+ ],
+ "content": "GitHub branch protection rules"
+ },
+ {
+ "id": "10cf6702-9cf9-4dd6-b43d-17a2302b590c",
+ "numericId": 9,
+ "displayOrder": 9,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "[Access permissions on GitHub](https://docs.github.com/en/get-started/learning-about-github/access-permissions-on-github) documentation"
+ }
+ ],
+ "content": "Access control to GitHub organization and repository"
+ },
+ {
+ "id": "b255d500-bddf-47dd-acdb-fb9f3edc384c",
+ "numericId": 8,
+ "displayOrder": 8,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "- Implementation in code - [PDK Static Website construct - CloudFront distribution configuration](https://github.com/aws/aws-prototyping-sdk/blob/mainline/packages/static-website/src/static-website.ts#L215)\n\n- Implementation in code - [PDK Static Website construct - S3 Bucket configuration](https://github.com/aws/aws-prototyping-sdk/blob/mainline/packages/static-website/src/static-website.ts#L152)"
+ }
+ ],
+ "content": "TLS provided by CloudFront"
+ },
+ {
+ "id": "f47a2b78-79b1-4371-acc7-e977116b0a90",
+ "numericId": 7,
+ "displayOrder": 7,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [threat composer UI warning](https://github.com/awslabs/threat-composer/blob/3e8f5547aed7f7d969dcdf84a9d89cd4fde4a150/packages/threat-composer/src/components/workspaces/FileImport/index.tsx#L136)"
+ }
+ ],
+ "content": "UI import warning 'Only import from trusted sources'"
+ },
+ {
+ "id": "135863f3-64e9-4fd7-8d71-4f141c42747f",
+ "numericId": 6,
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [Markdown configuration](https://github.com/awslabs/threat-composer/commit/3e5be78ad1d1fa7d82fabd8069bad9bfa97b3a5e)"
+ }
+ ],
+ "content": "Disable HTML support on Markdown viewer"
+ },
+ {
+ "id": "d3f3befb-b64a-4311-8abf-a19f853a8eed",
+ "numericId": 5,
+ "displayOrder": 5,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [sanitizeHtml](https://github.com/awslabs/threat-composer/blob/3e8f5547aed7f7d969dcdf84a9d89cd4fde4a150/packages/threat-composer/src/utils/sanitizeHtml/index.ts#L16)"
+ }
+ ],
+ "content": "HTML sanitisation on import"
+ },
+ {
+ "id": "ba6b8839-3629-423f-8205-76d0d5a69016",
+ "numericId": 4,
+ "displayOrder": 4,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "AWS Shield Standard provides protection for all AWS customers against common and most frequently occurring Infrastructure (layer 3 and 4) attacks like SYN/UDP Floods, Reflection attacks, and others to support high availability of your applications on AWS. [more](https://docs.aws.amazon.com/waf/latest/developerguide/ddos-standard-summary.html)"
+ }
+ ],
+ "content": "AWS Shield Standard (on CloudFront)"
+ },
+ {
+ "id": "84c1299c-27ba-4829-a1a1-c5368a2486a6",
+ "numericId": 2,
+ "displayOrder": 2,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [PDK Static Website construct - OAI configuration](https://github.com/aws/aws-prototyping-sdk/blob/mainline/packages/static-website/src/static-website.ts#LL213C13-L213C33)"
+ }
+ ],
+ "content": "CloudFront OAI/OAC"
+ },
+ {
+ "id": "00b07ef0-097c-4082-9a69-55abc5c111e6",
+ "numericId": 1,
+ "displayOrder": 1,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "Implementation in code - [PDK Static Website construct - S3 Block public access](https://github.com/aws/aws-prototyping-sdk/blob/mainline/packages/static-website/src/static-website.ts#L159)"
+ }
+ ],
+ "content": "S3 Block Public Access"
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "type": "Threat",
+ "assumptionId": "05ac2926-5464-4071-bb58-e1a56c7ba8f0",
+ "linkedId": "fb2ff978-1311-4061-a299-0a7f3421e037"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c9b4cc31-3ac4-40f8-82f6-131792f48949",
+ "linkedId": "6fb7ef9a-175d-49f1-b85b-652b668581d4"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "b7d98386-906f-40ca-a631-194bc3ac709d",
+ "linkedId": "a394f5d6-9479-40cc-ae88-b1911a269f0a"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "08265978-d2c3-4ced-a530-20e4c84692b8",
+ "linkedId": "a84e701e-b370-44e4-aa1a-2c5e6edcf926"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "930797fb-0611-4f66-b2ab-370ec373c852",
+ "linkedId": "79bc7dc9-1038-4516-8f61-0a724bf4776d"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c1cf71ce-6162-44cd-ae44-b55a81b74675",
+ "linkedId": "1e829518-ca27-454a-a759-6720349b7d6b"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c1cf71ce-6162-44cd-ae44-b55a81b74675",
+ "linkedId": "661ab87a-a978-4442-9277-1318906a6d09"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c1cf71ce-6162-44cd-ae44-b55a81b74675",
+ "linkedId": "19b98451-70ee-4814-af80-c1293b90cb9f"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c9b4cc31-3ac4-40f8-82f6-131792f48949",
+ "linkedId": "16c0b3de-a08f-418b-9969-9eb905ddd4e8"
+ },
+ {
+ "type": "Mitigation",
+ "assumptionId": "d2268999-20a5-4d5b-a36a-49d6bb7d1bcc",
+ "linkedId": "5c356be5-1c34-443c-abe2-8d7f7d3c210a"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "08265978-d2c3-4ced-a530-20e4c84692b8",
+ "linkedId": "99648023-86fe-4b7d-829a-7011be545fa4"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "08265978-d2c3-4ced-a530-20e4c84692b8",
+ "linkedId": "a0f523b0-bbc0-4e6a-9064-500af1f3836e"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "24057bbe-01ff-43cd-82f8-cb6d423cb070",
+ "linkedId": "fb2ff978-1311-4061-a299-0a7f3421e037"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c9b4cc31-3ac4-40f8-82f6-131792f48949",
+ "linkedId": "6529dd8d-0b40-4eec-b4c4-5ee4f06619c0"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "d2268999-20a5-4d5b-a36a-49d6bb7d1bcc",
+ "linkedId": "1649e8e4-f100-45f2-8c13-51dafc8a8251"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "d2268999-20a5-4d5b-a36a-49d6bb7d1bcc",
+ "linkedId": "1e829518-ca27-454a-a759-6720349b7d6b"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "d2268999-20a5-4d5b-a36a-49d6bb7d1bcc",
+ "linkedId": "661ab87a-a978-4442-9277-1318906a6d09"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "930797fb-0611-4f66-b2ab-370ec373c852",
+ "linkedId": "7b49bdbd-25e9-446f-b44a-46fa2807e182"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "c1cf71ce-6162-44cd-ae44-b55a81b74675",
+ "linkedId": "7b49bdbd-25e9-446f-b44a-46fa2807e182"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "eafa2db3-85b5-4b58-ae2a-1cec604c451a",
+ "linkedId": "7b49bdbd-25e9-446f-b44a-46fa2807e182"
+ },
+ {
+ "type": "Threat",
+ "assumptionId": "08265978-d2c3-4ced-a530-20e4c84692b8",
+ "linkedId": "f29b9a9f-94d9-4d6e-be3e-5fffb874d2a6"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Mitigation"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Mitigation"
+ },
+ {
+ "linkedId": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Mitigation"
+ },
+ {
+ "linkedId": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Mitigation"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "mitigationId": "00b07ef0-097c-4082-9a69-55abc5c111e6",
+ "linkedId": "16c0b3de-a08f-418b-9969-9eb905ddd4e8"
+ },
+ {
+ "mitigationId": "84c1299c-27ba-4829-a1a1-c5368a2486a6",
+ "linkedId": "16c0b3de-a08f-418b-9969-9eb905ddd4e8"
+ },
+ {
+ "mitigationId": "ba6b8839-3629-423f-8205-76d0d5a69016",
+ "linkedId": "2ffb1f47-743e-4cbc-94e6-62d10817acc0"
+ },
+ {
+ "mitigationId": "ba6b8839-3629-423f-8205-76d0d5a69016",
+ "linkedId": "cbba3276-5ce2-47e0-bc77-aebc162c519f"
+ },
+ {
+ "mitigationId": "d3f3befb-b64a-4311-8abf-a19f853a8eed",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "135863f3-64e9-4fd7-8d71-4f141c42747f",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "f47a2b78-79b1-4371-acc7-e977116b0a90",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "135863f3-64e9-4fd7-8d71-4f141c42747f",
+ "linkedId": "0a264de2-c2e5-45c0-9075-0d437b9defa0"
+ },
+ {
+ "mitigationId": "b255d500-bddf-47dd-acdb-fb9f3edc384c",
+ "linkedId": "fb2ff978-1311-4061-a299-0a7f3421e037"
+ },
+ {
+ "mitigationId": "10cf6702-9cf9-4dd6-b43d-17a2302b590c",
+ "linkedId": "fec8777c-68b3-4569-84ce-9b9b1b9f5e9c"
+ },
+ {
+ "mitigationId": "c745ffca-00d6-459b-8438-640ec7293e6d",
+ "linkedId": "fec8777c-68b3-4569-84ce-9b9b1b9f5e9c"
+ },
+ {
+ "mitigationId": "e06394d3-7cc6-45a7-b9cc-d3ec621f8957",
+ "linkedId": "1649e8e4-f100-45f2-8c13-51dafc8a8251"
+ },
+ {
+ "mitigationId": "5c356be5-1c34-443c-abe2-8d7f7d3c210a",
+ "linkedId": "1e829518-ca27-454a-a759-6720349b7d6b"
+ },
+ {
+ "mitigationId": "5c356be5-1c34-443c-abe2-8d7f7d3c210a",
+ "linkedId": "661ab87a-a978-4442-9277-1318906a6d09"
+ },
+ {
+ "mitigationId": "5c356be5-1c34-443c-abe2-8d7f7d3c210a",
+ "linkedId": "19b98451-70ee-4814-af80-c1293b90cb9f"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "6529dd8d-0b40-4eec-b4c4-5ee4f06619c0"
+ },
+ {
+ "mitigationId": "0cdff113-4816-496d-aa0d-36466c7331c0",
+ "linkedId": "6529dd8d-0b40-4eec-b4c4-5ee4f06619c0"
+ },
+ {
+ "mitigationId": "f4a2c3a8-f5d1-4302-aba3-4b1d715794be",
+ "linkedId": "fb2ff978-1311-4061-a299-0a7f3421e037"
+ },
+ {
+ "mitigationId": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "linkedId": "0a264de2-c2e5-45c0-9075-0d437b9defa0"
+ },
+ {
+ "mitigationId": "3f93baae-0997-4fe0-9d9d-1b10a1ba7973",
+ "linkedId": "0a264de2-c2e5-45c0-9075-0d437b9defa0"
+ },
+ {
+ "mitigationId": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "3f93baae-0997-4fe0-9d9d-1b10a1ba7973",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "c00b1a9a-6d7a-41e2-9697-7d41244b5990",
+ "linkedId": "16c0b3de-a08f-418b-9969-9eb905ddd4e8"
+ },
+ {
+ "mitigationId": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "linkedId": "3793c287-f467-4227-9e25-5c53d0e09cec"
+ },
+ {
+ "mitigationId": "baa86fce-6e2a-406b-a10e-79c30cf94589",
+ "linkedId": "782adefc-b480-4e8f-83ee-64f6d680df0a"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "3793c287-f467-4227-9e25-5c53d0e09cec"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "7b49bdbd-25e9-446f-b44a-46fa2807e182"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "782adefc-b480-4e8f-83ee-64f6d680df0a"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "1649e8e4-f100-45f2-8c13-51dafc8a8251"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc"
+ },
+ {
+ "mitigationId": "cda934f6-4148-46cb-8658-5c3f86735a1b",
+ "linkedId": "6fb7ef9a-175d-49f1-b85b-652b668581d4"
+ },
+ {
+ "mitigationId": "135863f3-64e9-4fd7-8d71-4f141c42747f",
+ "linkedId": "a0f523b0-bbc0-4e6a-9064-500af1f3836e"
+ },
+ {
+ "mitigationId": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "linkedId": "a0f523b0-bbc0-4e6a-9064-500af1f3836e"
+ },
+ {
+ "mitigationId": "0ce4e65d-c96c-46b1-988d-57016036bc12",
+ "linkedId": "2a1052e8-fa0a-499a-9f41-7ae7f860b75e"
+ },
+ {
+ "mitigationId": "135863f3-64e9-4fd7-8d71-4f141c42747f",
+ "linkedId": "2a1052e8-fa0a-499a-9f41-7ae7f860b75e"
+ },
+ {
+ "mitigationId": "3f93baae-0997-4fe0-9d9d-1b10a1ba7973",
+ "linkedId": "2a1052e8-fa0a-499a-9f41-7ae7f860b75e"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "bdef5b69-e690-4c9c-bfc1-960390779d3b"
+ }
+ ],
+ "threats": [
+ {
+ "id": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "numericId": 23,
+ "statement": "A lorem ipsum lorem ipsum can lorem ipsum, which leads to lorem ipsum, negatively impacting lorem ipsum",
+ "status": "threatResolved",
+ "threatSource": "lorem ipsum",
+ "prerequisites": "lorem ipsum",
+ "threatAction": "lorem ipsum",
+ "threatImpact": "lorem ipsum",
+ "impactedAssets": [
+ "lorem ipsum"
+ ],
+ "displayOrder": 23,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "S",
+ "T",
+ "R",
+ "I",
+ "D",
+ "E"
+ ]
+ },
+ {
+ "key": "Comments",
+ "value": "lorem ipsum. lorem ipsum lorem ipsum"
+ }
+ ],
+ "tags": [
+ "CWE-156",
+ "CVE-45",
+ "lorem ipsum"
+ ]
+ },
+ {
+ "id": "f29b9a9f-94d9-4d6e-be3e-5fffb874d2a6",
+ "numericId": 22,
+ "displayOrder": 22,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Medium"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ }
+ ],
+ "tags": [
+ "Social engineering"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "that is able to trick a user into installing a malicous userscript extension (e.g. tampermonkey, browser extension)",
+ "threatAction": "read the contents of local browser storage",
+ "threatImpact": "the exfiltration of the contents of browser storage to an endpoint controlled by the actor",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "application metadata",
+ "threats",
+ "mitigations",
+ "assumptions"
+ ],
+ "statement": "A threat actor that is able to trick a user into installing a malicous userscript extension (e.g. tampermonkey, browser extension) can read the contents of local browser storage, which leads to the exfiltration of the contents of browser storage to an endpoint controlled by the actor, resulting in reduced confidentiality of application metadata, threats, mitigations and assumptions",
+ "status": "threatResolvedNotUseful"
+ },
+ {
+ "id": "2a1052e8-fa0a-499a-9f41-7ae7f860b75e",
+ "numericId": 21,
+ "displayOrder": 21,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Medium"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "that is able to target a user already using a benign userscript extension (e.g. tampermonkey) that integrates directly with local browser storage for quickly viewing a Threat Composer export",
+ "threatAction": "trick them into opening a malicious threat model that contains script tags (or similar)",
+ "threatImpact": "the exfiltration of the contents of browser storage via XSS due to the extension bypassing Threat Composers import validation and sanitisation protection",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "application metadata",
+ "threats",
+ "mitigations",
+ "assumptions"
+ ],
+ "statement": "A threat actor that is able to target a user already using a benign userscript extension (e.g. tampermonkey) that integrates directly with local browser storage for quickly viewing a Threat Composer export can trick them into opening a malicious threat model that contains script tags (or similar), which leads to the exfiltration of the contents of browser storage via XSS due to the extension bypassing Threat Composers import validation and sanitisation protection, resulting in reduced confidentiality of application metadata, threats, mitigations and assumptions",
+ "status": "threatIdentified"
+ },
+ {
+ "id": "7b49bdbd-25e9-446f-b44a-46fa2807e182",
+ "numericId": 20,
+ "displayOrder": 20,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Low"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ }
+ ],
+ "tags": [
+ "Risk Accepted"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with knowledge of a browser image render vulnerability",
+ "threatAction": "trick a user into importing a malicious JSON file containing malicious BASE64 images",
+ "threatImpact": "exfiltration the contents of local storage to and end-point controlled by the actor",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor with knowledge of a browser image render vulnerability can trick a user into importing a malicious JSON file containing malicious BASE64 images, which leads to exfiltration the contents of local storage to and end-point controlled by the actor, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata",
+ "status": "threatResolved"
+ },
+ {
+ "id": "782adefc-b480-4e8f-83ee-64f6d680df0a",
+ "numericId": 19,
+ "displayOrder": 19,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Medium"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "D"
+ ]
+ }
+ ],
+ "tags": [
+ "DoS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "that can trick a user into importing a JSON file",
+ "threatAction": "input a unexpected data schema",
+ "threatImpact": "the user being unable to use the tool, until they clear the local data (or use a different browser)",
+ "impactedGoal": [],
+ "impactedAssets": [],
+ "statement": "A threat actor that can trick a user into importing a JSON file can input an unexpected data schema, which leads to the user being unable to use the tool, until they clear the local data (or use a different browser)"
+ },
+ {
+ "id": "a0f523b0-bbc0-4e6a-9064-500af1f3836e",
+ "numericId": 18,
+ "displayOrder": 18,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "T"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with access the browser DOM (e.g. via Dev Tools)",
+ "threatAction": "disable client-side input validation regex",
+ "threatImpact": "allowing them to demonstrate a Cross-site Script (XSS) vulnerability",
+ "impactedGoal": [
+ "integrity"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor with access the browser DOM (e.g. via Dev Tools) can disable client-side input validation regex, which leads to allowing them to demonstrate a Cross-site Script (XSS) vulnerability, resulting in reduced integrity of threat composer"
+ },
+ {
+ "id": "99648023-86fe-4b7d-829a-7011be545fa4",
+ "numericId": 17,
+ "displayOrder": 17,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Low"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "D"
+ ]
+ }
+ ],
+ "tags": [
+ "DoS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with access the browser DOM (e.g. via Dev Tools)",
+ "threatAction": "can input a datatype not expected by the code",
+ "impactedGoal": [
+ "availability"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor with access the browser DOM (e.g. via Dev Tools) can can input a datatype not expected by the code, resulting in reduced availability of threat composer"
+ },
+ {
+ "id": "6529dd8d-0b40-4eec-b4c4-5ee4f06619c0",
+ "numericId": 16,
+ "displayOrder": 16,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ }
+ ],
+ "threatSource": "valid user",
+ "prerequisites": "who has forked and modified the source code to include their own data (e.g. additional example threat statements)",
+ "threatAction": "deploy Threat composer without network restrictions or authentication",
+ "threatImpact": "discovery of the additional data by an adversary",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "statement": "A valid user who has forked and modified the source code to include their own data (e.g. additional example threat statements) can deploy Threat composer without network restrictions or authentication, which leads to discovery of the additional data by an adversary, resulting in reduced confidentiality"
+ },
+ {
+ "id": "1649e8e4-f100-45f2-8c13-51dafc8a8251",
+ "numericId": 15,
+ "displayOrder": 15,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Medium"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "T"
+ ]
+ }
+ ],
+ "tags": [
+ "Supply Chain"
+ ],
+ "threatSource": "threat actor",
+ "threatAction": "take advantage of security vulnerability within a 3rd party package used by Threat Composer",
+ "threatImpact": "",
+ "impactedGoal": [
+ "integrity"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor can take advantage of security vulnerability within a 3rd party package used by Threat Composer, resulting in reduced integrity of threat composer"
+ },
+ {
+ "id": "fec8777c-68b3-4569-84ce-9b9b1b9f5e9c",
+ "numericId": 14,
+ "displayOrder": 14,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "I",
+ "T"
+ ]
+ }
+ ],
+ "tags": [
+ "Supply Chain"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "",
+ "threatAction": "raise a PR (Pull Request) on the source within the Threat Composer GitHub repo",
+ "threatImpact": "merging malicious code that exfiltrates user-supplied input to an end-point that they control",
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor can raise a PR (Pull Request) on the source within the Threat Composer GitHub repo, which leads to merging malicious code that exfiltrates user-supplied input to an end-point that they control, negatively impacting threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "0a264de2-c2e5-45c0-9075-0d437b9defa0",
+ "numericId": 13,
+ "displayOrder": 13,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "T"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Medium"
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "threatSource": "security researcher",
+ "threatAction": "provide malicious input (e.g. script tags) into the Threat Composer UI",
+ "threatImpact": "to them finding and demonstrating an XSS vulnerability which they make public via social media",
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A security researcher can provide malicious input (e.g. script tags) into the Threat Composer UI, which leads to to them finding and demonstrating an XSS vulnerability which they make public via social media, negatively impacting threat composer"
+ },
+ {
+ "id": "7820db4a-1043-4dfa-bbc2-59774bb1f2fc",
+ "numericId": 12,
+ "displayOrder": 12,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "High"
+ }
+ ],
+ "tags": [
+ "XSS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "that can trick a user into importing a malicious JSON file containing script tags (or similar)",
+ "threatAction": "exfiltrate the contents of local storage using XSS",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor that can trick a user into importing a malicious JSON file containing script tags (or similar) can exfiltrate the contents of local storage using XSS, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "19b98451-70ee-4814-af80-c1293b90cb9f",
+ "numericId": 9,
+ "displayOrder": 9,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "T"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "High"
+ }
+ ],
+ "tags": [
+ "Supply Chain"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "who has access to a dependent software package on a npm remote registry (e.g. yarnpkg.com, npmjs.com)",
+ "threatAction": "inject malicious code into the CI/CD process",
+ "threatImpact": "untrusted code running in a users browser",
+ "impactedGoal": [
+ "integrity"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor who has access to a dependent software package on a npm remote registry (e.g. yarnpkg.com, npmjs.com) can inject malicious code into the CI/CD process, which leads to untrusted code running in a users browser, resulting in reduced integrity of threat composer"
+ },
+ {
+ "id": "cbba3276-5ce2-47e0-bc77-aebc162c519f",
+ "numericId": 8,
+ "displayOrder": 8,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "D"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "DDoS"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with a network path to the CloudFront distribution",
+ "threatAction": "submit a large number of resource intensive requests",
+ "threatImpact": "unnecessary and/or excessive costs",
+ "impactedGoal": [
+ "economy"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor with a network path to the CloudFront distribution can submit a large number of resource intensive requests, which leads to unnecessary and/or excessive costs, resulting in reduced economy of threat composer"
+ },
+ {
+ "id": "2ffb1f47-743e-4cbc-94e6-62d10817acc0",
+ "numericId": 7,
+ "displayOrder": 7,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "D"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "DDoS"
+ ],
+ "threatSource": "threat actor",
+ "threatAction": "create or orchestrate a distributed denial of service attack against the CloudFront Distribution serving the static content",
+ "threatImpact": "to the web tool being unresponsive to callers",
+ "impactedGoal": [
+ "availability"
+ ],
+ "impactedAssets": [
+ "threat composer"
+ ],
+ "statement": "A threat actor can create or orchestrate a distributed denial of service attack against the CloudFront Distribution serving the static content, which leads to to the web tool being unresponsive to callers, resulting in reduced availability of threat composer"
+ },
+ {
+ "id": "a394f5d6-9479-40cc-ae88-b1911a269f0a",
+ "numericId": 6,
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "S"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "Social Engineering"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with possession of a similar domain name",
+ "threatAction": "trick our users into interacting with an illegitimate endpoint",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor with possession of a similar domain name can trick our users into interacting with an illegitimate endpoint, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "6fb7ef9a-175d-49f1-b85b-652b668581d4",
+ "numericId": 5,
+ "displayOrder": 5,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "T"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Medium"
+ }
+ ],
+ "tags": [
+ "Least Privilege"
+ ],
+ "threatSource": "external threat actor",
+ "prerequisites": "who has a sufficiently privileged IAM Principal in the AWS account",
+ "threatAction": "modify the configuration of the CloudFront distribution",
+ "threatImpact": "the distribution serving content from an origin that is unexpected or contains malicious content",
+ "impactedGoal": [
+ "integrity"
+ ],
+ "impactedAssets": [
+ "code running in the user's browser",
+ "application configuration"
+ ],
+ "statement": "An external threat actor who has a sufficiently privileged IAM Principal in the AWS account can modify the configuration of the CloudFront distribution, which leads to the distribution serving content from an origin that is unexpected or contains malicious content, resulting in reduced integrity of code running in the user's browser and application configuration"
+ },
+ {
+ "id": "16c0b3de-a08f-418b-9969-9eb905ddd4e8",
+ "numericId": 4,
+ "displayOrder": 4,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "High"
+ }
+ ],
+ "tags": [
+ "Least Privilege"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with write access to the objects hosted on the static asset S3 Bucket",
+ "threatAction": "modify the code",
+ "threatImpact": "exfiltration of user-supplied input to an attacker controlled endpoint",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor with write access to the objects hosted on the static asset S3 Bucket can modify the code, which leads to exfiltration of user-supplied input to an attacker controlled endpoint, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "79bc7dc9-1038-4516-8f61-0a724bf4776d",
+ "numericId": 3,
+ "displayOrder": 3,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "MiTB"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "who is in a person-in-the-browser position",
+ "threatAction": "read or modify locally stored user input",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor who is in a person-in-the-browser position can read or modify locally stored user input, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "a84e701e-b370-44e4-aa1a-2c5e6edcf926",
+ "numericId": 2,
+ "displayOrder": 2,
+ "metadata": [
+ {
+ "key": "STRIDE",
+ "value": [
+ "I"
+ ]
+ },
+ {
+ "key": "Priority",
+ "value": "Low"
+ }
+ ],
+ "tags": [
+ "MiTB"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "with local access to a web browser used by a valid user",
+ "threatAction": "read local storage",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor with local access to a web browser used by a valid user can read local storage, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ },
+ {
+ "id": "fb2ff978-1311-4061-a299-0a7f3421e037",
+ "numericId": 1,
+ "displayOrder": 1,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "Low"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "T",
+ "I"
+ ]
+ }
+ ],
+ "tags": [
+ "MiTM"
+ ],
+ "threatSource": "threat actor",
+ "prerequisites": "who is in a person-in-the-middle position between the User and the hosting endpoint",
+ "threatAction": "tamper with, or replace the downloaded client-side code",
+ "threatImpact": "to exfiltrating user-specified input to an attacker controlled endpoint",
+ "impactedGoal": [
+ "confidentiality"
+ ],
+ "impactedAssets": [
+ "threats",
+ "mitigations",
+ "assumptions",
+ "application metadata"
+ ],
+ "statement": "A threat actor who is in a person-in-the-middle position between the User and the hosting endpoint can tamper with, or replace the downloaded client-side code, which leads to to exfiltrating user-specified input to an attacker controlled endpoint, resulting in reduced confidentiality of threats, mitigations, assumptions and application metadata"
+ }
+ ]
+ }
\ No newline at end of file
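
Note on the fixture grammar: the statement fields above are assembled from the per-threat fields following Threat Composer's template, roughly "A {threatSource} {prerequisites} can {threatAction}, which leads to {threatImpact}, resulting in reduced {impactedGoal} of {impactedAssets}". Because the template already supplies "which leads to", an impact value that itself begins with "to" renders a doubled "to to" in the statement. A minimal sketch of that assembly, for illustration only (the helper name and exact template are assumptions, not Threat Composer's actual code):

    def build_statement(threat: dict) -> str:
        # Hypothetical reconstruction of the statement template implied by the
        # fixtures above; the real Threat Composer renderer may differ.
        parts = ["A", threat["threatSource"]]
        if threat.get("prerequisites"):
            parts.append(threat["prerequisites"])
        parts += ["can", threat["threatAction"]]
        statement = " ".join(parts)
        if threat.get("threatImpact"):
            statement += f", which leads to {threat['threatImpact']}"
        if threat.get("impactedGoal"):
            statement += ", resulting in reduced " + " and ".join(threat["impactedGoal"])
        if threat.get("impactedAssets"):
            assets = threat["impactedAssets"]
            joined = assets[0] if len(assets) == 1 else ", ".join(assets[:-1]) + " and " + assets[-1]
            statement += f" of {joined}"
        return statement
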
diff --git a/unittests/scans/threat_composer/threat_composer_no_threats_with_error.json b/unittests/scans/threat_composer/threat_composer_no_threats_with_error.json
new file mode 100644
index 00000000000..0641d4eca90
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_no_threats_with_error.json
@@ -0,0 +1,95 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ },
+ {
+ "id": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "numericId": 6,
+ "content": "lorem ipsum",
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ]
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ },
+ {
+ "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "numericId": 20,
+ "content": "lorem ipsum",
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 20
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Threat"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "bdef5b69-e690-4c9c-bfc1-960390779d3b"
+ }
+ ]
+ }
\ No newline at end of file
diff --git a/unittests/scans/threat_composer/threat_composer_one_threat.json b/unittests/scans/threat_composer/threat_composer_one_threat.json
new file mode 100644
index 00000000000..cd900f81e95
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_one_threat.json
@@ -0,0 +1,141 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ },
+ {
+ "id": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "numericId": 6,
+ "content": "lorem ipsum",
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ]
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ },
+ {
+ "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "numericId": 20,
+ "content": "lorem ipsum",
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 20
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Threat"
+ },
+ {
+ "type": "Mitigation",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "linkedId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "bdef5b69-e690-4c9c-bfc1-960390779d3b"
+ }
+ ],
+ "threats": [
+ {
+ "id": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "numericId": 23,
+ "statement": "A lorem ipsum lorem ipsum can lorem ipsum, which leads to lorem ipsum, negatively impacting lorem ipsum",
+ "threatSource": "lorem ipsum",
+ "prerequisites": "lorem ipsum",
+ "threatAction": "lorem ipsum",
+ "threatImpact": "lorem ipsum",
+ "impactedAssets": [
+ "lorem ipsum"
+ ],
+ "displayOrder": 23,
+ "metadata": [
+ {
+ "key": "Priority",
+ "value": "High"
+ },
+ {
+ "key": "STRIDE",
+ "value": [
+ "S",
+ "T",
+ "R",
+ "I",
+ "D",
+ "E"
+ ]
+ },
+ {
+ "key": "Comments",
+ "value": "lorem ipsum. lorem ipsum lorem ipsum"
+ }
+ ],
+ "tags": [
+ "CWE-156",
+ "CVE-45",
+ "lorem ipsum"
+ ]
+ }
+ ]
+ }
\ No newline at end of file
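
The assumptionLinks and mitigationLinks arrays in these fixtures attach assumptions and mitigations to a threat through its id, carried in each link as linkedId. A minimal sketch of resolving those references, using only field names present in the fixture (the function itself is illustrative):

    def linked_content(model: dict, threat_id: str) -> dict:
        # Resolve the assumption/mitigation content linked to one threat
        # via the linkedId references used in the fixtures above.
        assumptions = {a["id"]: a["content"] for a in model.get("assumptions", [])}
        mitigations = {m["id"]: m["content"] for m in model.get("mitigations", [])}
        return {
            "assumptions": [assumptions[link["assumptionId"]]
                            for link in model.get("assumptionLinks", [])
                            if link["linkedId"] == threat_id and link["assumptionId"] in assumptions],
            "mitigations": [mitigations[link["mitigationId"]]
                            for link in model.get("mitigationLinks", [])
                            if link["linkedId"] == threat_id and link["mitigationId"] in mitigations],
        }
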
diff --git a/unittests/scans/threat_composer/threat_composer_zero_threats.json b/unittests/scans/threat_composer/threat_composer_zero_threats.json
new file mode 100644
index 00000000000..8c2855acc8c
--- /dev/null
+++ b/unittests/scans/threat_composer/threat_composer_zero_threats.json
@@ -0,0 +1,97 @@
+{
+ "schema": 1,
+ "applicationInfo": {
+ "name": "Threat composer",
+ "description": ""
+ },
+ "architecture": {
+ "image": "",
+ "description": ""
+ },
+ "dataflow": {
+ "image": "",
+ "description": ""
+ },
+ "assumptions": [
+ {
+ "id": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "numericId": 7,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 7
+ },
+ {
+ "id": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "numericId": 6,
+ "content": "lorem ipsum",
+ "displayOrder": 6,
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ]
+ }
+ ],
+ "mitigations": [
+ {
+ "id": "bdef5b69-e690-4c9c-bfc1-960390779d3b",
+ "numericId": 21,
+ "content": "lorem ipsum",
+ "tags": [
+ "lorem ipsum"
+ ],
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 21
+ },
+ {
+ "id": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11",
+ "numericId": 20,
+ "content": "lorem ipsum",
+ "metadata": [
+ {
+ "key": "Comments",
+ "value": "lorem ipsum"
+ }
+ ],
+ "displayOrder": 20
+ }
+ ],
+ "assumptionLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "d8edcf30-5c76-49f7-a408-20e071bbea1c",
+ "type": "Threat"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "assumptionId": "2d2a1ddf-5bb8-4a55-8f60-e195bc0b4b90",
+ "type": "Threat"
+ }
+ ],
+ "mitigationLinks": [
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "11fb1c71-42f0-4004-89a7-09d8bf6f8b11"
+ },
+ {
+ "linkedId": "46db1eb4-a451-4d05-afe1-c695491e2387",
+ "mitigationId": "bdef5b69-e690-4c9c-bfc1-960390779d3b"
+ }
+ ],
+ "threats": [
+ ]
+ }
\ No newline at end of file
diff --git a/unittests/test_apiv2_scan_import_options.py b/unittests/test_apiv2_scan_import_options.py
index b7f802b3e5d..8d296ca20f2 100644
--- a/unittests/test_apiv2_scan_import_options.py
+++ b/unittests/test_apiv2_scan_import_options.py
@@ -30,7 +30,7 @@ def setUp(self):
test.save()
def import_zap_scan(self, upload_empty_scan=False):
- with open("tests/zap_sample.xml") as file:
+ with open("tests/zap_sample.xml", encoding="utf-8") as file:
if upload_empty_scan:
file = SimpleUploadedFile("zap_sample.xml", self.EMPTY_ZAP_SCAN.encode("utf-8"))
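
The encoding="utf-8" additions in this and the following files pin the text encoding that open() uses. Without an explicit encoding, Python falls back to locale.getpreferredencoding(False), so the same test can pass on a UTF-8 host and fail on one with a legacy locale such as cp1252. A small illustration:

    import locale

    # Default-encoded open() depends on the host locale:
    print(locale.getpreferredencoding(False))  # "UTF-8" on most Linux hosts, "cp1252" on some Windows hosts

    # Pinning the encoding makes test file I/O deterministic across platforms:
    with open("tests/zap_sample.xml", encoding="utf-8") as f:
        data = f.read()
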
diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py
index 2345af912f4..ef1d91a0d53 100644
--- a/unittests/test_deduplication_logic.py
+++ b/unittests/test_deduplication_logic.py
@@ -1044,7 +1044,7 @@ def test_hash_code_onetime(self):
self.assertEqual(finding_new.hash_code, None)
finding_new.save()
- self.assertTrue(finding_new.hash_code) # True -> not None
+ self.assertIsNotNone(finding_new.hash_code)
hash_code_at_creation = finding_new.hash_code
finding_new.title = "new_title"
@@ -1111,17 +1111,17 @@ def test_hash_code_without_dedupe(self):
finding_new.save(dedupe_option=False)
# save skips hash_code generation if dedupe_option==False
- self.assertFalse(finding_new.hash_code)
+ self.assertIsNone(finding_new.hash_code)
finding_new.save(dedupe_option=True)
- self.assertTrue(finding_new.hash_code)
+ self.assertIsNotNone(finding_new.hash_code)
finding_new, _finding_124 = self.copy_and_reset_finding(id=124)
finding_new.save()
# by default hash_code should be generated
- self.assertTrue(finding_new.hash_code)
+ self.assertIsNotNone(finding_new.hash_code)
# # utility methods
@@ -1248,11 +1248,11 @@ def assert_finding(self, finding, not_pk=None, duplicate=False, duplicate_findin
self.assertEqual(finding.duplicate, duplicate)
if not duplicate:
- self.assertFalse(finding.duplicate_finding) # False -> None
+ self.assertIsNone(finding.duplicate_finding)
if duplicate_finding_id:
logger.debug("asserting that finding %i is a duplicate of %i", finding.id if finding.id is not None else "None", duplicate_finding_id if duplicate_finding_id is not None else "None")
- self.assertTrue(finding.duplicate_finding) # True -> not None
+ self.assertIsNotNone(finding.duplicate_finding)
self.assertEqual(finding.duplicate_finding.id, duplicate_finding_id)
if not_hash_code:
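
The assertTrue/assertFalse to assertIsNotNone/assertIsNone swaps above assert intent rather than truthiness: assertTrue cannot distinguish None from other falsy values such as "" or 0, and on failure it only reports that the value was not true. A small illustration:

    import unittest

    class TruthinessDemo(unittest.TestCase):
        def test_none_vs_falsy(self):
            hash_code = ""                   # falsy, but not None
            self.assertFalse(hash_code)      # passes, yet says nothing about None
            self.assertIsNotNone(hash_code)  # passes, and states the actual invariant
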
diff --git a/unittests/test_endpoint_meta_import.py b/unittests/test_endpoint_meta_import.py
index 4b4237c9d77..d159dbd4f2a 100644
--- a/unittests/test_endpoint_meta_import.py
+++ b/unittests/test_endpoint_meta_import.py
@@ -206,7 +206,7 @@ def endpoint_meta_import_ui(self, product, payload):
def endpoint_meta_import_scan_with_params_ui(self, filename, product=1, create_endpoints=True,
create_tags=True, create_dojo_meta=True, expected_http_status_code=201):
- with open(get_unit_tests_path() + "/" + filename) as testfile:
+ with open(get_unit_tests_path() + "/" + filename, encoding="utf-8") as testfile:
payload = {
"create_endpoints": create_endpoints,
"create_tags": create_tags,
diff --git a/unittests/test_factory.py b/unittests/test_factory.py
index 31e793e03e7..4f268dee0b4 100644
--- a/unittests/test_factory.py
+++ b/unittests/test_factory.py
@@ -15,25 +15,25 @@ class TestFactory(DojoTestCase):
def test_get_parser(self):
with self.subTest(scan_type="Acunetix Scan"):
scan_type = "Acunetix Scan"
- testfile = open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml")
+ testfile = open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml", encoding="utf-8")
parser = get_parser(scan_type)
parser.get_findings(testfile, Test())
testfile.close()
with self.subTest(scan_type="Anchore Engine Scan"):
scan_type = "Anchore Engine Scan"
- testfile = open(get_unit_tests_path() + "/scans/anchore_engine/one_vuln.json")
+ testfile = open(get_unit_tests_path() + "/scans/anchore_engine/one_vuln.json", encoding="utf-8")
parser = get_parser(scan_type)
parser.get_findings(testfile, Test())
testfile.close()
with self.subTest(scan_type="Tenable Scan"):
scan_type = "Tenable Scan"
- testfile = open(get_unit_tests_path() + "/scans/tenable/nessus/nessus_v_unknown.xml")
+ testfile = open(get_unit_tests_path() + "/scans/tenable/nessus/nessus_v_unknown.xml", encoding="utf-8")
parser = get_parser(scan_type)
parser.get_findings(testfile, Test())
testfile.close()
with self.subTest(scan_type="ZAP Scan"):
scan_type = "ZAP Scan"
- testfile = open(get_unit_tests_path() + "/scans/zap/some_2.9.0.xml")
+ testfile = open(get_unit_tests_path() + "/scans/zap/some_2.9.0.xml", encoding="utf-8")
parser = get_parser(scan_type)
parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index 5bbeaccb6d4..1015f206d7a 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -1115,7 +1115,7 @@ def test_import_reimport_keep_false_positive_and_out_of_scope(self):
active_findings_before = self.get_test_findings_api(test_id, active=True)
self.assert_finding_count_json(0, active_findings_before)
- with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1):
+ with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1, untouched=1):
reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename)
self.assertEqual(reimport0["test"], test_id)
@@ -1478,6 +1478,24 @@ def test_import_history_reactivated_and_untouched_findings_do_not_mix(self):
self.reimport_scan_with_params(test_id, self.generic_import_1, scan_type=self.scan_type_generic)
# Passing this test means an exception does not occur
+ def test_dynamic_parsing_field_set_to_true(self):
+ # Test that a generic finding import creates a new test type
+ # with the dynamically_generated field set to True
+ import0 = self.import_scan_with_params(self.generic_import_1, scan_type=self.scan_type_generic)
+ test_id = import0["test"]
+ # Fetch the test from the DB to access the test type
+ test = Test.objects.get(id=test_id)
+ self.assertTrue(test.test_type.dynamically_generated)
+
+ def test_dynamic_parsing_field_set_to_false(self):
+ # Test that a ZAP import does not create a new test type
+ # and that the dynamically_generated field is set to False
+ import0 = self.import_scan_with_params(self.zap_sample0_filename)
+ test_id = import0["test"]
+ # Fetch the test from the DB to access the test type
+ test = Test.objects.get(id=test_id)
+ self.assertFalse(test.test_type.dynamically_generated)
+
class ImportReimportTestAPI(DojoAPITestCase, ImportReimportMixin):
fixtures = ["dojo_testdata.json"]
@@ -1820,7 +1838,7 @@ def import_scan_with_params_ui(self, filename, scan_type="ZAP Scan", engagement=
elif not verified:
verifiedPayload = "force_to_false"
- with open(get_unit_tests_path() + filename) as testfile:
+ with open(get_unit_tests_path() + filename, encoding="utf-8") as testfile:
payload = {
"minimum_severity": minimum_severity,
"active": activePayload,
@@ -1860,7 +1878,7 @@ def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan",
if not verified:
verifiedPayload = "force_to_false"
- with open(get_unit_tests_path() + filename) as testfile:
+ with open(get_unit_tests_path() + filename, encoding="utf-8") as testfile:
payload = {
"minimum_severity": minimum_severity,
"active": activePayload,
diff --git a/unittests/test_importers_closeold.py b/unittests/test_importers_closeold.py
index b5671d257b0..2a9d82978f2 100644
--- a/unittests/test_importers_closeold.py
+++ b/unittests/test_importers_closeold.py
@@ -37,19 +37,19 @@ def test_close_old_same_engagement(self):
"scan_type": scan_type,
}
# Import first test
- with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan:
importer = DefaultImporter(close_old_findings=False, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
self.assertEqual(4, len_new_findings)
self.assertEqual(0, len_closed_findings)
# Import same test, should close no findings
- with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan:
importer = DefaultImporter(close_old_findings=True, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
self.assertEqual(4, len_new_findings)
self.assertEqual(0, len_closed_findings)
# Import test with only one finding. Remaining findings should close
- with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+") as single_finding_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+", encoding="utf-8") as single_finding_scan:
importer = DefaultImporter(close_old_findings=True, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(single_finding_scan)
self.assertEqual(1, len_new_findings)
@@ -95,19 +95,19 @@ def test_close_old_same_product_scan(self):
"scan_type": scan_type,
}
# Import first test
- with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan:
importer = DefaultImporter(engagement=engagement1, close_old_findings=False, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
self.assertEqual(4, len_new_findings)
self.assertEqual(0, len_closed_findings)
# Import same test, should close no findings
- with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+") as many_findings_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan:
importer = DefaultImporter(engagement=engagement2, close_old_findings=True, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan)
self.assertEqual(4, len_new_findings)
self.assertEqual(0, len_closed_findings)
# Import test with only one finding. Remaining findings should close
- with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+") as single_finding_scan:
+ with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+", encoding="utf-8") as single_finding_scan:
importer = DefaultImporter(engagement=engagement3, close_old_findings=True, **import_options)
_, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(single_finding_scan)
self.assertEqual(1, len_new_findings)
diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py
index 66c98055e58..41baf6d78e9 100644
--- a/unittests/test_importers_importer.py
+++ b/unittests/test_importers_importer.py
@@ -39,7 +39,7 @@
class TestDojoDefaultImporter(DojoTestCase):
def test_parse_findings(self):
- with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml") as scan:
+ with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml", encoding="utf-8") as scan:
scan_type = "Acunetix Scan"
user, _created = User.objects.get_or_create(username="admin")
product_type, _created = Product_Type.objects.get_or_create(name="test")
@@ -80,7 +80,7 @@ def test_parse_findings(self):
self.assertIn(finding.numerical_severity, ["S0", "S1", "S2", "S3", "S4"])
def test_import_scan(self):
- with open(get_unit_tests_path() + "/scans/sarif/spotbugs.sarif") as scan:
+ with open(get_unit_tests_path() + "/scans/sarif/spotbugs.sarif", encoding="utf-8") as scan:
scan_type = SarifParser().get_scan_types()[0] # SARIF format implement the new method
user, _ = User.objects.get_or_create(username="admin")
product_type, _ = Product_Type.objects.get_or_create(name="test2")
@@ -114,7 +114,7 @@ def test_import_scan(self):
self.assertEqual(0, len_closed_findings)
def test_import_scan_without_test_scan_type(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json") as scan:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as scan:
# GitLabSastParser implements get_tests but report has no scanner name
scan_type = GitlabSastParser().get_scan_types()[0]
user, _ = User.objects.get_or_create(username="admin")
diff --git a/unittests/test_jira_config_engagement.py b/unittests/test_jira_config_engagement.py
index c8697994980..6db30e089a3 100644
--- a/unittests/test_jira_config_engagement.py
+++ b/unittests/test_jira_config_engagement.py
@@ -126,10 +126,10 @@ def get_engagement_with_empty_jira_project_data(self, engagement):
}
def get_expected_redirect_engagement(self, engagement):
- return "/engagement/%i" % engagement.id
+ return f"/engagement/{engagement.id}"
def get_expected_redirect_edit_engagement(self, engagement):
- return "/engagement/edit/%i" % engagement.id
+ return f"/engagement/edit/{engagement.id}"
def add_engagement_jira(self, data, expect_redirect_to=None, expect_200=False):
response = self.client.get(reverse("new_eng_for_prod", args=(self.product_id, )))
diff --git a/unittests/test_jira_config_product.py b/unittests/test_jira_config_product.py
index 41c9ffdc96c..ff72f34993a 100644
--- a/unittests/test_jira_config_product.py
+++ b/unittests/test_jira_config_product.py
@@ -85,7 +85,7 @@ def test_add_jira_instance_unknown_host(self):
self.assertEqual(200, response.status_code)
content = response.content.decode("utf-8")
# debian throws 'Name or service not known' error and alpine 'Name does not resolve'
- self.assertTrue(("Name or service not known" in content) or ("Name does not resolve" in content))
+ self.assertTrue(("Name or service not known" in content) or ("Name does not resolve" in content), content)
# test raw connection error
with self.assertRaises(requests.exceptions.RequestException):
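
Passing content as the second argument to assertTrue above uses unittest's optional msg parameter, so a failing run echoes the response body instead of the bare "False is not true". For illustration:

    import unittest

    class MsgDemo(unittest.TestCase):
        def test_msg_argument(self):
            content = "Name does not resolve"
            # The second argument is unittest's msg: echoed verbatim if the assertion fails.
            self.assertTrue(("Name or service not known" in content) or ("Name does not resolve" in content), content)
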
diff --git a/unittests/test_notifications.py b/unittests/test_notifications.py
index 02dcb31494b..7f5a2b76a4c 100644
--- a/unittests/test_notifications.py
+++ b/unittests/test_notifications.py
@@ -1,30 +1,47 @@
+import datetime
+import logging
from unittest.mock import patch
from auditlog.context import set_actor
+from crum import impersonate
from django.test import override_settings
from django.urls import reverse
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient, APITestCase
+import dojo.notifications.helper as notifications_helper
+from dojo import __version__ as dd_version
from dojo.models import (
DEFAULT_NOTIFICATION,
Alerts,
Dojo_User,
Endpoint,
Engagement,
+ Finding,
Finding_Group,
+ Notification_Webhooks,
Notifications,
Product,
Product_Type,
+ System_Settings,
Test,
Test_Type,
User,
+ get_current_datetime,
+)
+from dojo.notifications.helper import (
+ create_notification,
+ send_alert_notification,
+ send_webhooks_notification,
+ webhook_reactivation,
+ webhook_status_cleanup,
)
-from dojo.notifications.helper import create_notification, send_alert_notification
from .dojo_test_case import DojoTestCase
+logger = logging.getLogger(__name__)
+
class TestNotifications(DojoTestCase):
fixtures = ["dojo_testdata.json"]
@@ -386,3 +403,467 @@ def test_auditlog_on(self, mock):
prod_type = Product_Type.objects.create(name="notif prod type API")
self.client.delete(reverse("product_type-detail", args=(prod_type.pk,)), format="json")
self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type API" was deleted by admin')
+
+
+class TestNotificationWebhooks(DojoTestCase):
+ fixtures = ["dojo_testdata.json"]
+
+ def run(self, result=None):
+ testuser = User.objects.get(username="admin")
+ testuser.usercontactinfo.block_execution = True
+ testuser.save()
+
+ # Unit tests run without any user, which would push actions like dedupe into the celery process.
+ # That doesn't work in unit tests, because they use an in-memory sqlite database that celery can't see.
+ # So we run the test under the admin user context and set block_execution to True.
+ with impersonate(testuser):
+ super().run(result)
+
+ def setUp(self):
+ self.sys_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.url_base = "http://webhook.endpoint:8080"
+
+ def test_missing_system_webhook(self):
+ # The test data contains 2 entries, but we need to test a missing definition
+ Notification_Webhooks.objects.all().delete()
+ with self.assertLogs("dojo.notifications.helper", level="INFO") as cm:
+ send_webhooks_notification(event="dummy")
+ self.assertIn("URLs for Webhooks not configured: skipping system notification", cm.output[0])
+
+ def test_missing_personal_webhook(self):
+ # The test data contains 2 entries, but we need to test a missing definition
+ Notification_Webhooks.objects.all().delete()
+ with self.assertLogs("dojo.notifications.helper", level="INFO") as cm:
+ send_webhooks_notification(event="dummy", user=Dojo_User.objects.get(username="admin"))
+ self.assertIn("URLs for Webhooks not configured for user '(admin)': skipping user notification", cm.output[0])
+
+ def test_system_webhook_inactive(self):
+ self.sys_wh.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT
+ self.sys_wh.save()
+ with self.assertLogs("dojo.notifications.helper", level="INFO") as cm:
+ send_webhooks_notification(event="dummy")
+ self.assertIn("URL for Webhook 'My webhook endpoint' is not active: Permanently inactive (inactive_permanent)", cm.output[0])
+
+ def test_system_webhook_successful(self):
+ with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm:
+ send_webhooks_notification(event="dummy")
+ self.assertIn("Message sent to endpoint 'My webhook endpoint' successfully.", cm.output[-1])
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE)
+ self.assertIsNone(updated_wh.first_error)
+ self.assertIsNone(updated_wh.last_error)
+
+ def test_system_webhook_4xx(self):
+ self.sys_wh.url = f"{self.url_base}/status/400"
+ self.sys_wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+ self.assertIn("Error when sending message to Webhooks 'My webhook endpoint' (status: 400)", cm.output[-1])
+
+ updated_wh = Notification_Webhooks.objects.all().filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+
+ def test_system_webhook_first_5xx(self):
+ self.sys_wh.url = f"{self.url_base}/status/500"
+ self.sys_wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+ self.assertEqual("Response status code: 500", updated_wh.note)
+ self.assertIn("Error when sending message to Webhooks 'My webhook endpoint' (status: 500)", cm.output[-1])
+
+ def test_system_webhook_second_5xx_within_one_day(self):
+ ten_mins_ago = get_current_datetime() - datetime.timedelta(minutes=10)
+ self.sys_wh.url = f"{self.url_base}/status/500"
+ self.sys_wh.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP
+ self.sys_wh.first_error = ten_mins_ago
+ self.sys_wh.last_error = ten_mins_ago
+ self.sys_wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP)
+ self.assertEqual(updated_wh.first_error, ten_mins_ago)
+ self.assertGreater(updated_wh.last_error, ten_mins_ago)
+ self.assertEqual("Response status code: 500", updated_wh.note)
+ self.assertIn("Error when sending message to Webhooks 'My webhook endpoint' (status: 500)", cm.output[-1])
+
+ def test_system_webhook_third_5xx_after_more_than_a_day(self):
+ now = get_current_datetime()
+ day_ago = now - datetime.timedelta(hours=24, minutes=10)
+ ten_minutes_ago = now - datetime.timedelta(minutes=10)
+ self.sys_wh.url = f"{self.url_base}/status/500"
+ self.sys_wh.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP
+ self.sys_wh.first_error = day_ago
+ self.sys_wh.last_error = ten_minutes_ago
+ self.sys_wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT)
+ self.assertEqual(updated_wh.first_error, day_ago)
+ self.assertGreater(updated_wh.last_error, ten_minutes_ago)
+ self.assertEqual("Response status code: 500", updated_wh.note)
+ self.assertIn("Error when sending message to Webhooks 'My webhook endpoint' (status: 500)", cm.output[-1])
+
+ def test_webhook_reactivation(self):
+ with self.subTest("active"):
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ webhook_reactivation(endpoint_id=wh.pk)
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE)
+ self.assertIsNone(updated_wh.first_error)
+ self.assertIsNone(updated_wh.last_error)
+ self.assertIsNone(updated_wh.note)
+
+ with self.subTest("inactive"):
+ now = get_current_datetime()
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ wh.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP
+ wh.first_error = now
+ wh.last_error = now
+ wh.note = "Response status code: 418"
+ wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm:
+ webhook_reactivation(endpoint_id=wh.pk)
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE_TMP)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+ self.assertEqual(updated_wh.note, "Response status code: 418")
+ self.assertIn("Webhook endpoint 'My webhook endpoint' reactivated to 'active_tmp'", cm.output[-1])
+
+ def test_webhook_status_cleanup(self):
+ with self.subTest("active"):
+ webhook_status_cleanup()
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE)
+ self.assertIsNone(updated_wh.first_error)
+ self.assertIsNone(updated_wh.last_error)
+ self.assertIsNone(updated_wh.note)
+
+ with self.subTest("active_tmp_new"):
+ now = get_current_datetime()
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ wh.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP
+ wh.first_error = now
+ wh.last_error = now
+ wh.note = "Response status code: 503"
+ wh.save()
+
+ webhook_status_cleanup()
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE_TMP)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+ self.assertEqual(updated_wh.note, "Response status code: 503")
+
+ with self.subTest("active_tmp_old"):
+ day_ago = get_current_datetime() - datetime.timedelta(hours=24, minutes=10)
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ wh.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP
+ wh.first_error = day_ago
+ wh.last_error = day_ago
+ wh.note = "Response status code: 503"
+ wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm:
+ webhook_status_cleanup()
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE)
+ self.assertIsNone(updated_wh.first_error)
+ self.assertIsNone(updated_wh.last_error)
+ self.assertEqual(updated_wh.note, "Reactivation from active_tmp")
+ self.assertIn("Webhook endpoint 'My webhook endpoint' reactivated from 'active_tmp' to 'active'", cm.output[-1])
+
+ with self.subTest("inactive_tmp_new"):
+ minute_ago = get_current_datetime() - datetime.timedelta(minutes=1)
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ wh.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP
+ wh.first_error = minute_ago
+ wh.last_error = minute_ago
+ wh.note = "Response status code: 503"
+ wh.save()
+
+ webhook_status_cleanup()
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP)
+ self.assertEqual(updated_wh.first_error, minute_ago)
+ self.assertEqual(updated_wh.last_error, minute_ago)
+ self.assertEqual(updated_wh.note, "Response status code: 503")
+
+ with self.subTest("inactive_tmp_old"):
+ ten_minutes_ago = get_current_datetime() - datetime.timedelta(minutes=10)
+ wh = Notification_Webhooks.objects.filter(owner=None).first()
+ wh.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP
+ wh.first_error = ten_minutes_ago
+ wh.last_error = ten_minutes_ago
+ wh.note = "Response status code: 503"
+ wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm:
+ webhook_status_cleanup()
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE_TMP)
+ self.assertEqual(updated_wh.first_error, ten_minutes_ago)
+ self.assertEqual(updated_wh.last_error, ten_minutes_ago)
+ self.assertEqual(updated_wh.note, "Response status code: 503")
+ self.assertIn("Webhook endpoint 'My webhook endpoint' reactivated to 'active_tmp'", cm.output[-1])
+
+ def test_system_webhook_timeout(self):
+ self.sys_wh.url = f"{self.url_base}/delay/3"
+ self.sys_wh.save()
+
+ system_settings = System_Settings.objects.get()
+ system_settings.webhooks_notifications_timeout = 1
+ system_settings.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+ self.assertIn("HTTPConnectionPool(host='webhook.endpoint', port=8080): Read timed out.", updated_wh.note)
+ self.assertIn("Timeout when sending message to Webhook 'My webhook endpoint'", cm.output[-1])
+
+ def test_system_webhook_wrong_fqdn(self):
+
+ self.sys_wh.url = "http://non.existing.place"
+ self.sys_wh.save()
+
+ with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm:
+ send_webhooks_notification(event="dummy", title="Dummy event")
+
+ updated_wh = Notification_Webhooks.objects.filter(owner=None).first()
+ self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT)
+ self.assertIsNotNone(updated_wh.first_error)
+ self.assertEqual(updated_wh.first_error, updated_wh.last_error)
+ self.assertIn("HTTPConnectionPool(host='non.existing.place', port=80): Max retries exceeded with url: /", updated_wh.note)
+ self.assertIn("HTTPConnectionPool(host='non.existing.place', port=80): Max retries exceeded with url: /", cm.output[-1])
+
+ @patch("requests.request", **{"return_value.status_code": 200})
+ def test_headers(self, mock):
+ Product_Type.objects.create(name="notif prod type")
+ self.assertEqual(mock.call_args.kwargs["headers"], {
+ "User-Agent": f"DefectDojo-{dd_version}",
+ "X-DefectDojo-Event": "product_type_added",
+ "X-DefectDojo-Instance": "http://localhost:8080",
+ "Accept": "application/json",
+ "Auth": "Token xxx",
+ })
+
+ @patch("requests.request", **{"return_value.status_code": 200})
+ def test_events_messages(self, mock):
+ with self.subTest("product_type_added"):
+ prod_type = Product_Type.objects.create(name="notif prod type")
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "product_type_added")
+ self.assertEqual(mock.call_args.kwargs["json"], {
+ "description": None,
+ "user": None,
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ "product_type": {
+ "id": prod_type.pk,
+ "name": "notif prod type",
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ },
+ })
+
+ with self.subTest("product_added"):
+ prod = Product.objects.create(name="notif prod", prod_type=prod_type)
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "product_added")
+ self.assertEqual(mock.call_args.kwargs["json"], {
+ "description": None,
+ "user": None,
+ "url_api": f"http://localhost:8080/api/v2/products/{prod.pk}/",
+ "url_ui": f"http://localhost:8080/product/{prod.pk}",
+ "product_type": {
+ "id": prod_type.pk,
+ "name": "notif prod type",
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ },
+ "product": {
+ "id": prod.pk,
+ "name": "notif prod",
+ "url_api": f"http://localhost:8080/api/v2/products/{prod.pk}/",
+ "url_ui": f"http://localhost:8080/product/{prod.pk}",
+ },
+ })
+
+ with self.subTest("engagement_added"):
+ eng = Engagement.objects.create(name="notif eng", product=prod, target_start=timezone.now(), target_end=timezone.now())
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "engagement_added")
+ self.assertEqual(mock.call_args.kwargs["json"], {
+ "description": None,
+ "user": None,
+ "url_api": f"http://localhost:8080/api/v2/engagements/{eng.pk}/",
+ "url_ui": f"http://localhost:8080/engagement/{eng.pk}",
+ "product_type": {
+ "id": prod_type.pk,
+ "name": "notif prod type",
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ },
+ "product": {
+ "id": prod.pk,
+ "name": "notif prod",
+ "url_api": f"http://localhost:8080/api/v2/products/{prod.pk}/",
+ "url_ui": f"http://localhost:8080/product/{prod.pk}",
+ },
+ "engagement": {
+ "id": eng.pk,
+ "name": "notif eng",
+ "url_api": f"http://localhost:8080/api/v2/engagements/{eng.pk}/",
+ "url_ui": f"http://localhost:8080/engagement/{eng.pk}",
+ },
+ })
+
+ with self.subTest("test_added"):
+ test = Test.objects.create(title="notif test", engagement=eng, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id)
+ notifications_helper.notify_test_created(test)
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "test_added")
+ self.assertEqual(mock.call_args.kwargs["json"], {
+ "description": None,
+ "user": None,
+ "url_api": f"http://localhost:8080/api/v2/tests/{test.pk}/",
+ "url_ui": f"http://localhost:8080/test/{test.pk}",
+ "product_type": {
+ "id": prod_type.pk,
+ "name": "notif prod type",
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ },
+ "product": {
+ "id": prod.pk,
+ "name": "notif prod",
+ "url_api": f"http://localhost:8080/api/v2/products/{prod.pk}/",
+ "url_ui": f"http://localhost:8080/product/{prod.pk}",
+ },
+ "engagement": {
+ "id": eng.pk,
+ "name": "notif eng",
+ "url_api": f"http://localhost:8080/api/v2/engagements/{eng.pk}/",
+ "url_ui": f"http://localhost:8080/engagement/{eng.pk}",
+ },
+ "test": {
+ "id": test.pk,
+ "title": "notif test",
+ "url_api": f"http://localhost:8080/api/v2/tests/{test.pk}/",
+ "url_ui": f"http://localhost:8080/test/{test.pk}",
+ },
+ })
+
+ with self.subTest("scan_added_empty"):
+ notifications_helper.notify_scan_added(test, updated_count=0)
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "scan_added_empty")
+ self.assertEqual(mock.call_args.kwargs["json"], {
+ "description": None,
+ "user": None,
+ "url_api": f"http://localhost:8080/api/v2/tests/{test.pk}/",
+ "url_ui": f"http://localhost:8080/test/{test.pk}",
+ "product_type": {
+ "id": prod_type.pk,
+ "name": "notif prod type",
+ "url_api": f"http://localhost:8080/api/v2/product_types/{prod_type.pk}/",
+ "url_ui": f"http://localhost:8080/product/type/{prod_type.pk}",
+ },
+ "product": {
+ "id": prod.pk,
+ "name": "notif prod",
+ "url_api": f"http://localhost:8080/api/v2/products/{prod.pk}/",
+ "url_ui": f"http://localhost:8080/product/{prod.pk}",
+ },
+ "engagement": {
+ "id": eng.pk,
+ "name": "notif eng",
+ "url_api": f"http://localhost:8080/api/v2/engagements/{eng.pk}/",
+ "url_ui": f"http://localhost:8080/engagement/{eng.pk}",
+ },
+ "test": {
+ "id": test.pk,
+ "title": "notif test",
+ "url_api": f"http://localhost:8080/api/v2/tests/{test.pk}/",
+ "url_ui": f"http://localhost:8080/test/{test.pk}",
+ },
+ "finding_count": 0,
+ "findings": {
+ "mitigated": [],
+ "new": [],
+ "reactivated": [],
+ "untouched": [],
+ },
+ })
+
+ with self.subTest("scan_added"):
+ notifications_helper.notify_scan_added(test,
+ updated_count=4,
+ new_findings=[
+ Finding.objects.create(test=test, title="New Finding", severity="Critical"),
+ ],
+ findings_mitigated=[
+ Finding.objects.create(test=test, title="Mitigated Finding", severity="Medium"),
+ ],
+ findings_reactivated=[
+ Finding.objects.create(test=test, title="Reactivated Finding", severity="Low"),
+ ],
+ findings_untouched=[
+ Finding.objects.create(test=test, title="Untouched Finding", severity="Info"),
+ ],
+ )
+ self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "scan_added")
+ self.maxDiff = None
+ self.assertEqual(mock.call_args.kwargs["json"]["findings"], {
+ "new": [{
+ "id": 232,
+ "title": "New Finding",
+ "severity": "Critical",
+ "url_api": "http://localhost:8080/api/v2/findings/232/",
+ "url_ui": "http://localhost:8080/finding/232",
+ }],
+ "mitigated": [{
+ "id": 233,
+ "title": "Mitigated Finding",
+ "severity": "Medium",
+ "url_api": "http://localhost:8080/api/v2/findings/233/",
+ "url_ui": "http://localhost:8080/finding/233",
+ }],
+ "reactivated": [{
+ "id": 234,
+ "title": "Reactivated Finding",
+ "severity": "Low",
+ "url_api": "http://localhost:8080/api/v2/findings/234/",
+ "url_ui": "http://localhost:8080/finding/234",
+ }],
+ "untouched": [{
+ "id": 235,
+ "title": "Untouched Finding",
+ "severity": "Info",
+ "url_api": "http://localhost:8080/api/v2/findings/235/",
+ "url_ui": "http://localhost:8080/finding/235",
+ }],
+ })
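
The webhook tests above pin down a small status state machine: a 4xx response deactivates the endpoint permanently, a first 5xx moves it to inactive_tmp, further 5xx responses within 24 hours of first_error keep it there, and a 5xx more than 24 hours after first_error makes the deactivation permanent. A minimal sketch of that policy, illustrative only and not dojo.notifications.helper's actual implementation:

    import datetime

    def next_status(status_code, first_error, now):
        # Encodes the transitions the tests above assert on.
        if 400 <= status_code < 500:
            return "inactive_permanent"
        if status_code >= 500:
            if first_error is not None and now - first_error > datetime.timedelta(hours=24):
                return "inactive_permanent"
            return "inactive_tmp"
        return "active"
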
diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py
index 02346487fbb..63edff395c6 100644
--- a/unittests/test_parsers.py
+++ b/unittests/test_parsers.py
@@ -1,5 +1,4 @@
import os
-import re
from pathlib import Path
from .dojo_test_case import DojoTestCase, get_unit_tests_path
@@ -32,18 +31,18 @@ def test_file_existence(self):
f"Documentation file '{doc_file}' is missing or using different name",
)
- content = Path(doc_file).read_text()
- self.assertTrue(re.search("title:", content),
+ content = Path(doc_file).read_text(encoding="utf-8")
+ self.assertRegex(content, "title:",
f"Documentation file '{doc_file}' does not contain a title",
)
- self.assertTrue(re.search("toc_hide: true", content),
+ self.assertRegex(content, "toc_hide: true",
f"Documentation file '{doc_file}' does not contain toc_hide: true",
)
if category == "file":
- self.assertTrue(re.search("### Sample Scan Data", content),
+ self.assertRegex(content, "### Sample Scan Data",
f"Documentation file '{doc_file}' does not contain ### Sample Scan Data",
)
- self.assertTrue(re.search("https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans", content),
+ self.assertRegex(content, "https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans",
f"Documentation file '{doc_file}' does not contain https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans",
)
@@ -83,7 +82,7 @@ def test_file_existence(self):
if file.is_file() and file.name != "__pycache__" and file.name != "__init__.py":
f = os.path.join(basedir, "dojo", "tools", parser_dir.name, file.name)
read_true = False
- with open(f) as f:
+ with open(f, encoding="utf-8") as f:
i = 0
for line in f:
if read_true is True:
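
The assertRegex conversions above are behavior-preserving but improve diagnostics: assertTrue(re.search(...)) fails with only "None is not true", while assertRegex reports both the searched text and the pattern. For illustration:

    import unittest

    class RegexDemo(unittest.TestCase):
        def test_regex_message(self):
            content = "title: Threat Composer parser"
            # Same check as re.search, but with a descriptive message on failure:
            self.assertRegex(content, "title:")
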
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index 78bcb9286cb..e4c4ef361e6 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -53,6 +53,7 @@
NotesViewSet,
NoteTypeViewSet,
NotificationsViewSet,
+ NotificationWebhooksViewSet,
ProductAPIScanConfigurationViewSet,
ProductGroupViewSet,
ProductMemberViewSet,
@@ -107,6 +108,7 @@
Languages,
Note_Type,
Notes,
+ Notification_Webhooks,
Notifications,
Product,
Product_API_Scan_Configuration,
@@ -1111,7 +1113,7 @@ def test_request_response_post_and_download(self):
# Test the creation
for level in self.url_levels.keys():
length = FileUpload.objects.count()
- with open(f"{str(self.path)}/scans/acunetix/one_finding.xml") as testfile:
+ with open(f"{str(self.path)}/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile:
payload = {
"title": level,
"file": testfile,
@@ -1123,7 +1125,7 @@ def test_request_response_post_and_download(self):
self.url_levels[level] = response.data.get("id")
# Test the download
- file_data = Path(f"{str(self.path)}/scans/acunetix/one_finding.xml").read_text()
+ file_data = Path(f"{str(self.path)}/scans/acunetix/one_finding.xml").read_text(encoding="utf-8")
for level, file_id in self.url_levels.items():
response = self.client.get(f"/api/v2/{level}/files/download/{file_id}/")
self.assertEqual(200, response.status_code)
@@ -1774,7 +1776,7 @@ def __init__(self, *args, **kwargs):
self.viewname = "importscan"
self.viewset = ImportScanView
- testfile = open("tests/zap_sample.xml")
+ testfile = open("tests/zap_sample.xml", encoding="utf-8")
self.payload = {
"minimum_severity": "Low",
"active": False,
@@ -1801,7 +1803,7 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1831,7 +1833,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1862,7 +1864,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1894,7 +1896,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1928,7 +1930,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1963,7 +1965,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
mock.return_value = True
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -1995,7 +1997,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2037,7 +2039,7 @@ def test_reimport_zap_xml(self, importer_mock, reimporter_mock):
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
length = Test.objects.all().count()
response = self.client.post(
reverse("reimportscan-list"), {
@@ -2063,7 +2065,7 @@ def test_create_not_authorized_product_name_engagement_name(self, mock, importer
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2093,7 +2095,7 @@ def test_create_authorized_product_name_engagement_name_scan_type_title_auto_cre
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2126,7 +2128,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2162,7 +2164,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2194,7 +2196,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2225,7 +2227,7 @@ def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_moc
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": True,
@@ -2253,7 +2255,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2284,7 +2286,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2316,7 +2318,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2347,7 +2349,7 @@ def test_create_not_authorized_product_name_engagement_name_scan_type(self, mock
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2375,7 +2377,7 @@ def test_create_not_authorized_product_name_engagement_name_scan_type_title(self
importer_mock.return_value = IMPORTER_MOCK_RETURN_VALUE
reimporter_mock.return_value = REIMPORTER_MOCK_RETURN_VALUE
- with open("tests/zap_sample.xml") as testfile:
+ with open("tests/zap_sample.xml", encoding="utf-8") as testfile:
payload = {
"minimum_severity": "Low",
"active": False,
@@ -2709,7 +2711,7 @@ def __init__(self, *args, **kwargs):
self.viewset = ImportLanguagesView
self.payload = {
"product": 1,
- "file": open("unittests/files/defectdojo_cloc.json"),
+ "file": open("unittests/files/defectdojo_cloc.json", encoding="utf-8"),
}
self.test_type = TestType.OBJECT_PERMISSIONS
self.permission_check_class = Languages
@@ -2997,3 +2999,24 @@ def __init__(self, *args, **kwargs):
def test_create(self):
self.skipTest("Only one Announcement can exist")
+
+
+class NotificationWebhooksTest(BaseClass.BaseClassTest):
+ fixtures = ["dojo_testdata.json"]
+
+ def __init__(self, *args, **kwargs):
+ self.endpoint_model = Notification_Webhooks
+ self.endpoint_path = "notification_webhooks"
+ self.viewname = "notification_webhooks"
+ self.viewset = NotificationWebhooksViewSet
+ self.payload = {
+ "name": "My endpoint",
+ "url": "http://webhook.endpoint:8080/post",
+ }
+ self.update_fields = {
+ "header_name": "Auth",
+ "header_value": "token x",
+ }
+ self.test_type = TestType.STANDARD
+ self.deleted_objects = 1
+ BaseClass.RESTEndpointTest.__init__(self, *args, **kwargs)
diff --git a/unittests/test_tags.py b/unittests/test_tags.py
index 3330d42f0ba..a1e1aa20cc1 100644
--- a/unittests/test_tags.py
+++ b/unittests/test_tags.py
@@ -363,7 +363,7 @@ def test_remove_tag_from_product_then_add_tag_to_product(self):
self.assertEqual(product_tags_post_removal, self._convert_instance_tags_to_list(objects.get("test")))
self.assertEqual(product_tags_post_removal, self._convert_instance_tags_to_list(objects.get("finding")))
# Add a tag to the product
- self.product.tags.add("more", "tags" "!")
+ self.product.tags.add("more", "tags!")
# This triggers an async function with celery that will fail, so run it manually here
propagate_tags_on_product_sync(self.product)
# Save the tags post removal
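The fix above turns on a Python subtlety worth spelling out: adjacent string literals concatenate at compile time, so "tags" "!" was already the single argument "tags!", not two arguments. The call has therefore always added exactly two tags, "more" and "tags!"; the patch only makes that explicit. A two-line check:

    args = ("more", "tags" "!")   # implicit literal concatenation
    assert args == ("more", "tags!")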
diff --git a/unittests/test_utils.py b/unittests/test_utils.py
index 320b5257173..4bed9f7369f 100644
--- a/unittests/test_utils.py
+++ b/unittests/test_utils.py
@@ -197,9 +197,7 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
created_count = self.final_model_count - self.initial_model_count
self.test_case.assertEqual(
created_count, self.num,
- "%i %s objects created, %i expected. query: %s, first 100 objects: %s" % (
- created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by("-id")[:100],
- ),
+ f"{created_count} {self.queryset.model} objects created, {self.num} expected. query: {self.queryset.query}, first 100 objects: {self.queryset.all().order_by('-id')[:100]}",
)
diff --git a/unittests/tools/test_acunetix_parser.py b/unittests/tools/test_acunetix_parser.py
index 21f83af5273..47969cdeeab 100644
--- a/unittests/tools/test_acunetix_parser.py
+++ b/unittests/tools/test_acunetix_parser.py
@@ -9,7 +9,7 @@
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
- with open("unittests/scans/acunetix/one_finding.xml") as testfile:
+ with open("unittests/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -37,7 +37,7 @@ def test_parse_file_with_one_finding(self):
self.assertEqual("some/path", endpoint.path)
def test_parse_file_with_multiple_finding(self):
- with open("unittests/scans/acunetix/many_findings.xml") as testfile:
+ with open("unittests/scans/acunetix/many_findings.xml", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -132,7 +132,7 @@ def test_parse_file_with_multiple_finding(self):
self.assertIsInstance(req_resp["resp"], str)
def test_parse_file_with_example_com(self):
- with open("unittests/scans/acunetix/XML_http_example_co_id_.xml") as testfile:
+ with open("unittests/scans/acunetix/XML_http_example_co_id_.xml", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -204,7 +204,7 @@ def test_parse_file_with_example_com(self):
self.assertIsInstance(req_resp["resp"], str)
def test_parse_file_with_one_finding_acunetix360(self):
- with open("unittests/scans/acunetix/acunetix360_one_finding.json") as testfile:
+ with open("unittests/scans/acunetix/acunetix360_one_finding.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -225,7 +225,7 @@ def test_parse_file_with_one_finding_acunetix360(self):
self.assertIn("https://online.acunetix360.com/issues/detail/735f4503-e9eb-4b4c-4306-ad49020a4c4b", finding.references)
def test_parse_file_with_one_finding_false_positive(self):
- with open("unittests/scans/acunetix/acunetix360_one_finding_false_positive.json") as testfile:
+ with open("unittests/scans/acunetix/acunetix360_one_finding_false_positive.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -245,7 +245,7 @@ def test_parse_file_with_one_finding_false_positive(self):
self.assertTrue(finding.false_p)
def test_parse_file_with_one_finding_risk_accepted(self):
- with open("unittests/scans/acunetix/acunetix360_one_finding_accepted_risk.json") as testfile:
+ with open("unittests/scans/acunetix/acunetix360_one_finding_accepted_risk.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -265,7 +265,7 @@ def test_parse_file_with_one_finding_risk_accepted(self):
self.assertTrue(finding.risk_accepted)
def test_parse_file_with_multiple_finding_acunetix360(self):
- with open("unittests/scans/acunetix/acunetix360_many_findings.json") as testfile:
+ with open("unittests/scans/acunetix/acunetix360_many_findings.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(16, len(findings))
@@ -306,7 +306,7 @@ def test_parse_file_with_multiple_finding_acunetix360(self):
self.assertEqual(str(endpoint), "http://php.testsparker.com")
def test_parse_file_with_mulitple_cwe(self):
- with open("unittests/scans/acunetix/acunetix360_multiple_cwe.json") as testfile:
+ with open("unittests/scans/acunetix/acunetix360_multiple_cwe.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -325,13 +325,13 @@ def test_parse_file_with_mulitple_cwe(self):
self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
def test_parse_file_issue_10370(self):
- with open("unittests/scans/acunetix/issue_10370.json") as testfile:
+ with open("unittests/scans/acunetix/issue_10370.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_issue_10435(self):
- with open("unittests/scans/acunetix/issue_10435.json") as testfile:
+ with open("unittests/scans/acunetix/issue_10435.json", encoding="utf-8") as testfile:
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_anchore_engine_parser.py b/unittests/tools/test_anchore_engine_parser.py
index 8a94ee27e3e..60a4e511f30 100644
--- a/unittests/tools/test_anchore_engine_parser.py
+++ b/unittests/tools/test_anchore_engine_parser.py
@@ -5,25 +5,25 @@
class TestAnchoreEngineParser(DojoTestCase):
def test_anchore_engine_parser_has_no_finding(self):
- with open("unittests/scans/anchore_engine/no_vuln.json") as testfile:
+ with open("unittests/scans/anchore_engine/no_vuln.json", encoding="utf-8") as testfile:
parser = AnchoreEngineParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_anchore_engine_parser_has_one_finding(self):
- with open("unittests/scans/anchore_engine/one_vuln.json") as testfile:
+ with open("unittests/scans/anchore_engine/one_vuln.json", encoding="utf-8") as testfile:
parser = AnchoreEngineParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_anchore_engine_parser_has_many_findings(self):
- with open("unittests/scans/anchore_engine/many_vulns.json") as testfile:
+ with open("unittests/scans/anchore_engine/many_vulns.json", encoding="utf-8") as testfile:
parser = AnchoreEngineParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(23, len(findings))
def test_anchore_engine_parser_has_many_findings_2_4_1(self):
- with open("unittests/scans/anchore_engine/many_vulns_2.4.1.json") as testfile:
+ with open("unittests/scans/anchore_engine/many_vulns_2.4.1.json", encoding="utf-8") as testfile:
parser = AnchoreEngineParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(51, len(findings))
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index b7badd15719..2acabcf20c6 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -7,19 +7,19 @@
class TestAnchoreEnterpriseParser(DojoTestCase):
def test_anchore_policy_check_parser_has_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/no_checks.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/no_checks.json"), encoding="utf-8") as testfile:
parser = AnchoreEnterpriseParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_anchore_policy_check_parser_has_one_finding(self):
- with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/one_check.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/one_check.json"), encoding="utf-8") as testfile:
parser = AnchoreEnterpriseParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_anchore_policy_check_parser_has_multiple_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/many_checks.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/many_checks.json"), encoding="utf-8") as testfile:
parser = AnchoreEnterpriseParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(57, len(findings))
@@ -28,7 +28,7 @@ def test_anchore_policy_check_parser_has_multiple_findings(self):
self.assertEqual("CVE-2015-2992", finding.unsaved_vulnerability_ids[0])
def test_anchore_policy_check_parser_invalid_format(self):
- with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/invalid_checks_format.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/anchore_enterprise/invalid_checks_format.json"), encoding="utf-8") as testfile:
with self.assertRaises(Exception):
parser = AnchoreEnterpriseParser()
parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_anchore_grype_parser.py b/unittests/tools/test_anchore_grype_parser.py
index c243654fa86..c706e0c384f 100644
--- a/unittests/tools/test_anchore_grype_parser.py
+++ b/unittests/tools/test_anchore_grype_parser.py
@@ -6,14 +6,14 @@
class TestAnchoreGrypeParser(DojoTestCase):
def test_parser_has_no_findings(self):
- with open("unittests/scans/anchore_grype/no_vuln.json") as testfile:
+ with open("unittests/scans/anchore_grype/no_vuln.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parser_has_many_findings(self):
found = False
- with open("unittests/scans/anchore_grype/many_vulns.json") as testfile:
+ with open("unittests/scans/anchore_grype/many_vulns.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1509, len(findings))
@@ -35,7 +35,7 @@ def test_parser_has_many_findings(self):
def test_grype_parser_with_one_criticle_vuln_has_one_findings(self):
found = False
- with open("unittests/scans/anchore_grype/many_vulns2.json") as testfile:
+ with open("unittests/scans/anchore_grype/many_vulns2.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1567, len(findings))
@@ -56,7 +56,7 @@ def test_grype_parser_with_one_criticle_vuln_has_one_findings(self):
def test_grype_parser_with_many_vulns3(self):
found = False
- with open("unittests/scans/anchore_grype/many_vulns3.json") as testfile:
+ with open("unittests/scans/anchore_grype/many_vulns3.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(327, len(findings))
@@ -77,7 +77,7 @@ def test_grype_parser_with_many_vulns3(self):
def test_grype_parser_with_new_matcher_list(self):
found = False
- with open("unittests/scans/anchore_grype/many_vulns4.json") as testfile:
+ with open("unittests/scans/anchore_grype/many_vulns4.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
@@ -97,7 +97,7 @@ def test_grype_parser_with_new_matcher_list(self):
self.assertTrue(found)
def test_check_all_fields(self):
- with open("unittests/scans/anchore_grype/check_all_fields.json") as testfile:
+ with open("unittests/scans/anchore_grype/check_all_fields.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
@@ -266,13 +266,13 @@ def test_check_all_fields(self):
self.assertEqual(2, finding.nb_occurences)
def test_grype_issue_9618(self):
- with open("unittests/scans/anchore_grype/issue_9618.json") as testfile:
+ with open("unittests/scans/anchore_grype/issue_9618.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(35, len(findings))
def test_grype_issue_9942(self):
- with open("unittests/scans/anchore_grype/issue_9942.json") as testfile:
+ with open("unittests/scans/anchore_grype/issue_9942.json", encoding="utf-8") as testfile:
parser = AnchoreGrypeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_anchorectl_policies_parser.py b/unittests/tools/test_anchorectl_policies_parser.py
index 14aa9187c8f..c8bdb4b4ef3 100644
--- a/unittests/tools/test_anchorectl_policies_parser.py
+++ b/unittests/tools/test_anchorectl_policies_parser.py
@@ -5,13 +5,13 @@
class TestAnchoreCTLPoliciesParser(DojoTestCase):
def test_anchore_engine_parser_has_no_finding(self):
- with open("unittests/scans/anchorectl_policies/no_violation.json") as testfile:
+ with open("unittests/scans/anchorectl_policies/no_violation.json", encoding="utf-8") as testfile:
parser = AnchoreCTLPoliciesParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
- with open("unittests/scans/anchorectl_policies/one_violation.json") as testfile:
+ with open("unittests/scans/anchorectl_policies/one_violation.json", encoding="utf-8") as testfile:
parser = AnchoreCTLPoliciesParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
self.assertEqual(singleFinding.description, "User root found as effective user, which is not on the allowed list")
def test_anchore_engine_parser_has_many_findings(self):
- with open("unittests/scans/anchorectl_policies/many_violations.json") as testfile:
+ with open("unittests/scans/anchorectl_policies/many_violations.json", encoding="utf-8") as testfile:
parser = AnchoreCTLPoliciesParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_anchorectl_vulns_parser.py b/unittests/tools/test_anchorectl_vulns_parser.py
index d3d7276cd6e..1ba824fe765 100644
--- a/unittests/tools/test_anchorectl_vulns_parser.py
+++ b/unittests/tools/test_anchorectl_vulns_parser.py
@@ -5,13 +5,13 @@
class TestAnchoreCTLVulnsParser(DojoTestCase):
def test_anchore_engine_parser_has_no_finding(self):
- with open("unittests/scans/anchorectl_vulns/no_vuln.json") as testfile:
+ with open("unittests/scans/anchorectl_vulns/no_vuln.json", encoding="utf-8") as testfile:
parser = AnchoreCTLVulnsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
- with open("unittests/scans/anchorectl_vulns/one_vuln.json") as testfile:
+ with open("unittests/scans/anchorectl_vulns/one_vuln.json", encoding="utf-8") as testfile:
parser = AnchoreCTLVulnsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
self.assertEqual(singleFinding.description, "**Image hash**: None\n\n**Package**: libgnutls30-3.5.8-5+deb9u4\n\n**Package path**: None\n\n**Package type**: dpkg\n\n**Feed**: vulnerabilities/debian:9\n\n**CPE**: None\n\n**Description**: That test description\n\n")
def test_anchore_engine_parser_has_many_findings(self):
- with open("unittests/scans/anchorectl_vulns/many_vulns.json") as testfile:
+ with open("unittests/scans/anchorectl_vulns/many_vulns.json", encoding="utf-8") as testfile:
parser = AnchoreCTLVulnsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(23, len(findings))
diff --git a/unittests/tools/test_api_blackduck_parser.py b/unittests/tools/test_api_blackduck_parser.py
index a8922ca99bc..f58613ca710 100644
--- a/unittests/tools/test_api_blackduck_parser.py
+++ b/unittests/tools/test_api_blackduck_parser.py
@@ -7,7 +7,7 @@
class TestApiBlackduckParser(DojoTestCase):
def test_bandit_parser_has_many_findings(self):
- with open("unittests/scans/api_blackduck/many_vulns.json") as testfile:
+ with open("unittests/scans/api_blackduck/many_vulns.json", encoding="utf-8") as testfile:
parser = ApiBlackduckParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_api_bugcrowd_parser.py b/unittests/tools/test_api_bugcrowd_parser.py
index dd60565b6c7..48e748633cf 100644
--- a/unittests/tools/test_api_bugcrowd_parser.py
+++ b/unittests/tools/test_api_bugcrowd_parser.py
@@ -8,13 +8,13 @@
class TestApiBugcrowdParser(TestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/api_bugcrowd/bugcrowd_empty.json") as testfile:
+ with open("unittests/scans/api_bugcrowd/bugcrowd_empty.json", encoding="utf-8") as testfile:
parser = ApiBugcrowdParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/api_bugcrowd/bugcrowd_one.json") as testfile:
+ with open("unittests/scans/api_bugcrowd/bugcrowd_one.json", encoding="utf-8") as testfile:
# description = """
# Vulnerability Name: JWT alg none
@@ -51,7 +51,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
endpoint.clean()
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/api_bugcrowd/bugcrowd_many.json") as testfile:
+ with open("unittests/scans/api_bugcrowd/bugcrowd_many.json", encoding="utf-8") as testfile:
parser = ApiBugcrowdParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -117,7 +117,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
def test_parse_file_with_not_reproducible_finding(self):
with open(
- "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json",
+ "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json", encoding="utf-8",
) as testfile:
# description = """
@@ -149,7 +149,7 @@ def test_parse_file_with_not_reproducible_finding(self):
endpoint.clean()
def test_parse_file_with_broken_bug_url(self):
- with open("unittests/scans/api_bugcrowd/bugcrowd_broken_bug_url.json") as testfile:
+ with open("unittests/scans/api_bugcrowd/bugcrowd_broken_bug_url.json", encoding="utf-8") as testfile:
parser = ApiBugcrowdParser()
with self.assertLogs("dojo.tools.api_bugcrowd.parser", level="ERROR") as cm:
parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_api_cobalt_parser.py b/unittests/tools/test_api_cobalt_parser.py
index 8db2c23aace..afb45d902f7 100644
--- a/unittests/tools/test_api_cobalt_parser.py
+++ b/unittests/tools/test_api_cobalt_parser.py
@@ -9,13 +9,13 @@
class TestApiCobaltParser(DojoTestCase):
def test_cobalt_api_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/api_cobalt/cobalt_api_zero_vul.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_zero_vul.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_cobalt_api_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/api_cobalt/cobalt_api_many_vul.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_many_vul.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -24,7 +24,7 @@ def test_cobalt_api_parser_with_many_vuln_has_many_findings(self):
self.assertEqual(3, len(findings))
def test_cobalt_api_parser_with_carried_over_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_carried_over.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_carried_over.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -51,7 +51,7 @@ def test_cobalt_api_parser_with_carried_over_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_check_fix_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_check_fix.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_check_fix.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -78,7 +78,7 @@ def test_cobalt_api_parser_with_check_fix_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_invalid_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_invalid.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_invalid.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -105,7 +105,7 @@ def test_cobalt_api_parser_with_invalid_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_need_fix_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_need_fix.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_need_fix.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -132,7 +132,7 @@ def test_cobalt_api_parser_with_need_fix_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_new_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_new.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_new.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -159,7 +159,7 @@ def test_cobalt_api_parser_with_new_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_out_of_scope_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_out_of_scope.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_out_of_scope.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -186,7 +186,7 @@ def test_cobalt_api_parser_with_out_of_scope_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_triaging_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_triaging.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_triaging.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -213,7 +213,7 @@ def test_cobalt_api_parser_with_triaging_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_valid_fix_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_valid_fix.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_valid_fix.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -240,7 +240,7 @@ def test_cobalt_api_parser_with_valid_fix_finding(self):
self.assertTrue(finding.dynamic_finding)
def test_cobalt_api_parser_with_wont_fix_finding(self):
- with open("unittests/scans/api_cobalt/cobalt_api_one_vul_wont_fix.json") as testfile:
+ with open("unittests/scans/api_cobalt/cobalt_api_one_vul_wont_fix.json", encoding="utf-8") as testfile:
parser = ApiCobaltParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -268,7 +268,7 @@ def test_cobalt_api_parser_with_wont_fix_finding(self):
@patch("dojo.tools.api_cobalt.importer.CobaltApiImporter.get_findings")
def test_cobalt_api_parser_with_api(self, mock):
- with open(get_unit_tests_path() + "/scans/api_cobalt/cobalt_api_many_vul.json") as api_findings_file:
+ with open(get_unit_tests_path() + "/scans/api_cobalt/cobalt_api_many_vul.json", encoding="utf-8") as api_findings_file:
api_findings = json.load(api_findings_file)
mock.return_value = api_findings
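For readers unfamiliar with the pattern in this last hunk: the @patch decorator replaces CobaltApiImporter.get_findings with a mock so the parser is exercised against canned JSON rather than a live Cobalt API. Stripped of the test-class scaffolding, the idea is roughly this (a sketch, not the suite's exact code):

    import json
    from unittest.mock import patch

    with patch("dojo.tools.api_cobalt.importer.CobaltApiImporter.get_findings") as mock:
        with open("unittests/scans/api_cobalt/cobalt_api_many_vul.json", encoding="utf-8") as fh:
            mock.return_value = json.load(fh)   # canned API response
        # Anything that calls CobaltApiImporter.get_findings inside this
        # block now receives the canned findings instead of hitting the API.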
diff --git a/unittests/tools/test_api_edgescan_parser.py b/unittests/tools/test_api_edgescan_parser.py
index 93399d952e4..944b721f016 100644
--- a/unittests/tools/test_api_edgescan_parser.py
+++ b/unittests/tools/test_api_edgescan_parser.py
@@ -32,13 +32,13 @@ def test_requires_tool_type(self):
self.assertEqual(parser.requires_tool_type("scan_type"), "Edgescan")
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/api_edgescan/no_vuln.json") as testfile:
+ with open("unittests/scans/api_edgescan/no_vuln.json", encoding="utf-8") as testfile:
parser = ApiEdgescanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/api_edgescan/one_vuln.json") as testfile:
+ with open("unittests/scans/api_edgescan/one_vuln.json", encoding="utf-8") as testfile:
parser = ApiEdgescanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -61,7 +61,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(finding.unsaved_endpoints[0].protocol, None)
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/api_edgescan/many_vulns.json") as testfile:
+ with open("unittests/scans/api_edgescan/many_vulns.json", encoding="utf-8") as testfile:
parser = ApiEdgescanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py
index c7ff7e90dc0..ed157ed2046 100644
--- a/unittests/tools/test_api_sonarqube_importer.py
+++ b/unittests/tools/test_api_sonarqube_importer.py
@@ -9,55 +9,55 @@
def dummy_product(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_issues(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_rule(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_rule_wo_html_desc(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_no_hotspot(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_one_hotspot(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_many_hotspots(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_hotspot_rule(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_hotspot_rule_wo_risk_description(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py
index aa278b80252..fe4334408cd 100644
--- a/unittests/tools/test_api_sonarqube_parser.py
+++ b/unittests/tools/test_api_sonarqube_parser.py
@@ -15,25 +15,25 @@
def dummy_product(self, *args, **kwargs):
- with open("unittests/scans/api_sonarqube/product.json") as json_file:
+ with open("unittests/scans/api_sonarqube/product.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_issues(self, *args, **kwargs):
- with open("unittests/scans/api_sonarqube/issues.json") as json_file:
+ with open("unittests/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_rule(self, *args, **kwargs):
- with open("unittests/scans/api_sonarqube/rule.json") as json_file:
+ with open("unittests/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
def dummy_hotspot_rule(self, *args, **kwargs):
- with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json") as json_file:
+ with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file:
data = json.load(json_file)
return data
diff --git a/unittests/tools/test_api_vulners_parser.py b/unittests/tools/test_api_vulners_parser.py
index bd2e4df2125..e532e1ee272 100644
--- a/unittests/tools/test_api_vulners_parser.py
+++ b/unittests/tools/test_api_vulners_parser.py
@@ -6,7 +6,7 @@
class TestApiVulnersParser(DojoTestCase):
def test_parse_many_findings(self):
- with open("unittests/scans/api_vulners/report_many_vulns.json") as testfile:
+ with open("unittests/scans/api_vulners/report_many_vulns.json", encoding="utf-8") as testfile:
parser = ApiVulnersParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -19,7 +19,7 @@ def test_parse_many_findings(self):
self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H", finding.cvssv3)
def test_parse_one_finding(self):
- with open("unittests/scans/api_vulners/report_one_vuln.json") as testfile:
+ with open("unittests/scans/api_vulners/report_one_vuln.json", encoding="utf-8") as testfile:
parser = ApiVulnersParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -31,13 +31,13 @@ def test_parse_one_finding(self):
self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3)
def test_parse_no_finding(self):
- with open("unittests/scans/api_vulners/report_no_vulns.json") as testfile:
+ with open("unittests/scans/api_vulners/report_no_vulns.json", encoding="utf-8") as testfile:
parser = ApiVulnersParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_no_description(self):
- with open("unittests/scans/api_vulners/report_no_description.json") as testfile:
+ with open("unittests/scans/api_vulners/report_no_description.json", encoding="utf-8") as testfile:
parser = ApiVulnersParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_appcheck_web_application_scanner_parser.py b/unittests/tools/test_appcheck_web_application_scanner_parser.py
index c1ca51f54b8..0775a3de5af 100644
--- a/unittests/tools/test_appcheck_web_application_scanner_parser.py
+++ b/unittests/tools/test_appcheck_web_application_scanner_parser.py
@@ -2,7 +2,11 @@
from dojo.models import Finding, Test
from dojo.tools.appcheck_web_application_scanner.engines.appcheck import AppCheckScanningEngineParser
-from dojo.tools.appcheck_web_application_scanner.engines.base import BaseEngineParser, strip_markup
+from dojo.tools.appcheck_web_application_scanner.engines.base import (
+ BaseEngineParser,
+ escape_non_printable,
+ strip_markup,
+)
from dojo.tools.appcheck_web_application_scanner.engines.nmap import NmapScanningEngineParser
from dojo.tools.appcheck_web_application_scanner.parser import AppCheckWebApplicationScannerParser
@@ -10,13 +14,13 @@
class TestAppCheckWebApplicationScannerParser(TestCase):
def test_appcheck_web_application_scanner_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_zero_vul.json") as testfile:
+ with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_zero_vul.json", encoding="utf-8") as testfile:
parser = AppCheckWebApplicationScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_appcheck_web_application_scanner_parser_with_one_criticle_vuln_has_one_findings(self):
- with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_one_vul.json") as testfile:
+ with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_one_vul.json", encoding="utf-8") as testfile:
parser = AppCheckWebApplicationScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -38,9 +42,10 @@ def test_appcheck_web_application_scanner_parser_with_one_criticle_vuln_has_one_
finding.description.startswith(
"The remote host is running a FTP service that allows cleartext logins over\n unencrypted connections.",
),
+ finding.description,
)
for section in ["**Impact**:", "**Detection**:", "**Technical Details**:"]:
- self.assertTrue(section in finding.description)
+ self.assertIn(section, finding.description)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
@@ -49,7 +54,7 @@ def test_appcheck_web_application_scanner_parser_with_one_criticle_vuln_has_one_
self.assertEqual("0.0.0.1", endpoint.host)
def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json") as testfile:
+ with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json", encoding="utf-8") as testfile:
parser = AppCheckWebApplicationScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
@@ -73,10 +78,11 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
finding.description.startswith(
"The dedicated port scanner found open ports on this host, along with other\nhost-specific information, which can be viewed in Technical Details.",
),
+ finding.description,
)
- self.assertTrue(
- "Host: 0.0.0.1 (0.0.0.1)\nHost is up, received user-set (0.015s latency).\nScanned at 2020-01-29 15:44:46 UTC for 15763s\nNot shown: 65527 filtered ports, 4 closed ports\nReason: 65527 no-responses and 4 resets\nSome closed ports may be reported as filtered due to --defeat-rst-ratelimit\nPORT STATE SERVICE REASON VERSION\n21/tcp open ftp syn-ack ttl 116 Microsoft ftpd\n45000/tcp open ssl/asmp? syn-ack ttl 116\n45010/tcp open unknown syn-ack ttl 116\n60001/tcp open ssl/unknown syn-ack ttl 116\n60011/tcp open unknown syn-ack ttl 116\nService Info: OS: Windows; CPE: cpe:/o:microsoft:windows"
- in finding.description,
+ self.assertIn(
+ "Host: 0.0.0.1 (0.0.0.1)\nHost is up, received user-set (0.015s latency).\nScanned at 2020-01-29 15:44:46 UTC for 15763s\nNot shown: 65527 filtered ports, 4 closed ports\nReason: 65527 no-responses and 4 resets\nSome closed ports may be reported as filtered due to --defeat-rst-ratelimit\nPORT STATE SERVICE REASON VERSION\n21/tcp open ftp syn-ack ttl 116 Microsoft ftpd\n45000/tcp open ssl/asmp? syn-ack ttl 116\n45010/tcp open unknown syn-ack ttl 116\n60001/tcp open ssl/unknown syn-ack ttl 116\n60011/tcp open unknown syn-ack ttl 116\nService Info: OS: Windows; CPE: cpe:/o:microsoft:windows",
+ finding.description,
)
expected_ports = [21, 45000, 45010, 60001, 60011]
@@ -102,9 +108,9 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
self.assertEqual("8.0.32", finding.component_version)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2016-6796", finding.unsaved_vulnerability_ids[0])
- self.assertTrue(finding.description.startswith('**Product Background**\n\n**Apache Tomcat** is a free and open-source Java web application server. It provides a "pure Java" HTTP web server environment in which Java code can also run, implementing the Jakarta Servlet, Jakarta Expression Language, and WebSocket technologies. Tomcat is released with **Catalina** (a servlet and JSP Java Server Pages container), **Coyote** (an HTTP connector), **Coyote JK** (JK protocol proxy connector) and **Jasper** (a JSP engine). Tomcat can optionally be bundled with Java Enterprise Edition (Jakarta EE) as **Apache TomEE** to deliver a complete application server with enterprise features such as distributed computing and web services.\n\n**Vulnerability Summary**\n\nA malicious web application running on Apache Tomcat 9.0.0.M1 to 9.0.0.M9, 8.5.0 to 8.5.4, 8.0.0.RC1 to 8.0.36, 7.0.0 to 7.0.70 and 6.0.0 to 6.0.45 was able to bypass a configured SecurityManager via manipulation of the configuration parameters for the JSP Servlet.\n\n**References**\n\n* http://www.securitytracker.com/id/1038757\n\n* http://www.securitytracker.com/id/1037141\n\n* http://www.securityfocus.com/bid/93944\n\n* http://www.debian.org/security/2016/dsa-3720\n\n* https://access.redhat.com/errata/RHSA-2017:2247\n\n* https://access.redhat.com/errata/RHSA-2017:1552\n\n* https://access.redhat.com/errata/RHSA-2017:1550\n\n* https://access.redhat.com/errata/RHSA-2017:1549\n\n* https://access.redhat.com/errata/RHSA-2017:1548\n\n* https://access.redhat.com/errata/RHSA-2017:0456\n\n* https://access.redhat.com/errata/RHSA-2017:0455\n\n* http://rhn.redhat.com/errata/RHSA-2017-1551.html\n\n* http://rhn.redhat.com/errata/RHSA-2017-0457.html\n\n* https://security.netapp.com/advisory/ntap-20180605-0001/\n\n* https://usn.ubuntu.com/4557-1/\n\n* https://www.oracle.com/security-alerts/cpuoct2021.html\n\n'))
+ self.assertTrue(finding.description.startswith('**Product Background**\n\n**Apache Tomcat** is a free and open-source Java web application server. It provides a "pure Java" HTTP web server environment in which Java code can also run, implementing the Jakarta Servlet, Jakarta Expression Language, and WebSocket technologies. Tomcat is released with **Catalina** (a servlet and JSP Java Server Pages container), **Coyote** (an HTTP connector), **Coyote JK** (JK protocol proxy connector) and **Jasper** (a JSP engine). Tomcat can optionally be bundled with Java Enterprise Edition (Jakarta EE) as **Apache TomEE** to deliver a complete application server with enterprise features such as distributed computing and web services.\n\n**Vulnerability Summary**\n\nA malicious web application running on Apache Tomcat 9.0.0.M1 to 9.0.0.M9, 8.5.0 to 8.5.4, 8.0.0.RC1 to 8.0.36, 7.0.0 to 7.0.70 and 6.0.0 to 6.0.45 was able to bypass a configured SecurityManager via manipulation of the configuration parameters for the JSP Servlet.\n\n**References**\n\n* http://www.securitytracker.com/id/1038757\n\n* http://www.securitytracker.com/id/1037141\n\n* http://www.securityfocus.com/bid/93944\n\n* http://www.debian.org/security/2016/dsa-3720\n\n* https://access.redhat.com/errata/RHSA-2017:2247\n\n* https://access.redhat.com/errata/RHSA-2017:1552\n\n* https://access.redhat.com/errata/RHSA-2017:1550\n\n* https://access.redhat.com/errata/RHSA-2017:1549\n\n* https://access.redhat.com/errata/RHSA-2017:1548\n\n* https://access.redhat.com/errata/RHSA-2017:0456\n\n* https://access.redhat.com/errata/RHSA-2017:0455\n\n* http://rhn.redhat.com/errata/RHSA-2017-1551.html\n\n* http://rhn.redhat.com/errata/RHSA-2017-0457.html\n\n* https://security.netapp.com/advisory/ntap-20180605-0001/\n\n* https://usn.ubuntu.com/4557-1/\n\n* https://www.oracle.com/security-alerts/cpuoct2021.html\n\n'), finding.description)
for section in ["**Technical Details**:", "**Classifications**:"]:
- self.assertTrue(section in finding.description)
+ self.assertIn(section, finding.description)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
@@ -130,9 +136,10 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
finding.description.startswith(
"This is simply a report of HTTP request methods supported by the web application.",
),
+ finding.description,
)
for section in ["**Permitted HTTP Methods**:"]:
- self.assertTrue(section in finding.description)
+ self.assertIn(section, finding.description)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
@@ -167,9 +174,10 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
finding.description.startswith(
"This routine reports all SSL/TLS cipher suites accepted by a service where attack vectors exists only on HTTPS services.\n\nThese rules are applied for the evaluation of the vulnerable cipher suites:\n\n- 64-bit block cipher 3DES vulnerable to the SWEET32 attack (CVE-2016-2183).",
),
+ finding.description,
)
for section in ["**Technical Details**:", "**External Sources**"]:
- self.assertTrue(section in finding.description)
+ self.assertIn(section, finding.description)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
@@ -198,9 +206,10 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
finding.description.startswith(
"The server responded with a HTTP status code that may indicate that the remote server is experiencing technical\ndifficulties that are likely to affect the scan and may also be affecting other application users.",
),
+ finding.description,
)
for section in ["**Technical Details**:"]:
- self.assertTrue(section in finding.description)
+ self.assertIn(section, finding.description)
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
@@ -211,12 +220,77 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding
self.assertEqual("ajax/ShelfEdgeLabel/ShelfEdgeLabelsPromotionalBatch", endpoint.path)
def test_appcheck_web_application_scanner_parser_dupes(self):
- with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_dupes.json") as testfile:
+ with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_dupes.json", encoding="utf-8") as testfile:
parser = AppCheckWebApplicationScannerParser()
findings = parser.get_findings(testfile, Test())
# Test has 5 entries, but we should only return 3 findings.
self.assertEqual(3, len(findings))
+ def test_appcheck_web_application_scanner_parser_http2(self):
+ with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_http2.json", encoding="utf-8") as testfile:
+ parser = AppCheckWebApplicationScannerParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(3, len(findings))
+
+ finding = findings[0]
+ self.assertEqual("1c564bddf78f7642468474a49c9be6653f39e9df6b32d658", finding.unique_id_from_tool)
+ self.assertEqual("2024-08-06", finding.date)
+ self.assertEqual("HTTP/2 Supported", finding.title)
+ self.assertEqual(1, len(finding.unsaved_endpoints))
+ self.assertNotIn("**Messages**", finding.description)
+ self.assertNotIn("\x00", finding.description)
+ self.assertIsNotNone(finding.unsaved_request)
+ self.assertTrue(finding.unsaved_request.startswith(":method = GET"), finding.unsaved_request)
+ self.assertIsNotNone(finding.unsaved_response)
+ self.assertTrue(finding.unsaved_response.startswith(":status: 200"), finding.unsaved_response)
+ endpoint = finding.unsaved_endpoints[0]
+ endpoint.clean()
+ self.assertEqual("www.xzzvwy.com", endpoint.host)
+ self.assertEqual(443, endpoint.port)
+ self.assertEqual("https", endpoint.protocol)
+ self.assertEqual("media/vzdldjmk/pingpong2.jpg", endpoint.path)
+ self.assertEqual("rmode=max&height=500", endpoint.query)
+
+ finding = findings[1]
+ self.assertEqual("4e7c0b570ff6083376b99e1897102a87907effe2199dc8d4", finding.unique_id_from_tool)
+ self.assertEqual("2024-08-06", finding.date)
+ self.assertEqual("HTTP/2 Protocol: Transfer-Encoding Header Accepted", finding.title)
+ self.assertNotIn("**Messages**", finding.description)
+ self.assertNotIn("\x00", finding.description)
+ self.assertIn("**HTTP2 Headers**", finding.description)
+ self.assertIsNotNone(finding.unsaved_request)
+ self.assertTrue(finding.unsaved_request.startswith(":method = POST"), finding.unsaved_request)
+ self.assertIsNotNone(finding.unsaved_response)
+ self.assertTrue(finding.unsaved_response.startswith(":status: 200"), finding.unsaved_response)
+ self.assertEqual(1, len(finding.unsaved_endpoints))
+ endpoint = finding.unsaved_endpoints[0]
+ endpoint.clean()
+ self.assertEqual("www.xzzvwy.com", endpoint.host)
+ self.assertEqual(443, endpoint.port)
+ self.assertEqual("https", endpoint.protocol)
+ self.assertEqual("media/mmzzvwy/pingpong2.jpg", endpoint.path)
+ self.assertEqual("rmode=max&height=500", endpoint.query)
+
+ finding = findings[2]
+ self.assertEqual("2f1fb384e6a866f9ee0c6f7550e3b607e8b1dd2b1ab0fd02", finding.unique_id_from_tool)
+ self.assertEqual("2024-08-06", finding.date)
+ self.assertEqual("HTTP/2 Protocol: Transfer-Encoding Header Accepted", finding.title)
+ self.assertNotIn("**Messages**", finding.description)
+ self.assertIn("**HTTP2 Headers**", finding.description)
+ self.assertNotIn("\x00", finding.description)
+ self.assertIsNotNone(finding.unsaved_request)
+ self.assertTrue(finding.unsaved_request.startswith(":method = POST"), finding.unsaved_request)
+ self.assertIsNotNone(finding.unsaved_response)
+ self.assertTrue(finding.unsaved_response.startswith(":status: 200"), finding.unsaved_response)
+ self.assertEqual(1, len(finding.unsaved_endpoints))
+ endpoint = finding.unsaved_endpoints[0]
+ endpoint.clean()
+ self.assertEqual("www.zzvwy.com", endpoint.host)
+ self.assertEqual(443, endpoint.port)
+ self.assertEqual("https", endpoint.protocol)
+ self.assertEqual("media/bnhfz2s2/transport-hubs.jpeg", endpoint.path)
+ self.assertEqual("width=768&height=505&mode=crop&format=webp&quality=60", endpoint.query)
+
def test_appcheck_web_application_scanner_parser_base_engine_parser(self):
engine = BaseEngineParser()
@@ -411,6 +485,14 @@ def test_appcheck_web_application_scanner_parser_appcheck_engine_parser(self):
{"Messages": "--->\n\nsome stuff\n\n<--\n\nhere"},
# Incorrect request starting-marker
{"Messages": "-->\n\nsome stuff here\n\n<---\n\nhere"},
+ # Missing data
+ {"Messages": "HTTP/2 Request Headers:\n\n\r\nHTTP/2 Response Headers:\n\n"},
+ {"Messages": "HTTP/2 Request Headers:\n\n\r\nHTTP/2 Response Headers:\n\nData"},
+ {"Messages": "HTTP/2 Request Headers:\n\nData\r\nHTTP/2 Response Headers:\n\n"},
+ # No response
+ {"Messages": "HTTP/2 Request Headers:\n\nData\r\n"},
+ # No request
+ {"Messages": "\r\nHTTP/2 Response Headers:\n\nData"},
]:
has_messages_entry = "Messages" in no_rr
engine.extract_request_response(f, no_rr)
@@ -418,17 +500,30 @@ def test_appcheck_web_application_scanner_parser_appcheck_engine_parser(self):
self.assertIsNone(f.unsaved_response)
# If the dict originally has a 'Messages' entry, it should remain there since no req/res was extracted
if has_messages_entry:
- self.assertTrue("Messages" in no_rr)
-
- for req, res in [
- ("some stuff", "here"), ("some stuff <---", " here"), ("s--->", "here<---"), (" s ", " h "),
- ("some stuff... HERE\r\n\r\n", "no, here\n\n"),
- ]:
- rr = {"Messages": f"--->\n\n{req}\n\n<---\n\n{res}"}
- engine.extract_request_response(f, rr)
- self.assertEqual(req.strip(), f.unsaved_request)
- self.assertEqual(res.strip(), f.unsaved_response)
- f.unsaved_request = f.unsaved_response = None
+ self.assertIn("Messages", no_rr)
+
+ for template, test_data in {
+ # HTTP/1
+ "--->\n\n{req}\n\n<---\n\n{res}": [
+ ("some stuff", "here"),
+ ("some stuff <---", " here"),
+ ("s--->", "here<---"),
+ (" s ", " h "),
+ ("some stuff... HERE\r\n\r\n", "no, here\n\n"),
+ ],
+ # HTTP/2
+ "HTTP/2 Request Headers:\n\n{req}\r\nHTTP/2 Response Headers:\n\n{res}": [
+ ("some stuff", "here"),
+ (" s---> ", " here<--- "),
+ ("\x00\x01\u0004\n\r\tdata", "\r\n\x00\x01\x0c\x0bdata"),
+ ],
+ }.items():
+ for req, res in test_data:
+ rr = {"Messages": template.format(req=req, res=res)}
+ engine.extract_request_response(f, rr)
+ self.assertEqual(req.strip(), f.unsaved_request)
+ self.assertEqual(res.strip(), f.unsaved_response)
+ f.unsaved_request = f.unsaved_response = None
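# For reference -- a minimal sketch of the splitting behaviour the loop above
# exercises. Assumption: DefectDojo's BaseEngineParser has its own
# implementation; the two patterns here are inferred purely from the HTTP/1
# and HTTP/2 templates in the test data and from the "missing data" cases
# that must yield no request/response.
import re

HTTP1_RR = re.compile(r"^--->\n\n(.+?)\n\n<---\n\n(.+)$", re.DOTALL)
HTTP2_RR = re.compile(
    r"^HTTP/2 Request Headers:\n\n(.+?)\r\nHTTP/2 Response Headers:\n\n(.+)$",
    re.DOTALL,
)

def split_request_response(messages):
    # Return (request, response) with surrounding whitespace stripped, or
    # (None, None) when either half is absent or a marker is malformed.
    for pattern in (HTTP1_RR, HTTP2_RR):
        match = pattern.match(messages)
        if match:
            return tuple(part.strip() for part in match.groups())
    return None, None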
def test_appcheck_web_application_scanner_parser_markup_stripper(self):
for markup, expected in [
@@ -440,3 +535,33 @@ def test_appcheck_web_application_scanner_parser_markup_stripper(self):
("[[markup]] but with [[urlhere]]", "but with urlhere"),
]:
self.assertEqual(expected, strip_markup(markup))
+
+ def test_appcheck_web_application_scanner_parser_non_printable_escape(self):
+ for test_string, expected in [
+ ("", ""),
+ (
+ "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c",
+ "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\\x0b\\x0c",
+ ),
+ ("'!Test String?'\"\"", "'!Test String?'\"\""),
+ ("\r\n\tTest\r\nString\t\r\n", "\r\n\tTest\r\nString\t\r\n"),
+ ("\0Test\r\nString\0\n", "\\x00Test\r\nString\\x00\n"),
+ ("\0\0你好,\0我不知道。对马好!\n", "\\x00\\x00你好,\\x00我不知道。对马好!\n"), # noqa: RUF001
+ ("\u0000", r"\x00"),
+ ("\x00", r"\x00"),
+ ("\u0000\u0000", r"\x00\x00"),
+ ("\r\n\t\t\u0000\u0000\n\n", "\r\n\t\t\\x00\\x00\n\n"),
+ (
+ "¡A qÙîçk ΛæzŸ ßrȯωñ Møøβe\nönce \u0000\u202d\u200e Σister's ÞΕ 🜯 ¼ 50¢ «soda¬¿ υϖυ 🤪\u000b…",
+ "¡A qÙîçk ΛæzŸ ßrȯωñ Møøβe\nönce \\x00\\u202d\\u200e Σister's ÞΕ 🜯 ¼ 50¢ «soda¬¿ υϖυ 🤪\\x0b…",
+ ),
+ (
+ "Words: \u0000\u0010ABCD\u0000\u0001\u0001`\u0000jpeg\u0000CC+\u0000\b\u0000\u0003;\u0001\u0002\u00002\u001c\u0000@\u0000i\u0004\\\u0000. Done.",
+ r"Words: \x00\x10ABCD\x00\x01\x01`\x00jpeg\x00CC+\x00\x08\x00\x03;\x01\x02\x002\x1c\x00@\x00i\x04\\x00. Done.",
+ ),
+ (
+ "\u0016\no#bota\u00124&7\r\u0019j9}\t\u0004ef\u202egh\u001c",
+ "\\x16\no#bota\\x124&7\r\\x19j9}\t\\x04ef\\u202egh\\x1c",
+ ),
+ ]:
+ self.assertEqual(expected, escape_non_printable(test_string))
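# For reference -- a hedged sketch matching the table above. Assumption: the
# real escape_non_printable is imported from the AppCheck parser module; this
# stand-in only mirrors the expectations encoded in the test data: keep \t,
# \n, \r and printable text (ASCII or not), escape everything else as
# \xNN / \uNNNN.
def escape_non_printable(value):
    def escape(ch):
        if ch in "\t\n\r" or ch.isprintable():
            return ch
        code = ord(ch)
        return f"\\x{code:02x}" if code <= 0xff else f"\\u{code:04x}"
    return "".join(escape(ch) for ch in value)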
diff --git a/unittests/tools/test_appspider_parser.py b/unittests/tools/test_appspider_parser.py
index 0d94c4b719b..1930efa7f39 100644
--- a/unittests/tools/test_appspider_parser.py
+++ b/unittests/tools/test_appspider_parser.py
@@ -10,7 +10,7 @@ def test_appspider_parser_has_one_finding(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
- testfile = open(path.join(path.dirname(__file__), "../scans/appspider/one_vuln.xml"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/appspider/one_vuln.xml"), encoding="utf-8")
parser = AppSpiderParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py
index 9b2279cfa75..3cb28ee7eea 100644
--- a/unittests/tools/test_aqua_parser.py
+++ b/unittests/tools/test_aqua_parser.py
@@ -7,13 +7,13 @@
class TestAquaParser(DojoTestCase):
def test_aqua_parser_has_no_finding(self):
- with open("unittests/scans/aqua/no_vuln.json") as testfile:
+ with open("unittests/scans/aqua/no_vuln.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_aqua_parser_has_one_finding(self):
- with open("unittests/scans/aqua/one_vuln.json") as testfile:
+ with open("unittests/scans/aqua/one_vuln.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -30,13 +30,13 @@ def test_aqua_parser_has_one_finding(self):
self.assertEqual("CVE-2019-14697", finding.unsaved_vulnerability_ids[0])
def test_aqua_parser_has_many_findings(self):
- with open("unittests/scans/aqua/many_vulns.json") as testfile:
+ with open("unittests/scans/aqua/many_vulns.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(24, len(findings))
def test_aqua_parser_v2_has_one_finding(self):
- with open("unittests/scans/aqua/one_v2.json") as testfile:
+ with open("unittests/scans/aqua/one_v2.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -49,13 +49,13 @@ def test_aqua_parser_v2_has_one_finding(self):
self.assertEqual("CVE-2019-15601", finding.unsaved_vulnerability_ids[0])
def test_aqua_parser_v2_has_many_findings(self):
- with open("unittests/scans/aqua/many_v2.json") as testfile:
+ with open("unittests/scans/aqua/many_v2.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
def test_aqua_parser_cvssv3_has_no_finding(self):
- with open("unittests/scans/aqua/many_v2.json") as testfile:
+ with open("unittests/scans/aqua/many_v2.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
nb_cvssv3 = 0
@@ -66,7 +66,7 @@ def test_aqua_parser_cvssv3_has_no_finding(self):
self.assertEqual(0, nb_cvssv3)
def test_aqua_parser_cvssv3_has_many_findings(self):
- with open("unittests/scans/aqua/many_vulns.json") as testfile:
+ with open("unittests/scans/aqua/many_vulns.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
nb_cvssv3 = 0
@@ -77,7 +77,7 @@ def test_aqua_parser_cvssv3_has_many_findings(self):
self.assertEqual(16, nb_cvssv3)
def test_aqua_parser_for_aqua_severity(self):
- with open("unittests/scans/aqua/vulns_with_aqua_severity.json") as testfile:
+ with open("unittests/scans/aqua/vulns_with_aqua_severity.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
sevs = []
@@ -93,20 +93,27 @@ def test_aqua_parser_for_aqua_severity(self):
self.assertEqual(7, d["Info"])
def test_aqua_parser_issue_10585(self):
- with open("unittests/scans/aqua/issue_10585.json") as testfile:
+ with open("unittests/scans/aqua/issue_10585.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_aqua_parser_aqua_devops_issue_10611(self):
- with open("unittests/scans/aqua/aqua_devops_issue_10611.json") as testfile:
+ with open("unittests/scans/aqua/aqua_devops_issue_10611.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(101, len(findings))
self.assertEqual("server.key - server.key (/juice-shop/node_modules/node-gyp/test/fixtures/server.key) ", findings[83].title)
+ def test_aqua_parser_aqua_devops_issue_10849(self):
+ with open("unittests/scans/aqua/issue_10849.json", encoding="utf-8") as testfile:
+ parser = AquaParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(0.0006, findings[0].epss_score)
+ self.assertEqual(0.23474, findings[0].epss_percentile)
+
def test_aqua_parser_aqua_devops_empty(self):
- with open("unittests/scans/aqua/empty_aquadevops.json") as testfile:
+ with open("unittests/scans/aqua/empty_aquadevops.json", encoding="utf-8") as testfile:
parser = AquaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
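# The recurring change in this patch -- adding encoding="utf-8" to every
# open() of a scan fixture -- removes a platform dependency: without it,
# open() falls back to the locale's preferred encoding (see PEP 597), so a
# report containing non-ASCII bytes can parse on a UTF-8 Linux host yet raise
# UnicodeDecodeError under cp1252 on Windows. Illustration against one of the
# fixtures touched above:
with open("unittests/scans/aqua/one_vuln.json", encoding="utf-8") as testfile:
    content = testfile.read()  # decoded as UTF-8 regardless of locale settings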
diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py
index e175ec7a8c1..337200796ea 100644
--- a/unittests/tools/test_arachni_parser.py
+++ b/unittests/tools/test_arachni_parser.py
@@ -8,7 +8,7 @@
class TestArachniParser(DojoTestCase):
def test_parser_has_one_finding(self):
- with open("unittests/scans/arachni/arachni.afr.json") as testfile:
+ with open("unittests/scans/arachni/arachni.afr.json", encoding="utf-8") as testfile:
parser = ArachniParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -23,7 +23,7 @@ def test_parser_has_one_finding(self):
self.assertEqual(datetime.datetime(2017, 11, 14, 2, 57, 29, tzinfo=datetime.timezone.utc), finding.date)
def test_parser_has_many_finding(self):
- with open("unittests/scans/arachni/dd.com.afr.json") as testfile:
+ with open("unittests/scans/arachni/dd.com.afr.json", encoding="utf-8") as testfile:
parser = ArachniParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -54,7 +54,7 @@ def test_parser_has_many_finding(self):
self.assertIn("server", finding.unsaved_tags)
def test_parser_has_many_finding2(self):
- with open("unittests/scans/arachni/js.com.afr.json") as testfile:
+ with open("unittests/scans/arachni/js.com.afr.json", encoding="utf-8") as testfile:
parser = ArachniParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_asff_parser.py b/unittests/tools/test_asff_parser.py
index 68c409c1669..fe01bb06cfd 100644
--- a/unittests/tools/test_asff_parser.py
+++ b/unittests/tools/test_asff_parser.py
@@ -13,12 +13,17 @@ def sample_path(file_name):
class TestAsffParser(DojoTestCase):
def load_sample_json(self, file_name):
- with open(sample_path(file_name)) as file:
+ with open(sample_path(file_name), encoding="utf-8") as file:
return json.load(file)
def common_check_finding(self, finding, data, index, guarddutydate=False):
+ parser = AsffParser()
+ resource_arns = parser.get_item_resource_arns(data[index])
+ resource_arn_strings = ", ".join(resource_arns)
+ control_description = data[index].get("Description", "")
+ full_description = f"**AWS resource ARN:** {resource_arn_strings}\n\n{control_description}"
self.assertEqual(finding.title, data[index]["Title"])
- self.assertEqual(finding.description, data[index]["Description"])
+ self.assertEqual(finding.description, full_description)
if guarddutydate:
self.assertEqual(finding.date.date(),
datetime.strptime(data[0]["CreatedAt"], "%Y-%m-%dT%H:%M:%S.%fZ").date())
@@ -31,12 +36,12 @@ def common_check_finding(self, finding, data, index, guarddutydate=False):
"IpV4Addresses"
]
for endpoint in finding.unsaved_endpoints:
- self.assertTrue(endpoint, expected_ipv4s)
+ self.assertIn(str(endpoint), expected_ipv4s)
endpoint.clean()
def test_asff_one_vuln(self):
data = self.load_sample_json("one_vuln.json")
- with open(sample_path("one_vuln.json")) as file:
+ with open(sample_path("one_vuln.json"), encoding="utf-8") as file:
parser = AsffParser()
findings = parser.get_findings(file, Test())
self.assertEqual(1, len(findings))
@@ -44,7 +49,7 @@ def test_asff_one_vuln(self):
def test_asff_many_vulns(self):
data = self.load_sample_json("many_vulns.json")
- with open(sample_path("many_vulns.json")) as file:
+ with open(sample_path("many_vulns.json"), encoding="utf-8") as file:
parser = AsffParser()
findings = parser.get_findings(file, Test())
self.assertEqual(len(findings), 5)
@@ -53,7 +58,7 @@ def test_asff_many_vulns(self):
def test_asff_guardduty(self):
data = self.load_sample_json("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json")
- with open(sample_path("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json")) as file:
+ with open(sample_path("guardduty/Unusual Behaviors-User-Persistence IAMUser-NetworkPermissions.json"), encoding="utf-8") as file:
parser = AsffParser()
findings = parser.get_findings(file, Test())
self.assertEqual(len(findings), 1)
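# The ASFF test now builds the expected description from
# parser.get_item_resource_arns(). A hedged sketch of what such a helper
# plausibly returns -- the ARN Ids listed under an ASFF finding's "Resources"
# key; the actual AsffParser may filter or format differently.
def get_item_resource_arns(item):
    return [
        resource["Id"]
        for resource in item.get("Resources", [])
        if str(resource.get("Id", "")).startswith("arn:")
    ]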
diff --git a/unittests/tools/test_auditjs_parser.py b/unittests/tools/test_auditjs_parser.py
index 7e128183a8c..4a367a7ca2c 100644
--- a/unittests/tools/test_auditjs_parser.py
+++ b/unittests/tools/test_auditjs_parser.py
@@ -6,13 +6,13 @@
class TestAuditJSParser(DojoTestCase):
def test_auditjs_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/auditjs/auditjs_zero_vul.json") as testfile:
+ with open("unittests/scans/auditjs/auditjs_zero_vul.json", encoding="utf-8") as testfile:
parser = AuditJSParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_auditjs_parser_with_one_criticle_vuln_has_one_findings(self):
- with open("unittests/scans/auditjs/auditjs_one_vul.json") as testfile:
+ with open("unittests/scans/auditjs/auditjs_one_vul.json", encoding="utf-8") as testfile:
parser = AuditJSParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -32,7 +32,7 @@ def test_auditjs_parser_with_one_criticle_vuln_has_one_findings(self):
findings[0].references)
def test_auditjs_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/auditjs/auditjs_many_vul.json") as testfile:
+ with open("unittests/scans/auditjs/auditjs_many_vul.json", encoding="utf-8") as testfile:
parser = AuditJSParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -59,16 +59,16 @@ def test_auditjs_parser_with_many_vuln_has_many_findings(self):
def test_auditjs_parser_empty_with_error(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/auditjs/empty_with_error.json") as testfile:
+ with open("unittests/scans/auditjs/empty_with_error.json", encoding="utf-8") as testfile:
parser = AuditJSParser()
parser.get_findings(testfile, Test())
- self.assertTrue(
- "Invalid JSON format. Are you sure you used --json option ?" in str(context.exception),
+ self.assertIn(
+ "Invalid JSON format. Are you sure you used --json option ?", str(context.exception),
)
def test_auditjs_parser_with_package_name_has_namespace(self):
- with open("unittests/scans/auditjs/auditjs_with_package_namespace.json") as testfile:
+ with open("unittests/scans/auditjs/auditjs_with_package_namespace.json", encoding="utf-8") as testfile:
parser = AuditJSParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_aws_prowler_parser.py b/unittests/tools/test_aws_prowler_parser.py
index 567e963abda..91da95da6dc 100644
--- a/unittests/tools/test_aws_prowler_parser.py
+++ b/unittests/tools/test_aws_prowler_parser.py
@@ -14,12 +14,12 @@ def setup(self, testfile):
def test_aws_prowler_parser_with_no_vuln_has_no_findings(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/no_vuln.csv"))
+ open("unittests/scans/aws_prowler/no_vuln.csv", encoding="utf-8"))
self.assertEqual(0, len(findings))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/one_vuln.csv"))
+ open("unittests/scans/aws_prowler/one_vuln.csv", encoding="utf-8"))
self.assertEqual(1, len(findings))
self.assertEqual(
"Root user in the account wasn't accessed in the last 1 days", findings[0].title,
@@ -27,7 +27,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/many_vuln.csv"))
+ open("unittests/scans/aws_prowler/many_vuln.csv", encoding="utf-8"))
self.assertEqual(4, len(findings))
self.assertEqual(
"Root user in the account wasn't accessed in the last 1 days", findings[0].title)
@@ -42,7 +42,7 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
def test_aws_prowler_parser_with_many_vuln_has_many_findings2(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/many_vuln2.csv"))
+ open("unittests/scans/aws_prowler/many_vuln2.csv", encoding="utf-8"))
self.assertEqual(174, len(findings))
self.assertEqual("Root user in the account wasn't accessed in the last 1 days", findings[0].title)
self.assertEqual("Info", findings[0].severity)
@@ -52,7 +52,7 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings2(self):
def test_aws_prowler_parser_issue4450(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/issue4450.csv"))
+ open("unittests/scans/aws_prowler/issue4450.csv", encoding="utf-8"))
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
@@ -72,12 +72,12 @@ def test_aws_prowler_parser_issue4450(self):
def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/no_vuln.json"))
+ open("unittests/scans/aws_prowler/no_vuln.json", encoding="utf-8"))
self.assertEqual(0, len(findings))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/one_vuln.json"))
+ open("unittests/scans/aws_prowler/one_vuln.json", encoding="utf-8"))
self.assertEqual(1, len(findings))
self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title)
self.assertIn("012345678912", findings[0].description)
@@ -97,7 +97,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler/many_vuln.json"))
+ open("unittests/scans/aws_prowler/many_vuln.json", encoding="utf-8"))
self.assertEqual(4, len(findings))
with self.subTest(i=0):
self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title)
diff --git a/unittests/tools/test_aws_prowler_v3plus_parser.py b/unittests/tools/test_aws_prowler_v3plus_parser.py
index 6c7f2564e23..6eb22c296cf 100644
--- a/unittests/tools/test_aws_prowler_v3plus_parser.py
+++ b/unittests/tools/test_aws_prowler_v3plus_parser.py
@@ -12,12 +12,12 @@ def setup(self, testfile):
def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/no_vuln.json"))
+ open("unittests/scans/aws_prowler_v3plus/no_vuln.json", encoding="utf-8"))
self.assertEqual(0, len(findings))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/one_vuln.json"))
+ open("unittests/scans/aws_prowler_v3plus/one_vuln.json", encoding="utf-8"))
self.assertEqual(1, len(findings))
self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.sandbox.partner.teste.com", findings[0].unique_id_from_tool)
self.assertIn("Check if ACM Certificates are about to expire in specific days or less", findings[0].description)
@@ -26,7 +26,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self):
def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/many_vuln.json"))
+ open("unittests/scans/aws_prowler_v3plus/many_vuln.json", encoding="utf-8"))
self.assertEqual(3, len(findings))
with self.subTest(i=0):
self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.teste.teste.com", findings[0].unique_id_from_tool)
@@ -40,12 +40,12 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self):
def test_aws_prowler_parser_with_no_vuln_has_no_findings_ocsf_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json"))
+ open("unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json", encoding="utf-8"))
self.assertEqual(0, len(findings))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json"))
+ open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json", encoding="utf-8"))
self.assertEqual(1, len(findings))
self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool)
self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description)
@@ -54,14 +54,11 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self):
def test_aws_prowler_parser_with_many_vuln_has_many_findings_ocsf_json(self):
findings = self.setup(
- open("unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json"))
- self.assertEqual(3, len(findings))
+ open("unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json", encoding="utf-8"))
+ self.assertEqual(2, len(findings))
with self.subTest(i=0):
self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool)
self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description)
with self.subTest(i=1):
self.assertEqual("prowler-aws-iam_role_cross_account_readonlyaccess_policy-123456789012-us-east-1-AuditRole", findings[1].unique_id_from_tool)
self.assertIn("Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts", findings[1].description)
- with self.subTest(i=3):
- self.assertEqual("prowler-aws-iam_role_permissive_trust_relationship-123456789012-us-east-1-CrossAccountResourceAccessRole", findings[2].unique_id_from_tool)
- self.assertIn("Ensure IAM Roles do not allow assume role from any role of a cross account", findings[2].description)
diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py
index f4eb990d0e3..14e53d3cce7 100644
--- a/unittests/tools/test_awssecurityhub_parser.py
+++ b/unittests/tools/test_awssecurityhub_parser.py
@@ -12,7 +12,7 @@ def sample_path(file_name: str):
class TestAwsSecurityHubParser(DojoTestCase):
def test_one_finding(self):
- with open(get_unit_tests_path() + sample_path("config_one_finding.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("config_one_finding.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
@@ -23,7 +23,7 @@ def test_one_finding(self):
self.assertEqual("https://docs.aws.amazon.com/console/securityhub/IAM.5/remediation", finding.references)
def test_one_finding_active(self):
- with open(get_unit_tests_path() + sample_path("config_one_finding_active.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("config_one_finding_active.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
@@ -33,7 +33,7 @@ def test_one_finding_active(self):
self.assertTrue(finding.active)
def test_many_findings(self):
- with open(get_unit_tests_path() + sample_path("config_many_findings.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("config_many_findings.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(3, len(findings))
@@ -42,13 +42,13 @@ def test_many_findings(self):
self.assertEqual("This is a Security Hub Finding \nThis AWS control checks whether AWS Multi-Factor Authentication (MFA) is enabled for all AWS Identity and Access Management (IAM) users that use a console password.\n**AWS Finding ARN:** arn:aws:securityhub:us-east-1:012345678912:subscription/aws-foundational-security-best-practices/v/1.0.0/IAM.5/finding/de861909-2d26-4e45-bd86-19d2ab6ceef1\n**Resource IDs:** AWS::::Account:012345678912\n**AwsAccountId:** 012345678912\n**Generator ID:** aws-foundational-security-best-practices/v/1.0.0/IAM.5\n", finding.description)
def test_repeated_findings(self):
- with open(get_unit_tests_path() + sample_path("config_repeated_findings.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("config_repeated_findings.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
def test_unique_id(self):
- with open(get_unit_tests_path() + sample_path("config_one_finding.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("config_one_finding.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(
@@ -57,7 +57,7 @@ def test_unique_id(self):
)
def test_inspector_ec2(self):
- with open(get_unit_tests_path() + sample_path("inspector_ec2_cve.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("inspector_ec2_cve.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(5, len(findings))
@@ -71,7 +71,7 @@ def test_inspector_ec2(self):
self.assertEqual("AwsEc2Instance arn:aws:ec2:us-east-1:XXXXXXXXXXXX:i-11111111111111111", endpoint.host)
def test_inspector_ec2_with_no_vulnerabilities(self):
- with open(get_unit_tests_path() + sample_path("inspector_ec2_cve_no_vulnerabilities.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("inspector_ec2_cve_no_vulnerabilities.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
@@ -79,7 +79,7 @@ def test_inspector_ec2_with_no_vulnerabilities(self):
self.assertEqual(finding.component_name, "AwsEc2Instance")
def test_inspector_ec2_ghsa(self):
- with open(get_unit_tests_path() + sample_path("inspector_ec2_ghsa.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("inspector_ec2_ghsa.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
@@ -94,7 +94,7 @@ def test_inspector_ec2_ghsa(self):
self.assertEqual("AwsEc2Instance arn:aws:ec2:eu-central-1:012345678912:instance/i-07c11cc535d830123", endpoint.host)
def test_inspector_ecr(self):
- with open(get_unit_tests_path() + sample_path("inspector_ecr.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("inspector_ecr.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(7, len(findings))
@@ -111,7 +111,7 @@ def test_inspector_ecr(self):
self.assertEqual("AwsEcrContainerImage arn:aws:ecr:eu-central-1:123456789012:repository/repo-os/sha256:af965ef68c78374a5f987fce98c0ddfa45801df2395bf012c50b863e65978d74", endpoint.host)
def test_guardduty(self):
- with open(get_unit_tests_path() + sample_path("guardduty.json")) as test_file:
+ with open(get_unit_tests_path() + sample_path("guardduty.json"), encoding="utf-8") as test_file:
parser = AwsSecurityHubParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_azure_security_center_recommendations_parser.py b/unittests/tools/test_azure_security_center_recommendations_parser.py
index 0119f982e8c..3ee1beefe3f 100644
--- a/unittests/tools/test_azure_security_center_recommendations_parser.py
+++ b/unittests/tools/test_azure_security_center_recommendations_parser.py
@@ -8,13 +8,13 @@
class TestAzureSecurityCenterRecommendationsParser(DojoTestCase):
def test_parse_file_with_no_findings(self):
- with open("unittests/scans/azure_security_center_recommendations/zero_vulns.csv") as testfile:
+ with open("unittests/scans/azure_security_center_recommendations/zero_vulns.csv", encoding="utf-8") as testfile:
parser = AzureSecurityCenterRecommendationsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_multiple_findings(self):
- with open("unittests/scans/azure_security_center_recommendations/many_vulns.csv") as testfile:
+ with open("unittests/scans/azure_security_center_recommendations/many_vulns.csv", encoding="utf-8") as testfile:
parser = AzureSecurityCenterRecommendationsParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_bandit_parser.py b/unittests/tools/test_bandit_parser.py
index a27d629e63e..6e51768ced9 100644
--- a/unittests/tools/test_bandit_parser.py
+++ b/unittests/tools/test_bandit_parser.py
@@ -9,13 +9,13 @@
class TestBanditParser(DojoTestCase):
def test_bandit_parser_has_no_finding(self):
- with open("unittests/scans/bandit/no_vuln.json") as testfile:
+ with open("unittests/scans/bandit/no_vuln.json", encoding="utf-8") as testfile:
parser = BanditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_bandit_parser_has_one_finding(self):
- with open("unittests/scans/bandit/one_vuln.json") as testfile:
+ with open("unittests/scans/bandit/one_vuln.json", encoding="utf-8") as testfile:
parser = BanditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -33,7 +33,7 @@ def test_bandit_parser_has_one_finding(self):
self.assertIn("https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html", item.references)
def test_bandit_parser_has_many_findings(self):
- with open("unittests/scans/bandit/many_vulns.json") as testfile:
+ with open("unittests/scans/bandit/many_vulns.json", encoding="utf-8") as testfile:
parser = BanditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(214, len(findings))
@@ -48,7 +48,7 @@ def test_bandit_parser_has_many_findings(self):
self.assertIn("https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html", item.references)
def test_bandit_parser_has_many_findings_recent(self):
- with open("unittests/scans/bandit/dd.json") as testfile:
+ with open("unittests/scans/bandit/dd.json", encoding="utf-8") as testfile:
parser = BanditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(47, len(findings))
@@ -62,7 +62,7 @@ def test_bandit_parser_has_many_findings_recent(self):
self.assertEqual("Certain", item.get_scanner_confidence_text())
def test_bandit_parser_has_many_findings_recent2(self):
- with open("unittests/scans/bandit/dd2.json") as testfile:
+ with open("unittests/scans/bandit/dd2.json", encoding="utf-8") as testfile:
parser = BanditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(165, len(findings))
diff --git a/unittests/tools/test_bearer_cli_parser.py b/unittests/tools/test_bearer_cli_parser.py
index d0fcdd26577..92a7b55098a 100644
--- a/unittests/tools/test_bearer_cli_parser.py
+++ b/unittests/tools/test_bearer_cli_parser.py
@@ -7,7 +7,7 @@
class TestBearerParser(TestCase):
def test_bearer_parser_with_one_vuln_has_one_findings(self):
- testfile = open("unittests/scans/bearer_cli/bearer_cli_one_vul.json")
+ testfile = open("unittests/scans/bearer_cli/bearer_cli_one_vul.json", encoding="utf-8")
parser = BearerCLIParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -21,7 +21,7 @@ def test_bearer_parser_with_one_vuln_has_one_findings(self):
self.assertEqual(581, findings[0].line)
def test_bearer_parser_with_many_vuln_has_many_findings(self):
- testfile = open("unittests/scans/bearer_cli/bearer_cli_many_vul.json")
+ testfile = open("unittests/scans/bearer_cli/bearer_cli_many_vul.json", encoding="utf-8")
parser = BearerCLIParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_brakeman_parser.py b/unittests/tools/test_brakeman_parser.py
index 0c8e4a43c8c..185c7f22042 100644
--- a/unittests/tools/test_brakeman_parser.py
+++ b/unittests/tools/test_brakeman_parser.py
@@ -6,19 +6,19 @@
class TestBrakemanParser(DojoTestCase):
def test_parse_file_no_finding(self):
- with open("unittests/scans/brakeman/no_finding.json") as testfile:
+ with open("unittests/scans/brakeman/no_finding.json", encoding="utf-8") as testfile:
parser = BrakemanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_has_two_findings(self):
- with open("unittests/scans/brakeman/two_findings.json") as testfile:
+ with open("unittests/scans/brakeman/two_findings.json", encoding="utf-8") as testfile:
parser = BrakemanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
def test_parse_file_has_many_findings(self):
- with open("unittests/scans/brakeman/many_findings.json") as testfile:
+ with open("unittests/scans/brakeman/many_findings.json", encoding="utf-8") as testfile:
parser = BrakemanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(18, len(findings))
diff --git a/unittests/tools/test_bugcrowd_parser.py b/unittests/tools/test_bugcrowd_parser.py
index dc39110fb9d..5e66c9c6c7b 100644
--- a/unittests/tools/test_bugcrowd_parser.py
+++ b/unittests/tools/test_bugcrowd_parser.py
@@ -8,7 +8,7 @@
class TestBugCrowdParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/bugcrowd/BugCrowd-zero.csv") as testfile:
+ with open("unittests/scans/bugcrowd/BugCrowd-zero.csv", encoding="utf-8") as testfile:
parser = BugCrowdParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -17,7 +17,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/bugcrowd/BugCrowd-one.csv") as testfile:
+ with open("unittests/scans/bugcrowd/BugCrowd-one.csv", encoding="utf-8") as testfile:
parser = BugCrowdParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -27,7 +27,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(findings[0].date, datetime(2020, 3, 1, 6, 15, 6, tzinfo=timezone.utc))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/bugcrowd/BugCrowd-many.csv") as testfile:
+ with open("unittests/scans/bugcrowd/BugCrowd-many.csv", encoding="utf-8") as testfile:
parser = BugCrowdParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_bundler_audit_parser.py b/unittests/tools/test_bundler_audit_parser.py
index b08834c38b0..c27c87a44f9 100644
--- a/unittests/tools/test_bundler_audit_parser.py
+++ b/unittests/tools/test_bundler_audit_parser.py
@@ -7,7 +7,7 @@
class TestBundlerAuditParser(DojoTestCase):
def test_get_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/bundler_audit/bundler-audit_v0.6.1.txt")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/bundler_audit/bundler-audit_v0.6.1.txt"), encoding="utf-8") as testfile:
parser = BundlerAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -29,7 +29,7 @@ def test_get_findings(self):
self.assertEqual("2.2.3", finding.component_version)
def test_get_findings_version9(self):
- with open(path.join(path.dirname(__file__), "../scans/bundler_audit/version_9.0.txt")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/bundler_audit/version_9.0.txt"), encoding="utf-8") as testfile:
parser = BundlerAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_burp_api_parser.py b/unittests/tools/test_burp_api_parser.py
index bbd36e634a3..af34ecac7af 100644
--- a/unittests/tools/test_burp_api_parser.py
+++ b/unittests/tools/test_burp_api_parser.py
@@ -7,7 +7,7 @@ class TestParser(DojoTestCase):
def test_example_report(self):
testfile = get_unit_tests_path() + "/scans/burp_api/example.json"
- with open(testfile) as f:
+ with open(testfile, encoding="utf-8") as f:
parser = BurpApiParser()
findings = parser.get_findings(f, Test())
for finding in findings:
@@ -25,7 +25,7 @@ def test_example_report(self):
def test_validate_more(self):
testfile = get_unit_tests_path() + "/scans/burp_api/many_vulns.json"
- with open(testfile) as f:
+ with open(testfile, encoding="utf-8") as f:
parser = BurpApiParser()
findings = parser.get_findings(f, Test())
for finding in findings:
@@ -62,7 +62,7 @@ def test_convert_confidence(self):
def test_fix_issue_9128(self):
testfile = get_unit_tests_path() + "/scans/burp_api/fix_issue_9128.json"
- with open(testfile) as f:
+ with open(testfile, encoding="utf-8") as f:
parser = BurpApiParser()
findings = parser.get_findings(f, Test())
for finding in findings:
diff --git a/unittests/tools/test_burp_dastardly_parser.py b/unittests/tools/test_burp_dastardly_parser.py
index 3c17bcb0914..2846895e985 100644
--- a/unittests/tools/test_burp_dastardly_parser.py
+++ b/unittests/tools/test_burp_dastardly_parser.py
@@ -8,7 +8,7 @@
class TestBurpParser(DojoTestCase):
def test_burp_dastardly_multiple_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_dastardly/many_findings.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_dastardly/many_findings.xml"), encoding="utf-8") as test_file:
parser = BurpDastardlyParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
diff --git a/unittests/tools/test_burp_enterprise_parser.py b/unittests/tools/test_burp_enterprise_parser.py
index cbbdec18ac7..0d28dfe26f7 100644
--- a/unittests/tools/test_burp_enterprise_parser.py
+++ b/unittests/tools/test_burp_enterprise_parser.py
@@ -8,7 +8,7 @@
class TestBurpEnterpriseParser(DojoTestCase):
def test_burp_enterprise_with_multiple_vulns(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_enterprise/many_vulns.html")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_enterprise/many_vulns.html"), encoding="utf-8") as test_file:
parser = BurpEnterpriseParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
diff --git a/unittests/tools/test_burp_graphql_parser.py b/unittests/tools/test_burp_graphql_parser.py
index 320f70e6a37..ea669d741f0 100644
--- a/unittests/tools/test_burp_graphql_parser.py
+++ b/unittests/tools/test_burp_graphql_parser.py
@@ -8,7 +8,7 @@
class TestBurpGraphQLParser(DojoTestCase):
def test_burp_one_finding(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/one_finding.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/one_finding.json"), encoding="utf-8") as test_file:
parser = BurpGraphQLParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -33,7 +33,7 @@ def test_burp_one_finding(self):
self.assertIn("CWE-79", findings[0].references)
def test_burp_two_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/two_findings.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/two_findings.json"), encoding="utf-8") as test_file:
parser = BurpGraphQLParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -49,27 +49,27 @@ def test_burp_two_findings(self):
self.assertIn("description 3", findings[1].description)
def test_burp_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/no_findings.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/no_findings.json"), encoding="utf-8") as test_file:
parser = BurpGraphQLParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(0, len(findings))
def test_burp_null_title(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_title.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_title.json"), encoding="utf-8") as test_file:
with self.assertRaises(ValueError):
parser = BurpGraphQLParser()
parser.get_findings(test_file, Test())
def test_burp_null_request_segments(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_request_segments.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_request_segments.json"), encoding="utf-8") as test_file:
parser = BurpGraphQLParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(1, len(findings))
def test_burp_null_data(self):
- with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_data.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp_graphql/null_data.json"), encoding="utf-8") as test_file:
parser = BurpGraphQLParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
diff --git a/unittests/tools/test_burp_parser.py b/unittests/tools/test_burp_parser.py
index 12aad2405a1..e8ba5c870f6 100644
--- a/unittests/tools/test_burp_parser.py
+++ b/unittests/tools/test_burp_parser.py
@@ -8,7 +8,7 @@
class TestBurpParser(DojoTestCase):
def test_burp_with_one_vuln_has_one_finding(self):
- with open(path.join(path.dirname(__file__), "../scans/burp/one_finding.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp/one_finding.xml"), encoding="utf-8") as test_file:
parser = BurpParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -20,7 +20,7 @@ def test_burp_with_one_vuln_has_one_finding(self):
self.assertEqual(3, len(findings[0].unsaved_endpoints))
def test_burp_with_multiple_vulns_has_multiple_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/burp/seven_findings.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp/seven_findings.xml"), encoding="utf-8") as test_file:
parser = BurpParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -34,7 +34,7 @@ def test_burp_with_multiple_vulns_has_multiple_findings(self):
self.assertEqual("Frameable response (potential Clickjacking)", finding.title)
def test_burp_with_one_vuln_with_blank_response(self):
- with open(path.join(path.dirname(__file__), "../scans/burp/one_finding_with_blank_response.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp/one_finding_with_blank_response.xml"), encoding="utf-8") as test_file:
parser = BurpParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -50,7 +50,7 @@ def test_burp_with_one_vuln_with_blank_response(self):
self.assertEqual("High", findings[0].severity)
def test_burp_with_one_vuln_with_cwe(self):
- with open(path.join(path.dirname(__file__), "../scans/burp/one_finding_with_cwe.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp/one_finding_with_cwe.xml"), encoding="utf-8") as test_file:
parser = BurpParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
@@ -66,7 +66,7 @@ def test_burp_with_one_vuln_with_cwe(self):
self.assertEqual("Info", findings[0].severity)
def test_burp_issue4399(self):
- with open(path.join(path.dirname(__file__), "../scans/burp/issue4399.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/burp/issue4399.xml"), encoding="utf-8") as test_file:
parser = BurpParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
diff --git a/unittests/tools/test_cargo_audit_parser.py b/unittests/tools/test_cargo_audit_parser.py
index c1f3c622917..d8c4ac7fd22 100644
--- a/unittests/tools/test_cargo_audit_parser.py
+++ b/unittests/tools/test_cargo_audit_parser.py
@@ -6,13 +6,13 @@
class TestCargoAuditParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/cargo_audit/no_findings.json") as testfile:
+ with open("unittests/scans/cargo_audit/no_findings.json", encoding="utf-8") as testfile:
parser = CargoAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/cargo_audit/many_findings.json") as testfile:
+ with open("unittests/scans/cargo_audit/many_findings.json", encoding="utf-8") as testfile:
parser = CargoAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_checkmarx_one_parser.py b/unittests/tools/test_checkmarx_one_parser.py
index bd7842c1e8d..f2cde3169c0 100644
--- a/unittests/tools/test_checkmarx_one_parser.py
+++ b/unittests/tools/test_checkmarx_one_parser.py
@@ -11,7 +11,7 @@
class TestCheckmarxOneParser(DojoTestCase):
def test_checkmarx_one_many_vulns(self):
- with open("unittests/scans/checkmarx_one/checkmarx_one.json") as testfile:
+ with open("unittests/scans/checkmarx_one/checkmarx_one.json", encoding="utf-8") as testfile:
parser = CheckmarxOneParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -28,8 +28,14 @@ def test_checkmarx_one_many_vulns(self):
self.assertEqual("Medium", finding_test.severity)
self.assertEqual("/src/helpers/Constants.ts", finding_test.file_path)
+ def test_checkmarx_one_no_findings(self):
+ with open("unittests/scans/checkmarx_one/no_findings.json", encoding="utf-8") as testfile:
+ parser = CheckmarxOneParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(0, len(findings))
+
def test_checkmarx_one_many_findings(self):
- with open("unittests/scans/checkmarx_one/many_findings.json") as testfile:
+ with open("unittests/scans/checkmarx_one/many_findings.json", encoding="utf-8") as testfile:
parser = CheckmarxOneParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
@@ -45,14 +51,8 @@ def test_checkmarx_one_many_findings(self):
self.assertEqual("High", finding_test.severity)
self.assertEqual("/qe/testharness/Dockerfile", finding_test.file_path)
- def test_checkmarx_one_no_findings(self):
- with open("unittests/scans/checkmarx_one/no_findings.json") as testfile:
- parser = CheckmarxOneParser()
- findings = parser.get_findings(testfile, Test())
- self.assertEqual(0, len(findings))
-
- def test_checkmarx_one_new_format(self):
- with open("unittests/scans/checkmarx_one/api_export.json") as testfile:
+ def test_checkmarx_one_sca_10770(self):
+ with open("unittests/scans/checkmarx_one/checkmarx_one_sca_10770.json", encoding="utf-8") as testfile:
parser = CheckmarxOneParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(8, len(findings))
@@ -65,8 +65,8 @@ def test_checkmarx_one_new_format(self):
self.assertIsNotNone(finding.severity)
self.assertIsNotNone(finding.description)
finding_test = findings[0]
- self.assertEqual("Medium", finding_test.severity)
- self.assertEqual("/.github/workflows/checkmarx.yaml", finding_test.file_path)
+ self.assertEqual("High", finding_test.severity)
+ self.assertEqual(89, finding_test.cwe)
def test_checkmarx_vulnerabilities_from_scan_results(self):
def test_iac_finding(finding):
@@ -123,7 +123,7 @@ def test_sca_finding(finding):
# Not implemented yet
pass
- with open("unittests/scans/checkmarx_one/vulnerabilities_from_scan_results.json") as testfile:
+ with open("unittests/scans/checkmarx_one/vulnerabilities_from_scan_results.json", encoding="utf-8") as testfile:
parser = CheckmarxOneParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(146, len(findings))
diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py
index 176ced0f33a..74592b51246 100644
--- a/unittests/tools/test_checkmarx_osa_parser.py
+++ b/unittests/tools/test_checkmarx_osa_parser.py
@@ -10,7 +10,7 @@ class TestCheckmarxOsaParser(DojoTestCase):
# maxDiff = None
def init(self, report_filename):
- my_file_handle = open(report_filename)
+ my_file_handle = open(report_filename, encoding="utf-8")
product = Product()
engagement = Engagement()
test = Test()
diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py
index c6b4cc92606..322b28faa3b 100644
--- a/unittests/tools/test_checkmarx_parser.py
+++ b/unittests/tools/test_checkmarx_parser.py
@@ -12,7 +12,7 @@ class TestCheckmarxParser(DojoTestCase):
# maxDiff = None
def init(self, reportFilename):
- my_file_handle = open(reportFilename)
+ my_file_handle = open(reportFilename, encoding="utf-8")
product = Product()
engagement = Engagement()
test = Test()
diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py
index f4379b0c0c6..9e4cd58cbdb 100644
--- a/unittests/tools/test_checkov_parser.py
+++ b/unittests/tools/test_checkov_parser.py
@@ -6,31 +6,31 @@
class TestCheckovParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/checkov/checkov-report-0-vuln.json") as testfile:
+ with open("unittests/scans/checkov/checkov-report-0-vuln.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_no_vuln_has_no_findings_v2(self):
- with open("unittests/scans/checkov/checkov2-report-0-vuln.json") as testfile:
+ with open("unittests/scans/checkov/checkov2-report-0-vuln.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/checkov/checkov-report-1-vuln.json") as testfile:
+ with open("unittests/scans/checkov/checkov-report-1-vuln.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/checkov/checkov-report-many-vuln.json") as testfile:
+ with open("unittests/scans/checkov/checkov-report-many-vuln.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
self.assertGreater(len(findings), 2)
def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
- with open("unittests/scans/checkov/checkov-report-multiple-check_type.json") as testfile:
+ with open("unittests/scans/checkov/checkov-report-multiple-check_type.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
@@ -80,7 +80,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
)
def test_parse_file_with_specified_severity(self):
- with open("unittests/scans/checkov/checkov-report-severity.json") as testfile:
+ with open("unittests/scans/checkov/checkov-report-severity.json", encoding="utf-8") as testfile:
parser = CheckovParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_chefinspect_parser.py b/unittests/tools/test_chefinspect_parser.py
index d979b4b3137..65aa6262810 100644
--- a/unittests/tools/test_chefinspect_parser.py
+++ b/unittests/tools/test_chefinspect_parser.py
@@ -6,19 +6,19 @@
class TestChefInspectParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/chefinspect/no_finding.log") as testfile:
+ with open("unittests/scans/chefinspect/no_finding.log", encoding="utf-8") as testfile:
parser = ChefInspectParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/chefinspect/one_finding.log") as testfile:
+ with open("unittests/scans/chefinspect/one_finding.log", encoding="utf-8") as testfile:
parser = ChefInspectParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/chefinspect/many_findings.log") as testfile:
+ with open("unittests/scans/chefinspect/many_findings.log", encoding="utf-8") as testfile:
parser = ChefInspectParser()
findings = parser.get_findings(testfile, Test())
- self.assertTrue(10, len(findings))
+ self.assertEqual(10, len(findings))
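# Why the assertEqual fix above matters: unittest's assertTrue signature is
# assertTrue(expr, msg=None), so the old assertTrue(10, len(findings)) always
# passed -- 10 is truthy and the length was silently treated as the failure
# message. Minimal demonstration:
import unittest

class AssertTrueGotcha(unittest.TestCase):
    def test_two_arg_asserttrue_always_passes(self):
        self.assertTrue(10, 0)    # passes: 0 is only the msg argument

    def test_assertequal_actually_compares(self):
        self.assertEqual(10, 10)  # a real equality check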
diff --git a/unittests/tools/test_clair_parser.py b/unittests/tools/test_clair_parser.py
index 6ecdbfcd59c..858215fab6f 100644
--- a/unittests/tools/test_clair_parser.py
+++ b/unittests/tools/test_clair_parser.py
@@ -5,21 +5,21 @@
class TestClairParser(DojoTestCase):
def test_no_findings_clair(self):
- my_file_handle = open("unittests/scans/clair/clair_empty.json")
+ my_file_handle = open("unittests/scans/clair/clair_empty.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
self.assertEqual(0, len(findings))
def test_few_findings_clair(self):
- my_file_handle = open("unittests/scans/clair/clair_few_vuln.json")
+ my_file_handle = open("unittests/scans/clair/clair_few_vuln.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
self.assertEqual(4, len(findings))
def test_many_findings_clair(self):
- my_file_handle = open("unittests/scans/clair/clair_many_vul.json")
+ my_file_handle = open("unittests/scans/clair/clair_many_vul.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
@@ -32,21 +32,21 @@ def test_many_findings_clair(self):
self.assertEqual("CVE-2018-20839", finding.unsaved_vulnerability_ids[0])
def test_parse_no_content_no_findings_clairklar(self):
- my_file_handle = open("unittests/scans/clair/clairklar_empty.json")
+ my_file_handle = open("unittests/scans/clair/clairklar_empty.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
self.assertEqual(0, len(findings))
def test_high_findings_clairklar(self):
- my_file_handle = open("unittests/scans/clair/clairklar_high.json")
+ my_file_handle = open("unittests/scans/clair/clairklar_high.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
self.assertEqual(6, len(findings))
def test_mixed_findings_clairklar(self):
- my_file_handle = open("unittests/scans/clair/clairklar_mixed.json")
+ my_file_handle = open("unittests/scans/clair/clairklar_mixed.json", encoding="utf-8")
parser = ClairParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
diff --git a/unittests/tools/test_cloudsploit_parser.py b/unittests/tools/test_cloudsploit_parser.py
index 0e1564390c2..a919b03ff9e 100644
--- a/unittests/tools/test_cloudsploit_parser.py
+++ b/unittests/tools/test_cloudsploit_parser.py
@@ -6,21 +6,21 @@
class TestCloudsploitParser(DojoTestCase):
def test_cloudsploit_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/cloudsploit/cloudsploit_zero_vul.json")
+ testfile = open("unittests/scans/cloudsploit/cloudsploit_zero_vul.json", encoding="utf-8")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_cloudsploit_parser_with_one_criticle_vuln_has_one_findings(self):
- testfile = open("unittests/scans/cloudsploit/cloudsploit_one_vul.json")
+ testfile = open("unittests/scans/cloudsploit/cloudsploit_one_vul.json", encoding="utf-8")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_cloudsploit_parser_with_many_vuln_has_many_findings(self):
- testfile = open("unittests/scans/cloudsploit/cloudsploit_many_vul.json")
+ testfile = open("unittests/scans/cloudsploit/cloudsploit_many_vul.json", encoding="utf-8")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_cobalt_parser.py b/unittests/tools/test_cobalt_parser.py
index f8f3f908155..ad93c3a83ed 100644
--- a/unittests/tools/test_cobalt_parser.py
+++ b/unittests/tools/test_cobalt_parser.py
@@ -7,19 +7,19 @@ class TestCobaltParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/cobalt/cobalt_no_vuln.csv") as testfile:
+ with open("unittests/scans/cobalt/cobalt_no_vuln.csv", encoding="utf-8") as testfile:
parser = CobaltParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/cobalt/cobalt_one_vuln.csv") as testfile:
+ with open("unittests/scans/cobalt/cobalt_one_vuln.csv", encoding="utf-8") as testfile:
parser = CobaltParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/cobalt/cobalt_many_vuln.csv") as testfile:
+ with open("unittests/scans/cobalt/cobalt_many_vuln.csv", encoding="utf-8") as testfile:
parser = CobaltParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py
index 6053d38e675..4f81bca4570 100644
--- a/unittests/tools/test_codechecker_parser.py
+++ b/unittests/tools/test_codechecker_parser.py
@@ -7,7 +7,7 @@ class TestCodeCheckerParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json",
+ get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json", encoding="utf-8",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json",
+ get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json", encoding="utf-8",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json",
+ get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json", encoding="utf-8",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def test_parse_file_with_various_review_statuses(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json",
+ get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json", encoding="utf-8",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_contrast_parser.py b/unittests/tools/test_contrast_parser.py
index 6b6f3ae85f0..b2f7a48863c 100644
--- a/unittests/tools/test_contrast_parser.py
+++ b/unittests/tools/test_contrast_parser.py
@@ -11,7 +11,7 @@ def test_example_report(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
- with open("unittests/scans/contrast/contrast-node-goat.csv") as testfile:
+ with open("unittests/scans/contrast/contrast-node-goat.csv", encoding="utf-8") as testfile:
parser = ContrastParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
@@ -56,7 +56,7 @@ def test_example2_report(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
- with open("unittests/scans/contrast/vulnerabilities2020-09-21.csv") as testfile:
+ with open("unittests/scans/contrast/vulnerabilities2020-09-21.csv", encoding="utf-8") as testfile:
parser = ContrastParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
diff --git a/unittests/tools/test_coverity_api_parser.py b/unittests/tools/test_coverity_api_parser.py
index fd1a2684204..f6f468cfa0c 100644
--- a/unittests/tools/test_coverity_api_parser.py
+++ b/unittests/tools/test_coverity_api_parser.py
@@ -8,25 +8,25 @@
class TestZapParser(DojoTestCase):
def test_parse_wrong_file(self):
with self.assertRaises(ValueError):
- with open("unittests/scans/coverity_api/wrong.json") as testfile:
+ with open("unittests/scans/coverity_api/wrong.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
parser.get_findings(testfile, Test())
def test_parse_no_findings(self):
- with open("unittests/scans/coverity_api/empty.json") as testfile:
+ with open("unittests/scans/coverity_api/empty.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_only_quality(self):
"""This report only have quality findings"""
- with open("unittests/scans/coverity_api/only_quality.json") as testfile:
+ with open("unittests/scans/coverity_api/only_quality.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_some_findings(self):
- with open("unittests/scans/coverity_api/few_findings.json") as testfile:
+ with open("unittests/scans/coverity_api/few_findings.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -43,7 +43,7 @@ def test_parse_some_findings(self):
self.assertEqual(22463, finding.unique_id_from_tool)
def test_parse_few_findings_triaged_as_bug(self):
- with open("unittests/scans/coverity_api/few_findings_triaged_as_bug.json") as testfile:
+ with open("unittests/scans/coverity_api/few_findings_triaged_as_bug.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -60,7 +60,7 @@ def test_parse_few_findings_triaged_as_bug(self):
self.assertEqual(22248, finding.unique_id_from_tool)
def test_parse_some_findings_mitigated(self):
- with open("unittests/scans/coverity_api/few_findings_mitigated.json") as testfile:
+ with open("unittests/scans/coverity_api/few_findings_mitigated.json", encoding="utf-8") as testfile:
parser = CoverityApiParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
diff --git a/unittests/tools/test_coverity_scan_parser.py b/unittests/tools/test_coverity_scan_parser.py
index e8f07afdccb..c3720884e87 100644
--- a/unittests/tools/test_coverity_scan_parser.py
+++ b/unittests/tools/test_coverity_scan_parser.py
@@ -7,14 +7,14 @@
class TestCoverityScanParser(DojoTestCase):
def test_parse_no_findings(self):
- with open(f"{SCANS_PATH}/no_vuln.json") as testfile:
+ with open(f"{SCANS_PATH}/no_vuln.json", encoding="utf-8") as testfile:
parser = CoverityScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open(f"{SCANS_PATH}/one_vuln.json") as testfile:
+ with open(f"{SCANS_PATH}/one_vuln.json", encoding="utf-8") as testfile:
parser = CoverityScanParser()
findings = parser.get_findings(testfile, Test())
@@ -31,7 +31,7 @@ def test_parse_one_finding(self):
)
def test_parse_many_findings(self):
- with open(f"{SCANS_PATH}/many_vulns.json") as testfile:
+ with open(f"{SCANS_PATH}/many_vulns.json", encoding="utf-8") as testfile:
parser = CoverityScanParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py
index 5201d7c2919..88aa859cad6 100644
--- a/unittests/tools/test_crashtest_security_parser.py
+++ b/unittests/tools/test_crashtest_security_parser.py
@@ -5,14 +5,14 @@
class TestCrashtestSecurityParser(DojoTestCase):
def test_crashtest_security_json_parser_empty_file_has_no_findings(self):
- testfile = open("unittests/scans/crashtest_security/empty.json")
+ testfile = open("unittests/scans/crashtest_security/empty.json", encoding="utf-8")
parser = CrashtestSecurityParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_crashtest_security_json_parser_full_file_has_many_findings(self):
- testfile = open("unittests/scans/crashtest_security/full.json")
+ testfile = open("unittests/scans/crashtest_security/full.json", encoding="utf-8")
parser = CrashtestSecurityParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -25,7 +25,7 @@ def test_crashtest_security_json_parser_full_file_has_many_findings(self):
def test_crashtest_security_json_parser_extracted_data_file_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json",
+ get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json", encoding="utf-8",
)
parser = CrashtestSecurityParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_cred_scan_parser.py b/unittests/tools/test_cred_scan_parser.py
index af49941f4db..a913e591ed7 100644
--- a/unittests/tools/test_cred_scan_parser.py
+++ b/unittests/tools/test_cred_scan_parser.py
@@ -8,13 +8,13 @@
class TestCredScanParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/cred_scan/cred_scan_no_vuln.csv") as testfile:
+ with open("unittests/scans/cred_scan/cred_scan_no_vuln.csv", encoding="utf-8") as testfile:
parser = CredScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/cred_scan/cred_scan_one_vuln.csv") as testfile:
+ with open("unittests/scans/cred_scan/cred_scan_one_vuln.csv", encoding="utf-8") as testfile:
parser = CredScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -25,7 +25,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(datetime.date(2021, 4, 10), datetime.datetime.date(finding.date))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/cred_scan/cred_scan_many_vuln.csv") as testfile:
+ with open("unittests/scans/cred_scan/cred_scan_many_vuln.csv", encoding="utf-8") as testfile:
parser = CredScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_crunch42_parser.py b/unittests/tools/test_crunch42_parser.py
index 57cab2f32ea..79565e95023 100644
--- a/unittests/tools/test_crunch42_parser.py
+++ b/unittests/tools/test_crunch42_parser.py
@@ -6,7 +6,7 @@
class TestCrunch42Parser(DojoTestCase):
def test_crunch42parser_single_has_many_findings(self):
- with open("unittests/scans/crunch42/crunch42_many_findings.json") as testfile:
+ with open("unittests/scans/crunch42/crunch42_many_findings.json", encoding="utf-8") as testfile:
parser = Crunch42Parser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(8, len(findings))
@@ -18,7 +18,7 @@ def test_crunch42parser_single_has_many_findings(self):
self.assertGreater(len(finding.description), 0)
def test_crunch42parser_single_has_many_findings2(self):
- with open("unittests/scans/crunch42/crunch42_many_findings2.json") as testfile:
+ with open("unittests/scans/crunch42/crunch42_many_findings2.json", encoding="utf-8") as testfile:
parser = Crunch42Parser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py
index 4a548f67020..08233e00220 100644
--- a/unittests/tools/test_cyclonedx_parser.py
+++ b/unittests/tools/test_cyclonedx_parser.py
@@ -7,7 +7,7 @@
class TestCyclonedxParser(DojoTestCase):
def test_grype_report(self):
- with open("unittests/scans/cyclonedx/grype_dd_1_14_1.xml") as file:
+ with open("unittests/scans/cyclonedx/grype_dd_1_14_1.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = list(parser.get_findings(file, Test()))
for finding in findings:
@@ -31,7 +31,7 @@ def test_grype_report(self):
def test_spec1_report(self):
"""Test a report from the spec itself"""
- with open("unittests/scans/cyclonedx/spec1.xml") as file:
+ with open("unittests/scans/cyclonedx/spec1.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = list(parser.get_findings(file, Test()))
for finding in findings:
@@ -54,7 +54,7 @@ def test_spec1_report(self):
def test_spec1_report_low_first(self):
"""Test a report from the spec itself"""
- with open("unittests/scans/cyclonedx/spec1_lowfirst.xml") as file:
+ with open("unittests/scans/cyclonedx/spec1_lowfirst.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = list(parser.get_findings(file, Test()))
for finding in findings:
@@ -74,7 +74,7 @@ def test_spec1_report_low_first(self):
self.assertEqual("Upgrade\n", finding.mitigation)
def test_cyclonedx_bom_report(self):
- with open("unittests/scans/cyclonedx/cyclonedx_bom.xml") as file:
+ with open("unittests/scans/cyclonedx/cyclonedx_bom.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -83,7 +83,7 @@ def test_cyclonedx_bom_report(self):
def test_cyclonedx_jake_report(self):
"""Test a report generated by Jake"""
- with open("unittests/scans/cyclonedx/jake.xml") as file:
+ with open("unittests/scans/cyclonedx/jake.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -92,7 +92,7 @@ def test_cyclonedx_jake_report(self):
def test_cyclonedx_retirejs_report(self):
"""Test a report generated by RetireJS"""
- with open("unittests/scans/cyclonedx/retirejs.latest.xml") as file:
+ with open("unittests/scans/cyclonedx/retirejs.latest.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -101,7 +101,7 @@ def test_cyclonedx_retirejs_report(self):
def test_cyclonedx_grype_11_report(self):
"""Test a report generated by Grype 0.11"""
- with open("unittests/scans/cyclonedx/dd_1_15_0.xml") as file:
+ with open("unittests/scans/cyclonedx/dd_1_15_0.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -142,7 +142,7 @@ def test_cyclonedx_grype_11_report(self):
def test_cyclonedx_1_4_xml(self):
"""CycloneDX version 1.4 XML format"""
- with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.xml") as file:
+ with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -188,7 +188,7 @@ def test_cyclonedx_1_4_xml(self):
def test_cyclonedx_1_4_json(self):
"""CycloneDX version 1.4 JSON format"""
- with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.json") as file:
+ with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -232,7 +232,7 @@ def test_cyclonedx_1_4_json(self):
def test_cyclonedx_1_4_jake_json(self):
"""CycloneDX version 1.4 JSON format produced by jake 1.4.1"""
- with open("unittests/scans/cyclonedx/jake2.json") as file:
+ with open("unittests/scans/cyclonedx/jake2.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
self.assertEqual(7, len(findings))
@@ -286,7 +286,7 @@ def test_cyclonedx_1_4_jake_json(self):
def test_cyclonedx_1_4_xml_cvssv31(self):
"""CycloneDX version 1.4 XML format"""
- with open("unittests/scans/cyclonedx/log4j.xml") as file:
+ with open("unittests/scans/cyclonedx/log4j.xml", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -303,7 +303,7 @@ def test_cyclonedx_1_4_xml_cvssv31(self):
def test_cyclonedx_1_4_json_cvssv31(self):
"""CycloneDX version 1.4 JSON format"""
- with open("unittests/scans/cyclonedx/log4j.json") as file:
+ with open("unittests/scans/cyclonedx/log4j.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -320,7 +320,7 @@ def test_cyclonedx_1_4_json_cvssv31(self):
def test_cyclonedx_1_4_json_nested_cvssv31(self):
"""CycloneDX version 1.4 JSON format"""
- with open("unittests/scans/cyclonedx/nested-component-log4j.json") as file:
+ with open("unittests/scans/cyclonedx/nested-component-log4j.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -337,7 +337,7 @@ def test_cyclonedx_1_4_json_nested_cvssv31(self):
def test_cyclonedx_issue_9277(self):
"""CycloneDX version 1.5 JSON format"""
- with open("unittests/scans/cyclonedx/issue_9277.json") as file:
+ with open("unittests/scans/cyclonedx/issue_9277.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -350,7 +350,7 @@ def test_cyclonedx_issue_9277(self):
def test_cyclonedx_issue_8022(self):
"""CycloneDX version 1.4 JSON format"""
- with open("unittests/scans/cyclonedx/issue_8022.json") as file:
+ with open("unittests/scans/cyclonedx/issue_8022.json", encoding="utf-8") as file:
parser = CycloneDXParser()
findings = parser.get_findings(file, Test())
for finding in findings:
diff --git a/unittests/tools/test_dawnscanner_parser.py b/unittests/tools/test_dawnscanner_parser.py
index 62ccc11c443..73541ee45c6 100644
--- a/unittests/tools/test_dawnscanner_parser.py
+++ b/unittests/tools/test_dawnscanner_parser.py
@@ -8,7 +8,7 @@
class TestDawnScannerParser(DojoTestCase):
def test_burp_with_one_vuln_has_one_finding(self):
- with open(path.join(path.dirname(__file__), "../scans/dawnscanner/dawnscanner_v1.6.9.json")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/dawnscanner/dawnscanner_v1.6.9.json"), encoding="utf-8") as test_file:
parser = DawnScannerParser()
findings = parser.get_findings(test_file, Test())
for finding in findings:
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index bd6f9cf4028..8f39cc6f707 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -22,13 +22,13 @@ def __init__(self, name, content):
class TestDependencyCheckParser(DojoTestCase):
def test_parse_empty_file(self):
- with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml") as testfile:
+ with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
parser = DependencyCheckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_single_vulnerability_has_single_finding(self):
- with open("unittests/scans/dependency_check/single_vuln.xml") as testfile:
+ with open("unittests/scans/dependency_check/single_vuln.xml", encoding="utf-8") as testfile:
parser = DependencyCheckParser()
findings = parser.get_findings(testfile, Test())
items = findings
@@ -46,14 +46,14 @@ def test_parse_file_with_single_vulnerability_has_single_finding(self):
self.assertEqual(items[i].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400)))
def test_parse_file_with_single_dependency_with_related_no_vulnerability(self):
- with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml") as testfile:
+ with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
parser = DependencyCheckParser()
findings = parser.get_findings(testfile, Test())
items = findings
self.assertEqual(0, len(items))
def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
- with open("unittests/scans/dependency_check/multiple_vulnerabilities_has_multiple_findings.xml") as testfile:
+ with open("unittests/scans/dependency_check/multiple_vulnerabilities_has_multiple_findings.xml", encoding="utf-8") as testfile:
parser = DependencyCheckParser()
findings = parser.get_findings(testfile, Test())
items = findings
@@ -255,7 +255,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
def test_parse_java_6_5_3(self):
"""Test with version 6.5.3"""
- with open(path.join(path.dirname(__file__), "../scans/dependency_check/version-6.5.3.xml")) as test_file:
+ with open(path.join(path.dirname(__file__), "../scans/dependency_check/version-6.5.3.xml"), encoding="utf-8") as test_file:
parser = DependencyCheckParser()
findings = parser.get_findings(test_file, Test())
items = findings
@@ -274,7 +274,7 @@ def test_parse_java_6_5_3(self):
self.assertEqual(items[i].date, datetime(2022, 1, 15, 14, 31, 13, 42600, tzinfo=timezone.utc))
def test_parse_file_pr6439(self):
- with open("unittests/scans/dependency_check/PR6439.xml") as testfile:
+ with open("unittests/scans/dependency_check/PR6439.xml", encoding="utf-8") as testfile:
parser = DependencyCheckParser()
findings = parser.get_findings(testfile, Test())
items = findings
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index 783d68441f9..60db4cedc35 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -7,7 +7,7 @@ class TestDependencyTrackParser(DojoTestCase):
def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json",
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_finding
def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json",
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self)
def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json",
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -31,7 +31,7 @@ def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
def test_dependency_track_parser_has_many_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings.json",
+ get_unit_tests_path() + "/scans/dependency_track/many_findings.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -49,7 +49,7 @@ def test_dependency_track_parser_has_many_findings(self):
def test_dependency_track_parser_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/one_finding.json",
+ get_unit_tests_path() + "/scans/dependency_track/one_finding.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -57,7 +57,7 @@ def test_dependency_track_parser_has_one_finding(self):
def test_dependency_track_parser_v3_8_0(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json",
+ get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -67,7 +67,7 @@ def test_dependency_track_parser_v3_8_0(self):
def test_dependency_track_parser_findings_with_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json",
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -79,7 +79,7 @@ def test_dependency_track_parser_findings_with_alias(self):
def test_dependency_track_parser_findings_with_empty_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json",
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json", encoding="utf-8",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -88,7 +88,7 @@ def test_dependency_track_parser_findings_with_empty_alias(self):
self.assertIn("CVE-2022-2053", findings[11].unsaved_vulnerability_ids)
def test_dependency_track_parser_findings_with_cvssV3_score(self):
- with open(f"{get_unit_tests_path()}/scans/dependency_track/many_findings_with_cvssV3_score.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/dependency_track/many_findings_with_cvssV3_score.json", encoding="utf-8") as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
@@ -98,7 +98,7 @@ def test_dependency_track_parser_findings_with_cvssV3_score(self):
self.assertEqual(8.3, findings[0].cvssv3_score)
def test_dependency_track_parser_findings_with_epss_score(self):
- with open(f"{get_unit_tests_path()}/scans/dependency_track/dependency_track_4.10_2024_02_11.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/dependency_track/dependency_track_4.10_2024_02_11.json", encoding="utf-8") as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_detect_secrets_parser.py b/unittests/tools/test_detect_secrets_parser.py
index d403cf7af71..421d393f845 100644
--- a/unittests/tools/test_detect_secrets_parser.py
+++ b/unittests/tools/test_detect_secrets_parser.py
@@ -10,13 +10,13 @@
class TestDetectSecretsParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/detect_secrets/no_findings.json") as testfile:
+ with open("unittests/scans/detect_secrets/no_findings.json", encoding="utf-8") as testfile:
parser = DetectSecretsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/detect_secrets/many_findings.json") as testfile:
+ with open("unittests/scans/detect_secrets/many_findings.json", encoding="utf-8") as testfile:
parser = DetectSecretsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index 820972c0769..22c0d66f28d 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -7,7 +7,7 @@ class TestDockerBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json",
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json", encoding="utf-8",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json",
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json", encoding="utf-8",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json",
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json", encoding="utf-8",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dockle_parser.py b/unittests/tools/test_dockle_parser.py
index 1ecd8b2a7be..314112299c2 100644
--- a/unittests/tools/test_dockle_parser.py
+++ b/unittests/tools/test_dockle_parser.py
@@ -6,13 +6,13 @@
class TestDockleParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/dockle/no_findings.json") as testfile:
+ with open("unittests/scans/dockle/no_findings.json", encoding="utf-8") as testfile:
parser = DockleParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/dockle/many_findings.json") as testfile:
+ with open("unittests/scans/dockle/many_findings.json", encoding="utf-8") as testfile:
parser = DockleParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_drheader_parser.py b/unittests/tools/test_drheader_parser.py
index ccda65e2864..9eb07664bfd 100644
--- a/unittests/tools/test_drheader_parser.py
+++ b/unittests/tools/test_drheader_parser.py
@@ -6,35 +6,35 @@
class TestDrHeaderParser(DojoTestCase):
def test_parse_file_has_no_findings(self):
- testfile = open("unittests/scans/drheader/no_vulns.json")
+ testfile = open("unittests/scans/drheader/no_vulns.json", encoding="utf-8")
parser = DrHeaderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_has_many_finding_one_tool(self):
- testfile = open("unittests/scans/drheader/scan.json")
+ testfile = open("unittests/scans/drheader/scan.json", encoding="utf-8")
parser = DrHeaderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
def test_parse_file_has_many_finding_one_tool2(self):
- testfile = open("unittests/scans/drheader/scan2.json")
+ testfile = open("unittests/scans/drheader/scan2.json", encoding="utf-8")
parser = DrHeaderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
def test_parse_file_has_many_finding_one_tool3(self):
- testfile = open("unittests/scans/drheader/scan3.json")
+ testfile = open("unittests/scans/drheader/scan3.json", encoding="utf-8")
parser = DrHeaderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(11, len(findings))
def test_parse_file_has_many_finding_multiple_urls(self):
- testfile = open("unittests/scans/drheader/multiple_urls.json")
+ testfile = open("unittests/scans/drheader/multiple_urls.json", encoding="utf-8")
parser = DrHeaderParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_eslint_parser.py b/unittests/tools/test_eslint_parser.py
index 02668608909..c1574c574e4 100644
--- a/unittests/tools/test_eslint_parser.py
+++ b/unittests/tools/test_eslint_parser.py
@@ -5,21 +5,21 @@
class TestESLintParser(DojoTestCase):
def test_parse_file_has_two_findings(self):
- testfile = open("unittests/scans/eslint/scan.json")
+ testfile = open("unittests/scans/eslint/scan.json", encoding="utf-8")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(2, len(findings))
def test_parse_empty_file(self):
- testfile = open("unittests/scans/eslint/empty.json")
+ testfile = open("unittests/scans/eslint/empty.json", encoding="utf-8")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_no_finding(self):
- testfile = open("unittests/scans/eslint/no_finding.json")
+ testfile = open("unittests/scans/eslint/no_finding.json", encoding="utf-8")
parser = ESLintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
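Two idioms for opening fixtures sit side by side in this patch: the bare testfile = open(...) / testfile.close() pair, as in the ESLint tests above, and the context-manager form used by most of the other tests. The with-form closes the handle even when the parser raises, so cleanup cannot be skipped by a failing assertion. A sketch of the equivalent rewrite of the first ESLint test, assuming the imports these test modules already use (DojoTestCase, ESLintParser, Test):

# Assumed imports, following the conventions of these test modules:
# from dojo.models import Test
# from dojo.tools.eslint.parser import ESLintParser
# from unittests.dojo_test_case import DojoTestCase

class TestESLintParser(DojoTestCase):
    def test_parse_file_has_two_findings(self):
        # The with-block closes testfile even if get_findings() raises.
        with open("unittests/scans/eslint/scan.json", encoding="utf-8") as testfile:
            parser = ESLintParser()
            findings = parser.get_findings(testfile, Test())
        self.assertEqual(2, len(findings))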
diff --git a/unittests/tools/test_fortify_parser.py b/unittests/tools/test_fortify_parser.py
index cba9a984a9b..43ab4a911ec 100644
--- a/unittests/tools/test_fortify_parser.py
+++ b/unittests/tools/test_fortify_parser.py
@@ -5,7 +5,7 @@
class TestFortifyParser(DojoTestCase):
def test_fortify_many_findings(self):
- with open("unittests/scans/fortify/fortify_many_findings.xml") as testfile:
+ with open("unittests/scans/fortify/fortify_many_findings.xml", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(324, len(findings))
@@ -17,7 +17,7 @@ def test_fortify_many_findings(self):
self.assertEqual(81, finding.line)
def test_fortify_few_findings(self):
- with open("unittests/scans/fortify/fortify_few_findings.xml") as testfile:
+ with open("unittests/scans/fortify/fortify_few_findings.xml", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -30,7 +30,7 @@ def test_fortify_few_findings(self):
self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)
def test_fortify_few_findings_count_chart(self):
- with open("unittests/scans/fortify/fortify_few_findings_count_chart.xml") as testfile:
+ with open("unittests/scans/fortify/fortify_few_findings_count_chart.xml", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -43,7 +43,7 @@ def test_fortify_few_findings_count_chart(self):
self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)
def test_fortify_issue6260(self):
- with open("unittests/scans/fortify/issue6260.xml") as testfile:
+ with open("unittests/scans/fortify/issue6260.xml", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(16, len(findings))
@@ -56,7 +56,7 @@ def test_fortify_issue6260(self):
self.assertEqual("7A2F1C728BDDBB17C7CB31CEDF5D8F85", finding.unique_id_from_tool)
def test_fortify_issue6082(self):
- with open("unittests/scans/fortify/issue6082.xml") as testfile:
+ with open("unittests/scans/fortify/issue6082.xml", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -76,7 +76,7 @@ def test_fortify_issue6082(self):
self.assertEqual("B5B15F27E10F4D7799BD0ED1E6D34C5D", finding.unique_id_from_tool)
def test_fortify_many_fdr_findings(self):
- with open("unittests/scans/fortify/many_findings.fpr") as testfile:
+ with open("unittests/scans/fortify/many_findings.fpr", encoding="utf-8") as testfile:
parser = FortifyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(61, len(findings))
diff --git a/unittests/tools/test_gcloud_artifact_scan_parser.py b/unittests/tools/test_gcloud_artifact_scan_parser.py
index c581d9f8f6d..88a2b0ec099 100644
--- a/unittests/tools/test_gcloud_artifact_scan_parser.py
+++ b/unittests/tools/test_gcloud_artifact_scan_parser.py
@@ -5,7 +5,7 @@
class TestGCloudArtifactScanParser(DojoTestCase):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open(f"{get_unit_tests_path()}/scans/gcloud_artifact_scan/many_vulns.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gcloud_artifact_scan/many_vulns.json", encoding="utf-8") as testfile:
parser = GCloudArtifactScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py
index 8684efe4b53..bbd8d6e7ed2 100644
--- a/unittests/tools/test_generic_parser.py
+++ b/unittests/tools/test_generic_parser.py
@@ -25,7 +25,7 @@ def setUp(self):
self.test = Test(engagement=self.engagement)
def test_parse_report1(self):
- with open("unittests/scans/generic/generic_report1.csv") as file:
+ with open("unittests/scans/generic/generic_report1.csv", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, self.test)
for finding in findings:
@@ -434,7 +434,7 @@ def test_column_order_is_flexible(self):
self.assertEqual(fields1, fields2)
def test_parse_json(self):
- with open("unittests/scans/generic/generic_report1.json") as file:
+ with open("unittests/scans/generic/generic_report1.json", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -465,7 +465,7 @@ def test_parse_json(self):
self.assertIn(finding.severity, Finding.SEVERITIES)
def test_parse_json2(self):
- with open("unittests/scans/generic/generic_report2.json") as file:
+ with open("unittests/scans/generic/generic_report2.json", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
for finding in findings:
@@ -488,7 +488,7 @@ def test_parse_json2(self):
self.assertEqual("Some mitigation", finding.mitigation)
def test_parse_json3(self):
- with open("unittests/scans/generic/generic_report3.json") as file:
+ with open("unittests/scans/generic/generic_report3.json", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
self.assertEqual(3, len(findings))
@@ -526,7 +526,7 @@ def test_parse_json3(self):
self.assertEqual("test-pest", endpoint.path)
def test_parse_endpoints_and_vulnerability_ids_json(self):
- with open("unittests/scans/generic/generic_report4.json") as file:
+ with open("unittests/scans/generic/generic_report4.json", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
self.assertEqual(1, len(findings))
@@ -557,7 +557,7 @@ def test_parse_endpoints_and_vulnerability_ids_json(self):
self.assertEqual("CVE-2015-9235", finding.unsaved_vulnerability_ids[1])
def test_parse_host_and_vulnerability_id_csv(self):
- with open("unittests/scans/generic/generic_report4.csv") as file:
+ with open("unittests/scans/generic/generic_report4.csv", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
self.assertEqual(4, len(findings))
@@ -599,7 +599,7 @@ def test_parse_host_and_vulnerability_id_csv(self):
self.assertIsNone(finding.unsaved_vulnerability_ids)
def test_parse_json_with_image(self):
- with open("unittests/scans/generic/test_with_image.json") as file:
+ with open("unittests/scans/generic/test_with_image.json", encoding="utf-8") as file:
parser = GenericParser()
findings = parser.get_findings(file, Test())
self.assertEqual(1, len(findings))
@@ -612,7 +612,7 @@ def test_parse_json_with_image(self):
self.assertIn("data", image)
def test_parse_json_custom_test(self):
- with open("unittests/scans/generic/generic_custom_test.json") as file:
+ with open("unittests/scans/generic/generic_custom_test.json", encoding="utf-8") as file:
parser = GenericParser()
tests = parser.get_tests(parser.get_scan_types()[0], file)
self.assertEqual(1, len(tests))
@@ -637,14 +637,14 @@ def test_parse_json_custom_test(self):
self.assertEqual("TEST1", finding.vuln_id_from_tool)
def test_parse_json_empty_finding(self):
- with open("unittests/scans/generic/generic_empty.json") as file:
+ with open("unittests/scans/generic/generic_empty.json", encoding="utf-8") as file:
parser = GenericParser()
with self.assertRaisesMessage(ValueError,
"Required fields are missing: ['description', 'severity', 'title']"):
parser.get_findings(file, Test())
def test_parse_json_invalid_finding(self):
- with open("unittests/scans/generic/generic_invalid.json") as file:
+ with open("unittests/scans/generic/generic_invalid.json", encoding="utf-8") as file:
parser = GenericParser()
with self.assertRaisesMessage(ValueError,
"Not allowed fields are present: ['invalid_field', 'last_status_update']"):
diff --git a/unittests/tools/test_ggshield_parser.py b/unittests/tools/test_ggshield_parser.py
index e4163900f1d..27d4e267663 100644
--- a/unittests/tools/test_ggshield_parser.py
+++ b/unittests/tools/test_ggshield_parser.py
@@ -6,13 +6,13 @@
class TestGgshieldParser(DojoTestCase):
def test_parse_empty(self):
- with open("unittests/scans/ggshield/no_finding.json") as testfile:
+ with open("unittests/scans/ggshield/no_finding.json", encoding="utf-8") as testfile:
parser = GgshieldParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/ggshield/one_finding.json") as testfile:
+ with open("unittests/scans/ggshield/one_finding.json", encoding="utf-8") as testfile:
parser = GgshieldParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -23,7 +23,7 @@ def test_parse_one_finding(self):
self.assertEqual("2021-07-05", finding.date)
def test_parse_many_finding(self):
- with open("unittests/scans/ggshield/many_findings.json") as testfile:
+ with open("unittests/scans/ggshield/many_findings.json", encoding="utf-8") as testfile:
parser = GgshieldParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_github_vulnerability_parser.py b/unittests/tools/test_github_vulnerability_parser.py
index 857d665ebfc..c0c9a0350ec 100644
--- a/unittests/tools/test_github_vulnerability_parser.py
+++ b/unittests/tools/test_github_vulnerability_parser.py
@@ -10,14 +10,14 @@
class TestGithubVulnerabilityParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
"""sample with zero vulnerability"""
- with open("unittests/scans/github_vulnerability/github-0-vuln.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github-0-vuln.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
"""sample with one vulnerability"""
- with open("unittests/scans/github_vulnerability/github-1-vuln.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github-1-vuln.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -37,7 +37,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
def test_parse_file_with_one_vuln_has_one_finding_and_dependabot_direct_link(self):
"""sample with one vulnerability"""
- with open("unittests/scans/github_vulnerability/github-1-vuln-repo-dependabot-link.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github-1-vuln-repo-dependabot-link.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -57,13 +57,13 @@ def test_parse_file_with_one_vuln_has_one_finding_and_dependabot_direct_link(sel
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
"""sample with five vulnerability"""
- with open("unittests/scans/github_vulnerability/github-5-vuln.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github-5-vuln.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
def test_parse_file_issue2984(self):
- with open("unittests/scans/github_vulnerability/github_issue2984.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_issue2984.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -87,7 +87,7 @@ def test_parse_file_issue2984(self):
self.assertEqual(finding.unique_id_from_tool, "DASFMMFKLNKDSAKFSDLANJKKFDSNJSAKDFNJKDFS=")
def test_parse_file_search(self):
- with open("unittests/scans/github_vulnerability/github_search.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_search.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -115,7 +115,7 @@ def test_parse_file_search(self):
def test_parse_file_search2(self):
"""Search result with more data/attributes"""
- with open("unittests/scans/github_vulnerability/github_search2.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_search2.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -143,7 +143,7 @@ def test_parse_file_search2(self):
def test_parse_file_search3(self):
"""Search result with more data/attributes"""
- with open("unittests/scans/github_vulnerability/github_search3.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_search3.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -175,7 +175,7 @@ def test_parse_file_search3(self):
def test_parse_file_search4_null_cvss_vector(self):
"""Search result with more data/attributes"""
- with open("unittests/scans/github_vulnerability/github_search4_null_cvss_vector.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_search4_null_cvss_vector.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -206,7 +206,7 @@ def test_parse_file_search4_null_cvss_vector(self):
self.assertEqual(finding.unique_id_from_tool, "MDI4OlJlcG9zaXRvcnlWdWxuZXJhYmlsaXR5QWxlcnQ1NTE5NTI2OTM=")
def test_parse_cwe_and_date(self):
- with open("unittests/scans/github_vulnerability/github_h2.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_h2.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -229,7 +229,7 @@ def test_parse_cwe_and_date(self):
self.assertEqual(finding.active, True)
def test_parse_state(self):
- with open("unittests/scans/github_vulnerability/github_shiro.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github_shiro.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -253,7 +253,7 @@ def test_parse_state(self):
self.assertEqual(finding.is_mitigated, True)
def test_parser_version(self):
- with open("unittests/scans/github_vulnerability/github-vuln-version.json") as testfile:
+ with open("unittests/scans/github_vulnerability/github-vuln-version.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -268,7 +268,7 @@ def test_parser_version(self):
self.assertEqual(finding.component_version, "5.3.29")
def test_parse_file_issue_9582(self):
- with open("unittests/scans/github_vulnerability/issue_9582.json") as testfile:
+ with open("unittests/scans/github_vulnerability/issue_9582.json", encoding="utf-8") as testfile:
parser = GithubVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_gitlab_api_fuzzing_parser.py b/unittests/tools/test_gitlab_api_fuzzing_parser.py
index 2948134d72d..0da1fadde85 100644
--- a/unittests/tools/test_gitlab_api_fuzzing_parser.py
+++ b/unittests/tools/test_gitlab_api_fuzzing_parser.py
@@ -5,14 +5,14 @@
class TestGitlabAPIFuzzingParser(DojoTestCase):
def test_gitlab_api_fuzzing_parser_with_no_vuln_has_no_findings(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_0_vuln.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_0_vuln.json", encoding="utf-8") as testfile:
parser = GitlabAPIFuzzingParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabAPIFuzzingParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -28,7 +28,7 @@ def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v14(s
)
def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabAPIFuzzingParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -44,7 +44,7 @@ def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v15(s
)
def test_gitlab_api_fuzzing_parser_with_invalid_json(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_invalid.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_invalid.json", encoding="utf-8") as testfile:
# Something is wrong with JSON file
with self.assertRaises((KeyError, ValueError)):
parser = GitlabAPIFuzzingParser()
diff --git a/unittests/tools/test_gitlab_container_scan_parser.py b/unittests/tools/test_gitlab_container_scan_parser.py
index ab3e05a2b6d..4bc69cd809e 100644
--- a/unittests/tools/test_gitlab_container_scan_parser.py
+++ b/unittests/tools/test_gitlab_container_scan_parser.py
@@ -7,13 +7,13 @@
class TestGitlabContainerScanParser(DojoTestCase):
def test_gitlab_container_scan_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-0-vuln.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v14(self):
- with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v14.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -32,7 +32,7 @@ def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v14(self):
self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)
def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v15(self):
- with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v15.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -51,7 +51,7 @@ def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v15(self):
self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)
def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v14(self):
- with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v14.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -60,7 +60,7 @@ def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v14(self)
self.assertEqual(5, len(findings))
def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v15(self):
- with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v15.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -69,7 +69,7 @@ def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v15(self)
self.assertEqual(5, len(findings))
def test_gitlab_container_scan_parser_with_fless_data_v14(self):
- with open("unittests/scans/gitlab_container_scan/issue6639_v14.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/issue6639_v14.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -104,7 +104,7 @@ def test_gitlab_container_scan_parser_with_fless_data_v14(self):
self.assertEqual("CVE-2022-0778", finding.unique_id_from_tool)
def test_gitlab_container_scan_parser_with_fless_data_v15(self):
- with open("unittests/scans/gitlab_container_scan/issue6639_v15.json") as testfile:
+ with open("unittests/scans/gitlab_container_scan/issue6639_v15.json", encoding="utf-8") as testfile:
parser = GitlabContainerScanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_gitlab_dast_parser.py b/unittests/tools/test_gitlab_dast_parser.py
index a2d5c2f7628..01107a3a93a 100644
--- a/unittests/tools/test_gitlab_dast_parser.py
+++ b/unittests/tools/test_gitlab_dast_parser.py
@@ -5,13 +5,13 @@
class TestGitlabDastParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/gitlab_dast/gitlab_dast_zero_vul.json") as testfile:
+ with open("unittests/scans/gitlab_dast/gitlab_dast_zero_vul.json", encoding="utf-8") as testfile:
parser = GitlabDastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_v14(self):
- with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v14.json") as testfile:
+ with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v14.json", encoding="utf-8") as testfile:
parser = GitlabDastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -39,7 +39,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
self.assertEqual(359, finding.cwe)
def test_parse_file_with_one_vuln_has_one_finding_v15(self):
- with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v15.json") as testfile:
+ with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v15.json", encoding="utf-8") as testfile:
parser = GitlabDastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -67,7 +67,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
self.assertEqual(359, finding.cwe)
def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
- with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v14.json") as testfile:
+ with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v14.json", encoding="utf-8") as testfile:
parser = GitlabDastParser()
findings = parser.get_findings(testfile, Test())
@@ -105,7 +105,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
self.assertIn("Ensure that your web server,", finding.mitigation)
def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
- with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v15.json") as testfile:
+ with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v15.json", encoding="utf-8") as testfile:
parser = GitlabDastParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_gitlab_dep_scan_parser.py b/unittests/tools/test_gitlab_dep_scan_parser.py
index a31b98051fa..ea306247c22 100644
--- a/unittests/tools/test_gitlab_dep_scan_parser.py
+++ b/unittests/tools/test_gitlab_dep_scan_parser.py
@@ -6,25 +6,25 @@
class TestGitlabDepScanParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-0-vuln.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_two_vuln_has_one_missing_component__v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v14.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -36,7 +36,7 @@ def test_parse_file_with_two_vuln_has_one_missing_component__v14(self):
self.assertEqual("v0.0.0-20190308221718-c2843e01d9a2", finding.component_version)
def test_parse_file_with_two_vuln_has_one_missing_component__v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v15.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -48,7 +48,7 @@ def test_parse_file_with_two_vuln_has_one_missing_component__v15(self):
self.assertEqual("v0.0.0-20190308221718-c2843e01d9a2", finding.component_version)
def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertGreater(len(findings), 2)
@@ -57,7 +57,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
self.assertEqual("CVE-2020-29652", findings[0].unsaved_vulnerability_ids[0])
def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabDepScanParser()
findings = parser.get_findings(testfile, Test())
self.assertGreater(len(findings), 2)
diff --git a/unittests/tools/test_gitlab_sast_parser.py b/unittests/tools/test_gitlab_sast_parser.py
index 16e9ef4ad90..e0757ac6694 100644
--- a/unittests/tools/test_gitlab_sast_parser.py
+++ b/unittests/tools/test_gitlab_sast_parser.py
@@ -6,13 +6,13 @@
class TestGitlabSastParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-0-vuln.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-0-vuln.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_v14(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
self.assertEqual("Critical", finding.severity)
def test_parse_file_with_one_vuln_has_one_finding_v15(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -30,7 +30,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
self.assertEqual("Critical", finding.severity)
def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(219, len(findings))
@@ -45,7 +45,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
self.assertEqual("Critical", finding.severity)
def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(219, len(findings))
@@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
self.assertEqual("Critical", finding.severity)
def test_parse_file_with_various_confidences_v14(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 8)
@@ -79,7 +79,7 @@ def test_parse_file_with_various_confidences_v14(self):
self.assertEqual("Certain", finding.get_scanner_confidence_text())
def test_parse_file_with_various_confidences_v15(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 8)
@@ -98,7 +98,7 @@ def test_parse_file_with_various_confidences_v15(self):
self.assertEqual("", finding.get_scanner_confidence_text())
def test_parse_file_with_various_cwes_v14(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v14.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 3)
@@ -107,7 +107,7 @@ def test_parse_file_with_various_cwes_v14(self):
self.assertEqual(None, findings[2].cwe)
def test_parse_file_with_various_cwes_v15(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v15.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 3)
@@ -116,7 +116,7 @@ def test_parse_file_with_various_cwes_v15(self):
self.assertEqual(None, findings[2].cwe)
def test_parse_file_issue4336_v14(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v14.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -124,7 +124,7 @@ def test_parse_file_issue4336_v14(self):
self.assertEqual("[None severity] Potential XSS vulnerability", finding.title)
def test_parse_file_issue4336_v15(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v15.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -132,7 +132,7 @@ def test_parse_file_issue4336_v15(self):
self.assertEqual("[None severity] Potential XSS vulnerability", finding.title)
def test_without_scan_v14(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
tests = parser.get_tests(None, testfile)
self.assertEqual(1, len(tests))
@@ -144,7 +144,7 @@ def test_without_scan_v14(self):
self.assertEqual(1, len(findings))
def test_without_scan_v15(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
tests = parser.get_tests(None, testfile)
self.assertEqual(1, len(tests))
@@ -156,7 +156,7 @@ def test_without_scan_v15(self):
self.assertEqual(1, len(findings))
def test_with_scan_v14(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v14.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
tests = parser.get_tests(None, testfile)
self.assertEqual(1, len(tests))
@@ -168,7 +168,7 @@ def test_with_scan_v14(self):
self.assertEqual(8, len(findings))
def test_with_scan_v15(self):
- with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v15.json") as testfile:
+ with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
parser = GitlabSastParser()
tests = parser.get_tests(None, testfile)
self.assertEqual(1, len(tests))
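
A detail worth noting while skimming these hunks: the GitLab SAST tests exercise two parser entry points with different argument orders, get_findings(file, test) and get_tests(scan_type, file). A sketch of the pattern, mirroring the calls above (the .findings attribute access is inferred from how the surrounding tests count findings and may differ in detail):

from dojo.models import Test
from dojo.tools.gitlab_sast.parser import GitlabSastParser

# Flat list of findings attached to an existing Test.
with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as f:
    findings = GitlabSastParser().get_findings(f, Test())

# The report split into ParserTest objects, each carrying its own findings;
# note the reversed argument order (scan type first, file second).
with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as f:
    tests = GitlabSastParser().get_tests(None, f)
    findings = tests[0].findings
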
diff --git a/unittests/tools/test_gitlab_secret_detection_report_parser.py b/unittests/tools/test_gitlab_secret_detection_report_parser.py
index 32747f7b88a..2d1df5bbe18 100644
--- a/unittests/tools/test_gitlab_secret_detection_report_parser.py
+++ b/unittests/tools/test_gitlab_secret_detection_report_parser.py
@@ -9,7 +9,7 @@
class TestGitlabSecretDetectionReportParser(DojoTestCase):
def test_gitlab_secret_detection_report_parser_with_no_vuln_has_no_findings(self):
- with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_0_vuln.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_0_vuln.json", encoding="utf-8") as testfile:
parser = GitlabSecretDetectionReportParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
@@ -17,7 +17,7 @@ def test_gitlab_secret_detection_report_parser_with_no_vuln_has_no_findings(self
def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v14(
self,
):
- with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabSecretDetectionReportParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -38,7 +38,7 @@ def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v1
def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v15(
self,
):
- with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabSecretDetectionReportParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -59,7 +59,7 @@ def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v1
def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_v14(
self,
):
- with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v14.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v14.json", encoding="utf-8") as testfile:
parser = GitlabSecretDetectionReportParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -70,7 +70,7 @@ def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_
def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_v15(
self,
):
- with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v15.json") as testfile:
+ with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v15.json", encoding="utf-8") as testfile:
parser = GitlabSecretDetectionReportParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_gitleaks_parser.py b/unittests/tools/test_gitleaks_parser.py
index 43313e2e3f5..1b70f854dd4 100644
--- a/unittests/tools/test_gitleaks_parser.py
+++ b/unittests/tools/test_gitleaks_parser.py
@@ -6,13 +6,13 @@
class TestGitleaksParser(DojoTestCase):
def test_parse_file_legacy_with_no_findings(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/no_findings.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/no_findings.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_legacy_with_one_finding(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/data_one.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/data_one.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -24,7 +24,7 @@ def test_parse_file_legacy_with_one_finding(self):
self.assertIn("AsymmetricPrivateKey", finding.unsaved_tags)
def test_parse_file_legacy_with_multiple_finding(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/data_many.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/data_many.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -36,13 +36,13 @@ def test_parse_file_legacy_with_multiple_finding(self):
self.assertIn("Github", finding.unsaved_tags)
def test_parse_file_legacy_with_multiple_redacted_finding(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/redacted_data_many.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/redacted_data_many.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
def test_parse_file_legacy_from_issue4336(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/issue4336.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/issue4336.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -53,7 +53,7 @@ def test_parse_file_legacy_from_issue4336(self):
self.assertEqual(23, finding.line)
def test_parse_file_from_version_7_5_0(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/version_7.5.0.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/version_7.5.0.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -84,7 +84,7 @@ def test_parse_file_from_version_7_5_0(self):
self.assertIn("AWS", finding.unsaved_tags)
def test_parse_file_from_version_8(self):
- with open(get_unit_tests_path() + "/scans/gitleaks/gitleaks8_many.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/gitleaks/gitleaks8_many.json", encoding="utf-8") as testfile:
parser = GitleaksParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_gosec_parser.py b/unittests/tools/test_gosec_parser.py
index 53c79a71ada..c696692a44b 100644
--- a/unittests/tools/test_gosec_parser.py
+++ b/unittests/tools/test_gosec_parser.py
@@ -6,7 +6,7 @@
class TestGosecParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
- with open("unittests/scans/gosec/many_vulns.json") as testfile:
+ with open("unittests/scans/gosec/many_vulns.json", encoding="utf-8") as testfile:
parser = GosecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(28, len(findings))
diff --git a/unittests/tools/test_govulncheck_parser.py b/unittests/tools/test_govulncheck_parser.py
index 78f706e47a9..1865ff3c5d2 100644
--- a/unittests/tools/test_govulncheck_parser.py
+++ b/unittests/tools/test_govulncheck_parser.py
@@ -7,7 +7,7 @@ class TestGovulncheckParser(DojoTestCase):
def test_parse_empty(self):
with self.assertRaises(ValueError) as exp:
- with open("unittests/scans/govulncheck/empty.json") as testfile:
+ with open("unittests/scans/govulncheck/empty.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
parser.get_findings(testfile, Test())
self.assertIn(
@@ -15,13 +15,13 @@ def test_parse_empty(self):
)
def test_parse_no_findings(self):
- with open("unittests/scans/govulncheck/no_vulns.json") as testfile:
+ with open("unittests/scans/govulncheck/no_vulns.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/govulncheck/many_vulns.json") as testfile:
+ with open("unittests/scans/govulncheck/many_vulns.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test())
@@ -67,13 +67,13 @@ def test_parse_many_findings(self):
self.assertEqual("https://groups.google.com/g/golang-announce/c/x49AQzIVX-s", finding.references)
def test_parse_new_version_no_findings(self):
- with open("unittests/scans/govulncheck/no_vulns_new_version.json") as testfile:
+ with open("unittests/scans/govulncheck/no_vulns_new_version.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_new_version_many_findings(self):
- with open("unittests/scans/govulncheck/many_vulns_new_version.json") as testfile:
+ with open("unittests/scans/govulncheck/many_vulns_new_version.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test())
@@ -94,7 +94,7 @@ def test_parse_new_version_many_findings(self):
self.assertIsNotNone(finding.references)
def test_parse_new_version_many_findings_custom_severity(self):
- with open("unittests/scans/govulncheck/many_vulns_new_version_custom_severity.json") as testfile:
+ with open("unittests/scans/govulncheck/many_vulns_new_version_custom_severity.json", encoding="utf-8") as testfile:
parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_h1_parser.py b/unittests/tools/test_h1_parser.py
index 079404cd220..4b4e6020200 100644
--- a/unittests/tools/test_h1_parser.py
+++ b/unittests/tools/test_h1_parser.py
@@ -6,19 +6,19 @@
class TestHackerOneParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_finding(self):
- with open("unittests/scans/h1/data_empty.json") as testfile:
+ with open("unittests/scans/h1/data_empty.json", encoding="utf-8") as testfile:
parser = H1Parser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/h1/data_one.json") as testfile:
+ with open("unittests/scans/h1/data_one.json", encoding="utf-8") as testfile:
parser = H1Parser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/h1/data_many.json") as testfile:
+ with open("unittests/scans/h1/data_many.json", encoding="utf-8") as testfile:
parser = H1Parser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_hadolint_parser.py b/unittests/tools/test_hadolint_parser.py
index d19ed046b74..1e96dfe287a 100644
--- a/unittests/tools/test_hadolint_parser.py
+++ b/unittests/tools/test_hadolint_parser.py
@@ -6,7 +6,7 @@
class TesthadolintParser(DojoTestCase):
def test_parse_file_with_one_dockerfile(self):
- testfile = open("unittests/scans/hadolint/one_dockerfile.json")
+ testfile = open("unittests/scans/hadolint/one_dockerfile.json", encoding="utf-8")
parser = HadolintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -16,7 +16,7 @@ def test_parse_file_with_one_dockerfile(self):
self.assertEqual(finding.file_path, "django-DefectDojo\\Dockerfile.django")
def test_parse_file_with_many_dockerfile(self):
- testfile = open("unittests/scans/hadolint/many_dockerfile.json")
+ testfile = open("unittests/scans/hadolint/many_dockerfile.json", encoding="utf-8")
parser = HadolintParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
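
A few modules touched here (hadolint, ibm_app, the jfrog parsers, kubeaudit) still use a bare open()/close() pair rather than a context manager. The encoding fix applies either way, but a with-block would also close the handle when get_findings() raises. A hypothetical follow-up shape, not part of this patch (class name and final assertion are placeholders):

from dojo.models import Test
from dojo.tools.hadolint.parser import HadolintParser
from unittests.dojo_test_case import DojoTestCase


class TestHadolintParserWithBlock(DojoTestCase):  # hypothetical name
    def test_parse_file_with_one_dockerfile(self):
        # The with-block closes the file even if get_findings() raises,
        # which the manual open()/close() pair above does not guarantee.
        with open("unittests/scans/hadolint/one_dockerfile.json", encoding="utf-8") as testfile:
            findings = HadolintParser().get_findings(testfile, Test())
        self.assertIsInstance(findings, list)  # placeholder; real tests assert counts and fields
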
diff --git a/unittests/tools/test_harbor_vulnerability_parser.py b/unittests/tools/test_harbor_vulnerability_parser.py
index 9d6aa3e5784..6659f23d604 100644
--- a/unittests/tools/test_harbor_vulnerability_parser.py
+++ b/unittests/tools/test_harbor_vulnerability_parser.py
@@ -6,7 +6,7 @@
class TestHarborVulnerabilityParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/harbor_vulnerability/harbor-0-vuln.json") as testfile:
+ with open("unittests/scans/harbor_vulnerability/harbor-0-vuln.json", encoding="utf-8") as testfile:
parser = HarborVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
@@ -14,7 +14,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
# Sample with One Test
# + also verify data with one test
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/harbor_vulnerability/harbor-1-vuln.json") as testfile:
+ with open("unittests/scans/harbor_vulnerability/harbor-1-vuln.json", encoding="utf-8") as testfile:
parser = HarborVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -35,7 +35,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
# Sample with Multiple Test
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/harbor_vulnerability/harbor-5-vuln.json") as testfile:
+ with open("unittests/scans/harbor_vulnerability/harbor-5-vuln.json", encoding="utf-8") as testfile:
parser = HarborVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
@@ -47,7 +47,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
# Sample with Trivy Test
def test_parse_file_with_multiple_vuln_has_multiple_trivy_findings(self):
- with open("unittests/scans/harbor_vulnerability/harbor-trivy-vuln.json") as testfile:
+ with open("unittests/scans/harbor_vulnerability/harbor-trivy-vuln.json", encoding="utf-8") as testfile:
parser = HarborVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
@@ -57,7 +57,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_trivy_findings(self):
# Sample with harborapi pip
def test_parse_file_with_multiple_vuln_has_harborapi_pip_package(self):
- with open("unittests/scans/harbor_vulnerability/harborapipip.json") as testfile:
+ with open("unittests/scans/harbor_vulnerability/harborapipip.json", encoding="utf-8") as testfile:
parser = HarborVulnerabilityParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_hcl_appscan_parser.py b/unittests/tools/test_hcl_appscan_parser.py
index 5539260fa7a..6ee0f083ccc 100644
--- a/unittests/tools/test_hcl_appscan_parser.py
+++ b/unittests/tools/test_hcl_appscan_parser.py
@@ -5,14 +5,14 @@
class TestHCLAppScanParser(DojoTestCase):
def test_no_findings(self):
- my_file_handle = open("unittests/scans/hcl_appscan/no_findings.xml")
+ my_file_handle = open("unittests/scans/hcl_appscan/no_findings.xml", encoding="utf-8")
parser = HCLAppScanParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
self.assertEqual(0, len(findings))
def test_many_findings(self):
- my_file_handle = open("unittests/scans/hcl_appscan/many_findings.xml")
+ my_file_handle = open("unittests/scans/hcl_appscan/many_findings.xml", encoding="utf-8")
parser = HCLAppScanParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
@@ -26,7 +26,7 @@ def test_many_findings(self):
self.assertEqual(findings[9].cwe, 522)
def test_issue_9279(self):
- my_file_handle = open("unittests/scans/hcl_appscan/issue_9279.xml")
+ my_file_handle = open("unittests/scans/hcl_appscan/issue_9279.xml", encoding="utf-8")
parser = HCLAppScanParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
@@ -39,7 +39,7 @@ def test_issue_9279(self):
self.assertEqual(findings[10].cwe, 1275)
def test_issue_10074(self):
- with open("unittests/scans/hcl_appscan/issue_10074.xml") as my_file_handle:
+ with open("unittests/scans/hcl_appscan/issue_10074.xml", encoding="utf-8") as my_file_handle:
parser = HCLAppScanParser()
findings = parser.get_findings(my_file_handle, None)
my_file_handle.close()
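
One quirk preserved by this hunk: test_issue_10074 calls close() inside its with-block. That is harmless, since file.close() is idempotent and the context manager's second close is a no-op, but the line could simply be dropped:

from dojo.tools.hcl_appscan.parser import HCLAppScanParser

with open("unittests/scans/hcl_appscan/issue_10074.xml", encoding="utf-8") as my_file_handle:
    parser = HCLAppScanParser()
    findings = parser.get_findings(my_file_handle, None)
# no explicit close needed: the context manager has already closed the handle
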
diff --git a/unittests/tools/test_horusec_parser.py b/unittests/tools/test_horusec_parser.py
index ee562025241..10e27efee40 100644
--- a/unittests/tools/test_horusec_parser.py
+++ b/unittests/tools/test_horusec_parser.py
@@ -9,7 +9,7 @@
class TestHorusecParser(DojoTestCase):
def test_get_findings(self):
"""Version 2.6.3 with big project in Python"""
- with open(path.join(path.dirname(__file__), "../scans/horusec/version_2.6.3.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/version_2.6.3.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(267, len(findings))
@@ -17,7 +17,7 @@ def test_get_findings(self):
def test_get_tests(self):
"""Version 2.6.3 with big project in Python"""
- with open(path.join(path.dirname(__file__), "../scans/horusec/version_2.6.3.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/version_2.6.3.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
tests = parser.get_tests("Horusec Scan", testfile)
self.assertEqual(1, len(tests))
@@ -49,7 +49,7 @@ def test_get_tests(self):
def test_get_tests_ok(self):
"""Version 2.6.3 with big project in Python"""
- with open(path.join(path.dirname(__file__), "../scans/horusec/horres3.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/horres3.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
tests = parser.get_tests("Horusec Scan", testfile)
self.assertEqual(1, len(tests))
@@ -81,7 +81,7 @@ def test_get_tests_ok(self):
def test_get_tests_issue_6258(self):
""""""
- with open(path.join(path.dirname(__file__), "../scans/horusec/issue_6258.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/issue_6258.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
tests = parser.get_tests("Horusec Scan", testfile)
self.assertEqual(1, len(tests))
@@ -117,7 +117,7 @@ def test_get_tests_issue_6258(self):
def test_get_tests_pr_6563(self):
""""""
- with open(path.join(path.dirname(__file__), "../scans/horusec/pr_6563.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/pr_6563.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
tests = parser.get_tests("Horusec Scan", testfile)
self.assertEqual(1, len(tests))
@@ -136,7 +136,7 @@ def test_get_tests_pr_6563(self):
def test_issue_9939(self):
""""""
- with open(path.join(path.dirname(__file__), "../scans/horusec/issue_9939.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/horusec/issue_9939.json"), encoding="utf-8") as testfile:
parser = HorusecParser()
tests = parser.get_tests("Horusec Scan", testfile)
self.assertEqual(1, len(tests))
diff --git a/unittests/tools/test_humble_parser.py b/unittests/tools/test_humble_parser.py
index 2b0a85a2ca4..d4284e1692b 100644
--- a/unittests/tools/test_humble_parser.py
+++ b/unittests/tools/test_humble_parser.py
@@ -5,7 +5,7 @@
class TestHumbleParser(DojoTestCase):
def test_humble_parser_with_many_findings(self):
- with open("unittests/scans/humble/many_findings.json") as testfile:
+ with open("unittests/scans/humble/many_findings.json", encoding="utf-8") as testfile:
parser = HumbleParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -19,7 +19,7 @@ def test_humble_parser_with_many_findings(self):
self.assertEqual("Deprecated header: Strict-Transport-Security (Recommended Values)", finding.title)
def test_humble_parser_with_many_findings2(self):
- with open("unittests/scans/humble/many_findings2.json") as testfile:
+ with open("unittests/scans/humble/many_findings2.json", encoding="utf-8") as testfile:
parser = HumbleParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_huskyci_parser.py b/unittests/tools/test_huskyci_parser.py
index 22199ed5bb5..7c7bb1ad59a 100644
--- a/unittests/tools/test_huskyci_parser.py
+++ b/unittests/tools/test_huskyci_parser.py
@@ -6,14 +6,14 @@
class TestHuskyCIParser(DojoTestCase):
def test_parse_file_no_finding(self):
- with open("unittests/scans/huskyci/huskyci_report_no_finding.json") as testfile:
+ with open("unittests/scans/huskyci/huskyci_report_no_finding.json", encoding="utf-8") as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_has_one_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json",
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json", encoding="utf-8",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_has_one_finding_one_tool(self):
def test_parse_file_has_many_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json",
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json", encoding="utf-8",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_has_many_finding_one_tool(self):
def test_parse_file_has_many_finding_two_tools(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json",
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json", encoding="utf-8",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_hydra_parser.py b/unittests/tools/test_hydra_parser.py
index 93077abb166..a4045e2c261 100644
--- a/unittests/tools/test_hydra_parser.py
+++ b/unittests/tools/test_hydra_parser.py
@@ -9,25 +9,25 @@ class TestHydraParser(DojoTestCase):
__test_datetime = datetime(2019, 3, 1, 14, 44, 22)
def test_invalid_json_format(self):
- with open("unittests/scans/hydra/invalid.json") as testfile:
+ with open("unittests/scans/hydra/invalid.json", encoding="utf-8") as testfile:
parser = HydraParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_parser_ensures_data_is_for_hydra_before_parsing(self):
- with open("unittests/scans/hydra/oddly_familiar_json_that_isnt_us.json") as testfile:
+ with open("unittests/scans/hydra/oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile:
parser = HydraParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_hydra_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/hydra/hydra_report_no_finding.json") as testfile:
+ with open("unittests/scans/hydra/hydra_report_no_finding.json", encoding="utf-8") as testfile:
parser = HydraParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_hydra_parser_with_one_finding_has_one_finding(self):
- with open("unittests/scans/hydra/hydra_report_one_finding.json") as testfile:
+ with open("unittests/scans/hydra/hydra_report_one_finding.json", encoding="utf-8") as testfile:
parser = HydraParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -45,7 +45,7 @@ def test_hydra_parser_with_one_finding_has_one_finding(self):
)
def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
- with open("unittests/scans/hydra/hydra_report_one_finding_missing_date.json") as testfile:
+ with open("unittests/scans/hydra/hydra_report_one_finding_missing_date.json", encoding="utf-8") as testfile:
parser = HydraParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -63,7 +63,7 @@ def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
)
def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self):
- with open("unittests/scans/hydra/hydra_report_two_findings_with_one_incomplete.json") as testfile:
+ with open("unittests/scans/hydra/hydra_report_two_findings_with_one_incomplete.json", encoding="utf-8") as testfile:
parser = HydraParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -81,7 +81,7 @@ def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self
)
def test_hydra_parser_with_many_findings_has_many_findings(self):
- with open("unittests/scans/hydra/hydra_report_many_finding.json") as testfile:
+ with open("unittests/scans/hydra/hydra_report_many_finding.json", encoding="utf-8") as testfile:
parser = HydraParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
diff --git a/unittests/tools/test_ibm_app_parser.py b/unittests/tools/test_ibm_app_parser.py
index ecaa3f017ea..7e6fc5d4474 100644
--- a/unittests/tools/test_ibm_app_parser.py
+++ b/unittests/tools/test_ibm_app_parser.py
@@ -6,7 +6,7 @@
class TestIbmAppParser(DojoTestCase):
def test_parse_file(self):
- testfile = open("unittests/scans/ibm_app/testfire.xml")
+ testfile = open("unittests/scans/ibm_app/testfire.xml", encoding="utf-8")
parser = IbmAppParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_immuniweb_parser.py b/unittests/tools/test_immuniweb_parser.py
index 97de358c917..413a2c0a1e8 100644
--- a/unittests/tools/test_immuniweb_parser.py
+++ b/unittests/tools/test_immuniweb_parser.py
@@ -6,13 +6,13 @@
class TestImmuniwebParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/immuniweb/ImmuniWeb-0-vuln.xml") as testfile:
+ with open("unittests/scans/immuniweb/ImmuniWeb-0-vuln.xml", encoding="utf-8") as testfile:
parser = ImmuniwebParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/immuniweb/ImmuniWeb-1-vuln.xml") as testfile:
+ with open("unittests/scans/immuniweb/ImmuniWeb-1-vuln.xml", encoding="utf-8") as testfile:
parser = ImmuniwebParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/immuniweb/ImmuniWeb-multiple-vuln.xml") as testfile:
+ with open("unittests/scans/immuniweb/ImmuniWeb-multiple-vuln.xml", encoding="utf-8") as testfile:
parser = ImmuniwebParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_intsights_parser.py b/unittests/tools/test_intsights_parser.py
index 0154a791389..c4460d9464c 100644
--- a/unittests/tools/test_intsights_parser.py
+++ b/unittests/tools/test_intsights_parser.py
@@ -6,7 +6,7 @@
class TestIntSightsParser(DojoTestCase):
def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
self):
- with open("unittests/scans/intsights/intsights_one_vul.json") as testfile:
+ with open("unittests/scans/intsights/intsights_one_vul.json", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
@@ -27,7 +27,7 @@ def test_intsights_parser_with_one_critical_vuln_has_one_findings_json(
def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv(
self):
- with open("unittests/scans/intsights/intsights_one_vuln.csv") as testfile:
+ with open("unittests/scans/intsights/intsights_one_vuln.csv", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -42,31 +42,31 @@ def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv(
finding.title)
def test_intsights_parser_with_many_vuln_has_many_findings_json(self):
- with open("unittests/scans/intsights/intsights_many_vul.json") as testfile:
+ with open("unittests/scans/intsights/intsights_many_vul.json", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
def test_intsights_parser_with_many_vuln_has_many_findings_csv(self):
- with open("unittests/scans/intsights/intsights_many_vuln.csv") as testfile:
+ with open("unittests/scans/intsights/intsights_many_vuln.csv", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
def test_intsights_parser_invalid_text_with_error_csv(self):
with self.assertRaises(ValueError):
- with open("unittests/scans/intsights/intsights_invalid_file.txt") as testfile:
+ with open("unittests/scans/intsights/intsights_invalid_file.txt", encoding="utf-8") as testfile:
parser = IntSightsParser()
parser.get_findings(testfile, Test())
def test_intsights_parser_with_no_alerts_json(self):
- with open("unittests/scans/intsights/intsights_zero_vuln.json") as testfile:
+ with open("unittests/scans/intsights/intsights_zero_vuln.json", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_intsights_parser_with_no_alerts_csv(self):
- with open("unittests/scans/intsights/intsights_zero_vuln.csv") as testfile:
+ with open("unittests/scans/intsights/intsights_zero_vuln.csv", encoding="utf-8") as testfile:
parser = IntSightsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
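
The intsights fixtures include CSV as well as JSON. For the CSV paths there is a second open() keyword worth knowing about, although this patch does not add it: the csv module documentation recommends newline="" on files handed to csv.reader so quoted fields containing newlines are parsed correctly. A sketch of that variant using only the stdlib (whether IntSightsParser actually needs it depends on how it reads the handle):

import csv

# newline="" defers newline translation to the csv module, per the csv docs.
with open("unittests/scans/intsights/intsights_many_vuln.csv", encoding="utf-8", newline="") as testfile:
    rows = list(csv.reader(testfile))
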
diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
index 7b511521058..dae26302328 100644
--- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
+++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
@@ -9,14 +9,14 @@
class TestJFrogXrayApiSummaryArtifactParser(DojoTestCase):
def test_parse_file_with_no_vuln(self):
- testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/no_vuln.json")
+ testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/no_vuln.json", encoding="utf-8")
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln(self):
- testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/one_vuln.json")
+ testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/one_vuln.json", encoding="utf-8")
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -52,7 +52,7 @@ def test_parse_file_with_one_vuln(self):
def test_parse_file_with_many_vulns(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json",
+ "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json", encoding="utf-8",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
@@ -64,7 +64,7 @@ def test_parse_file_with_many_vulns(self):
def test_parse_file_with_malformed_cvssv3_score(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json",
+ "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json", encoding="utf-8",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
index b37f4e1b7b6..257a88dd49f 100644
--- a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
+++ b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py
@@ -10,7 +10,7 @@
class TestJFrogXrayOnDemandBinaryScanParser(DojoTestCase):
def test_parse_file_with_one_vuln(self):
- testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/one_vuln.json")
+ testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/one_vuln.json", encoding="utf-8")
parser = JFrogXrayOnDemandBinaryScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual("High", item.severity)
def test_parse_file_with_many_vulns(self):
- testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns.json")
+ testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns.json", encoding="utf-8")
parser = JFrogXrayOnDemandBinaryScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -50,14 +50,14 @@ def test_clean_title(self):
self.assertEqual("Processing some specially crafted ASN.1 object identifiers or", clean_title("Issue summary: Processing some specially crafted ASN.1 object identifiers or\ndata containing them may be very slow."))
def test_parse_file_with_many_vulns_docker(self):
- testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_docker.json")
+ testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_docker.json", encoding="utf-8")
parser = JFrogXrayOnDemandBinaryScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(4, len(findings))
def test_parse_file_with_many_vulns_pypi(self):
- testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_pypi.json")
+ testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_pypi.json", encoding="utf-8")
parser = JFrogXrayOnDemandBinaryScanParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_jfrog_xray_unified_parser.py b/unittests/tools/test_jfrog_xray_unified_parser.py
index 4acdf8bd893..25349d89838 100644
--- a/unittests/tools/test_jfrog_xray_unified_parser.py
+++ b/unittests/tools/test_jfrog_xray_unified_parser.py
@@ -8,14 +8,14 @@
class TestJFrogXrayUnifiedParser(DojoTestCase):
def test_parse_file_with_no_vuln(self):
- testfile = open("unittests/scans/jfrog_xray_unified/no_vuln.json")
+ testfile = open("unittests/scans/jfrog_xray_unified/no_vuln.json", encoding="utf-8")
parser = JFrogXrayUnifiedParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln(self):
- testfile = open("unittests/scans/jfrog_xray_unified/one_vuln.json")
+ testfile = open("unittests/scans/jfrog_xray_unified/one_vuln.json", encoding="utf-8")
parser = JFrogXrayUnifiedParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -45,14 +45,14 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual("XRAY-139239", item.unique_id_from_tool)
def test_parse_file_with_many_vulns(self):
- testfile = open("unittests/scans/jfrog_xray_unified/many_vulns.json")
+ testfile = open("unittests/scans/jfrog_xray_unified/many_vulns.json", encoding="utf-8")
parser = JFrogXrayUnifiedParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_parse_file_with_very_many_vulns(self):
- testfile = open("unittests/scans/jfrog_xray_unified/very_many_vulns.json")
+ testfile = open("unittests/scans/jfrog_xray_unified/very_many_vulns.json", encoding="utf-8")
parser = JFrogXrayUnifiedParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -339,7 +339,7 @@ def test_parse_file_with_very_many_vulns(self):
# **finished various packages**
def test_parse_file_with_another_report(self):
- testfile = open("unittests/scans/jfrog_xray_unified/Vulnerabilities-Report-XRAY_Unified.json")
+ testfile = open("unittests/scans/jfrog_xray_unified/Vulnerabilities-Report-XRAY_Unified.json", encoding="utf-8")
parser = JFrogXrayUnifiedParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_jfrogxray_parser.py b/unittests/tools/test_jfrogxray_parser.py
index ba75e6ca8c2..0a4aeb2e39b 100644
--- a/unittests/tools/test_jfrogxray_parser.py
+++ b/unittests/tools/test_jfrogxray_parser.py
@@ -6,7 +6,7 @@
class TestJfrogJFrogXrayParser(DojoTestCase):
def test_parse_file_with_one_vuln(self):
- testfile = open("unittests/scans/jfrogxray/one_vuln.json")
+ testfile = open("unittests/scans/jfrogxray/one_vuln.json", encoding="utf-8")
parser = JFrogXrayParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -19,14 +19,14 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual(787, item.cwe)
def test_parse_file_with_many_vulns(self):
- testfile = open("unittests/scans/jfrogxray/many_vulns.json")
+ testfile = open("unittests/scans/jfrogxray/many_vulns.json", encoding="utf-8")
parser = JFrogXrayParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_parse_file_with_many_vulns2(self):
- testfile = open("unittests/scans/jfrogxray/many_vulns2.json")
+ testfile = open("unittests/scans/jfrogxray/many_vulns2.json", encoding="utf-8")
parser = JFrogXrayParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_kics_parser.py b/unittests/tools/test_kics_parser.py
index da9fdaeb454..03fe1ddaac9 100644
--- a/unittests/tools/test_kics_parser.py
+++ b/unittests/tools/test_kics_parser.py
@@ -6,13 +6,13 @@
class TestKICSParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/kics/no_findings.json") as testfile:
+ with open("unittests/scans/kics/no_findings.json", encoding="utf-8") as testfile:
parser = KICSParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/kics/many_findings.json") as testfile:
+ with open("unittests/scans/kics/many_findings.json", encoding="utf-8") as testfile:
parser = KICSParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(18, len(findings))
diff --git a/unittests/tools/test_kiuwan_parser.py b/unittests/tools/test_kiuwan_parser.py
index f3c71124b45..d27f003bbbb 100644
--- a/unittests/tools/test_kiuwan_parser.py
+++ b/unittests/tools/test_kiuwan_parser.py
@@ -6,31 +6,31 @@
class TestKiuwanParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/kiuwan/kiuwan_no_vuln.csv") as testfile:
+ with open("unittests/scans/kiuwan/kiuwan_no_vuln.csv", encoding="utf-8") as testfile:
parser = KiuwanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_two_vuln_has_two_findings(self):
- with open("unittests/scans/kiuwan/kiuwan_two_vuln.csv") as testfile:
+ with open("unittests/scans/kiuwan/kiuwan_two_vuln.csv", encoding="utf-8") as testfile:
parser = KiuwanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/kiuwan/kiuwan_many_vuln.csv") as testfile:
+ with open("unittests/scans/kiuwan/kiuwan_many_vuln.csv", encoding="utf-8") as testfile:
parser = KiuwanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(131, len(findings))
def test_parse_file_with_defects(self):
- with open("unittests/scans/kiuwan/kiuwan_defects.csv") as testfile:
+ with open("unittests/scans/kiuwan/kiuwan_defects.csv", encoding="utf-8") as testfile:
parser = KiuwanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_issue_9308(self):
- with open("unittests/scans/kiuwan/issue_9308.csv") as testfile:
+ with open("unittests/scans/kiuwan/issue_9308.csv", encoding="utf-8") as testfile:
parser = KiuwanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_kiuwan_sca_parser.py b/unittests/tools/test_kiuwan_sca_parser.py
index 2d8778aa018..3c868c483c8 100644
--- a/unittests/tools/test_kiuwan_sca_parser.py
+++ b/unittests/tools/test_kiuwan_sca_parser.py
@@ -6,27 +6,27 @@
# ./dc-unittest.sh --profile postgres-redis --test-case unittests.tools.test_kiuwan_sca_parser.TestKiuwanSCAParser
class TestKiuwanSCAParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/kiuwan_sca/kiuwan_sca_no_vuln.json") as testfile:
+ with open("unittests/scans/kiuwan_sca/kiuwan_sca_no_vuln.json", encoding="utf-8") as testfile:
parser = KiuwanSCAParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_two_vuln_has_two_findings(self):
- with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json") as testfile:
+ with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile:
parser = KiuwanSCAParser()
findings = parser.get_findings(testfile, Test())
# file contains 3, but we only get 2 as "muted" ones are ignored:
self.assertEqual(2, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/kiuwan_sca/kiuwan_sca_many_vuln.json") as testfile:
+ with open("unittests/scans/kiuwan_sca/kiuwan_sca_many_vuln.json", encoding="utf-8") as testfile:
parser = KiuwanSCAParser()
findings = parser.get_findings(testfile, Test())
# also tests deduplication as there are 28 findings in the file:
self.assertEqual(27, len(findings))
def test_correct_mapping(self):
- with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json") as testfile:
+ with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile:
parser = KiuwanSCAParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubeaudit_parser.py b/unittests/tools/test_kubeaudit_parser.py
index 1ec26a57292..dea4e51e5bf 100644
--- a/unittests/tools/test_kubeaudit_parser.py
+++ b/unittests/tools/test_kubeaudit_parser.py
@@ -6,7 +6,7 @@
class TestKubeAuditParser(DojoTestCase):
def test_parse_file_has_no_findings(self):
- testfile = open("unittests/scans/kubeaudit/kubeaudit.json")
+ testfile = open("unittests/scans/kubeaudit/kubeaudit.json", encoding="utf-8")
parser = KubeAuditParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py
index 56a8efa1f94..25b77faaf77 100644
--- a/unittests/tools/test_kubebench_parser.py
+++ b/unittests/tools/test_kubebench_parser.py
@@ -7,7 +7,7 @@ class TestKubeBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json",
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json", encoding="utf-8",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json",
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json", encoding="utf-8",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json",
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json", encoding="utf-8",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_controls_tag(self):
# The testfile has been derived from https://github.com/kubernetes-sigs/wg-policy-prototypes/blob/master/policy-report/kube-bench-adapter/samples/kube-bench-output.json
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json",
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json", encoding="utf-8",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py
index 4fee661bf45..2141d59bb1d 100644
--- a/unittests/tools/test_kubehunter_parser.py
+++ b/unittests/tools/test_kubehunter_parser.py
@@ -7,13 +7,13 @@
class TestKubeHunterParser(TestCase):
def test_kubehunter_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/kubehunter/kubehunter_zero_vul.json") as testfile:
+ with open("unittests/scans/kubehunter/kubehunter_zero_vul.json", encoding="utf-8") as testfile:
parser = KubeHunterParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_kubehunter_parser_with_one_criticle_vuln_has_one_findings(self):
- with open("unittests/scans/kubehunter/kubehunter_one_vul.json") as testfile:
+ with open("unittests/scans/kubehunter/kubehunter_one_vul.json", encoding="utf-8") as testfile:
parser = KubeHunterParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -28,7 +28,7 @@ def test_kubehunter_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual(finding.severity, "High")
def test_kubehunter_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/kubehunter/kubehunter_many_vul.json") as testfile:
+ with open("unittests/scans/kubehunter/kubehunter_many_vul.json", encoding="utf-8") as testfile:
parser = KubeHunterParser()
findings = parser.get_findings(testfile, Test())
@@ -36,7 +36,7 @@ def test_kubehunter_parser_with_many_vuln_has_many_findings(self):
def test_kubehunter_parser_empty_with_error(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/kubehunter/empty.json") as testfile:
+ with open("unittests/scans/kubehunter/empty.json", encoding="utf-8") as testfile:
parser = KubeHunterParser()
parser.get_findings(testfile, Test())
@@ -45,7 +45,7 @@ def test_kubehunter_parser_empty_with_error(self):
)
def test_kubehunter_parser_dupe(self):
- with open("unittests/scans/kubehunter/dupe.json") as testfile:
+ with open("unittests/scans/kubehunter/dupe.json", encoding="utf-8") as testfile:
parser = KubeHunterParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
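
In test_kubehunter_parser_empty_with_error (and the similar govulncheck and hydra tests) the open() call sits inside the assertRaises block. That works, but a narrower scope documents that the expected ValueError comes from the parser, not from opening the file. A possible variant, not part of the patch; this is a method-body excerpt, so self is the enclosing TestCase instance:

from dojo.models import Test
from dojo.tools.kubehunter.parser import KubeHunterParser

# Open first, then assert on the parser call alone.
with open("unittests/scans/kubehunter/empty.json", encoding="utf-8") as testfile:
    parser = KubeHunterParser()
    with self.assertRaises(ValueError):
        parser.get_findings(testfile, Test())
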
diff --git a/unittests/tools/test_kubescape_parser.py b/unittests/tools/test_kubescape_parser.py
index 6437b705ef0..caacd8fe85b 100644
--- a/unittests/tools/test_kubescape_parser.py
+++ b/unittests/tools/test_kubescape_parser.py
@@ -5,19 +5,19 @@
class TestKubescapeParser(DojoTestCase):
def test_parse_file_has_many_findings(self):
- with open(get_unit_tests_path() + "/scans/kubescape/many_findings.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/kubescape/many_findings.json", encoding="utf-8") as testfile:
parser = KubescapeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(710, len(findings))
def test_parse_file_has_many_results(self):
- with open(get_unit_tests_path() + "/scans/kubescape/results.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/kubescape/results.json", encoding="utf-8") as testfile:
parser = KubescapeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_a_failure(self):
- with open(get_unit_tests_path() + "/scans/kubescape/with_a_failure.json") as testfile:
+ with open(get_unit_tests_path() + "/scans/kubescape/with_a_failure.json", encoding="utf-8") as testfile:
parser = KubescapeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_legitify_parser.py b/unittests/tools/test_legitify_parser.py
new file mode 100644
index 00000000000..66f803258b2
--- /dev/null
+++ b/unittests/tools/test_legitify_parser.py
@@ -0,0 +1,43 @@
+from dojo.models import Test
+from dojo.tools.legitify.parser import LegitifyParser
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+
+
+class TestLegitifyParser(DojoTestCase):
+ def test_parse_file_with_many_findings(self):
+ with open(get_unit_tests_path() + "/scans/legitify/legitify_many_findings.json", encoding="utf-8") as testfile:
+ parser = LegitifyParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(16, len(findings))
+ with self.subTest(i=0):
+ finding = findings[0]
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("code_review_not_required", finding.vuln_id_from_tool)
+ self.assertEqual("Repository | Default Branch Should Require Code Review", finding.title)
+ self.assertFalse(finding.dynamic_finding)
+ self.assertTrue(finding.static_finding)
+ for finding in findings:
+ for endpoint in finding.unsaved_endpoints:
+ endpoint.clean()
+
+ def test_parse_file_with_one_finding(self):
+ with open(get_unit_tests_path() + "/scans/legitify/legitify_one_finding.json", encoding="utf-8") as testfile:
+ parser = LegitifyParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(1, len(findings))
+ with self.subTest(i=0):
+ finding = findings[0]
+ self.assertEqual("High", finding.severity)
+ self.assertEqual("code_review_not_required", finding.vuln_id_from_tool)
+ self.assertEqual("Repository | Default Branch Should Require Code Review", finding.title)
+ self.assertFalse(finding.dynamic_finding)
+ self.assertTrue(finding.static_finding)
+ for finding in findings:
+ for endpoint in finding.unsaved_endpoints:
+ endpoint.clean()
+
+ def test_parse_file_with_no_findings(self):
+ with open(get_unit_tests_path() + "/scans/legitify/legitify_no_findings.json", encoding="utf-8") as testfile:
+ parser = LegitifyParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(0, len(findings))
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index 3a48c5c49e9..393dd4097c1 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -6,13 +6,13 @@
class TestMendParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/mend/okhttp_no_vuln.json") as testfile:
+ with open("unittests/scans/mend/okhttp_no_vuln.json", encoding="utf-8") as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/mend/okhttp_one_vuln.json") as testfile:
+ with open("unittests/scans/mend/okhttp_one_vuln.json", encoding="utf-8") as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -23,14 +23,14 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(5.3, finding.cvssv3_score)
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/mend/okhttp_many_vuln.json") as testfile:
+ with open("unittests/scans/mend/okhttp_many_vuln.json", encoding="utf-8") as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
def test_parse_file_with_multiple_vuln_cli_output(self):
with open(
- get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json",
+ get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json", encoding="utf-8",
) as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_meterian_parser.py b/unittests/tools/test_meterian_parser.py
index d02b877aba8..2a5a9f3c27b 100644
--- a/unittests/tools/test_meterian_parser.py
+++ b/unittests/tools/test_meterian_parser.py
@@ -7,33 +7,33 @@ class TestMeterianParser(DojoTestCase):
def test_meterianParser_invalid_security_report_raise_ValueError_exception(self):
with self.assertRaises(ValueError):
- with open("unittests/scans/meterian/report_invalid.json") as testfile:
+ with open("unittests/scans/meterian/report_invalid.json", encoding="utf-8") as testfile:
parser = MeterianParser()
parser.get_findings(testfile, Test())
def test_meterianParser_report_has_no_finding(self):
- with open("unittests/scans/meterian/report_no_vulns.json") as testfile:
+ with open("unittests/scans/meterian/report_no_vulns.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_meterianParser_report_has_one_findings(self):
- with open("unittests/scans/meterian/report_one_vuln.json") as testfile:
+ with open("unittests/scans/meterian/report_one_vuln.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_meterianParser_report_has_many_findings(self):
- with open("unittests/scans/meterian/report_many_vulns.json") as testfile:
+ with open("unittests/scans/meterian/report_many_vulns.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(20, len(findings))
def test_meterianParser_finding_has_fields(self):
- with open("unittests/scans/meterian/report_one_vuln.json") as testfile:
+ with open("unittests/scans/meterian/report_one_vuln.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
@@ -52,7 +52,7 @@ def test_meterianParser_finding_has_fields(self):
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2020-26289", finding.unsaved_vulnerability_ids[0])
self.assertEqual(400, finding.cwe)
- self.assertTrue(finding.mitigation.startswith("## Remediation"))
+ self.assertTrue(finding.mitigation.startswith("## Remediation"), finding.mitigation)
self.assertIn("Upgrade date-and-time to version 0.14.2 or higher.", finding.mitigation)
self.assertIn("https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-26289", finding.references, "found " + finding.references)
self.assertIn("https://nvd.nist.gov/vuln/detail/CVE-2020-26289", finding.references, "found " + finding.references)
@@ -63,17 +63,17 @@ def test_meterianParser_finding_has_fields(self):
self.assertEqual(["nodejs"], finding.tags)
def test_meterianParser_finding_has_no_remediation(self):
- with open("unittests/scans/meterian/report_one_vuln_no_remediation.json") as testfile:
+ with open("unittests/scans/meterian/report_one_vuln_no_remediation.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
finding = findings[0]
- self.assertTrue(finding.mitigation.startswith("We were not able to provide a safe version for this library."))
+ self.assertTrue(finding.mitigation.startswith("We were not able to provide a safe version for this library."), finding.mitigation)
self.assertIn("You should consider replacing this component as it could be an "
+ "issue for the safety of your application.", finding.mitigation)
def test_meterianParser_dual_language_report_has_two_findins(self):
- with open("unittests/scans/meterian/report_multi_language.json") as testfile:
+ with open("unittests/scans/meterian/report_multi_language.json", encoding="utf-8") as testfile:
parser = MeterianParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py
index dd21b4f6101..07f43bca936 100644
--- a/unittests/tools/test_microfocus_webinspect_parser.py
+++ b/unittests/tools/test_microfocus_webinspect_parser.py
@@ -10,7 +10,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml",
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml", encoding="utf-8",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml",
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml", encoding="utf-8",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml",
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml", encoding="utf-8",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -73,7 +73,7 @@ def test_convert_severity(self):
)
def test_parse_file_version_18_20(self):
- with open("unittests/scans/microfocus_webinspect/Webinspect_V18_20.xml") as testfile:
+ with open("unittests/scans/microfocus_webinspect/Webinspect_V18_20.xml", encoding="utf-8") as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -121,7 +121,7 @@ def test_parse_file_issue7690(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml",
+ get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml", encoding="utf-8",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
diff --git a/unittests/tools/test_mobsf_parser.py b/unittests/tools/test_mobsf_parser.py
index 4f4953a894c..31a40eb7841 100644
--- a/unittests/tools/test_mobsf_parser.py
+++ b/unittests/tools/test_mobsf_parser.py
@@ -10,7 +10,7 @@ def test_parse_file(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/report1.json")
+ testfile = open("unittests/scans/mobsf/report1.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -36,7 +36,7 @@ def test_parse_file2(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/report2.json")
+ testfile = open("unittests/scans/mobsf/report2.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -50,7 +50,7 @@ def test_parse_file_3_1_9_android(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/android.json")
+ testfile = open("unittests/scans/mobsf/android.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -73,7 +73,7 @@ def test_parse_file_3_1_9_ios(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/ios.json")
+ testfile = open("unittests/scans/mobsf/ios.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -94,7 +94,7 @@ def test_parse_file_mobsf_3_7_9(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/mobsf_3_7_9.json")
+ testfile = open("unittests/scans/mobsf/mobsf_3_7_9.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -109,7 +109,7 @@ def test_parse_issue_9132(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/issue_9132.json")
+ testfile = open("unittests/scans/mobsf/issue_9132.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -120,7 +120,7 @@ def test_parse_allsafe(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/allsafe.json")
+ testfile = open("unittests/scans/mobsf/allsafe.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
@@ -131,7 +131,7 @@ def test_parse_damnvulnrablebank(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- testfile = open("unittests/scans/mobsf/damnvulnrablebank.json")
+ testfile = open("unittests/scans/mobsf/damnvulnrablebank.json", encoding="utf-8")
parser = MobSFParser()
findings = parser.get_findings(testfile, test)
testfile.close()
diff --git a/unittests/tools/test_mobsfscan_parser.py b/unittests/tools/test_mobsfscan_parser.py
index c0822bbf79c..9a41cb02035 100644
--- a/unittests/tools/test_mobsfscan_parser.py
+++ b/unittests/tools/test_mobsfscan_parser.py
@@ -6,13 +6,13 @@
class TestMobsfscanParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/mobsfscan/no_findings.json") as testfile:
+ with open("unittests/scans/mobsfscan/no_findings.json", encoding="utf-8") as testfile:
parser = MobsfscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/mobsfscan/many_findings.json") as testfile:
+ with open("unittests/scans/mobsfscan/many_findings.json", encoding="utf-8") as testfile:
parser = MobsfscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
@@ -83,7 +83,7 @@ def test_parse_many_findings(self):
self.assertIsNotNone(finding.references)
def test_parse_many_findings_cwe_lower(self):
- with open("unittests/scans/mobsfscan/many_findings_cwe_lower.json") as testfile:
+ with open("unittests/scans/mobsfscan/many_findings_cwe_lower.json", encoding="utf-8") as testfile:
parser = MobsfscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
diff --git a/unittests/tools/test_mozilla_observatory_parser.py b/unittests/tools/test_mozilla_observatory_parser.py
index 2dfd4b1f32c..d15a97a54f3 100644
--- a/unittests/tools/test_mozilla_observatory_parser.py
+++ b/unittests/tools/test_mozilla_observatory_parser.py
@@ -5,7 +5,7 @@
class TestMozillaObservatoryParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/mozilla_observatory/mozilla_no_vuln.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/mozilla_no_vuln.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -19,20 +19,20 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
self.assertIn("Preloaded via the HTTP Strict Transport Security (HSTS) preloading process", finding.description)
def test_parse_file_with_two_vuln_has_two_findings(self):
- with open("unittests/scans/mozilla_observatory/mozilla_gitlab_two_vuln.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/mozilla_gitlab_two_vuln.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/mozilla_observatory/mozilla_google_many_vuln.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/mozilla_google_many_vuln.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
def test_parse_file_cli_mozilla_org(self):
"""Test from the CLI"""
- with open("unittests/scans/mozilla_observatory/mozilla_org.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/mozilla_org.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
@@ -49,7 +49,7 @@ def test_parse_file_cli_mozilla_org(self):
def test_parse_file_cli_demo(self):
"""Test from the CLI"""
- with open("unittests/scans/mozilla_observatory/demo.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/demo.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
@@ -79,7 +79,7 @@ def test_parse_file_cli_demo(self):
def test_parse_file_cli_juicy(self):
"""Test from the CLI"""
- with open("unittests/scans/mozilla_observatory/juicy.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/juicy.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
@@ -120,7 +120,7 @@ def test_parse_file_cli_juicy(self):
def test_parse_file_cli_nmap_scanme(self):
"""Test from the CLI"""
- with open("unittests/scans/mozilla_observatory/nmap_scanme.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/nmap_scanme.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
@@ -179,7 +179,7 @@ def test_parse_file_cli_nmap_scanme(self):
def test_parse_file_cli_nmap_scanme_no_name_attribute(self):
"""Test from the CLI"""
- with open("unittests/scans/mozilla_observatory/nmap_scanme_2022.json") as testfile:
+ with open("unittests/scans/mozilla_observatory/nmap_scanme_2022.json", encoding="utf-8") as testfile:
parser = MozillaObservatoryParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(12, len(findings))
diff --git a/unittests/tools/test_ms_defender_parser.py b/unittests/tools/test_ms_defender_parser.py
index 9d22621096d..a83049bf2e6 100644
--- a/unittests/tools/test_ms_defender_parser.py
+++ b/unittests/tools/test_ms_defender_parser.py
@@ -6,7 +6,7 @@
class TestMSDefenderParser(DojoTestCase):
def test_parse_many_findings(self):
- testfile = open("unittests/scans/ms_defender/report_many_vulns.json")
+ testfile = open("unittests/scans/ms_defender/report_many_vulns.json", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -16,7 +16,7 @@ def test_parse_many_findings(self):
self.assertEqual("CVE-5678-9887_wjeriowerjoiewrjoweirjeowij", finding.title)
def test_parse_one_finding(self):
- testfile = open("unittests/scans/ms_defender/report_one_vuln.json")
+ testfile = open("unittests/scans/ms_defender/report_one_vuln.json", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -27,14 +27,14 @@ def test_parse_one_finding(self):
self.assertEqual("CVE-1234-5678", finding.unsaved_vulnerability_ids[0])
def test_parse_no_finding(self):
- testfile = open("unittests/scans/ms_defender/report_no_vuln.json")
+ testfile = open("unittests/scans/ms_defender/report_no_vuln.json", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parser_defender_zip(self):
- testfile = open("unittests/scans/ms_defender/defender.zip")
+ testfile = open("unittests/scans/ms_defender/defender.zip", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -47,7 +47,7 @@ def test_parser_defender_zip(self):
self.assertEqual("1.1.1.1", finding.unsaved_endpoints[0].host)
def test_parser_defender_wrong_machines_zip(self):
- testfile = open("unittests/scans/ms_defender/defender_wrong_machines.zip")
+ testfile = open("unittests/scans/ms_defender/defender_wrong_machines.zip", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -57,7 +57,7 @@ def test_parser_defender_wrong_machines_zip(self):
self.assertEqual("CVE-5678-9887_wjeriowerjoiewrjoweirjeowij", finding.title)
def test_parser_defender_multiple_files_zip(self):
- testfile = open("unittests/scans/ms_defender/defender_multiple_files.zip")
+ testfile = open("unittests/scans/ms_defender/defender_multiple_files.zip", encoding="utf-8")
parser = MSDefenderParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
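Note that the ms_defender cases above pass zip archives through the same text-mode open(..., encoding="utf-8"). That only works if the parser never reads the handle directly but reopens the archive by path (an assumed behaviour; zipfile needs bytes and rejects a text-mode handle). A hedged sketch of that pattern, with read_zip_members as a hypothetical helper, not the parser's actual code:

    import zipfile

    def read_zip_members(testfile):
        # zipfile.ZipFile(testfile) fails on a text-mode handle, which
        # yields str instead of bytes; reopening by the stored path works.
        with zipfile.ZipFile(testfile.name) as archive:
            return archive.namelist()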
diff --git a/unittests/tools/test_nancy_parser.py b/unittests/tools/test_nancy_parser.py
index 271a5c05e98..3823380dd00 100644
--- a/unittests/tools/test_nancy_parser.py
+++ b/unittests/tools/test_nancy_parser.py
@@ -7,13 +7,13 @@
class TestNancyParser(DojoTestCase):
def test_nancy_parser_with_no_vuln_has_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_no_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_no_findings.json"), encoding="utf-8") as testfile:
parser = NancyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_nancy_parser_with_one_vuln_has_one_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_one_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_one_findings.json"), encoding="utf-8") as testfile:
parser = NancyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -27,7 +27,7 @@ def test_nancy_parser_with_one_vuln_has_one_findings(self):
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N", finding.cvssv3)
def test_nancy_plus_parser_with_many_vuln_has_many_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_many_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/nancy/nancy_many_findings.json"), encoding="utf-8") as testfile:
parser = NancyParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(13, len(findings))
diff --git a/unittests/tools/test_netsparker_parser.py b/unittests/tools/test_netsparker_parser.py
index 0dbc95ff4bd..55e396205ab 100644
--- a/unittests/tools/test_netsparker_parser.py
+++ b/unittests/tools/test_netsparker_parser.py
@@ -6,7 +6,7 @@
class TestNetsparkerParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
- with open("unittests/scans/netsparker/netsparker_one_finding.json") as testfile:
+ with open("unittests/scans/netsparker/netsparker_one_finding.json", encoding="utf-8") as testfile:
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -26,7 +26,7 @@ def test_parse_file_with_one_finding(self):
self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
def test_parse_file_with_multiple_finding(self):
- with open("unittests/scans/netsparker/netsparker_many_findings.json") as testfile:
+ with open("unittests/scans/netsparker/netsparker_many_findings.json", encoding="utf-8") as testfile:
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(16, len(findings))
@@ -70,7 +70,7 @@ def test_parse_file_with_multiple_finding(self):
self.assertEqual(str(endpoint), "http://php.testsparker.com")
def test_parse_file_issue_9816(self):
- with open("unittests/scans/netsparker/issue_9816.json") as testfile:
+ with open("unittests/scans/netsparker/issue_9816.json", encoding="utf-8") as testfile:
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -84,7 +84,7 @@ def test_parse_file_issue_9816(self):
self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y"))
def test_parse_file_issue_10311(self):
- with open("unittests/scans/netsparker/issue_10311.json") as testfile:
+ with open("unittests/scans/netsparker/issue_10311.json", encoding="utf-8") as testfile:
parser = NetsparkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_neuvector_compliance_parser.py b/unittests/tools/test_neuvector_compliance_parser.py
index 30de36ea32e..3f82ec373b0 100644
--- a/unittests/tools/test_neuvector_compliance_parser.py
+++ b/unittests/tools/test_neuvector_compliance_parser.py
@@ -7,14 +7,14 @@
class TestNeuVectorComplianceParser(DojoTestCase):
def test_parse_file_with_no_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/no_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/no_vuln.json"), encoding="utf-8")
parser = NeuVectorComplianceParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/one_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/one_vuln.json"), encoding="utf-8")
parser = NeuVectorComplianceParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -22,7 +22,7 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual("docker_D.1.1.11", findings[0].vuln_id_from_tool)
def test_parse_file_with_many_vulns(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/many_vulns.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector_compliance/many_vulns.json"), encoding="utf-8")
parser = NeuVectorComplianceParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_neuvector_parser.py b/unittests/tools/test_neuvector_parser.py
index c2bdd07af76..362618375e5 100644
--- a/unittests/tools/test_neuvector_parser.py
+++ b/unittests/tools/test_neuvector_parser.py
@@ -7,14 +7,14 @@
class TestNeuVectorParser(DojoTestCase):
def test_parse_file_with_no_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/no_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/no_vuln.json"), encoding="utf-8")
parser = NeuVectorParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/one_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/one_vuln.json"), encoding="utf-8")
parser = NeuVectorParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual("CVE-2015-8356", findings[0].unsaved_vulnerability_ids[0])
def test_parse_file_with_many_vulns(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/many_vulns.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/neuvector/many_vulns.json"), encoding="utf-8")
parser = NeuVectorParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_nexpose_parser.py b/unittests/tools/test_nexpose_parser.py
index 3242949cd4b..c6fc0b116ea 100644
--- a/unittests/tools/test_nexpose_parser.py
+++ b/unittests/tools/test_nexpose_parser.py
@@ -10,7 +10,7 @@
class TestNexposeParser(DojoTestCase):
def test_nexpose_parser_has_no_finding(self):
- with open("unittests/scans/nexpose/no_vuln.xml") as testfile:
+ with open("unittests/scans/nexpose/no_vuln.xml", encoding="utf-8") as testfile:
parser = NexposeParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_nexpose_parser_has_many_finding(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
- with open("unittests/scans/nexpose/many_vulns.xml") as testfile:
+ with open("unittests/scans/nexpose/many_vulns.xml", encoding="utf-8") as testfile:
parser = NexposeParser()
findings = parser.get_findings(testfile, test)
@@ -135,7 +135,7 @@ def test_nexpose_parser_has_many_finding(self):
self.assertEqual("udp", endpoint.protocol)
def test_nexpose_parser_tests_outside_endpoint(self):
- with open("unittests/scans/nexpose/report_auth.xml") as testfile:
+ with open("unittests/scans/nexpose/report_auth.xml", encoding="utf-8") as testfile:
parser = NexposeParser()
findings = parser.get_findings(testfile, Test())
@@ -167,7 +167,7 @@ def test_nexpose_parser_tests_outside_endpoint(self):
self.assertIsNone(finding.unsaved_vulnerability_ids)
def test_nexpose_parser_dns(self):
- with open("unittests/scans/nexpose/dns.xml") as testfile:
+ with open("unittests/scans/nexpose/dns.xml", encoding="utf-8") as testfile:
parser = NexposeParser()
findings = parser.get_findings(testfile, Test())
@@ -208,7 +208,7 @@ def test_nexpose_parser_dns(self):
@override_settings(USE_FIRST_SEEN=True)
def test_nexpose_parser_use_first_seen(self):
- with open("unittests/scans/nexpose/dns.xml") as testfile:
+ with open("unittests/scans/nexpose/dns.xml", encoding="utf-8") as testfile:
parser = NexposeParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_nikto_parser.py b/unittests/tools/test_nikto_parser.py
index 4498095381a..ef24221b9ab 100644
--- a/unittests/tools/test_nikto_parser.py
+++ b/unittests/tools/test_nikto_parser.py
@@ -10,7 +10,7 @@ def test_parse_file_with_old_format(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- with open("unittests/scans/nikto/nikto-report-old-format.xml") as testfile:
+ with open("unittests/scans/nikto/nikto-report-old-format.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
@@ -19,7 +19,7 @@ def test_parse_file_with_old_format(self):
self.assertEqual(1, len(findings))
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/nikto/nikto-report-zero-vuln.xml") as testfile:
+ with open("unittests/scans/nikto/nikto-report-zero-vuln.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
@@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- with open("unittests/scans/nikto/nikto-report-one-vuln.xml") as testfile:
+ with open("unittests/scans/nikto/nikto-report-one-vuln.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
@@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
engagement = Engagement()
engagement.product = Product()
test.engagement = engagement
- with open("unittests/scans/nikto/nikto-report-many-vuln.xml") as testfile:
+ with open("unittests/scans/nikto/nikto-report-many-vuln.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, test)
for finding in findings:
@@ -51,7 +51,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
self.assertEqual(len(findings), 10)
def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/nikto/juice-shop.json") as testfile:
+ with open("unittests/scans/nikto/juice-shop.json", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -74,7 +74,7 @@ def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self):
self.assertEqual(140, len(finding.unsaved_endpoints))
def test_parse_file_json_with_uri_errors(self):
- with open("unittests/scans/nikto/nikto-output.xml") as testfile:
+ with open("unittests/scans/nikto/nikto-output.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -103,7 +103,7 @@ def test_parse_file_json_with_uri_errors(self):
self.assertEqual("examples/servlets/index.html", endpoint.path)
def test_parse_file_json_another(self):
- with open("unittests/scans/nikto/tdh.json") as testfile:
+ with open("unittests/scans/nikto/tdh.json", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -134,7 +134,7 @@ def test_parse_file_json_another(self):
self.assertIsNone(endpoint.path)
def test_parse_file_xml_another(self):
- with open("unittests/scans/nikto/tdh.xml") as testfile:
+ with open("unittests/scans/nikto/tdh.xml", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -176,7 +176,7 @@ def test_parse_file_xml_another(self):
self.assertIsNone(endpoint.path)
def test_parse_file_issue_9274(self):
- with open("unittests/scans/nikto/issue_9274.json") as testfile:
+ with open("unittests/scans/nikto/issue_9274.json", encoding="utf-8") as testfile:
parser = NiktoParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_nmap_parser.py b/unittests/tools/test_nmap_parser.py
index 26dffc2381c..5a36d43cc15 100644
--- a/unittests/tools/test_nmap_parser.py
+++ b/unittests/tools/test_nmap_parser.py
@@ -8,7 +8,7 @@
class TestNmapParser(DojoTestCase):
def test_parse_file_with_no_open_ports_has_no_findings(self):
- with open("unittests/scans/nmap/nmap_0port.xml") as testfile:
+ with open("unittests/scans/nmap/nmap_0port.xml", encoding="utf-8") as testfile:
parser = NmapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -17,7 +17,7 @@ def test_parse_file_with_no_open_ports_has_no_findings(self):
self.assertEqual(0, len(findings))
def test_parse_file_with_single_open_ports_has_single_finding(self):
- with open("unittests/scans/nmap/nmap_1port.xml") as testfile:
+ with open("unittests/scans/nmap/nmap_1port.xml", encoding="utf-8") as testfile:
parser = NmapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -37,7 +37,7 @@ def test_parse_file_with_single_open_ports_has_single_finding(self):
self.assertEqual("tcp", endpoint.protocol)
def test_parse_file_with_multiple_open_ports_has_multiple_finding(self):
- with open("unittests/scans/nmap/nmap_multiple_port.xml") as testfile:
+ with open("unittests/scans/nmap/nmap_multiple_port.xml", encoding="utf-8") as testfile:
parser = NmapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -57,7 +57,7 @@ def test_parse_file_with_multiple_open_ports_has_multiple_finding(self):
self.assertEqual("tcp", endpoint.protocol)
def test_parse_file_with_script_vulner(self):
- with open("unittests/scans/nmap/nmap_script_vulners.xml") as testfile:
+ with open("unittests/scans/nmap/nmap_script_vulners.xml", encoding="utf-8") as testfile:
parser = NmapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -83,7 +83,7 @@ def test_parse_file_with_script_vulner(self):
self.assertEqual(datetime.datetime(2020, 2, 17, 9, 7, 25), findings[2].date)
def test_parse_issue4406(self):
- with open("unittests/scans/nmap/issue4406.xml") as testfile:
+ with open("unittests/scans/nmap/issue4406.xml", encoding="utf-8") as testfile:
parser = NmapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -131,3 +131,6 @@ def test_parse_issue4406(self):
self.assertEqual("ip-10-250-195-71.eu-west-1.compute.internal", endpoint.host)
self.assertEqual(31641, endpoint.port)
self.assertEqual("tcp", endpoint.protocol)
+ with self.subTest(i=55):
+ finding = findings[55]
+ self.assertEqual("### Host\n\n**IP Address:** 10.250.195.71\n**FQDN:** ip-10-250-195-71.eu-west-1.compute.internal\n\n\n**Port/Protocol:** 30150/tcp\n\n\n**Script ID:** fingerprint-strings\n**Script Output:** \n GenericLines: \n E_BAD_PROTOCOL\n\n\n", finding.description)
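The new subTest pins the exact Markdown description emitted for the 30150/tcp service. A rough sketch of how such a block lines up with the asserted literal; describe_host is a hypothetical helper for illustration, not the parser's actual code, and the trailing-newline count is approximate:

    def describe_host(ip, fqdn, port, proto, script_id, output):
        return (
            f"### Host\n\n**IP Address:** {ip}\n**FQDN:** {fqdn}\n\n\n"
            f"**Port/Protocol:** {port}/{proto}\n\n\n"
            f"**Script ID:** {script_id}\n**Script Output:** {output}\n"
        )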
diff --git a/unittests/tools/test_noseyparker_parser.py b/unittests/tools/test_noseyparker_parser.py
index e55087eb3e5..714e8a4fa7b 100644
--- a/unittests/tools/test_noseyparker_parser.py
+++ b/unittests/tools/test_noseyparker_parser.py
@@ -7,13 +7,13 @@
class TestNoseyParkerParser(TestCase):
def test_noseyparker_parser__no_vulns(self):
- with open("unittests/scans/noseyparker/noseyparker_zero_vul.jsonl") as testfile:
+ with open("unittests/scans/noseyparker/noseyparker_zero_vul.jsonl", encoding="utf-8") as testfile:
parser = NoseyParkerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_noseyparker_parser_one_vuln(self):
- with open("unittests/scans/noseyparker/noseyparker_one_vul.jsonl") as testfile:
+ with open("unittests/scans/noseyparker/noseyparker_one_vul.jsonl", encoding="utf-8") as testfile:
parser = NoseyParkerParser()
findings = parser.get_findings(testfile, Test())
finding = findings[0]
@@ -24,7 +24,7 @@ def test_noseyparker_parser_one_vuln(self):
def test_noseyparker_parser_many_vulns(self):
# Testfile contains 5 lines (the middle 2 are duplicates, and line #4 has 2 identical matches)
- with open("unittests/scans/noseyparker/noseyparker_many_vul.jsonl") as testfile:
+ with open("unittests/scans/noseyparker/noseyparker_many_vul.jsonl", encoding="utf-8") as testfile:
parser = NoseyParkerParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -34,12 +34,12 @@ def test_noseyparker_parser_many_vulns(self):
def test_noseyparker_parser_error(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/noseyparker/empty_with_error.json") as testfile:
+ with open("unittests/scans/noseyparker/empty_with_error.json", encoding="utf-8") as testfile:
parser = NoseyParkerParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
- self.assertTrue(
- "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" in str(context.exception),
+ self.assertIn(
+ "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0", str(context.exception),
)
- self.assertTrue("ECONNREFUSED" in str(context.exception))
+ self.assertIn("ECONNREFUSED", str(context.exception))
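The assertTrue(... in ...) to assertIn swap above is behaviour-preserving but improves diagnostics: on failure, assertTrue only reports "False is not true", while assertIn names both operands. A self-contained illustration (AssertStyleDemo is a throwaway example, not part of the suite):

    import unittest

    class AssertStyleDemo(unittest.TestCase):
        def test_assert_in_reads_better(self):
            msg = "connect ECONNREFUSED 127.0.0.1:80"
            self.assertTrue("ECONNREFUSED" in msg)  # failure: "False is not true"
            self.assertIn("ECONNREFUSED", msg)      # failure names needle and haystack

    if __name__ == "__main__":
        unittest.main()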
diff --git a/unittests/tools/test_npm_audit_7_plus_parser.py b/unittests/tools/test_npm_audit_7_plus_parser.py
index e551c5d77b0..6028e992a48 100644
--- a/unittests/tools/test_npm_audit_7_plus_parser.py
+++ b/unittests/tools/test_npm_audit_7_plus_parser.py
@@ -7,14 +7,14 @@
class TestNpmAudit7PlusParser(DojoTestCase):
def test_npm_audit_7_plus_parser_with_no_vuln_has_no_findings(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/no_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/no_vuln.json"), encoding="utf-8")
parser = NpmAudit7PlusParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_npm_audit_7_plus_parser_with_one_vuln_has_one_findings(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/one_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/one_vuln.json"), encoding="utf-8")
parser = NpmAudit7PlusParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -28,7 +28,7 @@ def test_npm_audit_7_plus_parser_with_one_vuln_has_one_findings(self):
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
def test_npm_audit_7_plus_parser_with_many_vuln_has_many_findings(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/many_vulns.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/many_vulns.json"), encoding="utf-8")
parser = NpmAudit7PlusParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -40,3 +40,14 @@ def test_npm_audit_7_plus_parser_with_many_vuln_has_many_findings(self):
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertEqual("@vercel/fun", finding.title)
+
+ def test_npm_audit_7_plus_parser_issue_10801(self):
+ testfile = open(path.join(path.dirname(__file__), "../scans/npm_audit_7_plus/issue_10801.json"), encoding="utf-8")
+ parser = NpmAudit7PlusParser()
+ findings = parser.get_findings(testfile, Test())
+ testfile.close()
+ self.assertEqual(1, len(findings))
+ with self.subTest(i=0):
+ finding = findings[0]
+ self.assertEqual("Medium", finding.severity)
+ self.assertEqual(0, finding.cwe)
diff --git a/unittests/tools/test_npm_audit_parser.py b/unittests/tools/test_npm_audit_parser.py
index 5c11d848f0a..0d8493009af 100644
--- a/unittests/tools/test_npm_audit_parser.py
+++ b/unittests/tools/test_npm_audit_parser.py
@@ -7,13 +7,13 @@
class TestNpmAuditParser(DojoTestCase):
def test_npm_audit_parser_with_no_vuln_has_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/no_vuln.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/no_vuln.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_npm_audit_parser_with_one_criticle_vuln_has_one_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/one_vuln.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/one_vuln.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -22,7 +22,7 @@ def test_npm_audit_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual("1.9.2", findings[0].component_version)
def test_npm_audit_parser_with_many_vuln_has_many_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/many_vuln.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/many_vuln.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
@@ -39,7 +39,7 @@ def test_npm_audit_parser_with_many_vuln_has_many_findings(self):
def test_npm_audit_parser_multiple_cwes_per_finding(self):
# cwes formatted as escaped list: "cwe": "[\"CWE-346\",\"CWE-453\"]",
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/multiple_cwes.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/multiple_cwes.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(41, len(findings))
@@ -48,14 +48,14 @@ def test_npm_audit_parser_multiple_cwes_per_finding(self):
def test_npm_audit_parser_multiple_cwes_per_finding_list(self):
# cwes formatted as proper list: "cwe": ["CWE-918","CWE-1333"],
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/multiple_cwes2.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/multiple_cwes2.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
self.assertEqual(918, findings[0].cwe)
def test_npm_audit_parser_with_one_criticle_vuln_has_null_as_cwe(self):
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/cwe_null.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/cwe_null.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -65,7 +65,7 @@ def test_npm_audit_parser_with_one_criticle_vuln_has_null_as_cwe(self):
def test_npm_audit_parser_empty_with_error(self):
with self.assertRaises(ValueError) as context:
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/empty_with_error.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/empty_with_error.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
parser.get_findings(testfile, Test())
@@ -74,7 +74,7 @@ def test_npm_audit_parser_empty_with_error(self):
def test_npm_audit_parser_many_vuln_npm7(self):
with self.assertRaises(ValueError) as context:
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/many_vuln_npm7.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/many_vuln_npm7.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
parser.get_findings(testfile, Test())
@@ -90,7 +90,7 @@ def test_npm_audit_censored_hash(self):
self.assertEqual(censored_path, "censored_by_npm_audit>censored_by_npm_audit>lodash")
def test_npm_audit_parser_issue_7897(self):
- with open(path.join(path.dirname(__file__), "../scans/npm_audit/issue_7897.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/npm_audit/issue_7897.json"), encoding="utf-8") as testfile:
parser = NpmAuditParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
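The two multiple_cwes tests above document that npm audit reports CWEs either as an escaped JSON string ("[\"CWE-346\",\"CWE-453\"]") or as a proper list (["CWE-918","CWE-1333"]). A hedged sketch of normalising both shapes to the leading numeric CWE; first_cwe is a hypothetical helper, not NpmAuditParser's code:

    import json

    def first_cwe(raw):
        # Accept either shape npm audit emits and return the first CWE id.
        if isinstance(raw, str):      # escaped form: '["CWE-346","CWE-453"]'
            raw = json.loads(raw)
        return int(raw[0].removeprefix("CWE-")) if raw else 0

    assert first_cwe('["CWE-346","CWE-453"]') == 346
    assert first_cwe(["CWE-918", "CWE-1333"]) == 918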
diff --git a/unittests/tools/test_nsp_parser.py b/unittests/tools/test_nsp_parser.py
index 571adb41cdb..289c7a996ce 100644
--- a/unittests/tools/test_nsp_parser.py
+++ b/unittests/tools/test_nsp_parser.py
@@ -6,13 +6,13 @@
class TestNspParser(DojoTestCase):
def test_parse_none(self):
parser = NspParser()
- with open("unittests/scans/nsp/none.json") as test_file:
+ with open("unittests/scans/nsp/none.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
self.assertEqual(0, len(findings))
def test_parse_ok(self):
parser = NspParser()
- with open("unittests/scans/nsp/scan.json") as test_file:
+ with open("unittests/scans/nsp/scan.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_nuclei_parser.py b/unittests/tools/test_nuclei_parser.py
index fe8d81b26f2..7cede5a5463 100644
--- a/unittests/tools/test_nuclei_parser.py
+++ b/unittests/tools/test_nuclei_parser.py
@@ -10,19 +10,19 @@
class TestNucleiParser(DojoTestCase):
def test_parse_no_empty(self):
- with open("unittests/scans/nuclei/empty.jsonl") as testfile:
+ with open("unittests/scans/nuclei/empty.jsonl", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_no_findings(self):
- with open("unittests/scans/nuclei/no_findings.json") as testfile:
+ with open("unittests/scans/nuclei/no_findings.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_issue_9201(self):
- with open("unittests/scans/nuclei/issue_9201.json") as testfile:
+ with open("unittests/scans/nuclei/issue_9201.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -32,7 +32,7 @@ def test_parse_issue_9201(self):
self.assertEqual("example.com", finding.unsaved_endpoints[0].host)
def test_parse_many_findings(self):
- with open("unittests/scans/nuclei/many_findings.json") as testfile:
+ with open("unittests/scans/nuclei/many_findings.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -151,7 +151,7 @@ def test_parse_many_findings(self):
self.assertEqual("mysql-native-password-bruteforce", finding.vuln_id_from_tool)
def test_parse_many_findings_new(self):
- with open("unittests/scans/nuclei/many_findings_new.json") as testfile:
+ with open("unittests/scans/nuclei/many_findings_new.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -192,7 +192,7 @@ def test_parse_many_findings_new(self):
self.assertEqual("prometheus-metrics", finding.vuln_id_from_tool)
def test_parse_many_findings_third(self):
- with open("unittests/scans/nuclei/many_findings_third.json") as testfile:
+ with open("unittests/scans/nuclei/many_findings_third.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -226,7 +226,7 @@ def test_parse_many_findings_third(self):
self.assertEqual("asp.net-favicon", finding.component_name)
def test_parse_many_findings_v3(self):
- with open("unittests/scans/nuclei/multiple_v3.json") as testfile:
+ with open("unittests/scans/nuclei/multiple_v3.json", encoding="utf-8") as testfile:
parser = NucleiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_openscap_parser.py b/unittests/tools/test_openscap_parser.py
index 2fd718a1eef..0c2d5625f46 100644
--- a/unittests/tools/test_openscap_parser.py
+++ b/unittests/tools/test_openscap_parser.py
@@ -6,14 +6,14 @@
class TestOpenscapParser(DojoTestCase):
def test_openscap_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/openscap/no_vuln_rhsa.xml")
+ testfile = open("unittests/scans/openscap/no_vuln_rhsa.xml", encoding="utf-8")
parser = OpenscapParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_openscap_parser_with_one_criticle_vuln_has_one_findings(self):
- testfile = open("unittests/scans/openscap/one_vuln_rhsa.xml")
+ testfile = open("unittests/scans/openscap/one_vuln_rhsa.xml", encoding="utf-8")
parser = OpenscapParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -26,7 +26,7 @@ def test_openscap_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual("CVE-2005-1038", finding.unsaved_vulnerability_ids[0])
def test_openscap_parser_with_many_vuln_has_many_findings(self):
- testfile = open("unittests/scans/openscap/many_vuln_rhsa.xml")
+ testfile = open("unittests/scans/openscap/many_vuln_rhsa.xml", encoding="utf-8")
parser = OpenscapParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -51,7 +51,7 @@ def test_openscap_parser_with_many_vuln_has_many_findings(self):
self.assertEqual("192.168.100.194", finding.unsaved_endpoints[6].host)
def test_parser_from_spec_1_1_3(self):
- testfile = open("unittests/scans/openscap/ios-sample-v1.1.3.xccdf.xml")
+ testfile = open("unittests/scans/openscap/ios-sample-v1.1.3.xccdf.xml", encoding="utf-8")
parser = OpenscapParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_openvas_parser.py b/unittests/tools/test_openvas_parser.py
index d7906896e35..5a2ba5a17ce 100644
--- a/unittests/tools/test_openvas_parser.py
+++ b/unittests/tools/test_openvas_parser.py
@@ -5,7 +5,7 @@
class TestOpenVASParser(DojoTestCase):
def test_openvas_csv_one_vuln(self):
- with open("unittests/scans/openvas/one_vuln.csv") as f:
+ with open("unittests/scans/openvas/one_vuln.csv", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -26,7 +26,7 @@ def test_openvas_csv_one_vuln(self):
self.assertEqual(22, findings[0].unsaved_endpoints[0].port)
def test_openvas_csv_many_vuln(self):
- with open("unittests/scans/openvas/many_vuln.csv") as f:
+ with open("unittests/scans/openvas/many_vuln.csv", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -51,7 +51,7 @@ def test_openvas_csv_many_vuln(self):
self.assertEqual(finding.unsaved_vulnerability_ids[0], "CVE-2011-3389")
def test_openvas_csv_report_usingCVE(self):
- with open("unittests/scans/openvas/report_using_CVE.csv") as f:
+ with open("unittests/scans/openvas/report_using_CVE.csv", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -67,7 +67,7 @@ def test_openvas_csv_report_usingCVE(self):
self.assertEqual(finding.unsaved_vulnerability_ids[0], "CVE-2014-0117")
def test_openvas_csv_report_usingOpenVAS(self):
- with open("unittests/scans/openvas/report_using_openVAS.csv") as f:
+ with open("unittests/scans/openvas/report_using_openVAS.csv", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -83,7 +83,7 @@ def test_openvas_csv_report_usingOpenVAS(self):
self.assertEqual(finding.unsaved_vulnerability_ids, [])
def test_openvas_xml_no_vuln(self):
- with open("unittests/scans/openvas/no_vuln.xml") as f:
+ with open("unittests/scans/openvas/no_vuln.xml", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -92,7 +92,7 @@ def test_openvas_xml_no_vuln(self):
self.assertEqual(0, len(findings))
def test_openvas_xml_one_vuln(self):
- with open("unittests/scans/openvas/one_vuln.xml") as f:
+ with open("unittests/scans/openvas/one_vuln.xml", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
@@ -108,7 +108,7 @@ def test_openvas_xml_one_vuln(self):
self.assertEqual("Critical", finding.severity)
def test_openvas_xml_many_vuln(self):
- with open("unittests/scans/openvas/many_vuln.xml") as f:
+ with open("unittests/scans/openvas/many_vuln.xml", encoding="utf-8") as f:
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
diff --git a/unittests/tools/test_ort_parser.py b/unittests/tools/test_ort_parser.py
index 0d5c618cb63..d42098d1845 100644
--- a/unittests/tools/test_ort_parser.py
+++ b/unittests/tools/test_ort_parser.py
@@ -11,7 +11,7 @@ def test_parse_without_file_has_no_finding(self):
def test_parse_file_has_many_finding_one_tool(self):
testfile = open(
- get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json",
+ get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json", encoding="utf-8",
)
parser = OrtParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_ossindex_devaudit_parser.py b/unittests/tools/test_ossindex_devaudit_parser.py
index 8f30f964667..9b11e19cee9 100644
--- a/unittests/tools/test_ossindex_devaudit_parser.py
+++ b/unittests/tools/test_ossindex_devaudit_parser.py
@@ -7,7 +7,7 @@ class TestOssIndexDevauditParser(DojoTestCase):
def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -16,7 +16,7 @@ def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -25,7 +25,7 @@ def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -34,7 +34,7 @@ def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self):
def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -43,7 +43,7 @@ def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -55,7 +55,7 @@ def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -66,7 +66,7 @@ def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(sel
def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -77,7 +77,7 @@ def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -88,7 +88,7 @@ def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -110,7 +110,7 @@ def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_get_severity_shows_info(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -121,7 +121,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_info(self):
def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -132,7 +132,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
def test_ossindex_devaudit_parser_get_severity_shows_high(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -143,7 +143,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_high(self):
def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -154,7 +154,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
def test_ossindex_devaudit_parser_get_severity_shows_low(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json",
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json", encoding="utf-8",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_osv_scanner_parser.py b/unittests/tools/test_osv_scanner_parser.py
index 794d429bd03..3b82fc93303 100644
--- a/unittests/tools/test_osv_scanner_parser.py
+++ b/unittests/tools/test_osv_scanner_parser.py
@@ -7,13 +7,13 @@
class TestOSVScannerParser(DojoTestCase):
def test_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/osv_scanner/no_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/osv_scanner/no_findings.json"), encoding="utf-8") as testfile:
parser = OSVScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_some_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/osv_scanner/some_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/osv_scanner/some_findings.json"), encoding="utf-8") as testfile:
parser = OSVScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -25,7 +25,7 @@ def test_some_findings(self):
self.assertEqual(finding.severity, "Low")
def test_many_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/osv_scanner/many_findings.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/osv_scanner/many_findings.json"), encoding="utf-8") as testfile:
parser = OSVScannerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(66, len(findings))
diff --git a/unittests/tools/test_outpost24_parser.py b/unittests/tools/test_outpost24_parser.py
index 39e44d82203..fd132e649b3 100644
--- a/unittests/tools/test_outpost24_parser.py
+++ b/unittests/tools/test_outpost24_parser.py
@@ -5,7 +5,7 @@
class TestOutpost24Parser(DojoTestCase):
def assert_file_has_n_items(self, filename, item_count):
- with open(filename) as file:
+ with open(filename, encoding="utf-8") as file:
parser = Outpost24Parser()
findings = parser.get_findings(file, Test())
for finding in findings:
diff --git a/unittests/tools/test_php_security_audit_v2_parser.py b/unittests/tools/test_php_security_audit_v2_parser.py
index c8d4fd2091b..4ae779e1304 100644
--- a/unittests/tools/test_php_security_audit_v2_parser.py
+++ b/unittests/tools/test_php_security_audit_v2_parser.py
@@ -6,7 +6,7 @@
class TestPhpSecurityAuditV2ParserParser(DojoTestCase):
def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/php_security_audit_v2/php_security_audit_v2.0.0_unformatted.json")
+ testfile = open("unittests/scans/php_security_audit_v2/php_security_audit_v2.0.0_unformatted.json", encoding="utf-8")
parser = PhpSecurityAuditV2Parser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -23,7 +23,7 @@ def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
def test_php_symfony_security_check_parser_with_many_vuln(self):
"""New report with latest version"""
- testfile = open("unittests/scans/php_security_audit_v2/many_vulns.json")
+ testfile = open("unittests/scans/php_security_audit_v2/many_vulns.json", encoding="utf-8")
parser = PhpSecurityAuditV2Parser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_php_symfony_security_check_parser.py b/unittests/tools/test_php_symfony_security_check_parser.py
index 5e8c4bd51d7..6786d54b9c6 100644
--- a/unittests/tools/test_php_symfony_security_check_parser.py
+++ b/unittests/tools/test_php_symfony_security_check_parser.py
@@ -7,7 +7,7 @@ class TestPhpSymfonySecurityCheckerParser(DojoTestCase):
def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json",
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json", encoding="utf-8",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -19,7 +19,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
self,
):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json",
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json", encoding="utf-8",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -28,7 +28,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
def test_php_symfony_security_check_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json",
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json", encoding="utf-8",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_pip_audit_parser.py b/unittests/tools/test_pip_audit_parser.py
index 22771c1d402..44c4e84085a 100644
--- a/unittests/tools/test_pip_audit_parser.py
+++ b/unittests/tools/test_pip_audit_parser.py
@@ -9,7 +9,7 @@ def test_parser_empty(self):
testfiles = ["unittests/scans/pip_audit/empty.json",
"unittests/scans/pip_audit/empty_new.json"]
for path in testfiles:
- testfile = open(path)
+ testfile = open(path, encoding="utf-8")
parser = PipAuditParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -19,7 +19,7 @@ def test_parser_zero_findings(self):
testfiles = ["unittests/scans/pip_audit/zero_vulns.json",
"unittests/scans/pip_audit/zero_vulns_new.json"]
for path in testfiles:
- testfile = open(path)
+ testfile = open(path, encoding="utf-8")
parser = PipAuditParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -29,7 +29,7 @@ def test_parser_many_vulns(self):
testfiles = ["unittests/scans/pip_audit/many_vulns.json",
"unittests/scans/pip_audit/many_vulns_new.json"]
for path in testfiles:
- testfile = open(path)
+ testfile = open(path, encoding="utf-8")
parser = PipAuditParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
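
The pip-audit tests above open each fixture manually and close it after parsing. A with block, as most other tests in this patch already use, is equivalent and also closes the file when an assertion fails mid-test; a sketch of the same loop in that form, reusing only names from the hunks above:

    testfiles = ["unittests/scans/pip_audit/many_vulns.json",
                 "unittests/scans/pip_audit/many_vulns_new.json"]
    for path in testfiles:
        # File is closed on normal exit and on assertion failure alike.
        with open(path, encoding="utf-8") as testfile:
            parser = PipAuditParser()
            findings = parser.get_findings(testfile, Test())
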
diff --git a/unittests/tools/test_pmd_parser.py b/unittests/tools/test_pmd_parser.py
index 9a232384025..5fbc74d9f76 100644
--- a/unittests/tools/test_pmd_parser.py
+++ b/unittests/tools/test_pmd_parser.py
@@ -6,19 +6,19 @@
class TestPMDParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/pmd/pmd_no_vuln.csv") as testfile:
+ with open("unittests/scans/pmd/pmd_no_vuln.csv", encoding="utf-8") as testfile:
parser = PmdParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/pmd/pmd_one_vuln.csv") as testfile:
+ with open("unittests/scans/pmd/pmd_one_vuln.csv", encoding="utf-8") as testfile:
parser = PmdParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/pmd/pmd_many_vulns.csv") as testfile:
+ with open("unittests/scans/pmd/pmd_many_vulns.csv", encoding="utf-8") as testfile:
parser = PmdParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(16, len(findings))
diff --git a/unittests/tools/test_popeye_parser.py b/unittests/tools/test_popeye_parser.py
index 1dac1387700..17bb5b6b8ec 100644
--- a/unittests/tools/test_popeye_parser.py
+++ b/unittests/tools/test_popeye_parser.py
@@ -6,14 +6,14 @@
class TestPopeyeParser(DojoTestCase):
def test_popeye_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/popeye/popeye_zero_vul.json")
+ testfile = open("unittests/scans/popeye/popeye_zero_vul.json", encoding="utf-8")
parser = PopeyeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_popeye_parser_with_one_warning_has_one_findings(self):
- testfile = open("unittests/scans/popeye/popeye_one_vul.json")
+ testfile = open("unittests/scans/popeye/popeye_one_vul.json", encoding="utf-8")
parser = PopeyeParser()
findings = parser.get_findings(testfile, Test())
finding_title = "pods test-namespace/6cff44dc94-d92km [POP-106] No resources requests/limits defined"
@@ -31,7 +31,7 @@ def test_popeye_parser_with_one_warning_has_one_findings(self):
self.assertEqual(finding_vuln_id_from_tool, findings[0].vuln_id_from_tool)
def test_popeye_parser_with_many_vuln_has_many_findings(self):
- testfile = open("unittests/scans/popeye/popeye_many_vul.json")
+ testfile = open("unittests/scans/popeye/popeye_many_vul.json", encoding="utf-8")
parser = PopeyeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_progpilot_parser.py b/unittests/tools/test_progpilot_parser.py
index b1f6557b20c..9cc11fb6c30 100644
--- a/unittests/tools/test_progpilot_parser.py
+++ b/unittests/tools/test_progpilot_parser.py
@@ -6,7 +6,7 @@
class TestProgpilotParser(DojoTestCase):
def test_progpilotparser_single_has_many_findings(self):
- testfile = open("unittests/scans/progpilot/progpilot.json")
+ testfile = open("unittests/scans/progpilot/progpilot.json", encoding="utf-8")
parser = ProgpilotParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -22,21 +22,21 @@ def test_progpilotparser_single_has_many_findings(self):
self.assertEqual(593, finding.line)
def test_progpilotparser_single_has_one_finding(self):
- testfile = open("unittests/scans/progpilot/progpilot2.json")
+ testfile = open("unittests/scans/progpilot/progpilot2.json", encoding="utf-8")
parser = ProgpilotParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_progpilotparser_single_has_many_findings3(self):
- testfile = open("unittests/scans/progpilot/progpilot3.json")
+ testfile = open("unittests/scans/progpilot/progpilot3.json", encoding="utf-8")
parser = ProgpilotParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_progpilotparser_single_has_many_findings4(self):
- testfile = open("unittests/scans/progpilot/progpilot4.json")
+ testfile = open("unittests/scans/progpilot/progpilot4.json", encoding="utf-8")
parser = ProgpilotParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_pwn_sast_parser.py b/unittests/tools/test_pwn_sast_parser.py
index 140aa761c3a..c4b9f6033e4 100644
--- a/unittests/tools/test_pwn_sast_parser.py
+++ b/unittests/tools/test_pwn_sast_parser.py
@@ -6,34 +6,34 @@
class TestPWNSASTParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/pwn_sast/no_findings.json") as testfile:
+ with open("unittests/scans/pwn_sast/no_findings.json", encoding="utf-8") as testfile:
parser = PWNSASTParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/pwn_sast/one_finding.json") as testfile:
+ with open("unittests/scans/pwn_sast/one_finding.json", encoding="utf-8") as testfile:
parser = PWNSASTParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
self.assertEqual(1, len(findings))
def test_parse_many_finding(self):
- with open("unittests/scans/pwn_sast/many_findings.json") as testfile:
+ with open("unittests/scans/pwn_sast/many_findings.json", encoding="utf-8") as testfile:
parser = PWNSASTParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
self.assertEqual(3, len(findings))
def test_one_dup_finding(self):
- with open("unittests/scans/pwn_sast/one_dup_finding.json") as testfile:
+ with open("unittests/scans/pwn_sast/one_dup_finding.json", encoding="utf-8") as testfile:
parser = PWNSASTParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
self.assertEqual(1, len(findings))
def test_title_is_not_none(self):
- with open("unittests/scans/pwn_sast/one_finding.json") as testfile:
+ with open("unittests/scans/pwn_sast/one_finding.json", encoding="utf-8") as testfile:
parser = PWNSASTParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index e692eee0509..b76aeba84d9 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -11,7 +11,7 @@ class TestQualysInfrascanWebguiParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml",
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml", encoding="utf-8",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
# + also verify data with one test
def test_parse_file_with_one_vuln_has_one_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml",
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml", encoding="utf-8",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -38,7 +38,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
# Sample with Multiple Test
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml",
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml", encoding="utf-8",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -61,7 +61,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
# Sample with Multiple Test
def test_parse_file_with_finding_no_dns(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml",
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml", encoding="utf-8",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index 35801961163..15840f8561d 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -18,7 +18,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.xml",
+ get_unit_tests_path() + "/scans/qualys/empty.xml", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -35,7 +35,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -82,7 +82,7 @@ def test_parse_file_with_no_vuln_has_no_findings_csv(self):
def parse_file_with_no_vuln_has_no_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.csv",
+ get_unit_tests_path() + "/scans/qualys/empty.csv", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv",
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -136,7 +136,7 @@ def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
def test_parse_file_monthly_pci_issue6932(self):
with open(
- get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv",
+ get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -144,7 +144,7 @@ def test_parse_file_monthly_pci_issue6932(self):
def test_parse_file_with_cvss_values_and_scores(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -179,7 +179,7 @@ def test_parse_file_with_cvss_values_and_scores(self):
)
def test_get_severity_legacy(self):
- with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml") as testfile:
+ with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
counts = {}
@@ -197,7 +197,7 @@ def test_get_severity_legacy(self):
@override_settings(USE_QUALYS_LEGACY_SEVERITY_PARSING=False)
def test_get_severity(self):
- with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml") as testfile:
+ with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
counts = {}
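
Both severity tests above parse the same Qualys_Sample_Report.xml, once under the legacy severity mapping and once with USE_QUALYS_LEGACY_SEVERITY_PARSING=False applied through Django's override_settings decorator, and each builds a counts dict before asserting on it. The tally itself is truncated from the hunks, but it presumably reduces to something like this sketch:

    counts = {}
    for finding in findings:
        # Count findings per severity label ("Critical", "High", ...).
        counts[finding.severity] = counts.get(finding.severity, 0) + 1
    # The truncated hunks then presumably assert on these counts.
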
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 617824dcfa8..71bd295634e 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -6,7 +6,7 @@
class TestQualysWebAppParser(DojoTestCase):
def test_qualys_webapp_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/qualys_webapp/qualys_webapp_no_vuln.xml")
+ testfile = open("unittests/scans/qualys_webapp/qualys_webapp_no_vuln.xml", encoding="utf-8")
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -18,7 +18,7 @@ def test_qualys_webapp_parser_with_no_vuln_has_no_findings(self):
self.assertEqual(17, len(findings))
def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
- testfile = open("unittests/scans/qualys_webapp/qualys_webapp_one_vuln.xml")
+ testfile = open("unittests/scans/qualys_webapp/qualys_webapp_one_vuln.xml", encoding="utf-8")
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -31,7 +31,7 @@ def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml", encoding="utf-8",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test())
@@ -45,7 +45,7 @@ def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
def test_qualys_webapp_parser_info_is_vuln(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml", encoding="utf-8",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), enable_weakness=True)
@@ -59,7 +59,7 @@ def test_qualys_webapp_parser_info_is_vuln(self):
def test_discussion_10239(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml",
+ get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml", encoding="utf-8",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), enable_weakness=True)
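
Unlike most parsers in this patch, QualysWebAppParser accepts an extra keyword. Judging by the test_qualys_webapp_parser_info_is_vuln name above, enable_weakness=True appears to control whether informational weaknesses are emitted as findings; that interpretation is inferred from the test names, not stated in the patch. A sketch of both modes, with the fixture path taken from the hunks above:

    fixture = get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml"

    with open(fixture, encoding="utf-8") as testfile:
        vulns_only = QualysWebAppParser().get_findings(testfile, Test())

    with open(fixture, encoding="utf-8") as testfile:
        # Assumed: also report informational weaknesses as findings.
        with_weakness = QualysWebAppParser().get_findings(testfile, Test(), enable_weakness=True)
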
diff --git a/unittests/tools/test_rapplex_parser.py b/unittests/tools/test_rapplex_parser.py
index 605432df19e..9d56762513e 100644
--- a/unittests/tools/test_rapplex_parser.py
+++ b/unittests/tools/test_rapplex_parser.py
@@ -8,13 +8,13 @@
class TestRapplexParser(DojoTestCase):
def test_rapplex_parser_with_no_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_zero_vul.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_zero_vul.json"), encoding="utf-8") as testfile:
parser = RapplexParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_rapplex_parser_with_one_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_one_vul.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_one_vul.json"), encoding="utf-8") as testfile:
parser = RapplexParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -26,7 +26,7 @@ def test_rapplex_parser_with_one_findings(self):
self.assertIsNotNone(finding.references)
def test_rapplex_parser_with_many_findings(self):
- with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_many_vul.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/rapplex/rapplex_many_vul.json"), encoding="utf-8") as testfile:
parser = RapplexParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(8, len(findings))
diff --git a/unittests/tools/test_redhatsatellite_parser.py b/unittests/tools/test_redhatsatellite_parser.py
index bfaabbd326c..63ab8ba3e41 100644
--- a/unittests/tools/test_redhatsatellite_parser.py
+++ b/unittests/tools/test_redhatsatellite_parser.py
@@ -6,19 +6,19 @@
class TestRedHatSatelliteParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/redhatsatellite/no_findings.json") as testfile:
+ with open("unittests/scans/redhatsatellite/no_findings.json", encoding="utf-8") as testfile:
parser = RedHatSatelliteParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_finding(self):
- with open("unittests/scans/redhatsatellite/one_finding.json") as testfile:
+ with open("unittests/scans/redhatsatellite/one_finding.json", encoding="utf-8") as testfile:
parser = RedHatSatelliteParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_findingse(self):
- with open("unittests/scans/redhatsatellite/many_findings.json") as testfile:
+ with open("unittests/scans/redhatsatellite/many_findings.json", encoding="utf-8") as testfile:
parser = RedHatSatelliteParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -27,7 +27,7 @@ def test_parse_file_with_multiple_findingse(self):
self.assertEqual("CVE-1990-2", findings[0].unsaved_vulnerability_ids[2])
def test_parse_file_with_many_packages(self):
- with open("unittests/scans/redhatsatellite/many_packages.json") as testfile:
+ with open("unittests/scans/redhatsatellite/many_packages.json", encoding="utf-8") as testfile:
parser = RedHatSatelliteParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_retirejs_parser.py b/unittests/tools/test_retirejs_parser.py
index d26c8af1c1c..80090385aeb 100644
--- a/unittests/tools/test_retirejs_parser.py
+++ b/unittests/tools/test_retirejs_parser.py
@@ -5,7 +5,7 @@
class TestRetireJsParser(DojoTestCase):
def test_parse(self):
- with open("unittests/scans/retirejs/latest.json") as testfile:
+ with open("unittests/scans/retirejs/latest.json", encoding="utf-8") as testfile:
parser = RetireJsParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
diff --git a/unittests/tools/test_risk_recon_parser.py b/unittests/tools/test_risk_recon_parser.py
index dde31a77ca3..38c8b496be6 100644
--- a/unittests/tools/test_risk_recon_parser.py
+++ b/unittests/tools/test_risk_recon_parser.py
@@ -8,19 +8,19 @@
class TestRiskReconAPIParser(DojoTestCase):
def test_api_with_bad_url(self):
- with open("unittests/scans/risk_recon/bad_url.json") as testfile:
+ with open("unittests/scans/risk_recon/bad_url.json", encoding="utf-8") as testfile:
with self.assertRaises(Exception):
parser = RiskReconParser()
parser.get_findings(testfile, Test())
def test_api_with_bad_key(self):
- with open("unittests/scans/risk_recon/bad_key.json") as testfile:
+ with open("unittests/scans/risk_recon/bad_key.json", encoding="utf-8") as testfile:
with self.assertRaises(Exception):
parser = RiskReconParser()
parser.get_findings(testfile, Test())
def test_parser_without_api(self):
- with open("unittests/scans/risk_recon/findings.json") as testfile:
+ with open("unittests/scans/risk_recon/findings.json", encoding="utf-8") as testfile:
parser = RiskReconParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_rubocop_parser.py b/unittests/tools/test_rubocop_parser.py
index e581366ba62..0fa5d3cbdb3 100644
--- a/unittests/tools/test_rubocop_parser.py
+++ b/unittests/tools/test_rubocop_parser.py
@@ -5,21 +5,21 @@
class TestRubocopParser(DojoTestCase):
def test_parser_empty(self):
- testfile = open("unittests/scans/rubocop/empty.json")
+ testfile = open("unittests/scans/rubocop/empty.json", encoding="utf-8")
parser = RubocopParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parser_zero_findings(self):
- testfile = open("unittests/scans/rubocop/zero_vulns.json")
+ testfile = open("unittests/scans/rubocop/zero_vulns.json", encoding="utf-8")
parser = RubocopParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parser_one_vuln(self):
- testfile = open("unittests/scans/rubocop/one_finding.json")
+ testfile = open("unittests/scans/rubocop/one_finding.json", encoding="utf-8")
parser = RubocopParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -33,7 +33,7 @@ def test_parser_one_vuln(self):
self.assertEqual("Security/MarshalLoad", finding.vuln_id_from_tool)
def test_parser_many_vulns(self):
- testfile = open("unittests/scans/rubocop/many_vulns.json")
+ testfile = open("unittests/scans/rubocop/many_vulns.json", encoding="utf-8")
parser = RubocopParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_rusty_hog_parser.py b/unittests/tools/test_rusty_hog_parser.py
index b9aca9a65ce..3d7df04ea0f 100644
--- a/unittests/tools/test_rusty_hog_parser.py
+++ b/unittests/tools/test_rusty_hog_parser.py
@@ -5,25 +5,25 @@
class TestRustyhogParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_finding_choctawhog(self):
- with open("unittests/scans/rusty_hog/choctawhog_no_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/choctawhog_no_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_choctawhog(self):
- with open("unittests/scans/rusty_hog/choctawhog_one_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/choctawhog_one_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog(self):
- with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(13, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog_content(self):
- with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Choctaw Hog", Test())
self.assertEqual(findings[0].title, "Email address found in Git path .github/workflows/main.yml (a7bce96377c4ff2ac16cd51fb0da7fe7ea678829)")
@@ -36,25 +36,25 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog_content(s
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within git repositories.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_duorchog(self):
- with open("unittests/scans/rusty_hog/durochog_no_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/durochog_no_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_durochog(self):
- with open("unittests/scans/rusty_hog/durochog_one_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/durochog_one_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog(self):
- with open("unittests/scans/rusty_hog/durochog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/durochog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(4, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog_content(self):
- with open("unittests/scans/rusty_hog/durochog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/durochog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Duroc Hog", Test())
self.assertEqual(findings[0].title, "password (Password) found in path /scan_folder/unittests/scans/sonarqube/sonar-no-finding.html")
@@ -65,25 +65,25 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog_content(sel
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within directories, files, and archives.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_gottingenhog(self):
- with open("unittests/scans/rusty_hog/gottingenhog_no_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/gottingenhog_no_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_gottingenhog(self):
- with open("unittests/scans/rusty_hog/gottingenhog_one_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/gottingenhog_one_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog(self):
- with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(10, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog_content(self):
- with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Gottingen Hog", Test())
self.assertEqual(findings[0].title, "password found in Jira ID TEST-123 (Issue Description)")
@@ -94,25 +94,25 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog_content
self.assertIn("Please ensure no secret material nor confidential information is kept in clear within JIRA Tickets.", findings[0].mitigation)
def test_parse_file_with_no_vuln_has_no_finding_essexhog(self):
- with open("unittests/scans/rusty_hog/essexhog_no_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/essexhog_no_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Rusty Hog", Test()) # The outputfile is empty. A subscanner can't be classified
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding_essexhog(self):
- with open("unittests/scans/rusty_hog/essexhog_one_vuln.json") as testfile:
+ with open("unittests/scans/rusty_hog/essexhog_one_vuln.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog(self):
- with open("unittests/scans/rusty_hog/essexhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/essexhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(3, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog_content(self):
- with open("unittests/scans/rusty_hog/essexhog_many_vulns.json") as testfile:
+ with open("unittests/scans/rusty_hog/essexhog_many_vulns.json", encoding="utf-8") as testfile:
parser = RustyhogParser()
findings = parser.get_items(testfile, "Essex Hog", Test())
self.assertEqual(findings[0].title, "SSH (EC) private key found in Confluence Page ID 12345")
diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py
index a819846169a..e316ae9fe24 100644
--- a/unittests/tools/test_sarif_parser.py
+++ b/unittests/tools/test_sarif_parser.py
@@ -19,7 +19,7 @@ def test_example_report(self):
with open(
path.join(
get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif",
- ),
+ ), encoding="utf-8",
        ) as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_example_report(self):
def test_suppression_report(self):
"""test report file having different suppression definitions"""
- with open(path.join(path.dirname(__file__), "../scans/sarif/suppression_test.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/suppression_test.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -41,7 +41,7 @@ def test_suppression_report(self):
self.assertEqual(True, finding.active)
def test_example2_report(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -69,13 +69,13 @@ def test_example2_report(self):
self.common_checks(finding)
def test_example_k1_report(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k1.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k1.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_example_k2_report(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k2.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k2.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -90,7 +90,7 @@ def test_example_k2_report(self):
self.common_checks(finding)
def test_example_k3_report(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k3.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k3.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -100,7 +100,7 @@ def test_example_k3_report(self):
self.common_checks(finding)
def test_example_k4_report_mitigation(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k4.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k4.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -117,7 +117,7 @@ def test_example_k4_report_mitigation(self):
def test_example_report_ms(self):
"""Report file come from Microsoft SARIF sdk on GitHub"""
- with open(path.join(path.dirname(__file__), "../scans/sarif/SuppressionTestCurrent.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/SuppressionTestCurrent.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -127,7 +127,7 @@ def test_example_report_ms(self):
self.common_checks(finding)
def test_example_report_semgrep(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/semgrepowasp-benchmark-sample.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/semgrepowasp-benchmark-sample.sarif"), encoding="utf-8") as testfile:
test = Test()
parser = SarifParser()
findings = parser.get_findings(testfile, test)
@@ -141,7 +141,7 @@ def test_example_report_semgrep(self):
self.common_checks(finding)
def test_example_report_scanlift_dependency_check(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/dependency_check.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/dependency_check.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(13, len(findings))
@@ -164,7 +164,7 @@ def test_example_report_scanlift_dependency_check(self):
self.common_checks(finding)
def test_example_report_scanlift_bash(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/bash-report.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/bash-report.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(27, len(findings))
@@ -193,7 +193,7 @@ def test_example_report_scanlift_bash(self):
self.common_checks(finding)
def test_example_report_taint_python(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/taint-python-report.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/taint-python-report.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(11, len(findings))
@@ -235,7 +235,7 @@ def test_example_report_taint_python(self):
def test_njsscan(self):
"""Generated with opensecurity/njsscan (https://github.com/ajinabraham/njsscan)"""
- with open(path.join(path.dirname(__file__), "../scans/sarif/njsscan.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/njsscan.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
@@ -262,7 +262,7 @@ def test_njsscan(self):
def test_dockle(self):
"""Generated with goodwithtech/dockle (https://github.com/goodwithtech/dockle)"""
- with open(path.join(path.dirname(__file__), "../scans/sarif/dockle_0_3_15.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/dockle_0_3_15.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -310,7 +310,7 @@ def test_dockle(self):
)
def test_mobsfscan(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/mobsfscan.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/mobsfscan.json"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
@@ -318,7 +318,7 @@ def test_mobsfscan(self):
self.common_checks(finding)
def test_gitleaks(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/gitleaks_7.5.0.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/gitleaks_7.5.0.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(8, len(findings))
@@ -368,7 +368,7 @@ def test_gitleaks(self):
self.assertEqual(37, finding.line)
def test_flawfinder(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/flawfinder.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/flawfinder.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(53, len(findings))
@@ -444,7 +444,7 @@ def test_flawfinder(self):
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
def test_flawfinder_interfacev2(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/flawfinder.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/flawfinder.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
tests = parser.get_tests(parser.get_scan_types()[0], testfile)
self.assertEqual(1, len(tests))
@@ -513,7 +513,7 @@ def test_flawfinder_interfacev2(self):
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
def test_appendix_k1_double_interfacev2(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k1_double.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/appendix_k1_double.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
tests = parser.get_tests(parser.get_scan_types()[0], testfile)
self.assertEqual(2, len(tests))
@@ -529,7 +529,7 @@ def test_appendix_k1_double_interfacev2(self):
self.assertEqual(0, len(findings))
def test_codeql_snippet_report(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/codeQL-output.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/codeQL-output.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(72, len(findings))
@@ -555,7 +555,7 @@ def test_codeql_snippet_report(self):
self.common_checks(finding)
def test_severity_cvss_from_grype(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/cxf-3.4.6.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/cxf-3.4.6.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(22, len(findings))
@@ -584,14 +584,14 @@ def test_get_fingerprints_hashes(self):
)
def test_tags_from_result_properties(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/taint-python-report.sarif")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/taint-python-report.sarif"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
item = findings[0]
self.assertEqual(["Scan"], item.tags)
def test_severity_in_properties(self):
- with open(path.join(path.dirname(__file__), "../scans/sarif/issue_10191.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sarif/issue_10191.json"), encoding="utf-8") as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(77, len(findings))
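
The SARIF tests exercise two parser entry points: get_findings() returns a flat list, while get_tests() groups results into one Test per SARIF run in the report (the appendix_k1_double fixture above yields two tests). The tests open the file fresh for each call, so this sketch does too:

    with open("unittests/scans/sarif/flawfinder.sarif", encoding="utf-8") as testfile:
        findings = SarifParser().get_findings(testfile, Test())  # flat list

    with open("unittests/scans/sarif/flawfinder.sarif", encoding="utf-8") as testfile:
        parser = SarifParser()
        # One Test per SARIF run in the report.
        tests = parser.get_tests(parser.get_scan_types()[0], testfile)
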
diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py
index a51223869a6..a2c6618b096 100644
--- a/unittests/tools/test_scantist_parser.py
+++ b/unittests/tools/test_scantist_parser.py
@@ -6,13 +6,13 @@
class TestScantistParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/scantist/scantist-no-vuln.json") as testfile:
+ with open("unittests/scans/scantist/scantist-no-vuln.json", encoding="utf-8") as testfile:
parser = ScantistParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/scantist/scantist-one-vuln.json") as testfile:
+ with open("unittests/scans/scantist/scantist-one-vuln.json", encoding="utf-8") as testfile:
parser = ScantistParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -30,7 +30,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
) # Negligible is translated to Informational
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
- with open("unittests/scans/scantist/scantist-many-vuln.json") as testfile:
+ with open("unittests/scans/scantist/scantist-many-vuln.json", encoding="utf-8") as testfile:
parser = ScantistParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(17, len(findings))
diff --git a/unittests/tools/test_scout_suite_parser.py b/unittests/tools/test_scout_suite_parser.py
index c8b04c45021..f689fcc8741 100644
--- a/unittests/tools/test_scout_suite_parser.py
+++ b/unittests/tools/test_scout_suite_parser.py
@@ -7,13 +7,13 @@
class TestScoutSuiteParser(DojoTestCase):
def test_scout_suite_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/scout_suite/no_vuln.js") as test_file:
+ with open("unittests/scans/scout_suite/no_vuln.js", encoding="utf-8") as test_file:
parser = ScoutSuiteParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(0, len(findings))
def test_scout_suite_parser_with_two_findings(self):
- with open("unittests/scans/scout_suite/two_findings.js") as test_file:
+ with open("unittests/scans/scout_suite/two_findings.js", encoding="utf-8") as test_file:
parser = ScoutSuiteParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(4, len(findings))
@@ -32,7 +32,7 @@ def test_scout_suite_parser_with_two_findings(self):
self.assertEqual("gcp:cloudstorage-bucket-no-versioning", finding.vuln_id_from_tool)
def test_get_findings(self):
- with open("unittests/scans/scout_suite/new2.js") as test_file:
+ with open("unittests/scans/scout_suite/new2.js", encoding="utf-8") as test_file:
parser = ScoutSuiteParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(356, len(findings))
@@ -56,7 +56,7 @@ def test_get_findings(self):
self.assertEqual("aws:config-recorder-not-configured", finding.vuln_id_from_tool)
def test_get_tests(self):
- with open("unittests/scans/scout_suite/new2.js") as test_file:
+ with open("unittests/scans/scout_suite/new2.js", encoding="utf-8") as test_file:
parser = ScoutSuiteParser()
scan_type = parser.get_scan_types()[0]
tests = parser.get_tests(scan_type, test_file)
diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py
index ce19977b904..6892b0b8494 100644
--- a/unittests/tools/test_semgrep_parser.py
+++ b/unittests/tools/test_semgrep_parser.py
@@ -6,13 +6,13 @@
class TestSemgrepParser(DojoTestCase):
def test_parse_empty(self):
- with open("unittests/scans/semgrep/empty.json") as testfile:
+ with open("unittests/scans/semgrep/empty.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/semgrep/one_finding.json") as testfile:
+ with open("unittests/scans/semgrep/one_finding.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -27,7 +27,7 @@ def test_parse_one_finding(self):
self.assertIn("Using CBC with PKCS5Padding is susceptible to padding orcale attacks", finding.description)
def test_parse_many_finding(self):
- with open("unittests/scans/semgrep/many_findings.json") as testfile:
+ with open("unittests/scans/semgrep/many_findings.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -47,7 +47,7 @@ def test_parse_many_finding(self):
self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)
def test_parse_repeated_finding(self):
- with open("unittests/scans/semgrep/repeated_findings.json") as testfile:
+ with open("unittests/scans/semgrep/repeated_findings.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -61,7 +61,7 @@ def test_parse_repeated_finding(self):
self.assertEqual(2, finding.nb_occurences)
def test_parse_many_vulns(self):
- with open("unittests/scans/semgrep/many_vulns.json") as testfile:
+ with open("unittests/scans/semgrep/many_vulns.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -91,7 +91,7 @@ def test_parse_many_vulns(self):
self.assertEqual("python.lang.security.unquoted-csv-writer.unquoted-csv-writer", finding.vuln_id_from_tool)
def test_parse_cwe_list(self):
- with open("unittests/scans/semgrep/cwe_list.json") as testfile:
+ with open("unittests/scans/semgrep/cwe_list.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -105,10 +105,10 @@ def test_parse_cwe_list(self):
self.assertIn("A CSRF middleware was not detected in your express application. Ensure you are either using one such as `csurf` or `csrf` (see rule references) and/or you are properly doing CSRF validation in your routes with a token or cookies.", finding.description)
def test_different_lines_same_fingerprint(self):
- with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_26.json") as testfile:
+ with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_26.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings_first = parser.get_findings(testfile, Test())
- with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_27.json") as testfile2:
+ with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_27.json", encoding="utf-8") as testfile2:
parser = SemgrepParser()
findings_second = parser.get_findings(testfile2, Test())
self.assertEqual(len(findings_first), len(findings_second))
@@ -116,13 +116,13 @@ def test_different_lines_same_fingerprint(self):
self.assertEqual(first.unique_id_from_tool, second.unique_id_from_tool)
def test_parse_issue_8435(self):
- with open("unittests/scans/semgrep/issue_8435.json") as testfile:
+ with open("unittests/scans/semgrep/issue_8435.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_sca_deployments_vulns(self):
- with open("unittests/scans/semgrep/sca-deployments-vulns.json") as testfile:
+ with open("unittests/scans/semgrep/sca-deployments-vulns.json", encoding="utf-8") as testfile:
parser = SemgrepParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(18, len(findings))
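Every hunk above makes the same change: the bare open() calls gain an explicit encoding="utf-8". Without the argument, Python falls back to the locale's preferred encoding, so a test that passes on a UTF-8 Linux runner can fail on Windows, where the default is often cp1252. A minimal sketch of how to flush out any remaining implicit-encoding call sites (the fixture path is one already used above; EncodingWarning needs Python 3.10+ and the -X warn_default_encoding flag per PEP 597):

import warnings

# Run with `python -X warn_default_encoding` so every locale-dependent open()
# emits an EncodingWarning; escalating it to an error makes the test fail loudly.
warnings.simplefilter("error", EncodingWarning)

with open("unittests/scans/semgrep/one_finding.json", encoding="utf-8") as fh:
    fh.read()  # explicit encoding: no warning, identical behaviour on every platform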
diff --git a/unittests/tools/test_skf_parser.py b/unittests/tools/test_skf_parser.py
index f0f197abe5a..655395cd6b3 100644
--- a/unittests/tools/test_skf_parser.py
+++ b/unittests/tools/test_skf_parser.py
@@ -6,7 +6,7 @@
class TestSkfParser(DojoTestCase):
def test_single_has_no_finding(self):
- with open("unittests/scans/skf/export.csv") as testfile:
+ with open("unittests/scans/skf/export.csv", encoding="utf-8") as testfile:
parser = SKFParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(27, len(findings))
diff --git a/unittests/tools/test_snyk_code_parser.py b/unittests/tools/test_snyk_code_parser.py
index 20d3109e1e9..8d9fe8bd859 100644
--- a/unittests/tools/test_snyk_code_parser.py
+++ b/unittests/tools/test_snyk_code_parser.py
@@ -6,14 +6,14 @@
class TestSnykCodeParser(DojoTestCase):
def test_snykParser_single_has_many_findings(self):
- testfile = open("unittests/scans/snyk_code/single_project_many_vulns.json")
+ testfile = open("unittests/scans/snyk_code/single_project_many_vulns.json", encoding="utf-8")
parser = SnykCodeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(206, len(findings))
def test_snykcode_issue_9270(self):
- with open("unittests/scans/snyk_code/snykcode_issue_9270.json") as testfile:
+ with open("unittests/scans/snyk_code/snykcode_issue_9270.json", encoding="utf-8") as testfile:
parser = SnykCodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(39, len(findings))
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index ab176576262..17efff35a1b 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -6,49 +6,49 @@
class TestSnykParser(DojoTestCase):
def test_snykParser_single_has_no_finding(self):
- testfile = open("unittests/scans/snyk/single_project_no_vulns.json")
+ testfile = open("unittests/scans/snyk/single_project_no_vulns.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
testfile.close()
def test_snykParser_allprojects_has_no_finding(self):
- testfile = open("unittests/scans/snyk/all-projects_no_vulns.json")
+ testfile = open("unittests/scans/snyk/all-projects_no_vulns.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
testfile.close()
def test_snykParser_single_has_one_finding(self):
- testfile = open("unittests/scans/snyk/single_project_one_vuln.json")
+ testfile = open("unittests/scans/snyk/single_project_one_vuln.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
testfile.close()
def test_snykParser_allprojects_has_one_finding(self):
- testfile = open("unittests/scans/snyk/all-projects_one_vuln.json")
+ testfile = open("unittests/scans/snyk/all-projects_one_vuln.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_snykParser_single_has_many_findings(self):
- testfile = open("unittests/scans/snyk/single_project_many_vulns.json")
+ testfile = open("unittests/scans/snyk/single_project_many_vulns.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(41, len(findings))
def test_snykParser_allprojects_has_many_findings(self):
- testfile = open("unittests/scans/snyk/all-projects_many_vulns.json")
+ testfile = open("unittests/scans/snyk/all-projects_many_vulns.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(4, len(findings))
def test_snykParser_finding_has_fields(self):
- testfile = open("unittests/scans/snyk/single_project_one_vuln.json")
+ testfile = open("unittests/scans/snyk/single_project_one_vuln.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -89,7 +89,7 @@ def test_snykParser_finding_has_fields(self):
)
def test_snykParser_file_path_with_ampersand_is_preserved(self):
- testfile = open("unittests/scans/snyk/single_project_one_vuln_with_ampersands.json")
+ testfile = open("unittests/scans/snyk/single_project_one_vuln_with_ampersands.json", encoding="utf-8")
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -102,7 +102,7 @@ def test_snykParser_file_path_with_ampersand_is_preserved(self):
def test_snykParser_allprojects_issue4277(self):
"""Report to linked to issue 4277"""
- testfile = open("unittests/scans/snyk/all_projects_issue4277.json")
+ testfile = open("unittests/scans/snyk/all_projects_issue4277.json", encoding="utf-8")
parser = SnykParser()
findings = list(parser.get_findings(testfile, Test()))
testfile.close()
@@ -139,7 +139,7 @@ def test_snykParser_allprojects_issue4277(self):
self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L/E:P/RL:O/RC:C", finding.cvssv3)
def test_snykParser_cvssscore_none(self):
- with open("unittests/scans/snyk/single_project_None_cvss.json") as testfile:
+ with open("unittests/scans/snyk/single_project_None_cvss.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -150,7 +150,7 @@ def test_snykParser_cvssscore_none(self):
)
def test_snykParser_target_file(self):
- with open("unittests/scans/snyk/all_containers_target_output.json") as testfile:
+ with open("unittests/scans/snyk/all_containers_target_output.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(40, len(findings))
@@ -160,7 +160,7 @@ def test_snykParser_target_file(self):
self.assertIn("target_file:Mobile-Security-Framework-MobSF/requirements.txt", finding.unsaved_tags)
def test_snykParser_update_libs_tag(self):
- with open("unittests/scans/snyk/single_project_upgrade_libs.json") as testfile:
+ with open("unittests/scans/snyk/single_project_upgrade_libs.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(254, len(findings))
@@ -172,19 +172,19 @@ def test_snykParser_update_libs_tag(self):
self.assertIn("shell-quote@1.7.2", finding.mitigation)
def test_snykcontainer_issue_9270(self):
- with open("unittests/scans/snyk/snykcontainer_issue_9270.json") as testfile:
+ with open("unittests/scans/snyk/snykcontainer_issue_9270.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(25, len(findings))
def test_snykcode_issue_9270(self):
- with open("unittests/scans/snyk/snykcode_issue_9270.json") as testfile:
+ with open("unittests/scans/snyk/snykcode_issue_9270.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(39, len(findings))
def test_snykcode_issue_9270_epss(self):
- with open("unittests/scans/snyk/snykcontainer_issue_epss.json") as testfile:
+ with open("unittests/scans/snyk/snykcontainer_issue_epss.json", encoding="utf-8") as testfile:
parser = SnykParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
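Unlike the semgrep tests above, these snyk tests keep the older testfile = open(...) / testfile.close() pairing, and the close() call is skipped entirely if get_findings() raises. A sketch of the equivalent context-manager form, reusing a fixture and the classes already named in this hunk:

def test_snykParser_single_has_no_finding(self):
    with open("unittests/scans/snyk/single_project_no_vulns.json", encoding="utf-8") as testfile:
        parser = SnykParser()
        findings = parser.get_findings(testfile, Test())
    # the handle is closed here even if the parser raised mid-test
    self.assertEqual(0, len(findings))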
diff --git a/unittests/tools/test_solar_appscreener_parser.py b/unittests/tools/test_solar_appscreener_parser.py
index 0fb8cf4ee4e..b6e327c1840 100644
--- a/unittests/tools/test_solar_appscreener_parser.py
+++ b/unittests/tools/test_solar_appscreener_parser.py
@@ -7,7 +7,7 @@ class TestSolarAppscreenerParser(DojoTestCase):
def test_solar_appscreener_parser_with_no_vuln_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_zero_vul.csv")
+ get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_zero_vul.csv", encoding="utf-8")
parser = SolarAppscreenerParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -15,7 +15,7 @@ def test_solar_appscreener_parser_with_no_vuln_has_no_findings(self):
def test_solar_appscreener_parser_with_one_criticle_vuln_has_one_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_one_vul.csv")
+ get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_one_vul.csv", encoding="utf-8")
parser = SolarAppscreenerParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -33,7 +33,7 @@ def test_solar_appscreener_parser_with_one_criticle_vuln_has_one_findings(self):
def test_solar_appscreener_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_many_vul.csv")
+ get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_many_vul.csv", encoding="utf-8")
parser = SolarAppscreenerParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index c80be607e1b..352d001ca43 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -8,7 +8,7 @@ class TestSonarQubeParser(DojoTestCase):
# maxDiff = None
def init(self, reportFilename):
- my_file_handle = open(reportFilename)
+ my_file_handle = open(reportFilename, encoding="utf-8")
product = Product()
engagement = Engagement()
test = Test()
@@ -227,7 +227,7 @@ def test_detailed_parse_file_with_table_in_table(self):
)
self.assertEqual(str, type(item.references))
self.assertMultiLineEqual(
- "squid:S2975\n" "Copy Constructor versus Cloning\n" "S2157\n" "S1182",
+ "squid:S2975\nCopy Constructor versus Cloning\nS2157\nS1182",
item.references,
)
self.assertEqual(str, type(item.file_path))
@@ -444,7 +444,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
)
self.assertEqual(str, type(item.references))
self.assertMultiLineEqual(
- "squid:S2975\n" "Copy Constructor versus Cloning\n" "S2157\n" "S1182",
+ "squid:S2975\nCopy Constructor versus Cloning\nS2157\nS1182",
item.references,
)
self.assertEqual(str, type(item.file_path))
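The two assertMultiLineEqual hunks in this file are not encoding fixes: they collapse implicitly concatenated adjacent string literals into a single literal. The compiled string is byte-for-byte identical, but linters such as ruff (rule ISC001, from flake8-implicit-str-concat) flag the pattern because one missing comma in a sequence of strings silently produces the same syntax. A quick demonstration:

# Adjacent literals are joined at compile time, so these two are equal:
joined = "squid:S2975\n" "Copy Constructor versus Cloning\n" "S2157\n" "S1182"
single = "squid:S2975\nCopy Constructor versus Cloning\nS2157\nS1182"
assert joined == single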
diff --git a/unittests/tools/test_sonatype_parser.py b/unittests/tools/test_sonatype_parser.py
index 232f4dfec03..7e6fd88fb38 100644
--- a/unittests/tools/test_sonatype_parser.py
+++ b/unittests/tools/test_sonatype_parser.py
@@ -5,7 +5,7 @@
class TestSonatypeParser(DojoTestCase):
def test_parse_file_with_two_vulns(self):
- testfile = open("unittests/scans/sonatype/two_vulns.json")
+ testfile = open("unittests/scans/sonatype/two_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -14,28 +14,28 @@ def test_parse_file_with_two_vulns(self):
self.assertEqual("CVE-2016-2402", findings[0].unsaved_vulnerability_ids[0])
def test_parse_file_with_many_vulns(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
def test_parse_file_with_long_file_path(self):
- testfile = open("unittests/scans/sonatype/long_file_path.json")
+ testfile = open("unittests/scans/sonatype/long_file_path.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(3, len(findings))
def test_find_no_vuln(self):
- testfile = open("unittests/scans/sonatype/no_vuln.json")
+ testfile = open("unittests/scans/sonatype/no_vuln.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_component_parsed_correctly(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -44,7 +44,7 @@ def test_component_parsed_correctly(self):
self.assertEqual("2.6.0", findings[5].component_version)
def test_severity_parsed_correctly(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -56,21 +56,21 @@ def test_severity_parsed_correctly(self):
self.assertEqual("Medium", findings[5].severity)
def test_cwe_parsed_correctly(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual("693", findings[5].cwe)
def test_cvssv3_parsed_correctly(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:R/S:U/C:N/I:H/A:N", findings[5].cvssv3)
def test_filepath_parsed_correctly(self):
- testfile = open("unittests/scans/sonatype/many_vulns.json")
+ testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
parser = SonatypeParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
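Six of these sonatype tests open the same many_vulns.json fixture just to assert one field each. A hedged refactoring sketch (the helper name is hypothetical, not part of this diff) that would keep the per-test setup to a single line:

def _parse_fixture(self, name="many_vulns.json"):
    with open(f"unittests/scans/sonatype/{name}", encoding="utf-8") as testfile:
        return SonatypeParser().get_findings(testfile, Test())

# e.g. test_cwe_parsed_correctly would become:
#     findings = self._parse_fixture()
#     self.assertEqual("693", findings[5].cwe)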
diff --git a/unittests/tools/test_spotbugs_parser.py b/unittests/tools/test_spotbugs_parser.py
index 2587bc71b26..c6d26f9d521 100644
--- a/unittests/tools/test_spotbugs_parser.py
+++ b/unittests/tools/test_spotbugs_parser.py
@@ -40,7 +40,7 @@ def test_find_file_path(self):
def test_file(self):
parser = SpotbugsParser()
- testfile = open("unittests/scans/spotbugs/many_findings.xml")
+ testfile = open("unittests/scans/spotbugs/many_findings.xml", encoding="utf-8")
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(81, len(findings))
@@ -99,7 +99,7 @@ def test_version_4_4(self):
"""There was a big difference between version < 4.4.x and after
The dictionnary is not in the report anymore
"""
- testfile = open("unittests/scans/spotbugs/version_4.4.0.xml")
+ testfile = open("unittests/scans/spotbugs/version_4.4.0.xml", encoding="utf-8")
parser = SpotbugsParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_ssh_audit_parser.py b/unittests/tools/test_ssh_audit_parser.py
index e189ea81e7c..18c61c1a8e1 100644
--- a/unittests/tools/test_ssh_audit_parser.py
+++ b/unittests/tools/test_ssh_audit_parser.py
@@ -6,7 +6,7 @@
class TestSSHAuditParser(DojoTestCase):
def test_parse_file_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/ssh_audit/many_vulns.json") as testfile:
+ with open("unittests/scans/ssh_audit/many_vulns.json", encoding="utf-8") as testfile:
parser = SSHAuditParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -19,7 +19,7 @@ def test_parse_file_with_many_vuln_has_many_findings(self):
self.assertEqual(findings[13].severity, "Medium")
def test_parse_file_with_many_vuln_has_many_findings2(self):
- with open("unittests/scans/ssh_audit/many_vulns2.json") as testfile:
+ with open("unittests/scans/ssh_audit/many_vulns2.json", encoding="utf-8") as testfile:
parser = SSHAuditParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -32,7 +32,7 @@ def test_parse_file_with_many_vuln_has_many_findings2(self):
self.assertEqual(findings[9].severity, "Medium")
def test_parse_file_with_many_vuln_bug_fix(self):
- with open("unittests/scans/ssh_audit/bug_fix.json") as testfile:
+ with open("unittests/scans/ssh_audit/bug_fix.json", encoding="utf-8") as testfile:
parser = SSHAuditParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_ssl_labs_parser.py b/unittests/tools/test_ssl_labs_parser.py
index 125965ea138..575b63dbb4e 100644
--- a/unittests/tools/test_ssl_labs_parser.py
+++ b/unittests/tools/test_ssl_labs_parser.py
@@ -6,13 +6,13 @@
class TestSslLabsParser(DojoTestCase):
def test_parse_none(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/none.json") as test_file:
+ with open("unittests/scans/ssl_labs/none.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
self.assertEqual(0, len(findings))
def test_parse_ok(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/ssl_labs_ok_v1.5.0.json") as test_file:
+ with open("unittests/scans/ssl_labs/ssl_labs_ok_v1.5.0.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
@@ -26,7 +26,7 @@ def test_parse_ok(self):
def test_parse_dh1024(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/ssl_labs_dh1024_v1.5.0.json") as test_file:
+ with open("unittests/scans/ssl_labs/ssl_labs_dh1024_v1.5.0.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
@@ -41,7 +41,7 @@ def test_parse_dh1024(self):
def test_parse_3des(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/ssl_labs_3des_v1.5.0.json") as test_file:
+ with open("unittests/scans/ssl_labs/ssl_labs_3des_v1.5.0.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
@@ -56,7 +56,7 @@ def test_parse_3des(self):
def test_parse_revoked(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/ssl_labs_revoked_v1.5.0.json") as test_file:
+ with open("unittests/scans/ssl_labs/ssl_labs_revoked_v1.5.0.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
@@ -71,7 +71,7 @@ def test_parse_revoked(self):
def test_parse_multiple(self):
parser = SslLabsParser()
- with open("unittests/scans/ssl_labs/ssl_labs_multiple_v1.5.0.json") as test_file:
+ with open("unittests/scans/ssl_labs/ssl_labs_multiple_v1.5.0.json", encoding="utf-8") as test_file:
findings = parser.get_findings(test_file, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
diff --git a/unittests/tools/test_sslscan_parser.py b/unittests/tools/test_sslscan_parser.py
index 157046e3e10..c7bfe5abbaa 100644
--- a/unittests/tools/test_sslscan_parser.py
+++ b/unittests/tools/test_sslscan_parser.py
@@ -6,13 +6,13 @@
class TestSslscanParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/sslscan/sslscan_no_vuln.xml") as testfile:
+ with open("unittests/scans/sslscan/sslscan_no_vuln.xml", encoding="utf-8") as testfile:
parser = SslscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/sslscan/sslscan_one_vuln.xml") as testfile:
+ with open("unittests/scans/sslscan/sslscan_one_vuln.xml", encoding="utf-8") as testfile:
parser = SslscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
endpoint.clean()
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/sslscan/sslscan_many_vuln.xml") as testfile:
+ with open("unittests/scans/sslscan/sslscan_many_vuln.xml", encoding="utf-8") as testfile:
parser = SslscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 4f1186f1559..fa879d91b42 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -7,19 +7,19 @@
class TestSslyzeJSONParser(DojoTestCase):
def test_parse_json_file_with_one_target_has_zero_vuln_old(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_zero_vuln_old.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_zero_vuln_old.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_json_file_issue_9848(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/issue_9848.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/issue_9848.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
def test_parse_json_file_with_one_target_has_one_vuln_old(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_one_vuln_old.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_one_vuln_old.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
@@ -41,7 +41,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_old(self):
self.assertEqual(443, endpoint.port)
def test_parse_json_file_with_one_target_has_four_vuln_old(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_many_vuln_old.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_many_vuln_old.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
@@ -54,20 +54,20 @@ def test_parse_json_file_with_one_target_has_four_vuln_old(self):
self.assertEqual("CVE-2014-0224", findings[1].unsaved_vulnerability_ids[0])
def test_parse_json_file_with_two_target_has_many_vuln_old(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/two_targets_two_vuln_old.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/two_targets_two_vuln_old.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(2, len(findings))
def test_parse_json_file_with_one_target_has_zero_vuln_new(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_zero_vuln_new.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_zero_vuln_new.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_json_file_with_one_target_has_one_vuln_new(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_one_vuln_new.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_one_vuln_new.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
@@ -104,13 +104,13 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
self.assertEqual(443, endpoint.port)
def test_parse_json_file_with_one_target_has_three_vuln_new(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_many_vuln_new.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/one_target_many_vuln_new.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
def test_parse_json_file_with_two_target_has_many_vuln_new(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/two_targets_many_vuln_new.json")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/two_targets_many_vuln_new.json"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(5, len(findings))
@@ -159,7 +159,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
class TestSSLyzeXMLParser(DojoTestCase):
def test_parse_file_with_one_target_has_three_vuln(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_three_vuln.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_three_vuln.xml"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -168,7 +168,7 @@ def test_parse_file_with_one_target_has_three_vuln(self):
self.assertEqual(3, len(findings))
def test_parse_xml_file_with_one_target_has_one_vuln(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_one_vuln.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_one_vuln.xml"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -177,7 +177,7 @@ def test_parse_xml_file_with_one_target_has_one_vuln(self):
self.assertEqual(1, len(findings))
def test_parse_xml_file_with_one_target_has_three_vuln(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_three_vuln.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/report_one_target_three_vuln.xml"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -186,7 +186,7 @@ def test_parse_xml_file_with_one_target_has_three_vuln(self):
self.assertEqual(3, len(findings))
def test_parse_xml_file_with_two_target_has_many_vuln(self):
- with open(path.join(path.dirname(__file__), "../scans/sslyze/report_two_target_many_vuln.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/sslyze/report_two_target_many_vuln.xml"), encoding="utf-8") as testfile:
parser = SslyzeParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
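Two fixture-path styles coexist in this file and its neighbours: most tests hard-code paths relative to the repository root, while the sslyze tests resolve them relative to the test module. The second form is independent of whatever working directory the test runner happens to use:

from os import path

# style 1: only works when the tests are launched from the repo root
p1 = "unittests/scans/sslscan/sslscan_no_vuln.xml"

# style 2: anchored to this file's location, so the CWD does not matter
p2 = path.join(path.dirname(__file__), "../scans/sslyze/one_target_one_vuln_new.json")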
diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py
index b9756ffb069..da043f94104 100644
--- a/unittests/tools/test_stackhawk_parser.py
+++ b/unittests/tools/test_stackhawk_parser.py
@@ -9,25 +9,25 @@ class TestStackHawkParser(DojoTestCase):
__test_datetime = datetime.datetime(2022, 2, 16, 23, 7, 19, 575000, datetime.timezone.utc)
def test_invalid_json_format(self):
- with open("unittests/scans/stackhawk/invalid.json") as testfile:
+ with open("unittests/scans/stackhawk/invalid.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_parser_ensures_data_is_for_stackhawk_before_parsing(self):
- with open("unittests/scans/stackhawk/oddly_familiar_json_that_isnt_us.json") as testfile:
+ with open("unittests/scans/stackhawk/oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_stackhawk_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/stackhawk/stackhawk_zero_vul.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_zero_vul.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self):
- with open("unittests/scans/stackhawk/stackhawk_one_vul.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_one_vul.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -50,7 +50,7 @@ def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self):
)
def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicates(self):
- with open("unittests/scans/stackhawk/stackhawk_many_vul.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_many_vul.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -141,7 +141,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
)
def test_that_a_scan_import_updates_the_test_description(self):
- with open("unittests/scans/stackhawk/stackhawk_zero_vul.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_zero_vul.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
test = Test()
parser.get_findings(testfile, test)
@@ -153,7 +153,7 @@ def test_that_a_scan_import_updates_the_test_description(self):
)
def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self):
- with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_false_positive.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_false_positive.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -173,7 +173,7 @@ def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_fal
)
def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk_accepted(self):
- with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_risk_accepted.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_risk_accepted.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
@@ -193,7 +193,7 @@ def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk
)
def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_accepted_or_false_positive(self):
- with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_have_different_status.json") as testfile:
+ with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_have_different_status.json", encoding="utf-8") as testfile:
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
self.__assertAllEndpointsAreClean(findings)
diff --git a/unittests/tools/test_sysdig_reports_parser.py b/unittests/tools/test_sysdig_reports_parser.py
index 2e38af87e05..d67ea363c0a 100644
--- a/unittests/tools/test_sysdig_reports_parser.py
+++ b/unittests/tools/test_sysdig_reports_parser.py
@@ -7,13 +7,13 @@
class TestSysdigParser(TestCase):
def test_sysdig_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/sysdig_reports/sysdig_reports_zero_vul.csv") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig_reports_zero_vul.csv", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_sysdig_parser_with_one_criticle_vuln_has_one_findings(self):
- with open("unittests/scans/sysdig_reports/sysdig_reports_one_vul.csv") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig_reports_one_vul.csv", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -25,7 +25,7 @@ def test_sysdig_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual("CVE-2018-19360", findings[0].unsaved_vulnerability_ids[0])
def test_sysdig_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/sysdig_reports/sysdig_reports_many_vul.csv") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig_reports_many_vul.csv", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -35,7 +35,7 @@ def test_sysdig_parser_with_many_vuln_has_many_findings(self):
def test_sysdig_parser_missing_cve_field_id_from_csv_file(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/sysdig_reports/sysdig_reports_missing_cve_field.csv") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig_reports_missing_cve_field.csv", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -47,7 +47,7 @@ def test_sysdig_parser_missing_cve_field_id_from_csv_file(self):
def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/sysdig_reports/sysdig_reports_not_starting_with_cve.csv") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig_reports_not_starting_with_cve.csv", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -58,7 +58,7 @@ def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
)
def test_sysdig_parser_json_with_many_findings(self):
- with open("unittests/scans/sysdig_reports/sysdig.json") as testfile:
+ with open("unittests/scans/sysdig_reports/sysdig.json", encoding="utf-8") as testfile:
parser = SysdigReportsParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
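The missing-CVE tests above wrap the whole open/parse block in assertRaises used as a context manager, which also captures the exception for later message checks. A sketch of the pattern (the asserted message fragment is illustrative, not the parser's actual wording):

with self.assertRaises(ValueError) as context:
    with open("unittests/scans/sysdig_reports/sysdig_reports_missing_cve_field.csv", encoding="utf-8") as testfile:
        SysdigReportsParser().get_findings(testfile, Test())
# the raised exception is available after the block:
self.assertIn("CVE", str(context.exception))  # hypothetical message fragment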
diff --git a/unittests/tools/test_talisman_parser.py b/unittests/tools/test_talisman_parser.py
index 9862f6088aa..5f41d1da249 100644
--- a/unittests/tools/test_talisman_parser.py
+++ b/unittests/tools/test_talisman_parser.py
@@ -5,13 +5,13 @@
class TestTalismanParser(DojoTestCase):
def test_parse_empty(self):
- with open("unittests/scans/talisman/no_finding.json") as testfile:
+ with open("unittests/scans/talisman/no_finding.json", encoding="utf-8") as testfile:
parser = TalismanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/talisman/one_finding.json") as testfile:
+ with open("unittests/scans/talisman/one_finding.json", encoding="utf-8") as testfile:
parser = TalismanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_parse_one_finding(self):
self.assertIsNotNone(finding.description)
def test_parse_many_finding(self):
- with open("unittests/scans/talisman/many_findings.json") as testfile:
+ with open("unittests/scans/talisman/many_findings.json", encoding="utf-8") as testfile:
parser = TalismanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_tenable_parser.py b/unittests/tools/test_tenable_parser.py
index 370c65a3d43..e80c3e4462c 100644
--- a/unittests/tools/test_tenable_parser.py
+++ b/unittests/tools/test_tenable_parser.py
@@ -13,7 +13,7 @@ def create_test(self):
return test
def test_parse_some_findings_nessus_legacy(self):
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln.xml"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -30,7 +30,7 @@ def test_parse_some_findings_nessus_legacy(self):
def test_parse_some_findings_csv_nessus_legacy(self):
"""Test one report provided by a user"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln.csv")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln.csv"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -60,7 +60,7 @@ def test_parse_some_findings_csv_nessus_legacy(self):
def test_parse_some_findings_csv2_nessus_legacy(self):
"""Test that use default columns of Nessus Pro 8.13.1 (#257)"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-default.csv")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-default.csv"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -82,7 +82,7 @@ def test_parse_some_findings_csv2_nessus_legacy(self):
def test_parse_some_findings_csv2_all_nessus_legacy(self):
"""Test that use a report with all columns of Nessus Pro 8.13.1 (#257)"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -104,13 +104,13 @@ def test_parse_some_findings_csv2_all_nessus_legacy(self):
def test_parse_some_findings_csv_bytes_nessus_legacy(self):
"""This tests is designed to test the parser with different read modes"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -125,7 +125,7 @@ def test_parse_some_findings_csv_bytes_nessus_legacy(self):
def test_parse_some_findings_samples_nessus_legacy(self):
"""Test that come from samples repo"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_v_unknown.xml")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_v_unknown.xml"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -156,7 +156,7 @@ def test_parse_some_findings_samples_nessus_legacy(self):
def test_parse_some_findings_with_cvssv3_nessus_legacy(self):
"""test with cvssv3"""
- with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_with_cvssv3.nessus")) as testfile:
+ with open(path.join(path.dirname(__file__), "../scans/tenable/nessus/nessus_with_cvssv3.nessus"), encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -171,7 +171,7 @@ def test_parse_some_findings_with_cvssv3_nessus_legacy(self):
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N", finding.cvssv3)
def test_parse_many_findings_xml_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.xml") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.xml", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -187,7 +187,7 @@ def test_parse_many_findings_xml_nessus_was_legacy(self):
self.assertEqual("Cross-Site Scripting (XSS)", finding.title)
def test_parse_one_findings_xml_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.xml") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.xml", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -201,7 +201,7 @@ def test_parse_one_findings_xml_nessus_was_legacy(self):
self.assertEqual("Cross-Site Scripting (XSS)", finding.title)
def test_parse_no_findings_xml_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.xml") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.xml", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -210,7 +210,7 @@ def test_parse_no_findings_xml_nessus_was_legacy(self):
self.assertEqual(0, len(findings))
def test_parse_many_findings_csv_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.csv") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -228,7 +228,7 @@ def test_parse_many_findings_csv_nessus_was_legacy(self):
self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
def test_parse_one_findings_csv_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.csv") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -244,13 +244,13 @@ def test_parse_one_findings_csv_nessus_was_legacy(self):
self.assertEqual("http", finding.unsaved_endpoints[0].protocol)
def test_parse_no_findings_csv_nessus_was_legacy(self):
- with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.csv") as testfile:
+ with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
self.assertEqual(0, len(findings))
def test_parse_many_tenable_vulns(self):
- with open("unittests/scans/tenable/tenable_many_vuln.csv") as testfile:
+ with open("unittests/scans/tenable/tenable_many_vuln.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -270,7 +270,7 @@ def test_parse_many_tenable_vulns(self):
self.assertEqual("CVE-2023-32233", vulnerability_id)
def test_parse_issue_6992(self):
- with open("unittests/scans/tenable/nessus/issue_6992.nessus") as testfile:
+ with open("unittests/scans/tenable/nessus/issue_6992.nessus", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
@@ -280,7 +280,7 @@ def test_parse_issue_6992(self):
self.assertEqual("High", findings[0].severity)
def test_parse_nessus_new(self):
- with open("unittests/scans/tenable/nessus/nessus_new.csv") as testfile:
+ with open("unittests/scans/tenable/nessus/nessus_new.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
self.assertEqual(99, len(findings))
@@ -291,7 +291,7 @@ def test_parse_nessus_new(self):
self.assertEqual("3.1", finding.cvssv3_score)
def test_parse_issue_9612(self):
- with open("unittests/scans/tenable/issue_9612.csv") as testfile:
+ with open("unittests/scans/tenable/issue_9612.csv", encoding="utf-8") as testfile:
parser = TenableParser()
findings = parser.get_findings(testfile, self.create_test())
for finding in findings:
diff --git a/unittests/tools/test_terrascan_parser.py b/unittests/tools/test_terrascan_parser.py
index 8201b65c31a..9046908ea2d 100644
--- a/unittests/tools/test_terrascan_parser.py
+++ b/unittests/tools/test_terrascan_parser.py
@@ -6,13 +6,13 @@
class TestTerrascanParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/terrascan/no_findings.json") as testfile:
+ with open("unittests/scans/terrascan/no_findings.json", encoding="utf-8") as testfile:
parser = TerrascanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_many_findings(self):
- with open("unittests/scans/terrascan/many_findings.json") as testfile:
+ with open("unittests/scans/terrascan/many_findings.json", encoding="utf-8") as testfile:
parser = TerrascanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_testssl_parser.py b/unittests/tools/test_testssl_parser.py
index deae8217330..978a48b7945 100644
--- a/unittests/tools/test_testssl_parser.py
+++ b/unittests/tools/test_testssl_parser.py
@@ -6,13 +6,13 @@
class TestTestsslParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_finding(self):
- with open("unittests/scans/testssl/defectdojo_no_vuln.csv") as testfile:
+ with open("unittests/scans/testssl/defectdojo_no_vuln.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln_has_one_finding(self):
- with open("unittests/scans/testssl/defectdojo_one_vuln.csv") as testfile:
+ with open("unittests/scans/testssl/defectdojo_one_vuln.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
self.assertEqual(1, len(findings))
def test_parse_file_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/testssl/defectdojo_many_vuln.csv") as testfile:
+ with open("unittests/scans/testssl/defectdojo_many_vuln.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -45,7 +45,7 @@ def test_parse_file_with_many_vuln_has_many_findings(self):
self.assertEqual(310, finding.cwe)
def test_parse_file_with_many_cves(self):
- with open("unittests/scans/testssl/many_cves.csv") as testfile:
+ with open("unittests/scans/testssl/many_cves.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -66,7 +66,7 @@ def test_parse_file_with_many_cves(self):
self.assertEqual(310, finding.cwe)
def test_parse_file_with_31_version(self):
- with open("unittests/scans/testssl/demo.csv") as testfile:
+ with open("unittests/scans/testssl/demo.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -75,7 +75,7 @@ def test_parse_file_with_31_version(self):
self.assertEqual(12, len(findings))
def test_parse_file_with_31_version2(self):
- with open("unittests/scans/testssl/demo2.csv") as testfile:
+ with open("unittests/scans/testssl/demo2.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -84,7 +84,7 @@ def test_parse_file_with_31_version2(self):
self.assertEqual(3, len(findings))
def test_parse_file_with_one_vuln_has_overall_medium(self):
- with open("unittests/scans/testssl/overall_medium.csv") as testfile:
+ with open("unittests/scans/testssl/overall_medium.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -93,7 +93,7 @@ def test_parse_file_with_one_vuln_has_overall_medium(self):
self.assertEqual(2, len(findings))
def test_parse_file_with_one_vuln_has_overall_critical(self):
- with open("unittests/scans/testssl/overall_critical.csv") as testfile:
+ with open("unittests/scans/testssl/overall_critical.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -102,7 +102,7 @@ def test_parse_file_with_one_vuln_has_overall_critical(self):
self.assertEqual(145, len(findings))
def test_parse_file_with_one_vuln_has_failed_target(self):
- with open("unittests/scans/testssl/failed_target.csv") as testfile:
+ with open("unittests/scans/testssl/failed_target.csv", encoding="utf-8") as testfile:
parser = TestsslParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_tfsec_parser.py b/unittests/tools/test_tfsec_parser.py
index 23a88fd895c..814a5154d61 100644
--- a/unittests/tools/test_tfsec_parser.py
+++ b/unittests/tools/test_tfsec_parser.py
@@ -6,13 +6,13 @@
class TestTFSecParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/tfsec/no_findings.json") as testfile:
+ with open("unittests/scans/tfsec/no_findings.json", encoding="utf-8") as testfile:
parser = TFSecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding_legacy(self):
- with open("unittests/scans/tfsec/one_finding_legacy.json") as testfile:
+ with open("unittests/scans/tfsec/one_finding_legacy.json", encoding="utf-8") as testfile:
parser = TFSecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -31,7 +31,7 @@ def test_parse_one_finding_legacy(self):
self.assertEqual(1, finding.nb_occurences)
def test_parse_many_findings_legacy(self):
- with open("unittests/scans/tfsec/many_findings_legacy.json") as testfile:
+ with open("unittests/scans/tfsec/many_findings_legacy.json", encoding="utf-8") as testfile:
parser = TFSecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -76,7 +76,7 @@ def test_parse_many_findings_legacy(self):
self.assertEqual(1, finding.nb_occurences)
def test_parse_many_findings_current(self):
- with open("unittests/scans/tfsec/many_findings_current.json") as testfile:
+ with open("unittests/scans/tfsec/many_findings_current.json", encoding="utf-8") as testfile:
parser = TFSecParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(13, len(findings))
diff --git a/unittests/tools/test_threagile_parser.py b/unittests/tools/test_threagile_parser.py
index 81a1dc7a2b6..8bfe657fce0 100644
--- a/unittests/tools/test_threagile_parser.py
+++ b/unittests/tools/test_threagile_parser.py
@@ -5,7 +5,7 @@
class TestThreAgileParser(DojoTestCase):
def test_non_threagile_file_raises_error(self):
- with open("unittests/scans/threagile/bad_formatted_risks_file.json") as testfile:
+ with open("unittests/scans/threagile/bad_formatted_risks_file.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
with self.assertRaises(TypeError) as exc_context:
parser.get_findings(testfile, Test())
@@ -13,13 +13,13 @@ def test_non_threagile_file_raises_error(self):
self.assertEqual("Invalid ThreAgile risks file", str(exc))
def test_empty_file_returns_no_findings(self):
- with open("unittests/scans/threagile/empty_file_no_risks.json") as testfile:
+ with open("unittests/scans/threagile/empty_file_no_risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_file_with_vulnerabilities_returns_correct_findings(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(6, len(findings))
@@ -33,28 +33,28 @@ def test_file_with_vulnerabilities_returns_correct_findings(self):
self.assertEqual("policies-rego-storage-ta", finding.component_name)
def test_in_discussion_is_under_review(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
finding = findings[1]
self.assertTrue(finding.under_review)
def test_accepted_finding_is_accepted(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
finding = findings[2]
self.assertTrue(finding.risk_accepted)
def test_in_progress_is_verified(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
finding = findings[3]
self.assertTrue(finding.verified)
def test_mitigated_is_mitigated(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
finding = findings[4]
@@ -62,7 +62,7 @@ def test_mitigated_is_mitigated(self):
self.assertEqual("some-runtime", finding.component_name)
def test_false_positive_is_false_positive(self):
- with open("unittests/scans/threagile/risks.json") as testfile:
+ with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile:
parser = ThreagileParser()
findings = parser.get_findings(testfile, Test())
finding = findings[5]
diff --git a/unittests/tools/test_threat_composer_parser.py b/unittests/tools/test_threat_composer_parser.py
new file mode 100644
index 00000000000..9dfbf524c6c
--- /dev/null
+++ b/unittests/tools/test_threat_composer_parser.py
@@ -0,0 +1,66 @@
+import os
+
+from dojo.models import Test
+from dojo.tools.threat_composer.parser import ThreatComposerParser
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+
+
+def sample_path(file_name: str):
+ return os.path.join("/scans/threat_composer", file_name)
+
+
+class TestThreatComposerParser(DojoTestCase):
+
+ def test_threat_composer_parser_with_no_threat_has_no_findings(self):
+ with open(get_unit_tests_path() + sample_path("threat_composer_zero_threats.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(0, len(findings))
+
+ def test_threat_composer_parser_with_one_threat_has_one_finding(self):
+ with open(get_unit_tests_path() + sample_path("threat_composer_one_threat.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(1, len(findings))
+
+ with self.subTest(i=0):
+ finding = findings[0]
+ self.assertEqual("lorem ipsum", finding.title)
+ self.assertEqual("High", finding.severity)
+ self.assertIsNotNone(finding.description)
+ self.assertIn("Assumption", str(finding.description))
+ self.assertIsNotNone(finding.mitigation)
+ self.assertIn("Assumption", str(finding.mitigation))
+ self.assertIsNotNone(finding.impact)
+ self.assertEqual("46db1eb4-a451-4d05-afe1-c695491e2387", finding.unique_id_from_tool)
+ self.assertEqual(23, finding.vuln_id_from_tool)
+ self.assertFalse(finding.false_p)
+ self.assertFalse(finding.verified)
+
+ def test_threat_composer_parser_with_many_threats_has_many_findings(self):
+ with open(get_unit_tests_path() + sample_path("threat_composer_many_threats.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ findings = parser.get_findings(testfile, Test())
+ self.assertEqual(21, len(findings))
+
+ def test_threat_composer_parser_empty_with_error(self):
+ with self.assertRaises(ValueError) as context:
+ with open(get_unit_tests_path() + sample_path("threat_composer_no_threats_with_error.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ parser.get_findings(testfile, Test())
+
+ self.assertNotIn("No threats found in the JSON file", str(context.exception))
+
+ def test_threat_composer_parser_with_one_threat_has_not_assumptions(self):
+ with open(get_unit_tests_path() + sample_path("threat_composer_broken_assumptions.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ findings = parser.get_findings(testfile, Test())
+ finding = findings[0]
+ self.assertNotIn("Assumption", str(finding.description))
+
+ def test_threat_composer_parser_with_one_threat_has_not_mitigations(self):
+ with open(get_unit_tests_path() + sample_path("threat_composer_broken_mitigations.json"), encoding="utf-8") as testfile:
+ parser = ThreatComposerParser()
+ findings = parser.get_findings(testfile, Test())
+ finding = findings[0]
+ self.assertNotIn("Mitigation", str(finding.mitigation))
diff --git a/unittests/tools/test_trivy_operator_parser.py b/unittests/tools/test_trivy_operator_parser.py
index a5a52f1dedd..8ac6b5f3189 100644
--- a/unittests/tools/test_trivy_operator_parser.py
+++ b/unittests/tools/test_trivy_operator_parser.py
@@ -12,13 +12,13 @@ def sample_path(file_name):
class TestTrivyOperatorParser(DojoTestCase):
def test_configauditreport_no_vuln(self):
- with open(sample_path("configauditreport_no_vuln.json")) as test_file:
+ with open(sample_path("configauditreport_no_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 0)
def test_configauditreport_single_vulns(self):
- with open(sample_path("configauditreport_single_vuln.json")) as test_file:
+ with open(sample_path("configauditreport_single_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
@@ -29,7 +29,7 @@ def test_configauditreport_single_vulns(self):
self.assertEqual("KSV014 - Root file system is not read-only", finding.title)
def test_configauditreport_many_vulns(self):
- with open(sample_path("configauditreport_many.json")) as test_file:
+ with open(sample_path("configauditreport_many.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 13)
@@ -45,13 +45,13 @@ def test_configauditreport_many_vulns(self):
self.assertEqual("KSV016 - Memory requests not specified", finding.title)
def test_vulnerabilityreport_no_vuln(self):
- with open(sample_path("vulnerabilityreport_no_vuln.json")) as test_file:
+ with open(sample_path("vulnerabilityreport_no_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 0)
def test_vulnerabilityreport_single_vulns(self):
- with open(sample_path("vulnerabilityreport_single_vuln.json")) as test_file:
+ with open(sample_path("vulnerabilityreport_single_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
@@ -64,7 +64,7 @@ def test_vulnerabilityreport_single_vulns(self):
self.assertEqual(4.2, finding.cvssv3_score)
def test_vulnerabilityreport_many(self):
- with open(sample_path("vulnerabilityreport_many.json")) as test_file:
+ with open(sample_path("vulnerabilityreport_many.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 41)
@@ -84,13 +84,13 @@ def test_vulnerabilityreport_many(self):
self.assertEqual(6.5, finding.cvssv3_score)
def test_exposedsecretreport_no_vuln(self):
- with open(sample_path("exposedsecretreport_no_vuln.json")) as test_file:
+ with open(sample_path("exposedsecretreport_no_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 0)
def test_exposedsecretreport_single_vulns(self):
- with open(sample_path("exposedsecretreport_single_vuln.json")) as test_file:
+ with open(sample_path("exposedsecretreport_single_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
@@ -103,7 +103,7 @@ def test_exposedsecretreport_single_vulns(self):
self.assertEqual("Secret detected in root/aws_secret.txt - AWS Secret Access Key", finding.title)
def test_exposedsecretreport_many(self):
- with open(sample_path("exposedsecretreport_many.json")) as test_file:
+ with open(sample_path("exposedsecretreport_many.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 2)
@@ -123,7 +123,7 @@ def test_exposedsecretreport_many(self):
self.assertEqual("Secret detected in root/github_secret.txt - GitHub Personal Access Token", finding.title)
def test_vulnerabilityreport_extended(self):
- with open(sample_path("vulnerabilityreport_extended.json")) as test_file:
+ with open(sample_path("vulnerabilityreport_extended.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 5)
@@ -138,7 +138,7 @@ def test_vulnerabilityreport_extended(self):
self.assertEqual("os-pkgs, ubuntu", str(finding.tags))
def test_cis_benchmark(self):
- with open(sample_path("cis_benchmark.json")) as test_file:
+ with open(sample_path("cis_benchmark.json"), encoding="utf-8") as test_file:
parser = TrivyOperatorParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 795)
diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py
index 2c13876bf20..8fbebadd8b2 100644
--- a/unittests/tools/test_trivy_parser.py
+++ b/unittests/tools/test_trivy_parser.py
@@ -13,13 +13,13 @@ def sample_path(file_name):
class TestTrivyParser(DojoTestCase):
def test_legacy_no_vuln(self):
- with open(sample_path("legacy_no_vuln.json")) as test_file:
+ with open(sample_path("legacy_no_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
trivy_findings = parser.get_findings(test_file, Test())
self.assertEqual(len(trivy_findings), 0)
def test_legacy_many_vulns(self):
- with open(sample_path("legacy_many_vulns.json")) as test_file:
+ with open(sample_path("legacy_many_vulns.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 93)
@@ -32,13 +32,13 @@ def test_legacy_many_vulns(self):
self.assertEqual("1.8.2.2", finding.component_version)
def test_scheme_2_no_vuln(self):
- with open(sample_path("scheme_2_no_vuln.json")) as test_file:
+ with open(sample_path("scheme_2_no_vuln.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
trivy_findings = parser.get_findings(test_file, Test())
self.assertEqual(len(trivy_findings), 0)
def test_scheme_2_many_vulns(self):
- with open(sample_path("scheme_2_many_vulns.json")) as test_file:
+ with open(sample_path("scheme_2_many_vulns.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 5)
@@ -74,7 +74,7 @@ def test_scheme_2_many_vulns(self):
self.assertFalse(finding.dynamic_finding)
def test_misconfigurations_and_secrets(self):
- with open(sample_path("misconfigurations_and_secrets.json")) as test_file:
+ with open(sample_path("misconfigurations_and_secrets.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 5)
@@ -106,7 +106,7 @@ def test_misconfigurations_and_secrets(self):
self.assertEqual(["secret"], finding.tags)
def test_kubernetes(self):
- with open(sample_path("kubernetes.json")) as test_file:
+ with open(sample_path("kubernetes.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 20)
@@ -177,7 +177,7 @@ def test_kubernetes(self):
self.assertEqual("default / Deployment / redis-follower", finding.service)
def test_license_scheme(self):
- with open(sample_path("license_scheme.json")) as test_file:
+ with open(sample_path("license_scheme.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 19)
@@ -193,7 +193,7 @@ def test_license_scheme(self):
self.assertEqual(description, finding.description)
def test_issue_9092(self):
- with open(sample_path("issue_9092.json")) as test_file:
+ with open(sample_path("issue_9092.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
@@ -202,7 +202,7 @@ def test_issue_9092(self):
self.assertEqual(finding.file_path, "requirements.txt")
def test_issue_9170(self):
- with open(sample_path("issue_9170.json")) as test_file:
+ with open(sample_path("issue_9170.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 37)
@@ -211,7 +211,7 @@ def test_issue_9170(self):
self.assertEqual("KSV116 - Runs with a root primary or supplementary GID", finding.title)
def test_issue_9263(self):
- with open(sample_path("issue_9263.json")) as test_file:
+ with open(sample_path("issue_9263.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
@@ -219,7 +219,7 @@ def test_issue_9263(self):
self.assertEqual("High", finding.severity)
def test_issue_9333(self):
- with open(sample_path("issue_9333.json")) as test_file:
+ with open(sample_path("issue_9333.json"), encoding="utf-8") as test_file:
parser = TrivyParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 13)
diff --git a/unittests/tools/test_trufflehog3_parser.py b/unittests/tools/test_trufflehog3_parser.py
index 6a2be6a9a00..2e8a8523c23 100644
--- a/unittests/tools/test_trufflehog3_parser.py
+++ b/unittests/tools/test_trufflehog3_parser.py
@@ -13,13 +13,13 @@ def sample_path(file_name):
class TestTruffleHog3Parser(DojoTestCase):
def test_zero_vulns(self):
- with open(sample_path("zero_vulns.json")) as test_file:
+ with open(sample_path("zero_vulns.json"), encoding="utf-8") as test_file:
parser = TruffleHog3Parser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 0)
def test_many_vulns_legacy(self):
- with open(sample_path("many_vulns_legacy.json")) as test_file:
+ with open(sample_path("many_vulns_legacy.json"), encoding="utf-8") as test_file:
parser = TruffleHog3Parser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 7)
@@ -46,7 +46,7 @@ def test_many_vulns_legacy(self):
self.assertEqual(7, finding.nb_occurences)
def test_many_vulns2_legacy(self):
- with open(sample_path("many_vulns2_legacy.json")) as test_file:
+ with open(sample_path("many_vulns2_legacy.json"), encoding="utf-8") as test_file:
parser = TruffleHog3Parser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 27)
@@ -57,7 +57,7 @@ def test_many_vulns2_legacy(self):
self.assertEqual(8, finding.nb_occurences)
def test_many_vulns_current(self):
- with open(sample_path("many_vulns_current.json")) as test_file:
+ with open(sample_path("many_vulns_current.json"), encoding="utf-8") as test_file:
parser = TruffleHog3Parser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 3)
@@ -102,7 +102,7 @@ def test_many_vulns_current(self):
self.assertEqual(1, finding.nb_occurences)
def test_issue_6999(self):
- with open(sample_path("issue_6999.json")) as test_file:
+ with open(sample_path("issue_6999.json"), encoding="utf-8") as test_file:
parser = TruffleHog3Parser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 1)
diff --git a/unittests/tools/test_trufflehog_parser.py b/unittests/tools/test_trufflehog_parser.py
index 5c7089a2a04..cfb7a6f86e2 100644
--- a/unittests/tools/test_trufflehog_parser.py
+++ b/unittests/tools/test_trufflehog_parser.py
@@ -12,7 +12,7 @@ def sample_path(file_name):
class TestTruffleHogParser(DojoTestCase):
def test_many_vulns_v2(self):
- with open(sample_path("v2_many_vulns.json")) as test_file:
+ with open(sample_path("v2_many_vulns.json"), encoding="utf-8") as test_file:
parser = TruffleHogParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 18)
@@ -22,7 +22,7 @@ def test_many_vulns_v2(self):
self.assertEqual("test_all.py", finding.file_path)
def test_many_vulns_git_v3(self):
- with open(sample_path("v3_git.json")) as test_file:
+ with open(sample_path("v3_git.json"), encoding="utf-8") as test_file:
parser = TruffleHogParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 3)
@@ -32,7 +32,7 @@ def test_many_vulns_git_v3(self):
self.assertEqual("keys", finding.file_path)
def test_many_vulns_github_v3(self):
- with open(sample_path("v3_github.json")) as test_file:
+ with open(sample_path("v3_github.json"), encoding="utf-8") as test_file:
parser = TruffleHogParser()
findings = parser.get_findings(test_file, Test())
self.assertEqual(len(findings), 3)
diff --git a/unittests/tools/test_trustwave_fusion_api_parser.py b/unittests/tools/test_trustwave_fusion_api_parser.py
index f09a31a0d0b..c11c1eeb688 100644
--- a/unittests/tools/test_trustwave_fusion_api_parser.py
+++ b/unittests/tools/test_trustwave_fusion_api_parser.py
@@ -6,14 +6,14 @@
class TestTrustwaveFusionAPIParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json",
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json", encoding="utf-8",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_vuln_with_valid_cve(self):
- with open("unittests/scans/trustwave_fusion_api/test_cve.json") as testfile:
+ with open("unittests/scans/trustwave_fusion_api/test_cve.json", encoding="utf-8") as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
@@ -42,7 +42,7 @@ def test_vuln_with_valid_cve(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json",
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json", encoding="utf-8",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_trustwave_parser.py b/unittests/tools/test_trustwave_parser.py
index b0931d980b7..8f8d7150eb1 100644
--- a/unittests/tools/test_trustwave_parser.py
+++ b/unittests/tools/test_trustwave_parser.py
@@ -15,7 +15,7 @@ def test_no_vuln(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
- with open(sample_path("many_vulns.csv")) as test_file:
+ with open(sample_path("many_vulns.csv"), encoding="utf-8") as test_file:
parser = TrustwaveParser()
findings = parser.get_findings(test_file, test)
for finding in findings:
diff --git a/unittests/tools/test_twistlock_parser.py b/unittests/tools/test_twistlock_parser.py
index ce91e7cd0df..2934f0230db 100644
--- a/unittests/tools/test_twistlock_parser.py
+++ b/unittests/tools/test_twistlock_parser.py
@@ -7,14 +7,14 @@
class TestTwistlockParser(DojoTestCase):
def test_parse_file_with_no_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/no_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/no_vuln.json"), encoding="utf-8")
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_parse_file_with_one_vuln(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/one_vuln.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/one_vuln.json"), encoding="utf-8")
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln(self):
self.assertEqual("CVE-2013-7459", findings[0].unsaved_vulnerability_ids[0])
def test_parse_file_with_no_link(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/one_vuln_no_link.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/one_vuln_no_link.json"), encoding="utf-8")
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -32,14 +32,14 @@ def test_parse_file_with_no_link(self):
self.assertEqual("PRISMA-2021-0013", findings[0].unsaved_vulnerability_ids[0])
def test_parse_file_with_many_vulns(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/many_vulns.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/many_vulns.json"), encoding="utf-8")
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(5, len(findings))
def test_parse_file_which_contain_packages_info(self):
- testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/findings_include_packages.json"))
+ testfile = open(path.join(path.dirname(__file__), "../scans/twistlock/findings_include_packages.json"), encoding="utf-8")
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -47,7 +47,7 @@ def test_parse_file_which_contain_packages_info(self):
def test_parse_file_prisma_twistlock_images_no_vuln(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv"),
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv"), encoding="utf-8",
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -56,7 +56,7 @@ def test_parse_file_prisma_twistlock_images_no_vuln(self):
def test_parse_file_prisma_twistlock_images_four_vulns(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv"),
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv"), encoding="utf-8",
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -69,7 +69,7 @@ def test_parse_file_prisma_twistlock_images_long_package_name(self):
testfile = open(
path.join(
path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv",
- ),
+ ), encoding="utf-8",
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py
index e6cb7d83f94..1f2ab6626a8 100644
--- a/unittests/tools/test_veracode_parser.py
+++ b/unittests/tools/test_veracode_parser.py
@@ -24,7 +24,7 @@ def test_parse_file_with_one_finding(self):
self.parse_file_with_one_finding()
def parse_file_with_one_finding(self):
- with open("unittests/scans/veracode/one_finding.xml") as testfile:
+ with open("unittests/scans/veracode/one_finding.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -37,7 +37,7 @@ def test_parse_file_many_findings_different_hash_code_different_unique_id(self):
self.parse_file_many_findings_different_hash_code_different_unique_id()
def parse_file_many_findings_different_hash_code_different_unique_id(self):
- with open("unittests/scans/veracode/many_findings_different_hash_code_different_unique_id.xml") as testfile:
+ with open("unittests/scans/veracode/many_findings_different_hash_code_different_unique_id.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -68,7 +68,7 @@ def test_parse_file_with_multiple_finding(self):
self.parse_file_with_multiple_finding()
def parse_file_with_multiple_finding(self):
- with open("unittests/scans/veracode/many_findings.xml") as testfile:
+ with open("unittests/scans/veracode/many_findings.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(4, len(findings))
@@ -113,7 +113,7 @@ def test_parse_file_with_multiple_finding2(self):
self.assertEqual(datetime.datetime.today().date(), finding.date)
def parse_file_with_multiple_finding2(self):
- with open("unittests/scans/veracode/veracode_scan.xml") as testfile:
+ with open("unittests/scans/veracode/veracode_scan.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
@@ -147,7 +147,7 @@ def test_parse_file_with_mitigated_finding(self):
self.parse_file_with_mitigated_finding()
def parse_file_with_mitigated_finding(self):
- with open("unittests/scans/veracode/mitigated_finding.xml") as testfile:
+ with open("unittests/scans/veracode/mitigated_finding.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, self.test)
self.assertEqual(1, len(findings))
@@ -166,7 +166,7 @@ def test_parse_file_with_mitigated_fixed_finding(self):
self.parse_file_with_mitigated_fixed_finding()
def parse_file_with_mitigated_fixed_finding(self):
- with open("unittests/scans/veracode/mitigated_fixed_finding.xml") as testfile:
+ with open("unittests/scans/veracode/mitigated_fixed_finding.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -183,7 +183,7 @@ def test_parse_file_with_mitigated_sca_finding(self):
self.parse_file_with_mitigated_sca_finding()
def parse_file_with_mitigated_sca_finding(self):
- with open("unittests/scans/veracode/veracode_scan_sca_mitigated.xml") as testfile:
+ with open("unittests/scans/veracode/veracode_scan_sca_mitigated.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -202,7 +202,7 @@ def test_parse_file_with_dynamic_finding(self):
self.assertEqual(datetime.datetime.today().date(), finding.date)
def parse_file_with_dynamic_finding(self):
- with open("unittests/scans/veracode/dynamic_finding.xml") as testfile:
+ with open("unittests/scans/veracode/dynamic_finding.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -230,7 +230,7 @@ def test_parse_file_with_changed_severity(self):
self.parse_file_with_changed_severity()
def parse_file_with_changed_severity(self):
- with open("unittests/scans/veracode/veracode_scan_changed_severity.xml") as testfile:
+ with open("unittests/scans/veracode/veracode_scan_changed_severity.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
@@ -252,7 +252,7 @@ def test_maven_component_name(self):
self.maven_component_name()
def maven_component_name(self):
- with open("unittests/scans/veracode/veracode_maven.xml") as testfile:
+ with open("unittests/scans/veracode/veracode_maven.xml", encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -266,7 +266,7 @@ def maven_component_name(self):
self.assertEqual(9.8, finding.cvssv3_score)
def json_static_findings_test(self, file_name):
- with open(file_name) as testfile:
+ with open(file_name, encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -342,7 +342,7 @@ def json_static_embedded_format(self):
self.json_static_findings_test("unittests/scans/veracode/static_embedded_format.json")
def json_dynamic_findings_test(self, file_name):
- with open(file_name) as testfile:
+ with open(file_name, encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -414,7 +414,7 @@ def json_dynamic_embedded_format(self):
self.json_dynamic_findings_test("unittests/scans/veracode/dynamic_embedded_format.json")
def json_sca_findings_test(self, file_name):
- with open(file_name) as testfile:
+ with open(file_name, encoding="utf-8") as testfile:
parser = VeracodeParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
diff --git a/unittests/tools/test_veracode_sca_parser.py b/unittests/tools/test_veracode_sca_parser.py
index 03c70e50a10..0951f5024d6 100644
--- a/unittests/tools/test_veracode_sca_parser.py
+++ b/unittests/tools/test_veracode_sca_parser.py
@@ -18,7 +18,7 @@ def test_parse_csv(self):
self.parse_csv()
def parse_csv(self):
- with open("unittests/scans/veracode_sca/veracode_sca.csv") as testfile:
+ with open("unittests/scans/veracode_sca/veracode_sca.csv", encoding="utf-8") as testfile:
parser = VeracodeScaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
@@ -67,7 +67,7 @@ def test_parse_json(self):
self.parse_json()
def parse_json(self):
- with open("unittests/scans/veracode_sca/veracode_sca.json") as testfile:
+ with open("unittests/scans/veracode_sca/veracode_sca.json", encoding="utf-8") as testfile:
parser = VeracodeScaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -93,7 +93,7 @@ def test_parse_json_fixed(self):
self.parse_json_fixed()
def parse_json_fixed(self):
- with open("unittests/scans/veracode_sca/veracode_sca_fixed.json") as testfile:
+ with open("unittests/scans/veracode_sca/veracode_sca_fixed.json", encoding="utf-8") as testfile:
parser = VeracodeScaParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_wapiti_parser.py b/unittests/tools/test_wapiti_parser.py
index fa7dd8592a8..22278904420 100644
--- a/unittests/tools/test_wapiti_parser.py
+++ b/unittests/tools/test_wapiti_parser.py
@@ -7,7 +7,7 @@ class TestWapitiParser(DojoTestCase):
def test_parse_file_3_0_4(self):
"""Generated with version 3.0.4 on OWASP Juicy Shop"""
- with open("unittests/scans/wapiti/juicyshop.xml") as testfile:
+ with open("unittests/scans/wapiti/juicyshop.xml", encoding="utf-8") as testfile:
parser = WapitiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -28,7 +28,7 @@ def test_parse_file_3_0_4(self):
def test_parse_file_demo(self):
""""""
- with open("unittests/scans/wapiti/demo.xml") as testfile:
+ with open("unittests/scans/wapiti/demo.xml", encoding="utf-8") as testfile:
parser = WapitiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -41,7 +41,7 @@ def test_parse_file_demo(self):
def test_parse_file_example(self):
""""""
- with open("unittests/scans/wapiti/example.xml") as testfile:
+ with open("unittests/scans/wapiti/example.xml", encoding="utf-8") as testfile:
parser = WapitiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -54,7 +54,7 @@ def test_parse_file_example(self):
def test_parse_cwe(self):
"""File to test CWE"""
- with open("unittests/scans/wapiti/cwe.xml") as testfile:
+ with open("unittests/scans/wapiti/cwe.xml", encoding="utf-8") as testfile:
parser = WapitiParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_wazuh_parser.py b/unittests/tools/test_wazuh_parser.py
index ec48ac63359..3c8a33b003c 100644
--- a/unittests/tools/test_wazuh_parser.py
+++ b/unittests/tools/test_wazuh_parser.py
@@ -6,13 +6,13 @@
class TestWazuhParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/wazuh/no_findings.json") as testfile:
+ with open("unittests/scans/wazuh/no_findings.json", encoding="utf-8") as testfile:
parser = WazuhParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/wazuh/one_finding.json") as testfile:
+ with open("unittests/scans/wazuh/one_finding.json", encoding="utf-8") as testfile:
parser = WazuhParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -26,7 +26,7 @@ def test_parse_one_finding(self):
self.assertEqual(5.5, finding.cvssv3_score)
def test_parse_many_finding(self):
- with open("unittests/scans/wazuh/many_findings.json") as testfile:
+ with open("unittests/scans/wazuh/many_findings.json", encoding="utf-8") as testfile:
parser = WazuhParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -36,7 +36,7 @@ def test_parse_many_finding(self):
self.assertEqual("2023-02-08", finding.date)
def test_parse_one_finding_with_endpoint(self):
- with open("unittests/scans/wazuh/one_finding_with_endpoint.json") as testfile:
+ with open("unittests/scans/wazuh/one_finding_with_endpoint.json", encoding="utf-8") as testfile:
parser = WazuhParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_wfuzz_parser.py b/unittests/tools/test_wfuzz_parser.py
index ce140a9c116..0434f419963 100644
--- a/unittests/tools/test_wfuzz_parser.py
+++ b/unittests/tools/test_wfuzz_parser.py
@@ -6,13 +6,13 @@
class TestWFuzzParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/wfuzz/no_findings.json") as testfile:
+ with open("unittests/scans/wfuzz/no_findings.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_one_finding(self):
- with open("unittests/scans/wfuzz/one_finding.json") as testfile:
+ with open("unittests/scans/wfuzz/one_finding.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -21,7 +21,7 @@ def test_parse_one_finding(self):
self.assertEqual(1, len(findings))
def test_parse_many_finding(self):
- with open("unittests/scans/wfuzz/many_findings.json") as testfile:
+ with open("unittests/scans/wfuzz/many_findings.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -30,7 +30,7 @@ def test_parse_many_finding(self):
self.assertEqual(4, len(findings))
def test_one_dup_finding(self):
- with open("unittests/scans/wfuzz/one_dup_finding.json") as testfile:
+ with open("unittests/scans/wfuzz/one_dup_finding.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -39,7 +39,7 @@ def test_one_dup_finding(self):
self.assertEqual(4, len(findings))
def test_issue_7863(self):
- with open("unittests/scans/wfuzz/issue_7863.json") as testfile:
+ with open("unittests/scans/wfuzz/issue_7863.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -49,7 +49,7 @@ def test_issue_7863(self):
self.assertEqual("Medium", findings[0].severity)
def test_one_finding_responsecode_missing(self):
- with open("unittests/scans/wfuzz/one_finding_responsecode_missing.json") as testfile:
+ with open("unittests/scans/wfuzz/one_finding_responsecode_missing.json", encoding="utf-8") as testfile:
parser = WFuzzParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_whispers_parser.py b/unittests/tools/test_whispers_parser.py
index d158bf311ce..e1556605268 100644
--- a/unittests/tools/test_whispers_parser.py
+++ b/unittests/tools/test_whispers_parser.py
@@ -14,21 +14,21 @@ def test_whispers_parser_severity_map(self):
expected_severity = "High"
for fixture in fixtures:
- testfile = open(fixture)
+ testfile = open(fixture, encoding="utf-8")
parser = WhispersParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(expected_severity, findings[0].severity)
def test_whispers_parser_with_no_vuln_has_no_findings(self):
- testfile = open("unittests/scans/whispers/whispers_zero_vul.json")
+ testfile = open("unittests/scans/whispers/whispers_zero_vul.json", encoding="utf-8")
parser = WhispersParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_whispers_parser_with_one_critical_vuln_has_one_findings(self):
- testfile = open("unittests/scans/whispers/whispers_one_vul.json")
+ testfile = open("unittests/scans/whispers/whispers_one_vul.json", encoding="utf-8")
parser = WhispersParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
@@ -41,7 +41,7 @@ def test_whispers_parser_with_one_critical_vuln_has_one_findings(self):
self.assertEqual("pip.conf Password", findings[0].vuln_id_from_tool)
def test_whispers_parser_with_many_vuln_has_many_findings(self):
- testfile = open("unittests/scans/whispers/whispers_many_vul.json")
+ testfile = open("unittests/scans/whispers/whispers_many_vul.json", encoding="utf-8")
parser = WhispersParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
diff --git a/unittests/tools/test_whitehat_sentinel_parser.py b/unittests/tools/test_whitehat_sentinel_parser.py
index 9e957333e08..7cfd1ba6bb4 100644
--- a/unittests/tools/test_whitehat_sentinel_parser.py
+++ b/unittests/tools/test_whitehat_sentinel_parser.py
@@ -7,24 +7,24 @@ class TestWhiteHatSentinelParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with self.assertRaises(ValueError):
- with open("unittests/scans/whitehat_sentinel/empty_file.json") as testfile:
+ with open("unittests/scans/whitehat_sentinel/empty_file.json", encoding="utf-8") as testfile:
parser = WhiteHatSentinelParser()
parser.get_findings(testfile, Test())
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/whitehat_sentinel/one_vuln.json") as testfile:
+ with open("unittests/scans/whitehat_sentinel/one_vuln.json", encoding="utf-8") as testfile:
parser = WhiteHatSentinelParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/whitehat_sentinel/many_vuln.json") as testfile:
+ with open("unittests/scans/whitehat_sentinel/many_vuln.json", encoding="utf-8") as testfile:
parser = WhiteHatSentinelParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(3, len(findings))
def test_parse_file_with_invalid_data(self):
with self.assertRaises(ValueError):
- with open("unittests/scans/whitehat_sentinel/invalid_data.txt") as testfile:
+ with open("unittests/scans/whitehat_sentinel/invalid_data.txt", encoding="utf-8") as testfile:
parser = WhiteHatSentinelParser()
parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_wiz_parser.py b/unittests/tools/test_wiz_parser.py
index 38a350318c6..9d72c594126 100644
--- a/unittests/tools/test_wiz_parser.py
+++ b/unittests/tools/test_wiz_parser.py
@@ -5,7 +5,7 @@
class TestWizParser(DojoTestCase):
def test_no_findings(self):
- with open("unittests/scans/wiz/no_findings.csv") as testfile:
+ with open("unittests/scans/wiz/no_findings.csv", encoding="utf-8") as testfile:
parser = WizParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -14,7 +14,7 @@ def test_no_findings(self):
self.assertEqual(0, len(findings))
def test_one_findings(self):
- with open("unittests/scans/wiz/one_finding.csv") as testfile:
+ with open("unittests/scans/wiz/one_finding.csv", encoding="utf-8") as testfile:
parser = WizParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -26,7 +26,7 @@ def test_one_findings(self):
self.assertEqual("Informational", finding.severity)
def test_multiple_findings(self):
- with open("unittests/scans/wiz/multiple_findings.csv") as testfile:
+ with open("unittests/scans/wiz/multiple_findings.csv", encoding="utf-8") as testfile:
parser = WizParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_wizcli_dir_parser.py b/unittests/tools/test_wizcli_dir_parser.py
index ffb9d0c2821..7075aa42f90 100644
--- a/unittests/tools/test_wizcli_dir_parser.py
+++ b/unittests/tools/test_wizcli_dir_parser.py
@@ -5,13 +5,13 @@
class TestWizcliDirParser(DojoTestCase):
def test_no_findings(self):
- with open("unittests/scans/wizcli_dir/wizcli_dir_zero_vul.json") as testfile:
+ with open("unittests/scans/wizcli_dir/wizcli_dir_zero_vul.json", encoding="utf-8") as testfile:
parser = WizcliDirParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 0)
def test_one_findings(self):
- with open("unittests/scans/wizcli_dir/wizcli_dir_one_vul.json") as testfile:
+ with open("unittests/scans/wizcli_dir/wizcli_dir_one_vul.json", encoding="utf-8") as testfile:
parser = WizcliDirParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -35,7 +35,7 @@ def test_one_findings(self):
)
def test_multiple_findings(self):
- with open("unittests/scans/wizcli_dir/wizcli_dir_many_vul.json") as testfile:
+ with open("unittests/scans/wizcli_dir/wizcli_dir_many_vul.json", encoding="utf-8") as testfile:
parser = WizcliDirParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(7, len(findings))
diff --git a/unittests/tools/test_wizcli_iac_parser.py b/unittests/tools/test_wizcli_iac_parser.py
index a509d77f331..4d9d9d61547 100644
--- a/unittests/tools/test_wizcli_iac_parser.py
+++ b/unittests/tools/test_wizcli_iac_parser.py
@@ -5,13 +5,13 @@
class TestWizcliIaCParser(DojoTestCase):
def test_no_findings(self):
- with open("unittests/scans/wizcli_iac/wizcli_iac_zero_vul.json") as testfile:
+ with open("unittests/scans/wizcli_iac/wizcli_iac_zero_vul.json", encoding="utf-8") as testfile:
parser = WizcliIaCParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 0)
def test_one_findings(self):
- with open("unittests/scans/wizcli_iac/wizcli_iac_one_vul.json") as testfile:
+ with open("unittests/scans/wizcli_iac/wizcli_iac_one_vul.json", encoding="utf-8") as testfile:
parser = WizcliIaCParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -33,7 +33,7 @@ def test_one_findings(self):
)
def test_multiple_findings(self):
- with open("unittests/scans/wizcli_iac/wizcli_iac_many_vul.json") as testfile:
+ with open("unittests/scans/wizcli_iac/wizcli_iac_many_vul.json", encoding="utf-8") as testfile:
parser = WizcliIaCParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(25, len(findings))
diff --git a/unittests/tools/test_wizcli_img_parser.py b/unittests/tools/test_wizcli_img_parser.py
index e030974cc56..36d2f7c9dbd 100644
--- a/unittests/tools/test_wizcli_img_parser.py
+++ b/unittests/tools/test_wizcli_img_parser.py
@@ -5,13 +5,13 @@
class TestWizcliImgParser(DojoTestCase):
def test_no_findings(self):
- with open("unittests/scans/wizcli_img/wizcli_img_zero_vul.json") as testfile:
+ with open("unittests/scans/wizcli_img/wizcli_img_zero_vul.json", encoding="utf-8") as testfile:
parser = WizcliImgParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(len(findings), 0)
def test_one_findings(self):
- with open("unittests/scans/wizcli_img/wizcli_img_one_vul.json") as testfile:
+ with open("unittests/scans/wizcli_img/wizcli_img_one_vul.json", encoding="utf-8") as testfile:
parser = WizcliImgParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
@@ -31,7 +31,7 @@ def test_one_findings(self):
)
def test_multiple_findings(self):
- with open("unittests/scans/wizcli_img/wizcli_img_many_vul.json") as testfile:
+ with open("unittests/scans/wizcli_img/wizcli_img_many_vul.json", encoding="utf-8") as testfile:
parser = WizcliImgParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_wpscan_parser.py b/unittests/tools/test_wpscan_parser.py
index 1e70aa4e51d..bd71aae2946 100644
--- a/unittests/tools/test_wpscan_parser.py
+++ b/unittests/tools/test_wpscan_parser.py
@@ -9,13 +9,13 @@ class TestWpscanParser(DojoTestCase):
def test_parse_file_empty(self):
"""Report from the tool wich have no data"""
- with open("unittests/scans/wpscan/empty.json") as testfile:
+ with open("unittests/scans/wpscan/empty.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_exemple(self):
- with open("unittests/scans/wpscan/sample.json") as testfile:
+ with open("unittests/scans/wpscan/sample.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -29,7 +29,7 @@ def test_parse_file_exemple(self):
self.assertEqual(datetime.datetime(2021, 3, 26, 11, 50, 50), finding.date)
def test_parse_file_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/wpscan/wordpress_no_vuln.json") as testfile:
+ with open("unittests/scans/wpscan/wordpress_no_vuln.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -38,7 +38,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
self.assertEqual(7, len(findings))
def test_parse_file_with_one_vuln_has_one_findings(self):
- with open("unittests/scans/wpscan/wordpress_one_vuln.json") as testfile:
+ with open("unittests/scans/wpscan/wordpress_one_vuln.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -52,7 +52,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(datetime.datetime(2019, 7, 2, 19, 11, 16), finding.date)
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
- with open("unittests/scans/wpscan/wordpress_many_vuln.json") as testfile:
+ with open("unittests/scans/wpscan/wordpress_many_vuln.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -66,7 +66,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(datetime.datetime(2019, 7, 2, 19, 11, 16), finding.date)
def test_parse_file_with_multiple_vuln(self):
- with open("unittests/scans/wpscan/wpscan.json") as testfile:
+ with open("unittests/scans/wpscan/wpscan.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -93,7 +93,7 @@ def test_parse_file_with_multiple_vuln(self):
self.assertEqual("", finding.get_scanner_confidence_text()) # data are => "confidence": 100,
def test_parse_file_with_multiple_vuln_in_version(self):
- with open("unittests/scans/wpscan/wordpress_vuln_version.json") as testfile:
+ with open("unittests/scans/wpscan/wordpress_vuln_version.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -110,7 +110,7 @@ def test_parse_file_with_multiple_vuln_in_version(self):
self.assertEqual("", finding.get_scanner_confidence_text()) # data are => 100%
def test_parse_file_issue5774(self):
- with open("unittests/scans/wpscan/issue5774.json") as testfile:
+ with open("unittests/scans/wpscan/issue5774.json", encoding="utf-8") as testfile:
parser = WpscanParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
diff --git a/unittests/tools/test_xanitizer_parser.py b/unittests/tools/test_xanitizer_parser.py
index 0b27b985a5f..22b15010c98 100644
--- a/unittests/tools/test_xanitizer_parser.py
+++ b/unittests/tools/test_xanitizer_parser.py
@@ -6,19 +6,19 @@
class TestXanitizerParser(DojoTestCase):
def test_parse_file_with_no_findings(self):
- with open("unittests/scans/xanitizer/no-findings.xml") as testfile:
+ with open("unittests/scans/xanitizer/no-findings.xml", encoding="utf-8") as testfile:
parser = XanitizerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(0, len(findings))
def test_parse_file_with_one_findings(self):
- with open("unittests/scans/xanitizer/one-findings.xml") as testfile:
+ with open("unittests/scans/xanitizer/one-findings.xml", encoding="utf-8") as testfile:
parser = XanitizerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(1, len(findings))
def test_parse_file_with_multiple_findings(self):
- with open("unittests/scans/xanitizer/multiple-findings.xml") as testfile:
+ with open("unittests/scans/xanitizer/multiple-findings.xml", encoding="utf-8") as testfile:
parser = XanitizerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
@@ -27,7 +27,7 @@ def test_parse_file_with_multiple_findings(self):
self.assertEqual("CVE-2015-5211", finding.unsaved_vulnerability_ids[0])
def test_parse_file_with_multiple_findings_no_details(self):
- with open(get_unit_tests_path() + "/scans/xanitizer/multiple-findings-no-details.xml") as testfile:
+ with open(get_unit_tests_path() + "/scans/xanitizer/multiple-findings-no-details.xml", encoding="utf-8") as testfile:
parser = XanitizerParser()
findings = parser.get_findings(testfile, Test())
self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py
index 65206725caf..6c95592960d 100644
--- a/unittests/tools/test_yarn_audit_parser.py
+++ b/unittests/tools/test_yarn_audit_parser.py
@@ -16,13 +16,13 @@ def test_yarn_audit_parser_without_file_has_no_findings(self):
self.assertEqual(0, len(findings))
def test_yarn_audit_parser_with_no_vuln_has_no_findings(self):
- with open("unittests/scans/yarn_audit/yarn_audit_zero_vul.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn_audit_zero_vul.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
self.assertEqual(0, len(findings))
def test_yarn_audit_parser_with_one_criticle_vuln_has_one_findings(self):
- with open("unittests/scans/yarn_audit/yarn_audit_one_vul.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn_audit_one_vul.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
self.assertEqual(1, len(findings))
@@ -30,14 +30,14 @@ def test_yarn_audit_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual("4.5.2", findings[0].component_version)
def test_yarn_audit_parser_with_many_vuln_has_many_findings(self):
- with open("unittests/scans/yarn_audit/yarn_audit_many_vul.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn_audit_many_vul.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
self.assertEqual(3, len(findings))
def test_yarn_audit_parser_with_multiple_cwes_per_finding(self):
# cwes formatted as escaped list: "cwe": "[\"CWE-346\",\"CWE-453\"]",
- with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
self.assertEqual(3, len(findings))
@@ -53,7 +53,7 @@ def test_yarn_audit_parser_with_multiple_cwes_per_finding(self):
def test_yarn_audit_parser_with_multiple_cwes_per_finding_list(self):
# cwes formatted as proper list: "cwe": ["CWE-918","CWE-1333"],
- with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes2.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes2.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
self.assertEqual(2, len(findings))
@@ -64,16 +64,16 @@ def test_yarn_audit_parser_with_multiple_cwes_per_finding_list(self):
def test_yarn_audit_parser_empty_with_error(self):
with self.assertRaises(ValueError) as context:
- with open("unittests/scans/yarn_audit/empty_with_error.json") as testfile:
+ with open("unittests/scans/yarn_audit/empty_with_error.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
parser.get_findings(testfile, self.get_test())
- self.assertTrue(
- "yarn audit report contains errors:" in str(context.exception),
+ self.assertIn(
+ "yarn audit report contains errors:", str(context.exception),
)
- self.assertTrue("ECONNREFUSED" in str(context.exception))
+ self.assertIn("ECONNREFUSED", str(context.exception))
def test_yarn_audit_parser_issue_6495(self):
- with open("unittests/scans/yarn_audit/issue_6495.json") as testfile:
+ with open("unittests/scans/yarn_audit/issue_6495.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
@@ -83,7 +83,7 @@ def test_yarn_audit_parser_issue_6495(self):
self.assertEqual(findings[1].cve, None)
def test_yarn_audit_parser_yarn2_audit_issue9911(self):
- with open("unittests/scans/yarn_audit/yarn2_audit_issue9911.json") as testfile:
+ with open("unittests/scans/yarn_audit/yarn2_audit_issue9911.json", encoding="utf-8") as testfile:
parser = YarnAuditParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
diff --git a/unittests/tools/test_zap_parser.py b/unittests/tools/test_zap_parser.py
index 325e920598b..fd209962ac9 100644
--- a/unittests/tools/test_zap_parser.py
+++ b/unittests/tools/test_zap_parser.py
@@ -5,14 +5,14 @@
class TestZapParser(DojoTestCase):
def test_parse_no_findings(self):
- with open("unittests/scans/zap/empty_2.9.0.xml") as testfile:
+ with open("unittests/scans/zap/empty_2.9.0.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
self.assertEqual(0, len(findings))
def test_parse_some_findings(self):
- with open("unittests/scans/zap/some_2.9.0.xml") as testfile:
+ with open("unittests/scans/zap/some_2.9.0.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -23,7 +23,7 @@ def test_parse_some_findings(self):
endpoint.clean()
def test_parse_some_findings_0(self):
- with open("unittests/scans/zap/0_zap_sample.xml") as testfile:
+ with open("unittests/scans/zap/0_zap_sample.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -34,7 +34,7 @@ def test_parse_some_findings_0(self):
endpoint.clean()
def test_parse_some_findings_1(self):
- with open("unittests/scans/zap/1_zap_sample_0_and_new_absent.xml") as testfile:
+ with open("unittests/scans/zap/1_zap_sample_0_and_new_absent.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -45,7 +45,7 @@ def test_parse_some_findings_1(self):
endpoint.clean()
def test_parse_some_findings_2(self):
- with open("unittests/scans/zap/2_zap_sample_0_and_new_endpoint.xml") as testfile:
+ with open("unittests/scans/zap/2_zap_sample_0_and_new_endpoint.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -56,7 +56,7 @@ def test_parse_some_findings_2(self):
endpoint.clean()
def test_parse_some_findings_3(self):
- with open("unittests/scans/zap/3_zap_sampl_0_and_different_severities.xml") as testfile:
+ with open("unittests/scans/zap/3_zap_sampl_0_and_different_severities.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -67,7 +67,7 @@ def test_parse_some_findings_3(self):
endpoint.clean()
def test_parse_some_findings_5(self):
- with open("unittests/scans/zap/5_zap_sample_one.xml") as testfile:
+ with open("unittests/scans/zap/5_zap_sample_one.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -81,7 +81,7 @@ def test_parse_issue4360(self):
"""Report from GitHub issue 4360
see: https://github.com/DefectDojo/django-DefectDojo/issues/4360
"""
- with open("unittests/scans/zap/dvwa_baseline_dojo.xml") as testfile:
+ with open("unittests/scans/zap/dvwa_baseline_dojo.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
self.assertIsInstance(findings, list)
@@ -115,7 +115,7 @@ def test_parse_issue4697(self):
"""Report from GitHub issue 4697
see: https://github.com/DefectDojo/django-DefectDojo/issues/4697
"""
- with open("unittests/scans/zap/zap-results-first-scan.xml") as testfile:
+ with open("unittests/scans/zap/zap-results-first-scan.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -156,7 +156,7 @@ def test_parse_issue4697(self):
def test_parse_juicy(self):
"""Generated with OWASP Juicy shop"""
- with open("unittests/scans/zap/juicy2.xml") as testfile:
+ with open("unittests/scans/zap/juicy2.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
@@ -196,7 +196,7 @@ def test_parse_juicy(self):
self.assertEqual("assets", endpoint.path)
def test_parse_xml_plus_format(self):
- with open("unittests/scans/zap/zap-xml-plus-format.xml") as testfile:
+ with open("unittests/scans/zap/zap-xml-plus-format.xml", encoding="utf-8") as testfile:
parser = ZapParser()
findings = parser.get_findings(testfile, Test())
for finding in findings: