diff --git a/BurpSuite-SecretFinder/SecretFinder.py b/BurpSuite-SecretFinder/SecretFinder.py
index b3d6550..2cc994d 100644
--- a/BurpSuite-SecretFinder/SecretFinder.py
+++ b/BurpSuite-SecretFinder/SecretFinder.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-
-# SecretFinder: Burp Suite Extension to find and search apikeys/tokens from a webpage 
+# SecretFinder: Burp Suite Extension to find and search apikeys/tokens from a webpage
 # by m4ll0k
 # https://github.com/m4ll0k
@@ -50,7 +50,7 @@ def consolidateDuplicateIssues(self, existingIssue, newIssue):
     'firebase' : 'AAAA[A-Za-z0-9_-]{7}:[A-Za-z0-9_-]{140}',
     'google_captcha' : '6L[0-9A-Za-z-_]{38}|^6[0-9a-zA-Z_-]{39}$',
     'google_oauth' : 'ya29\.[0-9A-Za-z\-_]+',
-    'amazon_aws_access_key_id' : 'AKIA[0-9A-Z]{16}',
+    'amazon_aws_access_key_id' : 'A[SK]IA[0-9A-Z]{16}',
     'amazon_mws_auth_toke' : 'amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
     'amazon_aws_url' : 's3\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\.s3\.amazonaws.com',
     'facebook_access_token' : 'EAACEdEose0cBA[0-9A-Za-z]+',
@@ -88,9 +88,9 @@ def doActiveScan(self, baseRequestResponse,pa):
         for reg in self.regexs.items():
             tmp_issues = self._CustomScans.findRegEx(
-                BurpExtender.regex.replace(r'%%regex%%',reg[1]), 
+                BurpExtender.regex.replace(r'%%regex%%',reg[1]),
                 BurpExtender.issuename%(' '.join([x.title() for x in reg[0].split('_')])),
-                BurpExtender.issuelevel, 
+                BurpExtender.issuelevel,
                 BurpExtender.issuedetail
             )
             scan_issues = scan_issues + tmp_issues
@@ -110,7 +110,7 @@ def doPassiveScan(self, baseRequestResponse):
         for reg in self.regexs.items():
             tmp_issues = self._CustomScans.findRegEx(
                 BurpExtender.regex.replace(r'%%regex%%',reg[1]),
-                BurpExtender.issuename%(' '.join([x.title() for x in reg[0].split('_')])), 
+                BurpExtender.issuename%(' '.join([x.title() for x in reg[0].split('_')])),
                 BurpExtender.issuelevel,
                 BurpExtender.issuedetail
             )
diff --git a/README.md b/README.md
index c12104f..252a220 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 ## about SecretFinder
 
-SecretFinder is a python script based on [LinkFinder](https://github.com/GerbenJavado/LinkFinder), written to discover sensitive data like apikeys, accesstoken, authorizations, jwt,..etc in JavaScript files. It does so by using jsbeautifier for python in combination with a fairly large regular expression. The regular expressions consists of four small regular expressions. These are responsible for finding and search anything on js files. 
+SecretFinder is a python script based on [LinkFinder](https://github.com/GerbenJavado/LinkFinder), written to discover sensitive data like apikeys, accesstoken, authorizations, jwt,..etc in JavaScript files. It does so by using jsbeautifier for python in combination with a fairly large regular expression. The regular expressions consists of four small regular expressions. These are responsible for finding and search anything on js files.
 
 The output is given in HTML or plaintext.
@@ -9,7 +9,7 @@ The output is given in HTML or plaintext.
 
-## Help 
+## Help
 
 ```
 usage: SecretFinder.py [-h] [-e] -i INPUT [-o OUTPUT] [-r REGEX] [-b]
@@ -84,10 +84,10 @@ $ python3 SecretFinder.py
 
 ``python3 SecretFinder.py -i https://example.com/ -e -o cli -c 'mysessionid=111234' -H 'x-header:value1\nx-header2:value2' -p 127.0.0.1:8080 -r 'apikey=my.api.key[a-zA-Z]+'``
 
-- Input accept all this entries: 
-
+- Input accept all this entries:
+
   - Url: e.g. https://www.google.com/ [-e] is required
-  - Js url: e.g. https://www.google.com/1.js
+  - Js url: e.g. https://www.google.com/1.js
   - Folder: e.g. myjsfiles/*
   - Local file: e.g /js/myjs/file.js
@@ -103,7 +103,7 @@ _regex = {
     'google_api' : r'AIza[0-9A-Za-z-_]{35}',
     'google_captcha' : r'6L[0-9A-Za-z-_]{38}|^6[0-9a-zA-Z_-]{39}$',
     'google_oauth' : r'ya29\.[0-9A-Za-z\-_]+',
-    'amazon_aws_access_key_id' : r'AKIA[0-9A-Z]{16}',
+    'amazon_aws_access_key_id' : r'A[SK]IA[0-9A-Z]{16}',
     'amazon_mws_auth_toke' : r'amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
     'amazon_aws_url' : r's3\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\.s3\.amazonaws.com',
     'facebook_access_token' : r'EAACEdEose0cBA[0-9A-Za-z]+',
@@ -125,8 +125,8 @@ _regex = {
     'ssh_dc_private_key' : r'-----BEGIN EC PRIVATE KEY-----',
     'pgp_private_block' : r'-----BEGIN PGP PRIVATE KEY BLOCK-----',
     'json_web_token' : r'ey[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$',
-    
-    'name_for_my_regex' : r'my_regex', 
+
+    'name_for_my_regex' : r'my_regex',
     # for example
     'example_api_key' : r'^example\w+{10,50}'
 }
diff --git a/SecretFinder.py b/SecretFinder.py
index a8057b7..c0ad533 100644
--- a/SecretFinder.py
+++ b/SecretFinder.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python 
+#!/usr/bin/env python
 # SecretFinder - Tool for discover apikeys/accesstokens and sensitive data in js file
 # based to LinkFinder - github.com/GerbenJavado
 # By m4ll0k (@m4ll0k2) github.com/m4ll0k
@@ -12,14 +12,14 @@
 import re
 import glob
-import argparse 
-import jsbeautifier 
 import webbrowser
-import subprocess 
 import base64
-import requests 
-import string 
-import random 
+import argparse
+import jsbeautifier
+import subprocess
+import requests
+import string
+import random
 from html import escape
 import urllib3
 import xml.etree.ElementTree
@@ -33,13 +33,13 @@
 from lxml import html
 from urllib.parse import urlparse
 
-# regex 
+# regex
 _regex = {
     'google_api' : r'AIza[0-9A-Za-z-_]{35}',
     'firebase' : r'AAAA[A-Za-z0-9_-]{7}:[A-Za-z0-9_-]{140}',
     'google_captcha' : r'6L[0-9A-Za-z-_]{38}|^6[0-9a-zA-Z_-]{39}$',
     'google_oauth' : r'ya29\.[0-9A-Za-z\-_]+',
-    'amazon_aws_access_key_id' : r'AKIA[0-9A-Z]{16}',
+    'amazon_aws_access_key_id' : r'A[SK]IA[0-9A-Z]{16}',
     'amazon_mws_auth_toke' : r'amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
     'amazon_aws_url' : r's3\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\.s3\.amazonaws.com',
     'amazon_aws_url2' : r"(" \
@@ -135,7 +135,7 @@ $$content$$
 
- 
+
 Report an issue.
@@ -162,7 +162,7 @@ def getContext(matches,content,name,rex='.+?'):
             'name' : name,
             'context' : context,
             'multi_context' : True if len(context) > 1 else False
-        } 
+        }
         items.append(item)
     return items
@@ -219,11 +219,11 @@ def parser_file(content,mode=1,more_regex=None,no_dup=1):
         else:
             filtered_items.append(item)
     return filtered_items
- 
+
 def parser_input(input):
     ''' Parser Input '''
-    # method 1 - url 
+    # method 1 - url
     schemes = ('http://','https://','ftp://','file://','ftps://')
     if input.startswith(schemes):
         return [input]
@@ -254,8 +254,8 @@ def parser_input(input):
         for index, path in enumerate(paths):
             paths[index] = "file://%s" % path
         return (paths if len(paths)> 0 else parser_error('Input with wildcard does not match any files.'))
- 
-    # method 5 - local file 
+
+    # method 5 - local file
     path = "file://%s"% os.path.abspath(input)
     return [path if os.path.exists(input) else parser_error('file could not be found (maybe you forgot to add http/https).')]
@@ -269,13 +269,13 @@ def html_save(output):
         text_file = open(args.output,"wb")
         text_file.write(_template.replace('$$content$$',output).encode('utf-8'))
         text_file.close()
- 
+
         print('URL to access output: file://%s'%os.path.abspath(args.output))
         file = 'file:///%s'%(os.path.abspath(args.output))
         if sys.platform == 'linux' or sys.platform == 'linux2':
             subprocess.call(['xdg-open',file])
         else:
-            webbrowser.open(file) 
+            webbrowser.open(file)
     except Exception as err:
         print('Output can\'t be saved in %s due to exception: %s'%(args.output,err))
     finally:
@@ -287,9 +287,9 @@ def cli_output(matched):
         print(match.get('name')+'\t->\t'+match.get('matched').encode('ascii','ignore').decode('utf-8'))
 
 def urlParser(url):
-    ''' urlParser ''' 
+    ''' urlParser '''
     parse = urlparse(url)
-    urlParser.this_root = parse.scheme + '://' + parse.netloc 
+    urlParser.this_root = parse.scheme + '://' + parse.netloc
     urlParser.this_path = parse.scheme + '://' + parse.netloc + '/' + parse.path
 
 def extractjsurl(content,base_url):
@@ -298,7 +298,7 @@ def extractjsurl(content,base_url):
     all_src = []
     urlParser(base_url)
     for src in soup.xpath('//script'):
-        src = src.xpath('@src')[0] if src.xpath('@src') != [] else [] 
+        src = src.xpath('@src')[0] if src.xpath('@src') != [] else []
         if src != []:
             if src.startswith(('http://','https://','ftp://','ftps://')):
                 if src not in all_src:
                     all_src.append(src)
@@ -308,15 +308,15 @@ def extractjsurl(content,base_url):
                 if src not in all_src:
                     all_src.append(src)
             elif src.startswith('/'):
-                src = urlParser.this_root + src 
+                src = urlParser.this_root + src
                 if src not in all_src:
                     all_src.append(src)
             else:
-                src = urlParser.this_path + src 
+                src = urlParser.this_path + src
                 if src not in all_src:
                     all_src.append(src)
     if args.ignore and all_src != []:
-        temp = all_src 
+        temp = all_src
         ignore = []
         for i in args.ignore.split(';'):
             for src in all_src:
@@ -325,20 +325,20 @@ def extractjsurl(content,base_url):
        if ignore:
            for i in ignore:
                temp.pop(int(temp.index(i)))
-        return temp 
+        return temp
    if args.only:
-        temp = all_src 
+        temp = all_src
        only = []
        for i in args.only.split(';'):
            for src in all_src:
                if i in src:
                    only.append(src)
-        return only 
+        return only
    return all_src
 
 def send_request(url):
-    ''' Send Request ''' 
-    # read local file 
+    ''' Send Request '''
+    # read local file
     # https://github.com/dashea/requests-file
     if 'file://' in url:
         s = requests.Session()
@@ -356,25 +356,25 @@ def send_request(url):
        for i in args.header.split('\\n'):
            # replace space and split
            name,value = i.replace(' ','').split(':')
-            headers[name] = value 
+            headers[name] = value
    # add cookies
    if args.cookie:
        headers['Cookie'] = args.cookie
    headers.update(default_headers)
-    # proxy 
+    # proxy
    proxies = {}
    if args.proxy:
        proxies.update({
            'http' : args.proxy,
            'https' : args.proxy,
-            # ftp 
+            # ftp
        })
    try:
        resp = requests.get(
            url = url,
            verify = False,
-            headers = headers, 
+            headers = headers,
            proxies = proxies
        )
        return resp.content.decode('utf-8','replace')
@@ -399,8 +399,8 @@ def send_request(url):
    if args.input[-1:] == "/":
        # /aa/ -> /aa
        args.input = args.input[:-1]
- 
-    mode = 1 
+
+    mode = 1
    if args.output == "cli":
        mode = 0
    # add args
@@ -423,7 +423,7 @@ def send_request(url):
    # convert input to URLs or JS files
    urls = parser_input(args.input)
    # conver URLs to js file
-    output = '' 
+    output = ''
    for url in urls:
        print('[ + ] URL: '+url)
        if not args.burp:
@@ -431,7 +431,7 @@ def send_request(url):
        else:
            file = url.get('js')
            url = url.get('url')
- 
+
        matched = parser_file(file,mode)
        if args.output == 'cli':
            cli_output(matched)
@@ -459,6 +459,6 @@ def send_request(url):
                match.get('context')[0] if len(match.get('context')) > 0 else ''.join(match.get('context')),
                '%s'%(match.get('context') if len(match.get('context'))>1 else match.get('context'))
            )
-            output += header + body 
+            output += header + body
    if args.output != 'cli':
        html_save(output)
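
A note on the one non-whitespace change repeated across all three files: `AKIA[0-9A-Z]{16}` only matches long-term AWS access key IDs, while the broadened `A[SK]IA[0-9A-Z]{16}` also matches IDs beginning with `ASIA`, the prefix AWS uses for temporary (STS) credentials. A minimal sketch of the difference, using Python's `re` module with fabricated sample IDs (not real credentials):

```python
# Compare the old and new key-ID patterns from this patch.
# The sample strings below are made up for illustration only.
import re

old_pattern = re.compile(r'AKIA[0-9A-Z]{16}')
new_pattern = re.compile(r'A[SK]IA[0-9A-Z]{16}')

samples = [
    'AKIAIOSFODNN7EXAMPLE',  # long-term access key ID: matched by both patterns
    'ASIAIOSFODNN7EXAMPLE',  # temporary (STS) key ID: matched only by the new pattern
    'BKIAIOSFODNN7EXAMPLE',  # matched by neither pattern
]

for key in samples:
    print(key, bool(old_pattern.fullmatch(key)), bool(new_pattern.fullmatch(key)))
```

Expected output: the `AKIA` sample matches both patterns, the `ASIA` sample matches only the broadened pattern, and the last string matches neither.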