diff --git a/Dev/assets/stylesheets/extra-style.vcmriqom.min.css b/Dev/assets/stylesheets/extra-style.b78wdczn.min.css
similarity index 96%
rename from Dev/assets/stylesheets/extra-style.vcmriqom.min.css
rename to Dev/assets/stylesheets/extra-style.b78wdczn.min.css
index a01405c7b..9df78ff25 100644
--- a/Dev/assets/stylesheets/extra-style.vcmriqom.min.css
+++ b/Dev/assets/stylesheets/extra-style.b78wdczn.min.css
@@ -1,3 +1,3 @@
 :root{--bbot-orange: #ff8400}p img{max-width:60em !important}.demonic-jimmy{color:var(--bbot-orange)}.md-nav__link--active{font-weight:bold}.md-typeset__table td:first-child{font-weight:bold}a.md-source,.md-header__topic>span,a:hover{color:var(--bbot-orange)}article.md-content__inner h1{font-weight:500;color:var(--bbot-orange)}article.md-content__inner h1,article.md-content__inner h2{color:var(--bbot-orange)}article.md-content__inner h2,article.md-content__inner h3,article.md-content__inner h4,article.md-content__inner h5{font-weight:300}article.md-content__inner div.highlight{background-color:unset !important}table{font-family:monospace}table td{max-width:100em}[data-md-color-primary=black] p a.md-button--primary{background-color:black;border:none}[data-md-color-primary=black] p a.md-button--primary:hover{background-color:var(--bbot-orange)}[data-md-color-scheme="slate"] div.md-source__repository ul{color:white}[data-md-color-scheme="slate"] .md-nav__link{color:white}[data-md-color-scheme="slate"] .md-nav__link--active{font-weight:bold}[data-md-color-scheme="slate"] .md-typeset__table tr{background-color:#202027}[data-md-color-scheme="slate"] .md-nav__link.md-nav__link--active{color:var(--bbot-orange)}[data-md-color-scheme="slate"] .md-typeset__table thead tr{color:var(--bbot-orange);background-color:var(--md-primary-fg-color--dark)}
 
-/*# sourceMappingURL=extra-style.vcmriqom.min.css.map */
\ No newline at end of file
+/*# sourceMappingURL=extra-style.b78wdczn.min.css.map */
\ No newline at end of file
diff --git a/Dev/assets/stylesheets/extra-style.vcmriqom.min.css.map b/Dev/assets/stylesheets/extra-style.b78wdczn.min.css.map
similarity index 98%
rename from Dev/assets/stylesheets/extra-style.vcmriqom.min.css.map
rename to Dev/assets/stylesheets/extra-style.b78wdczn.min.css.map
index 7d2c43336..bdb57feec 100644
--- a/Dev/assets/stylesheets/extra-style.vcmriqom.min.css.map
+++ b/Dev/assets/stylesheets/extra-style.b78wdczn.min.css.map
@@ -1,6 +1,6 @@
 {
 	"version": 3,
-	"file": "extra-style.vcmriqom.min.css",
+	"file": "extra-style.b78wdczn.min.css",
 	"sources": [
 		"extra_sass/style.css.scss"
 	],
diff --git a/Dev/comparison/index.html b/Dev/comparison/index.html
index 2826deb3c..b3320e96b 100644
--- a/Dev/comparison/index.html
+++ b/Dev/comparison/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/contribution/index.html b/Dev/contribution/index.html
index 11c0f744c..6dfd49e21 100644
--- a/Dev/contribution/index.html
+++ b/Dev/contribution/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/architecture/index.html b/Dev/dev/architecture/index.html
index c8e0b00a8..513e125d9 100644
--- a/Dev/dev/architecture/index.html
+++ b/Dev/dev/architecture/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/basemodule/index.html b/Dev/dev/basemodule/index.html
index aa147eefc..0fb3a0908 100644
--- a/Dev/dev/basemodule/index.html
+++ b/Dev/dev/basemodule/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/core/index.html b/Dev/dev/core/index.html
index cd381817b..678458b03 100644
--- a/Dev/dev/core/index.html
+++ b/Dev/dev/core/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/dev_environment/index.html b/Dev/dev/dev_environment/index.html
index b23abe3cd..c57f771ad 100644
--- a/Dev/dev/dev_environment/index.html
+++ b/Dev/dev/dev_environment/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/discord_bot/index.html b/Dev/dev/discord_bot/index.html
index 6af868f78..fa80c19fa 100644
--- a/Dev/dev/discord_bot/index.html
+++ b/Dev/dev/discord_bot/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/engine/index.html b/Dev/dev/engine/index.html
index cf7d6fa21..5f133ff7a 100644
--- a/Dev/dev/engine/index.html
+++ b/Dev/dev/engine/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/event/index.html b/Dev/dev/event/index.html
index b1acd6e97..9f9ff7d93 100644
--- a/Dev/dev/event/index.html
+++ b/Dev/dev/event/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/command/index.html b/Dev/dev/helpers/command/index.html
index 025b58784..fbd63b4b9 100644
--- a/Dev/dev/helpers/command/index.html
+++ b/Dev/dev/helpers/command/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/dns/index.html b/Dev/dev/helpers/dns/index.html
index 07a05daa0..a8a5adeef 100644
--- a/Dev/dev/helpers/dns/index.html
+++ b/Dev/dev/helpers/dns/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/index.html b/Dev/dev/helpers/index.html
index ef700f81b..5e6453c5a 100644
--- a/Dev/dev/helpers/index.html
+++ b/Dev/dev/helpers/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/interactsh/index.html b/Dev/dev/helpers/interactsh/index.html
index 08e597011..efb902ce7 100644
--- a/Dev/dev/helpers/interactsh/index.html
+++ b/Dev/dev/helpers/interactsh/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/misc/index.html b/Dev/dev/helpers/misc/index.html
index f940696f5..9eff489c1 100644
--- a/Dev/dev/helpers/misc/index.html
+++ b/Dev/dev/helpers/misc/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/web/index.html b/Dev/dev/helpers/web/index.html
index 84107a6d0..4aac9b4b2 100644
--- a/Dev/dev/helpers/web/index.html
+++ b/Dev/dev/helpers/web/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/helpers/wordcloud/index.html b/Dev/dev/helpers/wordcloud/index.html
index 5e88e7474..ce3a83a81 100644
--- a/Dev/dev/helpers/wordcloud/index.html
+++ b/Dev/dev/helpers/wordcloud/index.html
@@ -19,7 +19,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/index.html b/Dev/dev/index.html
index 44b8f24fc..e5788242b 100644
--- a/Dev/dev/index.html
+++ b/Dev/dev/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/module_howto/index.html b/Dev/dev/module_howto/index.html
index 2231743a9..bf934c90d 100644
--- a/Dev/dev/module_howto/index.html
+++ b/Dev/dev/module_howto/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/presets/index.html b/Dev/dev/presets/index.html
index 86dc30e55..f9f45b40d 100644
--- a/Dev/dev/presets/index.html
+++ b/Dev/dev/presets/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/scanner/index.html b/Dev/dev/scanner/index.html
index 714d1ea1f..e0fc29538 100644
--- a/Dev/dev/scanner/index.html
+++ b/Dev/dev/scanner/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
@@ -2273,7 +2273,7 @@ <h1 class="doc doc-heading" id="bbot.scanner.Scanner">
         <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">_success</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #79c0ff">False</span>
 
         <span style="color: #ff7b72">if</span> <span style="color: #e6edf3">scan_id</span> <span style="color: #ff7b72; font-weight: bold">is</span> <span style="color: #ff7b72; font-weight: bold">not</span> <span style="color: #79c0ff">None</span><span style="color: #e6edf3">:</span>
-            <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #e6edf3">str(id)</span>
+            <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #e6edf3">str(scan_id)</span>
         <span style="color: #ff7b72">else</span><span style="color: #e6edf3">:</span>
             <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #79c0ff">f</span><span style="color: #a5d6ff">"SCAN:{</span><span style="color: #e6edf3">sha1(rand_string(</span><span style="color: #a5d6ff">20</span><span style="color: #e6edf3">))</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">hexdigest()</span><span style="color: #a5d6ff">}"</span>
 
@@ -3821,7 +3821,7 @@ <h2 class="doc doc-heading" id="bbot.scanner.Scanner.__init__">
     <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">_success</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #79c0ff">False</span>
 
     <span style="color: #ff7b72">if</span> <span style="color: #e6edf3">scan_id</span> <span style="color: #ff7b72; font-weight: bold">is</span> <span style="color: #ff7b72; font-weight: bold">not</span> <span style="color: #79c0ff">None</span><span style="color: #e6edf3">:</span>
-        <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #e6edf3">str(id)</span>
+        <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #e6edf3">str(scan_id)</span>
     <span style="color: #ff7b72">else</span><span style="color: #e6edf3">:</span>
         <span style="color: #e6edf3">self</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">id</span> <span style="color: #ff7b72; font-weight: bold">=</span> <span style="color: #79c0ff">f</span><span style="color: #a5d6ff">"SCAN:{</span><span style="color: #e6edf3">sha1(rand_string(</span><span style="color: #a5d6ff">20</span><span style="color: #e6edf3">))</span><span style="color: #ff7b72; font-weight: bold">.</span><span style="color: #e6edf3">hexdigest()</span><span style="color: #a5d6ff">}"</span>
 
diff --git a/Dev/dev/target/index.html b/Dev/dev/target/index.html
index a8a903a55..7ce460d25 100644
--- a/Dev/dev/target/index.html
+++ b/Dev/dev/target/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/dev/tests/index.html b/Dev/dev/tests/index.html
index 14ce7421d..f5d6811ef 100644
--- a/Dev/dev/tests/index.html
+++ b/Dev/dev/tests/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/how_it_works/index.html b/Dev/how_it_works/index.html
index 5a09d427b..721f67dd1 100644
--- a/Dev/how_it_works/index.html
+++ b/Dev/how_it_works/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/index.html b/Dev/index.html
index a93914e0d..4a15aa0dd 100644
--- a/Dev/index.html
+++ b/Dev/index.html
@@ -19,7 +19,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL(".",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/modules/custom_yara_rules/index.html b/Dev/modules/custom_yara_rules/index.html
index 1a6803d3a..4a7dec5aa 100644
--- a/Dev/modules/custom_yara_rules/index.html
+++ b/Dev/modules/custom_yara_rules/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/modules/internal_modules/index.html b/Dev/modules/internal_modules/index.html
index 808f4f338..0c4ce487f 100644
--- a/Dev/modules/internal_modules/index.html
+++ b/Dev/modules/internal_modules/index.html
@@ -18,7 +18,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/modules/list_of_modules/index.html b/Dev/modules/list_of_modules/index.html
index 829bb061f..74c8ecec2 100644
--- a/Dev/modules/list_of_modules/index.html
+++ b/Dev/modules/list_of_modules/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/modules/nuclei/index.html b/Dev/modules/nuclei/index.html
index df29d1fb1..c2c467155 100644
--- a/Dev/modules/nuclei/index.html
+++ b/Dev/modules/nuclei/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/release_history/index.html b/Dev/release_history/index.html
index ef97e8a14..8bac0f931 100644
--- a/Dev/release_history/index.html
+++ b/Dev/release_history/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/advanced/index.html b/Dev/scanning/advanced/index.html
index b430f8a62..b3ec10aef 100644
--- a/Dev/scanning/advanced/index.html
+++ b/Dev/scanning/advanced/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/configuration/index.html b/Dev/scanning/configuration/index.html
index 652a03c25..f827e5cda 100644
--- a/Dev/scanning/configuration/index.html
+++ b/Dev/scanning/configuration/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/events/index.html b/Dev/scanning/events/index.html
index 8a4324019..6968f83eb 100644
--- a/Dev/scanning/events/index.html
+++ b/Dev/scanning/events/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/index.html b/Dev/scanning/index.html
index 729cda33e..1bc85ab5f 100644
--- a/Dev/scanning/index.html
+++ b/Dev/scanning/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/output/index.html b/Dev/scanning/output/index.html
index 91d168d63..1464e21ea 100644
--- a/Dev/scanning/output/index.html
+++ b/Dev/scanning/output/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/presets/index.html b/Dev/scanning/presets/index.html
index d5546d084..1d6bd369d 100644
--- a/Dev/scanning/presets/index.html
+++ b/Dev/scanning/presets/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/presets_list/index.html b/Dev/scanning/presets_list/index.html
index a3d8ab697..7a207b2fb 100644
--- a/Dev/scanning/presets_list/index.html
+++ b/Dev/scanning/presets_list/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/scanning/tips_and_tricks/index.html b/Dev/scanning/tips_and_tricks/index.html
index ea511470e..0ebe52408 100644
--- a/Dev/scanning/tips_and_tricks/index.html
+++ b/Dev/scanning/tips_and_tricks/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("../..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
diff --git a/Dev/search/search_index.json b/Dev/search/search_index.json
index d3007d84e..cbba670f4 100644
--- a/Dev/search/search_index.json
+++ b/Dev/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Getting Started","text":"<p>A BBOT scan in real-time - visualization with VivaGraphJS</p>"},{"location":"#installation","title":"Installation","text":"<p>Supported Platforms</p> <p>Only Linux is supported at this time. Windows and macOS are not supported. If you use one of these platforms, consider using Docker.</p> <p>BBOT offers multiple methods of installation, including pipx and Docker. If you plan to dev on BBOT, see Installation (Poetry).</p>"},{"location":"#python-pip-pipx","title":"Python (pip / pipx)","text":"Note <p><code>pipx</code> installs BBOT inside its own virtual environment.</p> <pre><code># stable version\npipx install bbot\n\n# bleeding edge (dev branch)\npipx install --pip-args '\\--pre' bbot\n\n# execute bbot command\nbbot --help\n</code></pre>"},{"location":"#docker","title":"Docker","text":"<p>Docker images are provided, along with helper script <code>bbot-docker.sh</code> to persist your scan data.</p> <pre><code># bleeding edge (dev)\ndocker run -it blacklanternsecurity/bbot --help\n\n# stable\ndocker run -it blacklanternsecurity/bbot:stable --help\n\n# helper script\ngit clone https://github.com/blacklanternsecurity/bbot &amp;&amp; cd bbot\n./bbot-docker.sh --help\n</code></pre>"},{"location":"#example-commands","title":"Example Commands","text":"<p>Below are some examples of common scans.</p> <p>Subdomains:</p> <pre><code># Perform a full subdomain enumeration on evilcorp.com\nbbot -t evilcorp.com -p subdomain-enum\n</code></pre> <p>Subdomains (passive only):</p> <pre><code># Perform a passive-only subdomain enumeration on evilcorp.com\nbbot -t evilcorp.com -p subdomain-enum -rf passive\n</code></pre> <p>Subdomains + port scan + web screenshots:</p> <pre><code># Port-scan every subdomain, screenshot every webpage, output to current directory\nbbot -t evilcorp.com -p subdomain-enum -m portscan gowitness -n my_scan -o .\n</code></pre> <p>Subdomains + basic web scan:</p> <pre><code># A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules\nbbot -t evilcorp.com -p subdomain-enum web-basic\n</code></pre> <p>Web spider:</p> <pre><code># Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc.\nbbot -t www.evilcorp.com -p spider -c web.spider_distance=2 web.spider_depth=2\n</code></pre> <p>Everything everywhere all at once:</p> <pre><code># Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei\nbbot -t evilcorp.com -p kitchen-sink\n</code></pre>"},{"location":"#api-keys","title":"API Keys","text":"<p>BBOT works just fine without API keys. However, there are certain modules that need them to function. 
If you have API keys and want to make use of these modules, you can place them either in your preset:</p> my_preset.yml<pre><code>description: My custom subdomain enum preset\n\ninclude:\n  - subdomain-enum\n  - cloud-enum\n\nconfig:\n  modules:\n    shodan_dns:\n      api_key: deadbeef\n    virustotal:\n      api_key: cafebabe\n</code></pre> <p>...in BBOT's global YAML config (<code>~/.config/bbot/bbot.yml</code>):</p> <p>Note: this will ensure the API keys are used in all scans, regardless of preset.</p> ~/.config/bbot/bbot.yml<pre><code>modules:\n  shodan_dns:\n    api_key: deadbeef\n  virustotal:\n    api_key: cafebabe\n</code></pre> <p>...or directly on the command-line:</p> <pre><code># specify API key with -c\nbbot -t evilcorp.com -f subdomain-enum -c modules.shodan_dns.api_key=deadbeef modules.virustotal.api_key=cafebabe\n</code></pre> <p>For more information, see Configuration. For a full list of modules, including which ones require API keys, see List of Modules.</p> <p>Next Up: Scanning --&gt;</p>"},{"location":"comparison/","title":"Comparison to Other Tools","text":"<p>BBOT does a lot more than just subdomain enumeration. However, subdomain enumeration is arguably the most important part of OSINT, and since there's so many subdomain enumeration tools out there, they're the easiest class of tool to compare it to.</p> <p>Thanks to BBOT's recursive nature (and its <code>dnsbrute_mutations</code> module with its NLP-powered subdomain mutations), it typically finds about 20-25% more than other tools such as <code>Amass</code> or <code>theHarvester</code>. This holds true especially for larger targets like <code>delta.com</code> (1000+ subdomains):</p>"},{"location":"comparison/#subdomains-found","title":"Subdomains Found","text":""},{"location":"comparison/#runtimes-lower-is-better","title":"Runtimes (Lower is Better)","text":"<p>For a detailed analysis of this data, please see Subdomain Enumeration Tool Face-Off</p>"},{"location":"comparison/#ebaycom-larger-domain","title":"Ebay.com (larger domain)","text":"<p>Note that in this benchmark, Spiderfoot crashed after ~20 minutes due to excessive memory usage. Amass never finished and had to be cancelled after 24h. All other tools finished successfully.</p>"},{"location":"contribution/","title":"Contribution","text":"<p>We welcome contributions! If you have an idea for a new module, or are a Python developer who wants to get involved, please fork us or come talk to us on Discord.</p> <p>To get started devving, see the following links:</p> <ul> <li>Setting up a Dev Environment</li> <li>How to Write a BBOT Module</li> <li>Discord Bot Example</li> </ul>"},{"location":"how_it_works/","title":"How it Works","text":""},{"location":"how_it_works/#bbots-recursive-philosophy","title":"BBOT's Recursive Philosophy","text":"<p>It's well-known that when you're doing recon, it's best to do it recursively. However, there are very few recursive tools, and the main reason for this is because making a recursive tool is hard. In particular, it's very difficult to build a large-scale recursive system that interacts with the internet, and to keep it stable. When we first set out to make BBOT, we didn't know this, and it was definitely a lesson we learned the hard way. BBOT's stability is thanks to its extensive Unit Tests.</p> <p>BBOT inherits its recursive philosophy from Spiderfoot, which means it is also event-driven. 
Each of BBOT's 100+ modules consume a certain type of Event, use it to discover something new, and produce new events, which get distributed to all the other modules. This happens again and again -- thousands of times during a scan -- spidering outwards in a recursive web of discovery.</p> <p>Below is an interactive graph showing the relationships between modules and the event types they produce and consume.</p>"},{"location":"how_it_works/#how-bbot-modules-work-together","title":"How BBOT Modules Work Together","text":"<p>Each BBOT module does one specific task, such as querying an API for subdomains, or running a tool like <code>nuclei</code>, and is carefully designed to work together with other modules inside BBOT's recursive system.</p> <p>For example, the <code>portscan</code> module consumes <code>DNS_NAME</code>, and produces <code>OPEN_TCP_PORT</code>. The <code>sslcert</code> module consumes <code>OPEN_TCP_PORT</code> and produces <code>DNS_NAME</code>. You can see how even these two modules, when enabled together, will feed each other recursively.</p> <p></p> <p>Because of this, enabling even one module has the potential to increase your results exponentially. This is exactly how BBOT is able to outperform other tools.</p> <p>To learn more about how events flow inside BBOT, see BBOT Internal Architecture.</p>"},{"location":"release_history/","title":"Release History","text":""},{"location":"release_history/#212-nov-1-2024","title":"2.1.2 - Nov 1, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1909</li> </ul>"},{"location":"release_history/#211-oct-31-2024","title":"2.1.1 - Oct 31, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1885</li> </ul>"},{"location":"release_history/#210-oct-18-2024","title":"2.1.0 - Oct 18, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1724</li> </ul>"},{"location":"release_history/#201-aug-29-2024","title":"2.0.1 - Aug 29, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1650</li> </ul>"},{"location":"release_history/#200-aug-9-2024","title":"2.0.0 - Aug 9, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1424</li> </ul>"},{"location":"release_history/#118-may-29-2024","title":"1.1.8 - May 29, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1382</li> </ul>"},{"location":"release_history/#117-may-15-2024","title":"1.1.7 - May 15, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1119</li> </ul>"},{"location":"release_history/#116-feb-21-2024","title":"1.1.6 - Feb 21, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1002</li> </ul>"},{"location":"release_history/#115-jan-15-2024","title":"1.1.5 - Jan 15, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/996</li> </ul>"},{"location":"release_history/#114-jan-11-2024","title":"1.1.4 - Jan 11, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/837</li> </ul>"},{"location":"release_history/#113-nov-4-2023","title":"1.1.3 - Nov 4, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/823</li> </ul>"},{"location":"release_history/#112-nov-3-2023","title":"1.1.2 - Nov 3, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/777</li> </ul>"},{"location":"release_history/#111-oct-11-2023","title":"1.1.1 - Oct 11, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/668</li> 
</ul>"},{"location":"release_history/#110-aug-4-2023","title":"1.1.0 - Aug 4, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/598</li> </ul>"},{"location":"release_history/#105-mar-10-2023","title":"1.0.5 - Mar 10, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/352</li> </ul>"},{"location":"release_history/#105-mar-10-2023_1","title":"1.0.5 - Mar 10, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/352</li> </ul>"},{"location":"troubleshooting/","title":"Troubleshooting","text":""},{"location":"troubleshooting/#installation-troubleshooting","title":"Installation troubleshooting","text":"<ul> <li><code>Fatal error from pip prevented installation.</code></li> <li><code>ERROR: No matching distribution found for bbot</code></li> <li><code>bash: /home/user/.local/bin/bbot: /home/user/.local/pipx/venvs/bbot/bin/python: bad interpreter</code></li> </ul> <p>If you get errors resembling any of the above, it's probably because your Python version is too old. To install a newer version (3.9+ is required), you will need to do something like this: <pre><code># install a newer version of python\nsudo apt install python3.9 python3.9-venv\n# install pipx\npython3.9 -m pip install --user pipx\n# add pipx to your path\npython3.9 -m pipx ensurepath\n# reboot\nreboot\n# install bbot\npython3.9 -m pipx install bbot\n# run bbot\nbbot --help\n</code></pre></p>"},{"location":"troubleshooting/#modulenotfounderror","title":"<code>ModuleNotFoundError</code>","text":"<p>If you run into a <code>ModuleNotFoundError</code>, try running your <code>bbot</code> command again with <code>--force-deps</code>. This will repair your modules' Python dependencies.</p>"},{"location":"troubleshooting/#regenerate-config","title":"Regenerate Config","text":"<p>As a troubleshooting step it is sometimes useful to clear out your older configs and let BBOT generate new ones. This will ensure that new defaults are property restored, etc. 
<pre><code># make a backup of the old configs\nmv ~/.config/bbot ~/.config/bbot.bak\n\n# generate new configs\nbbot\n</code></pre></p>"},{"location":"dev/","title":"BBOT Developer Reference","text":"<p>BBOT exposes a Python API that allows you to create, start, and stop scans.</p> <p>Documented in this section are commonly-used classes and functions within BBOT, along with usage examples.</p>"},{"location":"dev/#adding-bbot-to-your-python-project","title":"Adding BBOT to Your Python Project","text":"<p>If you are using Poetry, you can add BBOT to your python environment like this:</p> <pre><code># stable\npoetry add bbot\n\n# bleeding-edge (dev branch)\npoetry add bbot --allow-prereleases\n</code></pre>"},{"location":"dev/#running-a-bbot-scan-from-python","title":"Running a BBOT Scan from Python","text":""},{"location":"dev/#synchronous","title":"Synchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nif __name__ == \"__main__\":\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    for event in scan.start():\n        print(event)\n</code></pre>"},{"location":"dev/#asynchronous","title":"Asynchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nasync def main():\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    async for event in scan.async_start():\n        print(event.json())\n\nif __name__ == \"__main__\":\n    import asyncio\n    asyncio.run(main())\n</code></pre> <p>For a full listing of <code>Scanner</code> attributes and functions, see the <code>Scanner</code> Code Reference.</p>"},{"location":"dev/#multiple-targets","title":"Multiple Targets","text":"<p>You can specify any number of targets:</p> <pre><code># create a scan against multiple targets\nscan = Scanner(\n    \"evilcorp.com\",\n    \"evilcorp.org\",\n    \"evilcorp.ce\",\n    \"4.3.2.1\",\n    \"1.2.3.4/24\",\n    presets=[\"subdomain-enum\"]\n)\n\n# this is the same as:\ntargets = [\"evilcorp.com\", \"evilcorp.org\", \"evilcorp.ce\", \"4.3.2.1\", \"1.2.3.4/24\"]\nscan = Scanner(*targets, presets=[\"subdomain-enum\"])\n</code></pre> <p>For more details, including which types of targets are valid, see Targets</p>"},{"location":"dev/#other-custom-options","title":"Other Custom Options","text":"<p>In many cases, using a Preset like <code>subdomain-enum</code> is sufficient. However, the <code>Scanner</code> is flexible and accepts many other arguments that can override the default functionality. 
You can specify <code>flags</code>, <code>modules</code>, <code>output_modules</code>, a <code>whitelist</code> or <code>blacklist</code>, and custom <code>config</code> options:</p> <pre><code># create a scan against multiple targets\nscan = Scanner(\n    # targets\n    \"evilcorp.com\",\n    \"4.3.2.1\",\n    # enable these presets\n    presets=[\"subdomain-enum\"],\n    # whitelist these hosts\n    whitelist=[\"evilcorp.com\", \"evilcorp.org\"],\n    # blacklist these hosts\n    blacklist=[\"prod.evilcorp.com\"],\n    # also enable these individual modules\n    modules=[\"nuclei\", \"ipstack\"],\n    # exclude modules with these flags\n    exclude_flags=[\"slow\"],\n    # custom config options\n    config={\n        \"modules\": {\n            \"nuclei\": {\n                \"tags\": \"apache,nginx\"\n            }\n        }\n    }\n)\n</code></pre> <p>For a list of all the possible scan options, see the <code>Presets</code> Code Reference</p>"},{"location":"dev/architecture/","title":"BBOT Internal Architecture","text":"<p>Here is a basic overview of BBOT's internal architecture.</p>"},{"location":"dev/architecture/#queues","title":"Queues","text":"<p>Being both recursive and event-driven, BBOT makes heavy use of queues. These enable smooth communication between the modules, and ensure that large numbers of events can be produced without slowing down or clogging up the scan.</p> <p>Every module in BBOT has both an incoming and outgoing queue. Event types matching the module's <code>WATCHED_EVENTS</code> (e.g. <code>DNS_NAME</code>) are queued in its incoming queue, and processed by the module's <code>handle_event()</code> (or <code>handle_batch()</code> in the case of batched modules). If the module finds anything interesting, it creates an event and places it in its outgoing queue, to be processed by the scan and redistributed to other modules.</p>"},{"location":"dev/architecture/#event-flow","title":"Event Flow","text":"<p>Below is a graph showing the internal event flow in BBOT. White lines represent queues. Notice how some modules run in sequence, while others run in parallel. With the exception of a few specific modules, most BBOT modules are parallelized.</p> <p></p> <p>For a higher-level overview, see How it Works.</p>"},{"location":"dev/basemodule/","title":"BaseModule","text":""},{"location":"dev/basemodule/#bbot.modules.base.BaseModule","title":"BaseModule","text":"<p>The base class for all BBOT modules.</p> <p>Attributes:</p> <ul> <li> <code>watched_events</code>               (<code>List</code>)           \u2013            <p>Event types to watch.</p> </li> <li> <code>produced_events</code>               (<code>List</code>)           \u2013            <p>Event types to produce.</p> </li> <li> <code>meta</code>               (<code>Dict</code>)           \u2013            <p>Metadata about the module, such as whether authentication is required and a description.</p> </li> <li> <code>flags</code>               (<code>List</code>)           \u2013            <p>Flags indicating the type of module (must have at least \"safe\" or \"aggressive\" and \"passive\" or \"active\").</p> </li> <li> <code>deps_modules</code>               (<code>List</code>)           \u2013            <p>Other BBOT modules this module depends on. Empty list by default.</p> </li> <li> <code>deps_pip</code>               (<code>List</code>)           \u2013            <p>Python dependencies to install via pip. 
Empty list by default.</p> </li> <li> <code>deps_apt</code>               (<code>List</code>)           \u2013            <p>APT package dependencies to install. Empty list by default.</p> </li> <li> <code>deps_shell</code>               (<code>List</code>)           \u2013            <p>Other dependencies installed via shell commands. Uses ansible.builtin.shell. Empty list by default.</p> </li> <li> <code>deps_ansible</code>               (<code>List</code>)           \u2013            <p>Additional Ansible tasks for complex dependencies. Empty list by default.</p> </li> <li> <code>accept_dupes</code>               (<code>bool</code>)           \u2013            <p>Whether to accept incoming duplicate events. Default is False.</p> </li> <li> <code>suppress_dupes</code>               (<code>bool</code>)           \u2013            <p>Whether to suppress outgoing duplicate events. Default is True.</p> </li> <li> <code>per_host_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per host. Default is False.</p> </li> <li> <code>per_hostport_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per host:port. Default is False.</p> </li> <li> <code>per_domain_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per domain. Default is False.</p> </li> <li> <code>scope_distance_modifier</code>               (<code>(int, None)</code>)           \u2013            <p>Modifies scope distance acceptance for events. Default is 0. <pre><code>None == accept all events\n2 == accept events up to and including the scan's configured search distance plus two\n1 == accept events up to and including the scan's configured search distance plus one\n0 == (DEFAULT) accept events up to and including the scan's configured search distance\n</code></pre></p> </li> <li> <code>target_only</code>               (<code>bool</code>)           \u2013            <p>Accept only the initial target event(s). Default is False.</p> </li> <li> <code>in_scope_only</code>               (<code>bool</code>)           \u2013            <p>Accept only explicitly in-scope events. Default is False.</p> </li> <li> <code>options</code>               (<code>Dict</code>)           \u2013            <p>Customizable options for the module, e.g., {\"api_key\": \"\"}. Empty dict by default.</p> </li> <li> <code>options_desc</code>               (<code>Dict</code>)           \u2013            <p>Descriptions for options, e.g., {\"api_key\": \"API Key\"}. Empty dict by default.</p> </li> <li> <code>module_threads</code>               (<code>int</code>)           \u2013            <p>Maximum concurrent instances of handle_event() or handle_batch(). Default is 1.</p> </li> <li> <code>batch_size</code>               (<code>int</code>)           \u2013            <p>Size of batches processed by handle_batch(). Default is 1.</p> </li> <li> <code>batch_wait</code>               (<code>int</code>)           \u2013            <p>Seconds to wait before force-submitting a batch. Default is 10.</p> </li> <li> <code>api_failure_abort_threshold</code>               (<code>int</code>)           \u2013            <p>Threshold for setting error state after failed HTTP requests (only takes effect when <code>api_request()</code> is used. 
Default is 5.</p> </li> <li> <code>_preserve_graph</code>               (<code>bool</code>)           \u2013            <p>When set to True, accept events that may be duplicates but are necessary for construction of complete graph. Typically only enabled for output modules that need to maintain full chains of events, e.g. <code>neo4j</code> and <code>json</code>. Default is False.</p> </li> <li> <code>_stats_exclude</code>               (<code>bool</code>)           \u2013            <p>Whether to exclude this module from scan statistics. Default is False.</p> </li> <li> <code>_qsize</code>               (<code>int</code>)           \u2013            <p>Outgoing queue size (0 for infinite). Default is 0.</p> </li> <li> <code>_priority</code>               (<code>int</code>)           \u2013            <p>Priority level of events raised by this module, 1-5. Default is 3.</p> </li> <li> <code>_name</code>               (<code>str</code>)           \u2013            <p>Module name, overridden automatically. Default is 'base'.</p> </li> <li> <code>_type</code>               (<code>str</code>)           \u2013            <p>Module type, for differentiating between normal and output modules. Default is 'scan'.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>class BaseModule:\n    \"\"\"The base class for all BBOT modules.\n\n    Attributes:\n        watched_events (List): Event types to watch.\n\n        produced_events (List): Event types to produce.\n\n        meta (Dict): Metadata about the module, such as whether authentication is required and a description.\n\n        flags (List): Flags indicating the type of module (must have at least \"safe\" or \"aggressive\" and \"passive\" or \"active\").\n\n        deps_modules (List): Other BBOT modules this module depends on. Empty list by default.\n\n        deps_pip (List): Python dependencies to install via pip. Empty list by default.\n\n        deps_apt (List): APT package dependencies to install. Empty list by default.\n\n        deps_shell (List): Other dependencies installed via shell commands. Uses [ansible.builtin.shell](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/shell_module.html). Empty list by default.\n\n        deps_ansible (List): Additional Ansible tasks for complex dependencies. Empty list by default.\n\n        accept_dupes (bool): Whether to accept incoming duplicate events. Default is False.\n\n        suppress_dupes (bool): Whether to suppress outgoing duplicate events. Default is True.\n\n        per_host_only (bool): Limit the module to only scanning once per host. Default is False.\n\n        per_hostport_only (bool): Limit the module to only scanning once per host:port. Default is False.\n\n        per_domain_only (bool): Limit the module to only scanning once per domain. Default is False.\n\n        scope_distance_modifier (int, None): Modifies scope distance acceptance for events. Default is 0.\n            ```\n            None == accept all events\n            2 == accept events up to and including the scan's configured search distance plus two\n            1 == accept events up to and including the scan's configured search distance plus one\n            0 == (DEFAULT) accept events up to and including the scan's configured search distance\n            ```\n\n        target_only (bool): Accept only the initial target event(s). Default is False.\n\n        in_scope_only (bool): Accept only explicitly in-scope events. 
Default is False.\n\n        options (Dict): Customizable options for the module, e.g., {\"api_key\": \"\"}. Empty dict by default.\n\n        options_desc (Dict): Descriptions for options, e.g., {\"api_key\": \"API Key\"}. Empty dict by default.\n\n        module_threads (int): Maximum concurrent instances of handle_event() or handle_batch(). Default is 1.\n\n        batch_size (int): Size of batches processed by handle_batch(). Default is 1.\n\n        batch_wait (int): Seconds to wait before force-submitting a batch. Default is 10.\n\n        api_failure_abort_threshold (int): Threshold for setting error state after failed HTTP requests (only takes effect when `api_request()` is used. Default is 5.\n\n        _preserve_graph (bool): When set to True, accept events that may be duplicates but are necessary for construction of complete graph. Typically only enabled for output modules that need to maintain full chains of events, e.g. `neo4j` and `json`. Default is False.\n\n        _stats_exclude (bool): Whether to exclude this module from scan statistics. Default is False.\n\n        _qsize (int): Outgoing queue size (0 for infinite). Default is 0.\n\n        _priority (int): Priority level of events raised by this module, 1-5. Default is 3.\n\n        _name (str): Module name, overridden automatically. Default is 'base'.\n\n        _type (str): Module type, for differentiating between normal and output modules. Default is 'scan'.\n    \"\"\"\n\n    watched_events = []\n    produced_events = []\n    meta = {\"auth_required\": False, \"description\": \"Base module\"}\n    flags = []\n    options = {}\n    options_desc = {}\n\n    deps_modules = []\n    deps_pip = []\n    deps_apt = []\n    deps_shell = []\n    deps_ansible = []\n\n    accept_dupes = False\n    suppress_dupes = True\n    per_host_only = False\n    per_hostport_only = False\n    per_domain_only = False\n    scope_distance_modifier = 0\n    target_only = False\n    in_scope_only = False\n\n    _module_threads = 1\n    _batch_size = 1\n    batch_wait = 10\n\n    # API retries, etc.\n    _api_retries = 2\n    # disable the module after this many failed attempts in a row\n    _api_failure_abort_threshold = 3\n    # sleep for this many seconds after being rate limited\n    _429_sleep_interval = 30\n\n    default_discovery_context = \"{module} discovered {event.type}: {event.data}\"\n\n    _preserve_graph = False\n    _stats_exclude = False\n    _qsize = 1000\n    _priority = 3\n    _name = \"base\"\n    _type = \"scan\"\n    _intercept = False\n    _shuffle_incoming_queue = True\n\n    def __init__(self, scan):\n        \"\"\"Initializes a module instance.\n\n        Args:\n            scan: The BBOT scan object associated with this module instance.\n\n        Attributes:\n            scan: The scan object associated with this module.\n\n            errored (bool): Whether the module has errored out. 
Default is False.\n        \"\"\"\n        self.scan = scan\n        self.errored = False\n        self._log = None\n        self._incoming_event_queue = None\n        self._outgoing_event_queue = None\n        # track incoming events to prevent unwanted duplicates\n        self._incoming_dup_tracker = set()\n        # tracks which subprocesses are running under this module\n        self._proc_tracker = set()\n        # seconds since we've submitted a batch\n        self._last_submitted_batch = None\n        # additional callbacks to be executed alongside self.cleanup()\n        self.cleanup_callbacks = []\n        self._cleanedup = False\n        self._watched_events = None\n\n        self._task_counter = TaskCounter()\n\n        # string constant\n        self._custom_filter_criteria_msg = \"it did not meet custom filter criteria\"\n\n        self._api_keys = []\n\n        # track number of failures (for .api_request())\n        self._api_request_failures = 0\n\n        self._tasks = []\n        self._event_received = asyncio.Condition()\n        self._event_queued = asyncio.Condition()\n\n        # used for optional \"per host\" tracking\n        self._per_host_tracker = set()\n\n    async def setup(self):\n        \"\"\"\n        Performs one-time setup tasks for the module.\n\n        This method is responsible for preparing the module for its operation, which may include tasks\n        such as downloading necessary resources, validating configuration parameters, or other preliminary\n        checks.\n\n        Returns:\n            tuple:\n                - bool or None: A status indicating the outcome of the setup process. Returns `True` if\n                the setup was successful, `None` for a soft-fail where the module setup did not succeed\n                but the scan will continue with the module disabled, and `False` for a hard-fail where\n                the setup failure causes the scan to abort.\n                - str, optional: A reason for the setup failure, provided only when the setup does not\n                succeed (i.e., returns `None` or `False`).\n\n        Examples:\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n            &gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n            &gt;&gt;&gt;         return None, \"No API key specified\"\n\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     try:\n            &gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n            &gt;&gt;&gt;     except WordlistError as e:\n            &gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n            &gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n            &gt;&gt;&gt;     # Success: Setup completed without issues\n            &gt;&gt;&gt;     return True\n        \"\"\"\n\n        return True\n\n    async def handle_event(self, event):\n        \"\"\"Asynchronously handles incoming events that the module is configured to watch.\n\n        This method is automatically invoked when an event that matches any in `watched_events` is encountered during a scan. 
Override this method to implement custom event-handling logic for your module.\n\n        Args:\n            event (Event): The event object containing details about the incoming event.\n\n        Note:\n            This method should be overridden if the `batch_size` attribute of the module is set to 1.\n\n        Returns:\n            None\n        \"\"\"\n        pass\n\n    async def handle_batch(self, *events):\n        \"\"\"Handles incoming events in batches for optimized processing.\n\n        This method is automatically called when multiple events that match any in `watched_events` are encountered and the `batch_size` attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.\n\n        Args:\n            *events (Event): A variable number of Event objects to be processed in a batch.\n\n        Note:\n            This method should be overridden if the `batch_size` attribute of the module is set to a value greater than 1.\n\n        Returns:\n            None\n        \"\"\"\n        pass\n\n    async def filter_event(self, event):\n        \"\"\"Asynchronously filters incoming events based on custom criteria.\n\n        Override this method for more granular control over which events are accepted by your module. This method is called automatically before `handle_event()` for each incoming event that matches any in `watched_events`.\n\n        Args:\n            event (Event): The incoming Event object to be filtered.\n\n        Returns:\n            tuple: A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns `(True, None)` to indicate acceptance without reason.\n\n        Note:\n            This method should be overridden if the module requires custom logic for event filtering.\n        \"\"\"\n        return True\n\n    async def finish(self):\n        \"\"\"Asynchronously performs final tasks as the scan nears completion.\n\n        This method can be overridden to execute any necessary finalization logic. For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.\n\n        Returns:\n            None\n\n        Warnings:\n            This method may be called multiple times since it can raise events, which may re-trigger the \"finish\" phase of the scan. Optional to override.\n        \"\"\"\n        return\n\n    async def report(self):\n        \"\"\"Asynchronously executes a final task after the scan is complete but before cleanup.\n\n        This method can be overridden to aggregate data and raise summary events at the end of the scan.\n\n        Returns:\n            None\n\n        Note:\n            This method is called only once per scan.\n        \"\"\"\n        return\n\n    async def cleanup(self):\n        \"\"\"Asynchronously performs final cleanup operations after the scan is complete.\n\n        This method can be overridden to implement custom cleanup logic. 
It is called only once per scan and may not raise events.\n\n        Returns:\n            None\n\n        Note:\n            This method is called only once per scan and may not raise events.\n        \"\"\"\n        return\n\n    async def require_api_key(self):\n        \"\"\"\n        Asynchronously checks if an API key is required and valid.\n\n        Args:\n            None\n\n        Returns:\n            bool or tuple: Returns True if API key is valid and ready.\n                          Returns a tuple (None, \"error message\") otherwise.\n\n        Notes:\n            - Fetches the API key from the configuration.\n            - Calls the 'ping()' method to test API accessibility.\n            - Sets the API key readiness status accordingly.\n        \"\"\"\n        self.api_key = self.config.get(\"api_key\", \"\")\n        if self.auth_secret:\n            try:\n                await self.ping()\n                self.hugesuccess(f\"API is ready\")\n                return True, \"\"\n            except Exception as e:\n                self.trace(traceback.format_exc())\n                return None, f\"Error with API ({str(e).strip()})\"\n        else:\n            return None, \"No API key set\"\n\n    @property\n    def api_key(self):\n        if self._api_keys:\n            return self._api_keys[0]\n\n    @api_key.setter\n    def api_key(self, api_keys):\n        if isinstance(api_keys, str):\n            api_keys = [api_keys]\n        self._api_keys = list(api_keys)\n\n    def cycle_api_key(self):\n        if len(self._api_keys) &gt; 1:\n            self.verbose(f\"Cycling API key\")\n            self._api_keys.insert(0, self._api_keys.pop())\n        else:\n            self.debug(f\"No extra API keys to cycle\")\n\n    @property\n    def api_retries(self):\n        return max(self._api_retries + 1, len(self._api_keys))\n\n    @property\n    def api_failure_abort_threshold(self):\n        return (self.api_retries * self._api_failure_abort_threshold) + 1\n\n    async def ping(self, url=None):\n        \"\"\"Asynchronously checks the health of the configured API.\n\n        This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.\n\n        The method uses the `ping_url` attribute if defined, or falls back to a provided URL. If neither is available, no request is made.\n\n        Args:\n            url (str, optional): A specific URL to use for the ping request. 
If not provided, the method will use the `ping_url` attribute.\n\n        Returns:\n            None\n\n        Raises:\n            ValueError: If the API response is not successful (status code != 200).\n\n        Example Usage:\n            To use this method, simply define the `ping_url` attribute in your module:\n\n            class MyModule(BaseModule):\n                ping_url = \"https://api.example.com/ping\"\n\n            Alternatively, you can override this method for more complex health checks:\n\n            async def ping(self):\n                r = await self.api_request(f\"{self.base_url}/complex-health-check\")\n                if r.status_code != 200 or r.json().get('status') != 'healthy':\n                    raise ValueError(f\"API unhealthy: {r.text}\")\n        \"\"\"\n        if url is None:\n            url = getattr(self, \"ping_url\", \"\")\n        if url:\n            r = await self.api_request(url)\n            if getattr(r, \"status_code\", 0) != 200:\n                response_text = getattr(r, \"text\", \"no response from server\")\n                raise ValueError(response_text)\n\n    @property\n    def batch_size(self):\n        batch_size = self.config.get(\"batch_size\", None)\n        # only allow overriding the batch size if its default value is greater than 1\n        # this prevents modules from being accidentally neutered by an incorrect batch_size setting\n        if batch_size is None or self._batch_size == 1:\n            batch_size = self._batch_size\n        return batch_size\n\n    @property\n    def module_threads(self):\n        module_threads = self.config.get(\"module_threads\", None)\n        if module_threads is None:\n            module_threads = self._module_threads\n        return module_threads\n\n    @property\n    def auth_secret(self):\n        \"\"\"Indicates if the module is properly configured for authentication.\n\n        This read-only property should be used to check whether all necessary attributes (e.g., API keys, tokens, etc.) are configured to perform authenticated requests in the module. 
Commonly used in setup or initialization steps.\n\n        Returns:\n            bool: True if the module is properly configured for authentication, otherwise False.\n        \"\"\"\n        return getattr(self, \"api_key\", \"\")\n\n    def get_watched_events(self):\n        \"\"\"Retrieve the set of events that the module is interested in observing.\n\n        Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.\n\n        Returns:\n            set: The set of event types that this module will handle.\n        \"\"\"\n        if self._watched_events is None:\n            self._watched_events = set(self.watched_events)\n        return self._watched_events\n\n    async def _handle_batch(self):\n        \"\"\"\n        Asynchronously handles a batch of events in the module.\n\n        Args:\n            None\n\n        Returns:\n            bool: True if events were submitted for processing, False otherwise.\n\n        Notes:\n            - The method is wrapped in a task counter to monitor asynchronous operations.\n            - Checks if there are any events in the incoming queue and module is not in an error state.\n            - Invokes '_events_waiting()' to fetch a batch of events.\n            - Calls the module's 'handle_batch()' method to process these events.\n            - If a \"FINISHED\" event is found, invokes 'finish()' method of the module.\n        \"\"\"\n        finish = False\n        async with self._task_counter.count(f\"{self.name}.handle_batch()\") as counter:\n            submitted = False\n            if self.batch_size &lt;= 1:\n                return\n            if self.num_incoming_events &gt; 0:\n                events, finish = await self._events_waiting()\n                if events and not self.errored:\n                    counter.n = len(events)\n                    self.verbose(f\"Handling batch of {len(events):,} events\")\n                    submitted = True\n                    async with self.scan._acatch(f\"{self.name}.handle_batch()\"):\n                        await self.handle_batch(*events)\n                    self.verbose(f\"Finished handling batch of {len(events):,} events\")\n        if finish:\n            context = f\"{self.name}.finish()\"\n            async with self.scan._acatch(context), self._task_counter.count(context):\n                await self.finish()\n        return submitted\n\n    def make_event(self, *args, **kwargs):\n        \"\"\"Create an event for the scan.\n\n        Raises a validation error if the event could not be created, unless raise_error is set to False.\n\n        Args:\n            *args: Positional arguments to be passed to the scan's make_event method.\n            **kwargs: Keyword arguments to be passed to the scan's make_event method.\n            raise_error (bool, optional): Whether to raise a validation error if the event could not be created. 
Defaults to False.\n\n        Examples:\n            &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n            &gt;&gt;&gt; await self.emit_event(new_event)\n\n        Returns:\n            Event or None: The created event, or None if a validation error occurred and raise_error was False.\n\n        Raises:\n            ValidationError: If the event could not be validated and raise_error is True.\n        \"\"\"\n        raise_error = kwargs.pop(\"raise_error\", False)\n        module = kwargs.pop(\"module\", None)\n        if module is None:\n            if (not args) or getattr(args[0], \"module\", None) is None:\n                kwargs[\"module\"] = self\n        try:\n            event = self.scan.make_event(*args, **kwargs)\n        except ValidationError as e:\n            if raise_error:\n                raise\n            self.warning(f\"{e}\")\n            return\n        return event\n\n    async def emit_event(self, *args, **kwargs):\n        \"\"\"Emit an event to the event queue and distribute it to interested modules.\n\n        This is how modules \"return\" data.\n\n        The method first creates an event object by calling `self.make_event()` with the provided arguments.\n        Then, the event is queued for outgoing distribution using `self.queue_outgoing_event()`.\n\n        Args:\n            *args: Positional arguments to be passed to `self.make_event()` for event creation.\n            **kwargs: Keyword arguments to be passed for event creation or configuration of the emit action.\n                ```markdown\n                - on_success_callback: Optional callback function to execute upon successful event emission.\n                - abort_if: Optional condition under which the event emission should be aborted.\n                - quick: Optional flag to indicate whether the event should be processed quickly.\n                ```\n\n        Examples:\n            &gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n\n            &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n            &gt;&gt;&gt; await self.emit_event(new_event)\n\n        Returns:\n            None\n\n        Raises:\n            ValidationError: If the event cannot be validated (handled in `self.make_event()`).\n        \"\"\"\n        event_kwargs = dict(kwargs)\n        emit_kwargs = {}\n        for o in (\"on_success_callback\", \"abort_if\", \"quick\"):\n            v = event_kwargs.pop(o, None)\n            if v is not None:\n                emit_kwargs[o] = v\n        event = self.make_event(*args, **event_kwargs)\n        if event:\n            await self.queue_outgoing_event(event, **emit_kwargs)\n        return event\n\n    async def _events_waiting(self, batch_size=None):\n        \"\"\"\n        Asynchronously fetches events from the incoming_event_queue, up to a specified batch size.\n\n        Args:\n            None\n\n        Returns:\n            tuple: A tuple containing two elements:\n                - events (list): A list of acceptable events from the queue.\n                - finish (bool): A flag indicating if a \"FINISHED\" event is encountered.\n\n        Notes:\n            - The method pulls events from incoming_event_queue using 'get_nowait()'.\n            - Events go through '_event_postcheck()' for validation.\n            - \"FINISHED\" events are handled differently and the finish flag is set to True.\n            - If the queue is empty or the batch size is reached, the loop 
breaks.\n        \"\"\"\n        if batch_size is None:\n            batch_size = self.batch_size\n        events = []\n        finish = False\n        while self.incoming_event_queue:\n            if batch_size != -1 and len(events) &gt; self.batch_size:\n                break\n            try:\n                event = self.incoming_event_queue.get_nowait()\n                self.debug(f\"Got {event} from {getattr(event, 'module', 'unknown_module')}\")\n                acceptable, reason = await self._event_postcheck(event)\n                if acceptable:\n                    if event.type == \"FINISHED\":\n                        finish = True\n                    else:\n                        events.append(event)\n                        self.scan.stats.event_consumed(event, self)\n                elif reason:\n                    self.debug(f\"Not accepting {event} because {reason}\")\n            except asyncio.queues.QueueEmpty:\n                break\n        return events, finish\n\n    @property\n    def num_incoming_events(self):\n        ret = 0\n        if self.incoming_event_queue is not False:\n            ret = self.incoming_event_queue.qsize()\n        return ret\n\n    def start(self):\n        self._tasks = [\n            asyncio.create_task(self._worker(), name=f\"{self.scan.name}.{self.name}._worker()\")\n            for _ in range(self.module_threads)\n        ]\n\n    async def _setup(self):\n        \"\"\"\n        Asynchronously sets up the module by invoking its 'setup()' method.\n\n        This method catches exceptions during setup, sets the module's error state if necessary, and determines the\n        status code based on the result of the setup process.\n\n        Args:\n            None\n\n        Returns:\n            tuple: A tuple containing the module's name, status (True for success, False for hard-fail, None for soft-fail),\n            and an optional status message.\n\n        Raises:\n            Exception: Captured exceptions from the 'setup()' method are logged, but not propagated.\n\n        Notes:\n            - The 'setup()' method can return either a simple boolean status or a tuple of status and message.\n            - A WordlistError exception triggers a soft-fail status.\n            - The debug log will contain setup status information for the module.\n        \"\"\"\n        status_codes = {False: \"hard-fail\", None: \"soft-fail\", True: \"success\"}\n\n        status = False\n        self.debug(f\"Setting up module {self.name}\")\n        try:\n            result = await self.setup()\n            if type(result) == tuple and len(result) == 2:\n                status, msg = result\n            else:\n                status = result\n                msg = status_codes[status]\n            self.debug(f\"Finished setting up module {self.name}\")\n        except Exception as e:\n            self.set_error_state(f\"Unexpected error during module setup: {e}\", critical=True)\n            msg = f\"{e}\"\n            self.trace()\n        return self, status, str(msg)\n\n    async def _worker(self):\n        \"\"\"\n        The core worker loop for the module, responsible for handling events from the incoming event queue.\n\n        This method is a coroutine and is run asynchronously. Multiple instances can run simultaneously based on\n        the 'module_threads' configuration. 
The worker dequeues events from 'incoming_event_queue', performs\n        necessary prechecks, and passes the event to the appropriate handler function.\n\n        Args:\n            None\n\n        Returns:\n            None\n\n        Raises:\n            asyncio.CancelledError: If the worker is cancelled during its operation.\n\n        Notes:\n            - The worker is sensitive to the 'stopping' flag of the scan. It will terminate if this flag is set.\n            - The worker handles backpressure by pausing when the outgoing event queue is full.\n            - Batch processing is supported and is activated when 'batch_size' &gt; 1.\n            - Each event is subject to a post-check via '_event_postcheck()' to decide whether it should be handled.\n            - Special 'FINISHED' events trigger the 'finish()' method of the module.\n        \"\"\"\n        async with self.scan._acatch(context=self._worker, unhandled_is_critical=True):\n            try:\n                while not self.scan.stopping and not self.errored:\n                    # hold the reigns if our outgoing queue is full\n                    if self._qsize &gt; 0 and self.outgoing_event_queue.qsize() &gt;= self._qsize:\n                        await asyncio.sleep(0.1)\n                        continue\n\n                    if self.batch_size &gt; 1:\n                        submitted = await self._handle_batch()\n                        if not submitted:\n                            async with self._event_received:\n                                await self._event_received.wait()\n\n                    else:\n                        try:\n                            if self.incoming_event_queue is not False:\n                                event = await self.incoming_event_queue.get()\n                            else:\n                                self.debug(f\"Event queue is in bad state\")\n                                break\n                        except asyncio.queues.QueueEmpty:\n                            continue\n                        self.debug(f\"Got {event} from {getattr(event, 'module', 'unknown_module')}\")\n                        async with self._task_counter.count(f\"event_postcheck({event})\"):\n                            acceptable, reason = await self._event_postcheck(event)\n                        if acceptable:\n                            if event.type == \"FINISHED\":\n                                context = f\"{self.name}.finish()\"\n                                async with self.scan._acatch(context), self._task_counter.count(context):\n                                    await self.finish()\n                            else:\n                                context = f\"{self.name}.handle_event({event})\"\n                                self.scan.stats.event_consumed(event, self)\n                                self.debug(f\"Handling {event}\")\n                                async with self.scan._acatch(context), self._task_counter.count(context):\n                                    await self.handle_event(event)\n                                self.debug(f\"Finished handling {event}\")\n                        else:\n                            self.debug(f\"Not accepting {event} because {reason}\")\n            except asyncio.CancelledError:\n                # this trace was used for debugging leaked CancelledErrors from inside httpx\n                # self.log.trace(\"Worker cancelled\")\n                raise\n            except BaseException as e:\n             
   if self.helpers.in_exception_chain(e, (KeyboardInterrupt,)):\n                    self.scan.stop()\n                else:\n                    self.error(f\"Critical failure in module {self.name}: {e}\")\n                    self.error(traceback.format_exc())\n        self.log.trace(f\"Worker stopped\")\n\n    @property\n    def max_scope_distance(self):\n        if self.in_scope_only or self.target_only:\n            return 0\n        if self.scope_distance_modifier is None:\n            return 999\n        return max(0, self.scan.scope_search_distance + self.scope_distance_modifier)\n\n    def _event_precheck(self, event):\n        \"\"\"\n        Pre-checks an event to determine if it should be accepted by the module for queuing.\n\n        This method is called when an event is about to be enqueued into the module's incoming event queue.\n        It applies various filters such as special signal event types, module error state, watched event types, and more\n        to decide whether or not the event should be enqueued.\n\n        Args:\n            event (Event): The event object to check.\n\n        Returns:\n            tuple: A tuple (bool, str) where the bool indicates if the event should be accepted, and the str gives the reason.\n\n        Examples:\n            &gt;&gt;&gt; result, reason = self._event_precheck(event)\n            &gt;&gt;&gt; if result:\n            ...     self.incoming_event_queue.put_nowait(event)\n            ... else:\n            ...     self.debug(f\"Not accepting {event} because {reason}\")\n\n        Notes:\n            - The method considers special signal event types like \"FINISHED\".\n            - Checks whether the module is in an error state.\n            - Checks if the event type matches the types this module is interested in (`watched_events`).\n            - Checks for events tagged as 'target' if the module has `target_only` flag set.\n            - Applies specific filtering based on event type and module name.\n        \"\"\"\n\n        # special signal event types\n        if event.type in (\"FINISHED\",):\n            return True, \"its type is FINISHED\"\n        if self.errored:\n            return False, f\"module is in error state\"\n        # exclude non-watched types\n        if not any(t in self.get_watched_events() for t in (\"*\", event.type)):\n            return False, \"its type is not in watched_events\"\n        if self.target_only:\n            if \"target\" not in event.tags:\n                return False, \"it did not meet target_only filter criteria\"\n\n        # exclude certain URLs (e.g. 
javascript):\n        # TODO: revisit this after httpx rework\n        if event.type.startswith(\"URL\") and self.name != \"httpx\" and \"httpx-only\" in event.tags:\n            return False, \"its extension was listed in url_extension_httpx_only\"\n\n        return True, \"precheck succeeded\"\n\n    async def _event_postcheck(self, event):\n        \"\"\"\n        A simple wrapper for dup tracking\n        \"\"\"\n        # special exception for \"FINISHED\" event\n        if event.type in (\"FINISHED\",):\n            return True, \"\"\n        acceptable, reason = await self._event_postcheck_inner(event)\n        if acceptable:\n            # check duplicates\n            is_incoming_duplicate, reason = self.is_incoming_duplicate(event, add=True)\n            if is_incoming_duplicate and not self.accept_dupes:\n                return False, f\"module has already seen it\" + (f\" ({reason})\" if reason else \"\")\n\n        return acceptable, reason\n\n    async def _event_postcheck_inner(self, event):\n        \"\"\"\n        Post-checks an event to determine if it should be accepted by the module for handling.\n\n        This method is called when an event is dequeued from the module's incoming event queue, right before it is actually processed.\n        It applies various filters such as scope, custom filtering logic, and per-host tracking to decide the event's fate.\n\n        Args:\n            event (Event): The event object to check.\n\n        Returns:\n            tuple: A tuple (bool, str) where the bool indicates if the event should be accepted, and the str gives the reason.\n\n        Notes:\n            - Override the `filter_event` method for custom filtering logic.\n            - This method also maintains host-based tracking when the `per_host_only` or similar flags are set.\n            - The method will also update event production stats for output modules.\n        \"\"\"\n        # force-output certain events to the graph\n        if self._is_graph_important(event):\n            return True, \"event is critical to the graph\"\n\n        # check scope distance\n        filter_result, reason = self._scope_distance_check(event)\n        if not filter_result:\n            return filter_result, reason\n\n        # custom filtering\n        async with self.scan._acatch(context=self.filter_event):\n            try:\n                filter_result = await self.filter_event(event)\n            except Exception as e:\n                msg = f\"Unhandled exception in {self.name}.filter_event({event}): {e}\"\n                self.error(msg)\n                return False, msg\n            msg = str(self._custom_filter_criteria_msg)\n            with suppress(ValueError, TypeError):\n                filter_result, reason = filter_result\n                msg += f\": {reason}\"\n            if not filter_result:\n                return False, msg\n\n        self.debug(f\"{event} passed post-check\")\n        return True, \"\"\n\n    def _scope_distance_check(self, event):\n        if self.in_scope_only:\n            if event.scope_distance &gt; 0:\n                return False, \"it did not meet in_scope_only filter criteria\"\n        if self.scope_distance_modifier is not None:\n            if event.scope_distance &lt; 0:\n                return False, f\"its scope_distance ({event.scope_distance}) is invalid.\"\n            elif event.scope_distance &gt; self.max_scope_distance:\n                return (\n                    False,\n                    f\"its scope_distance 
({event.scope_distance}) exceeds the maximum allowed by the scan ({self.scan.scope_search_distance}) + the module ({self.scope_distance_modifier}) == {self.max_scope_distance}\",\n                )\n        return True, \"\"\n\n    async def _cleanup(self):\n        if not self._cleanedup:\n            self._cleanedup = True\n            for callback in [self.cleanup] + self.cleanup_callbacks:\n                context = f\"{self.name}.cleanup()\"\n                if callable(callback):\n                    async with self.scan._acatch(context), self._task_counter.count(context):\n                        await self.helpers.execute_sync_or_async(callback)\n\n    async def queue_event(self, event):\n        \"\"\"\n        Asynchronously queues an incoming event to the module's event queue for further processing.\n\n        The function performs an initial check to see if the event is acceptable for queuing.\n        If the event passes the check, it is put into the `incoming_event_queue`.\n\n        Args:\n            event: The event object to be queued.\n\n        Returns:\n            None: The function doesn't return anything but modifies the state of the `incoming_event_queue`.\n\n        Examples:\n            &gt;&gt;&gt; await self.queue_event(some_event)\n\n        Raises:\n            AttributeError: If the module is not in an acceptable state to queue incoming events.\n        \"\"\"\n        async with self._task_counter.count(\"queue_event()\", _log=False):\n            if self.incoming_event_queue is False:\n                self.debug(f\"Not in an acceptable state to queue incoming event\")\n                return\n            acceptable, reason = self._event_precheck(event)\n            if not acceptable:\n                if reason and reason != \"its type is not in watched_events\":\n                    self.debug(f\"Not queueing {event} because {reason}\")\n                return\n            else:\n                self.debug(f\"Queueing {event} because {reason}\")\n            try:\n                self.incoming_event_queue.put_nowait(event)\n                async with self._event_received:\n                    self._event_received.notify()\n                if event.type != \"FINISHED\":\n                    self.scan._new_activity = True\n            except AttributeError:\n                self.debug(f\"Not in an acceptable state to queue incoming event\")\n\n    async def queue_outgoing_event(self, event, **kwargs):\n        \"\"\"\n        Queues an outgoing event to the module's outgoing event queue for further processing.\n\n        The function attempts to put the event into the `outgoing_event_queue` immediately.\n        If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.\n\n        Args:\n            event: The event object to be queued.\n            **kwargs: Additional keyword arguments to be associated with the event.\n\n        Returns:\n            None: The function doesn't return anything but modifies the state of the `outgoing_event_queue`.\n\n        Examples:\n            &gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n\n        Raises:\n            AttributeError: If the module is not in an acceptable state to queue outgoing events.\n        \"\"\"\n        try:\n            await self.outgoing_event_queue.put((event, kwargs))\n        except AttributeError:\n            self.debug(f\"Not in an acceptable state to queue outgoing 
event\")\n\n    def set_error_state(self, message=None, clear_outgoing_queue=False, critical=False):\n        \"\"\"\n        Puts the module into an errored state where it cannot accept new events. Optionally logs a warning message.\n\n        The function sets the module's `errored` attribute to True and logs a warning with the optional message.\n        It also clears the incoming event queue to prevent further processing and updates its status to False.\n\n        Args:\n            message (str, optional): Additional message to be logged along with the warning.\n\n        Returns:\n            None: The function doesn't return anything but updates the `errored` state and clears the incoming event queue.\n\n        Examples:\n            &gt;&gt;&gt; self.set_error_state()\n            &gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n\n        Notes:\n            - The function sets `self._incoming_event_queue` to False to prevent its further use.\n            - If the module was already in an errored state, the function will not reset the error state or the queue.\n        \"\"\"\n        if not self.errored:\n            log_msg = \"Setting error state\"\n            if message is not None:\n                log_msg += f\": {message}\"\n            if critical:\n                log_fn = self.error\n            else:\n                log_fn = self.warning\n            log_fn(log_msg)\n            self.errored = True\n            # clear incoming queue\n            if self.incoming_event_queue is not False:\n                self.debug(f\"Emptying event_queue\")\n                with suppress(asyncio.queues.QueueEmpty):\n                    while 1:\n                        self.incoming_event_queue.get_nowait()\n                # set queue to None to prevent its use\n                # if there are leftover objects in the queue, the scan will hang.\n                self._incoming_event_queue = False\n\n            if clear_outgoing_queue:\n                with suppress(asyncio.queues.QueueEmpty):\n                    while 1:\n                        self.outgoing_event_queue.get_nowait()\n\n    def is_incoming_duplicate(self, event, add=False):\n        if event.type in (\"FINISHED\",):\n            return False, \"\"\n        reason = \"\"\n        try:\n            event_hash = self._incoming_dedup_hash(event)\n        except Exception as e:\n            msg = f\"Unhandled exception in {self.name}._incoming_dedup_hash({event}): {e}\"\n            self.error(msg)\n            return True, msg\n        with suppress(TypeError, ValueError):\n            event_hash, reason = event_hash\n        is_dup = event_hash in self._incoming_dup_tracker\n        if add:\n            self._incoming_dup_tracker.add(event_hash)\n        return is_dup, reason\n\n    def _incoming_dedup_hash(self, event):\n        \"\"\"\n        Determines the criteria for what is considered to be a duplicate event if `accept_dupes` is False.\n        \"\"\"\n        if self.per_host_only:\n            return self.get_per_host_hash(event), \"per_host_only=True\"\n        if self.per_hostport_only:\n            return self.get_per_hostport_hash(event), \"per_hostport_only=True\"\n        elif self.per_domain_only:\n            return self.get_per_domain_hash(event), \"per_domain_only=True\"\n        return hash(event), \"\"\n\n    def _outgoing_dedup_hash(self, event):\n        \"\"\"\n        Determines the criteria for what is considered to be a duplicate event if `suppress_dupes` is True.\n\n   
     We take into account the `internal` attribute we don't want an internal event (which isn't distributed to output modules)\n        to inadvertently suppress a non-internal event.\n        \"\"\"\n        return hash((event, self.name, event.internal, event.always_emit))\n\n    def get_per_host_hash(self, event):\n        \"\"\"\n        Computes a per-host hash value for a given event. This method may be optionally overridden in subclasses.\n\n        The function uses the event's `host` to create a string to be hashed.\n\n        Args:\n            event (Event): The event object containing host information.\n\n        Returns:\n            int: The hash value computed for the host.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n            &gt;&gt;&gt; self.get_per_host_hash(event)\n        \"\"\"\n        return hash(event.host)\n\n    def get_per_hostport_hash(self, event):\n        \"\"\"\n        Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.\n\n        The function uses the event's `host`, `port`, and `scheme` (for URLs) to create a string to be hashed.\n        The hash value is used for distinguishing events related to the same host.\n\n        Args:\n            event (Event): The event object containing host, port, or parsed URL information.\n\n        Returns:\n            int: The hash value computed for the host.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n            &gt;&gt;&gt; self.get_per_hostport_hash(event)\n        \"\"\"\n        parsed = getattr(event, \"parsed_url\", None)\n        if parsed is None:\n            to_hash = self.helpers.make_netloc(event.host, event.port)\n        else:\n            to_hash = f\"{parsed.scheme}://{parsed.netloc}/\"\n        return hash(to_hash)\n\n    def get_per_domain_hash(self, event):\n        \"\"\"\n        Computes a per-domain hash value for a given event. 
This method may be optionally overridden in subclasses.\n\n        Events with the same root domain will receive the same hash value.\n\n        Args:\n            event (Event): The event object containing host, port, or parsed URL information.\n\n        Returns:\n            int: The hash value computed for the domain.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n            &gt;&gt;&gt; self.get_per_domain_hash(event)\n        \"\"\"\n        _, domain = self.helpers.split_domain(event.host)\n        return hash(domain)\n\n    @property\n    def name(self):\n        return str(self._name)\n\n    @property\n    def helpers(self):\n        return self.scan.helpers\n\n    @property\n    def status(self):\n        \"\"\"\n        Provides the current status of the module as a dictionary.\n\n        The dictionary contains the following keys:\n            - 'events': A sub-dictionary with 'incoming' and 'outgoing' keys, representing the number of events in the respective queues.\n            - 'tasks': The current value of the task counter.\n            - 'errored': A boolean value indicating if the module is in an error state.\n            - 'running': A boolean value indicating if the module is currently processing data.\n\n        Returns:\n            dict: A dictionary containing the current status of the module.\n\n        Examples:\n            &gt;&gt;&gt; self.status\n            {'events': {'incoming': 5, 'outgoing': 2}, 'tasks': 3, 'errored': False, 'running': True}\n        \"\"\"\n        status = {\n            \"events\": {\"incoming\": self.num_incoming_events, \"outgoing\": self.outgoing_event_queue.qsize()},\n            \"tasks\": self._task_counter.value,\n            \"errored\": self.errored,\n        }\n        status[\"running\"] = self.running\n        return status\n\n    @property\n    def running(self):\n        \"\"\"Property indicating whether the module is currently processing data.\n\n        This property checks if the task counter (`self._task_counter.value`) is greater than zero,\n        indicating that there are ongoing tasks in the module.\n\n        Returns:\n            bool: True if the module is currently processing data, False otherwise.\n        \"\"\"\n        return self._task_counter.value &gt; 0\n\n    @property\n    def finished(self):\n        \"\"\"Property indicating whether the module has finished processing.\n\n        This property checks three conditions to determine if the module is finished:\n        1. The module is not currently running (`self.running` is False).\n        2. The number of incoming events in the queue is zero or less (`self.num_incoming_events &lt;= 0`).\n        3. 
The number of outgoing events in the queue is zero or less (`self.outgoing_event_queue.qsize() &lt;= 0`).\n\n        Returns:\n            bool: True if the module has finished processing, False otherwise.\n        \"\"\"\n        return not self.running and self.num_incoming_events &lt;= 0 and self.outgoing_event_queue.qsize() &lt;= 0\n\n    async def run_process(self, *args, **kwargs):\n        kwargs[\"_proc_tracker\"] = self._proc_tracker\n        return await self.helpers.run(*args, **kwargs)\n\n    async def run_process_live(self, *args, **kwargs):\n        kwargs[\"_proc_tracker\"] = self._proc_tracker\n        async for line in self.helpers.run_live(*args, **kwargs):\n            yield line\n\n    def prepare_api_request(self, url, kwargs):\n        \"\"\"\n        Prepare an API request by adding the necessary authentication - header, bearer token, etc.\n        \"\"\"\n        if self.api_key:\n            url = url.format(api_key=self.api_key)\n            if not \"headers\" in kwargs:\n                kwargs[\"headers\"] = {}\n            kwargs[\"headers\"][\"Authorization\"] = f\"Bearer {self.api_key}\"\n        return url, kwargs\n\n    async def api_request(self, *args, **kwargs):\n        \"\"\"\n        Makes an HTTP request while automatically:\n            - avoiding rate limits (sleep/retry)\n            - cycling API keys\n            - cancelling after too many failed attempts\n        \"\"\"\n        url = args[0] if args else kwargs.pop(\"url\", \"\")\n\n        # loop until we have a successful request\n        for _ in range(self.api_retries):\n            if not \"headers\" in kwargs:\n                kwargs[\"headers\"] = {}\n            new_url, kwargs = self.prepare_api_request(url, kwargs)\n            kwargs[\"url\"] = new_url\n\n            r = await self.helpers.request(**kwargs)\n            success = False if r is None else r.is_success\n\n            if success:\n                self._api_request_failures = 0\n            else:\n                status_code = getattr(r, \"status_code\", 0)\n                response_text = getattr(r, \"text\", \"\")\n                self.trace(f\"API response to {url} failed with status code {status_code}: {response_text}\")\n                self._api_request_failures += 1\n                if self._api_request_failures &gt;= self.api_failure_abort_threshold:\n                    self.set_error_state(\n                        f\"Setting error state due to {self._api_request_failures:,} failed HTTP requests\"\n                    )\n                else:\n                    # sleep for a bit if we're being rate limited\n                    if status_code == 429:\n                        self.verbose(\n                            f\"Sleeping for {self._429_sleep_interval:,} seconds due to rate limit (HTTP status: 429)\"\n                        )\n                        await asyncio.sleep(self._429_sleep_interval)\n                    elif self._api_keys:\n                        # if request failed, cycle API keys and try again\n                        self.cycle_api_key()\n                    continue\n            break\n\n        return r\n\n    async def api_page_iter(self, url, page_size=100, json=True, next_key=None, **requests_kwargs):\n        \"\"\"\n        An asynchronous generator function for iterating through paginated API data.\n\n        This function continuously makes requests to a specified API URL, incrementing the page number\n        or applying a custom pagination function, and yields the received 
data one page at a time.\n        It is well-suited for APIs that provide paginated results.\n\n        Args:\n            url (str): The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.\n            page_size (int, optional): The number of items per page. Defaults to 100.\n            json (bool, optional): If True, attempts to deserialize the response content to a JSON object. Defaults to True.\n            next_key (callable, optional): A function that takes the last page's data and returns the URL for the next page. Defaults to None.\n            **requests_kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n        Yields:\n            dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.\n\n        Note:\n            The loop will continue indefinitely unless manually stopped. Make sure to break out of the loop once the last page has been received.\n\n        Examples:\n            &gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n            &gt;&gt;&gt; try:\n            &gt;&gt;&gt;     async for page in agen:\n            &gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n            &gt;&gt;&gt;         self.hugesuccess(subdomains)\n            &gt;&gt;&gt;         if not subdomains:\n            &gt;&gt;&gt;             break\n            &gt;&gt;&gt; finally:\n            &gt;&gt;&gt;     agen.aclose()\n        \"\"\"\n        page = 1\n        offset = 0\n        result = None\n        while 1:\n            if result and callable(next_key):\n                try:\n                    new_url = next_key(result)\n                except Exception as e:\n                    self.debug(f\"Failed to extract next page of results from {url}: {e}\")\n                    self.debug(traceback.format_exc())\n            else:\n                new_url = self.helpers.safe_format(url, page=page, page_size=page_size, offset=offset)\n            result = await self.api_request(new_url, **requests_kwargs)\n            if result is None:\n                self.verbose(f\"api_page_iter() got no response for {url}\")\n                break\n            try:\n                if json:\n                    result = result.json()\n                yield result\n            except Exception:\n                self.warning(f'Error in api_page_iter() for url: \"{new_url}\"')\n                self.trace(traceback.format_exc())\n                break\n            finally:\n                offset += page_size\n                page += 1\n\n    @property\n    def preset(self):\n        return self.scan.preset\n\n    @property\n    def config(self):\n        \"\"\"Property that provides easy access to the module's configuration in the scan's config.\n\n        This property serves as a shortcut to retrieve the module-specific configuration from\n        `self.scan.config`. 
If no configuration is found for this module, an empty dictionary is returned.\n\n        Returns:\n            dict: The configuration dictionary specific to this module.\n        \"\"\"\n        config = self.scan.config.get(\"modules\", {}).get(self.name, {})\n        if config is None:\n            config = {}\n        return config\n\n    @property\n    def incoming_event_queue(self):\n        if self._incoming_event_queue is None:\n            if self._shuffle_incoming_queue:\n                self._incoming_event_queue = ShuffleQueue()\n            else:\n                self._incoming_event_queue = asyncio.Queue()\n        return self._incoming_event_queue\n\n    @property\n    def outgoing_event_queue(self):\n        if self._outgoing_event_queue is None:\n            self._outgoing_event_queue = ShuffleQueue(self._qsize)\n        return self._outgoing_event_queue\n\n    @property\n    def priority(self):\n        \"\"\"\n        Gets the priority level of the module as an integer.\n\n        The priority level is constrained to be between 1 and 5, inclusive.\n        A lower value indicates a higher priority.\n\n        Returns:\n            int: The priority level of the module, constrained between 1 and 5.\n\n        Examples:\n            &gt;&gt;&gt; self.priority\n            3\n        \"\"\"\n        return int(max(1, min(5, self._priority)))\n\n    @property\n    def auth_required(self):\n        return self.meta.get(\"auth_required\", False)\n\n    @property\n    def http_timeout(self):\n        \"\"\"\n        Convenience shortcut to `http_timeout` in the config\n        \"\"\"\n        return self.scan.web_config.get(\"http_timeout\", 10)\n\n    @property\n    def log(self):\n        if getattr(self, \"_log\", None) is None:\n            self._log = logging.getLogger(f\"bbot.modules.{self.name}\")\n        return self._log\n\n    @property\n    def memory_usage(self):\n        \"\"\"Property that calculates the current memory usage of the module in bytes.\n\n        This property uses the `get_size` function to estimate the memory consumption\n        of the module object. The depth of the object graph traversal is limited to 3 levels\n        to avoid performance issues. Commonly shared objects like `self.scan`, `self.helpers`,\n        are excluded from the calculation to prevent double-counting.\n\n        Returns:\n            int: The estimated memory usage of the module in bytes.\n        \"\"\"\n        seen = {self.scan, self.helpers, self.log}  # noqa\n        return get_size(self, max_depth=3, seen=seen)\n\n    def __str__(self):\n        return self.name\n\n    def log_table(self, *args, **kwargs):\n        \"\"\"Logs a table to the console and optionally writes it to a file.\n\n        This function generates a table using `self.helpers.make_table`, then logs each line\n        of the table as an info-level log. If a table_name is provided, it also writes the table to a file.\n\n        Args:\n            *args: Variable length argument list to be passed to `self.helpers.make_table`.\n            **kwargs: Arbitrary keyword arguments. 
If 'table_name' is specified, the table will be written to a file.\n\n        Returns:\n            str: The generated table as a string.\n\n        Examples:\n            &gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n        \"\"\"\n        table_name = kwargs.pop(\"table_name\", None)\n        max_log_entries = kwargs.pop(\"max_log_entries\", None)\n        table = self.helpers.make_table(*args, **kwargs)\n        lines_logged = 0\n        for line in table.splitlines():\n            if max_log_entries is not None and lines_logged &gt; max_log_entries:\n                break\n            self.info(line)\n            lines_logged += 1\n        if table_name is not None:\n            date = self.helpers.make_date()\n            filename = self.scan.home / f\"{self.helpers.tagify(table_name)}-table-{date}.txt\"\n            with open(filename, \"w\") as f:\n                f.write(table)\n            self.verbose(f\"Wrote {table_name} to {filename}\")\n        return table\n\n    def _is_graph_important(self, event):\n        return self.preserve_graph and getattr(event, \"_graph_important\", False) and not getattr(event, \"_omit\", False)\n\n    @property\n    def preserve_graph(self):\n        preserve_graph = self.config.get(\"preserve_graph\", None)\n        if preserve_graph is None:\n            preserve_graph = self._preserve_graph\n        return preserve_graph\n\n    def debug(self, *args, trace=False, **kwargs):\n        \"\"\"Logs debug messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.debug(\"This is a debug message\")\n            &gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n        \"\"\"\n        self.log.debug(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def verbose(self, *args, trace=False, **kwargs):\n        \"\"\"Logs messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n            &gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n        \"\"\"\n        self.log.verbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeverbose(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n            &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugeverbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def info(self, *args, trace=False, **kwargs):\n        \"\"\"Logs informational messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.info(\"This is an informational message\")\n            &gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n        \"\"\"\n        self.log.info(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeinfo(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n            &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugeinfo(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def success(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a success message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.success(\"Operation completed successfully\")\n            &gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n        \"\"\"\n        self.log.success(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugesuccess(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n            &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugesuccess(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def warning(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a warning message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.warning(\"This is a warning message\")\n            &gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n        \"\"\"\n        self.log.warning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugewarning(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n            &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n        \"\"\"\n        self.log.hugewarning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def error(self, *args, trace=True, **kwargs):\n        \"\"\"Logs an error message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.error(\"This is an error message\")\n            &gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n        \"\"\"\n        self.log.error(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def trace(self, msg=None):\n        \"\"\"Logs the stack trace of the most recently caught exception.\n\n        This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. 
It is typically used for debugging purposes.\n\n        Anything logged using this method will always be written to the scan's `debug.log`, even if debugging is not enabled.\n\n        Examples:\n            &gt;&gt;&gt; try:\n            &gt;&gt;&gt;     1 / 0\n            &gt;&gt;&gt; except ZeroDivisionError:\n            &gt;&gt;&gt;     self.trace()\n        \"\"\"\n        if msg is None:\n            e_type, e_val, e_traceback = exc_info()\n            if e_type is not None:\n                self.log.trace(traceback.format_exc())\n        else:\n            self.log.trace(msg)\n\n    def critical(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.critical(\"This is a critical message\")\n            &gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n        \"\"\"\n        self.log.critical(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.auth_secret","title":"auth_secret  <code>property</code>","text":"<pre><code>auth_secret\n</code></pre> <p>Indicates if the module is properly configured for authentication.</p> <p>This read-only property should be used to check whether all necessary attributes (e.g., API keys, tokens, etc.) are configured to perform authenticated requests in the module. Commonly used in setup or initialization steps.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module is properly configured for authentication, otherwise False.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.config","title":"config  <code>property</code>","text":"<pre><code>config\n</code></pre> <p>Property that provides easy access to the module's configuration in the scan's config.</p> <p>This property serves as a shortcut to retrieve the module-specific configuration from <code>self.scan.config</code>. If no configuration is found for this module, an empty dictionary is returned.</p> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>The configuration dictionary specific to this module.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.finished","title":"finished  <code>property</code>","text":"<pre><code>finished\n</code></pre> <p>Property indicating whether the module has finished processing.</p> <p>This property checks three conditions to determine if the module is finished: 1. The module is not currently running (<code>self.running</code> is False). 2. The number of incoming events in the queue is zero or less (<code>self.num_incoming_events &lt;= 0</code>). 3. 
The number of outgoing events in the queue is zero or less (<code>self.outgoing_event_queue.qsize() &lt;= 0</code>).</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module has finished processing, False otherwise.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.http_timeout","title":"http_timeout  <code>property</code>","text":"<pre><code>http_timeout\n</code></pre> <p>Convenience shortcut to <code>http_timeout</code> in the config</p>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.memory_usage","title":"memory_usage  <code>property</code>","text":"<pre><code>memory_usage\n</code></pre> <p>Property that calculates the current memory usage of the module in bytes.</p> <p>This property uses the <code>get_size</code> function to estimate the memory consumption of the module object. The depth of the object graph traversal is limited to 3 levels to avoid performance issues. Commonly shared objects like <code>self.scan</code>, <code>self.helpers</code>, are excluded from the calculation to prevent double-counting.</p> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The estimated memory usage of the module in bytes.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.priority","title":"priority  <code>property</code>","text":"<pre><code>priority\n</code></pre> <p>Gets the priority level of the module as an integer.</p> <p>The priority level is constrained to be between 1 and 5, inclusive. A lower value indicates a higher priority.</p> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The priority level of the module, constrained between 1 and 5.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.priority\n3\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.running","title":"running  <code>property</code>","text":"<pre><code>running\n</code></pre> <p>Property indicating whether the module is currently processing data.</p> <p>This property checks if the task counter (<code>self._task_counter.value</code>) is greater than zero, indicating that there are ongoing tasks in the module.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module is currently processing data, False otherwise.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.status","title":"status  <code>property</code>","text":"<pre><code>status\n</code></pre> <p>Provides the current status of the module as a dictionary.</p> The dictionary contains the following keys <ul> <li>'events': A sub-dictionary with 'incoming' and 'outgoing' keys, representing the number of events in the respective queues.</li> <li>'tasks': The current value of the task counter.</li> <li>'errored': A boolean value indicating if the module is in an error state.</li> <li>'running': A boolean value indicating if the module is currently processing data.</li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary containing the current status of the module.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.status\n{'events': {'incoming': 5, 'outgoing': 2}, 'tasks': 3, 'errored': False, 'running': True}\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.__init__","title":"__init__","text":"<pre><code>__init__(scan)\n</code></pre> <p>Initializes a module instance.</p> <p>Parameters:</p> <ul> <li> <code>scan</code>           \u2013            <p>The 
BBOT scan object associated with this module instance.</p> </li> </ul> <p>Attributes:</p> <ul> <li> <code>scan</code>           \u2013            <p>The scan object associated with this module.</p> </li> <li> <code>errored</code>               (<code>bool</code>)           \u2013            <p>Whether the module has errored out. Default is False.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def __init__(self, scan):\n    \"\"\"Initializes a module instance.\n\n    Args:\n        scan: The BBOT scan object associated with this module instance.\n\n    Attributes:\n        scan: The scan object associated with this module.\n\n        errored (bool): Whether the module has errored out. Default is False.\n    \"\"\"\n    self.scan = scan\n    self.errored = False\n    self._log = None\n    self._incoming_event_queue = None\n    self._outgoing_event_queue = None\n    # track incoming events to prevent unwanted duplicates\n    self._incoming_dup_tracker = set()\n    # tracks which subprocesses are running under this module\n    self._proc_tracker = set()\n    # seconds since we've submitted a batch\n    self._last_submitted_batch = None\n    # additional callbacks to be executed alongside self.cleanup()\n    self.cleanup_callbacks = []\n    self._cleanedup = False\n    self._watched_events = None\n\n    self._task_counter = TaskCounter()\n\n    # string constant\n    self._custom_filter_criteria_msg = \"it did not meet custom filter criteria\"\n\n    self._api_keys = []\n\n    # track number of failures (for .api_request())\n    self._api_request_failures = 0\n\n    self._tasks = []\n    self._event_received = asyncio.Condition()\n    self._event_queued = asyncio.Condition()\n\n    # used for optional \"per host\" tracking\n    self._per_host_tracker = set()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.api_page_iter","title":"api_page_iter  <code>async</code>","text":"<pre><code>api_page_iter(url, page_size=100, json=True, next_key=None, **requests_kwargs)\n</code></pre> <p>An asynchronous generator function for iterating through paginated API data.</p> <p>This function continuously makes requests to a specified API URL, incrementing the page number or applying a custom pagination function, and yields the received data one page at a time. It is well-suited for APIs that provide paginated results.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.</p> </li> <li> <code>page_size</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>The number of items per page. Defaults to 100.</p> </li> <li> <code>json</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, attempts to deserialize the response content to a JSON object. Defaults to True.</p> </li> <li> <code>next_key</code>               (<code>callable</code>, default:                   <code>None</code> )           \u2013            <p>A function that takes the last page's data and returns the URL for the next page. 
Defaults to None.</p> </li> <li> <code>**requests_kwargs</code>           \u2013            <p>Arbitrary keyword arguments that will be forwarded to the HTTP request function.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.</p> </li> </ul> Note <p>The loop will continue indefinitely unless manually stopped. Make sure to break out of the loop once the last page has been received.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n&gt;&gt;&gt; try:\n&gt;&gt;&gt;     async for page in agen:\n&gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n&gt;&gt;&gt;         self.hugesuccess(subdomains)\n&gt;&gt;&gt;         if not subdomains:\n&gt;&gt;&gt;             break\n&gt;&gt;&gt; finally:\n&gt;&gt;&gt;     agen.aclose()\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>async def api_page_iter(self, url, page_size=100, json=True, next_key=None, **requests_kwargs):\n    \"\"\"\n    An asynchronous generator function for iterating through paginated API data.\n\n    This function continuously makes requests to a specified API URL, incrementing the page number\n    or applying a custom pagination function, and yields the received data one page at a time.\n    It is well-suited for APIs that provide paginated results.\n\n    Args:\n        url (str): The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.\n        page_size (int, optional): The number of items per page. Defaults to 100.\n        json (bool, optional): If True, attempts to deserialize the response content to a JSON object. Defaults to True.\n        next_key (callable, optional): A function that takes the last page's data and returns the URL for the next page. Defaults to None.\n        **requests_kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n    Yields:\n        dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.\n\n    Note:\n        The loop will continue indefinitely unless manually stopped. 
Make sure to break out of the loop once the last page has been received.\n\n    Examples:\n        &gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n        &gt;&gt;&gt; try:\n        &gt;&gt;&gt;     async for page in agen:\n        &gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n        &gt;&gt;&gt;         self.hugesuccess(subdomains)\n        &gt;&gt;&gt;         if not subdomains:\n        &gt;&gt;&gt;             break\n        &gt;&gt;&gt; finally:\n        &gt;&gt;&gt;     agen.aclose()\n    \"\"\"\n    page = 1\n    offset = 0\n    result = None\n    while 1:\n        if result and callable(next_key):\n            try:\n                new_url = next_key(result)\n            except Exception as e:\n                self.debug(f\"Failed to extract next page of results from {url}: {e}\")\n                self.debug(traceback.format_exc())\n        else:\n            new_url = self.helpers.safe_format(url, page=page, page_size=page_size, offset=offset)\n        result = await self.api_request(new_url, **requests_kwargs)\n        if result is None:\n            self.verbose(f\"api_page_iter() got no response for {url}\")\n            break\n        try:\n            if json:\n                result = result.json()\n            yield result\n        except Exception:\n            self.warning(f'Error in api_page_iter() for url: \"{new_url}\"')\n            self.trace(traceback.format_exc())\n            break\n        finally:\n            offset += page_size\n            page += 1\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.api_request","title":"api_request  <code>async</code>","text":"<pre><code>api_request(*args, **kwargs)\n</code></pre> Makes an HTTP request while automatically <ul> <li>avoiding rate limits (sleep/retry)</li> <li>cycling API keys</li> <li>cancelling after too many failed attempts</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def api_request(self, *args, **kwargs):\n    \"\"\"\n    Makes an HTTP request while automatically:\n        - avoiding rate limits (sleep/retry)\n        - cycling API keys\n        - cancelling after too many failed attempts\n    \"\"\"\n    url = args[0] if args else kwargs.pop(\"url\", \"\")\n\n    # loop until we have a successful request\n    for _ in range(self.api_retries):\n        if not \"headers\" in kwargs:\n            kwargs[\"headers\"] = {}\n        new_url, kwargs = self.prepare_api_request(url, kwargs)\n        kwargs[\"url\"] = new_url\n\n        r = await self.helpers.request(**kwargs)\n        success = False if r is None else r.is_success\n\n        if success:\n            self._api_request_failures = 0\n        else:\n            status_code = getattr(r, \"status_code\", 0)\n            response_text = getattr(r, \"text\", \"\")\n            self.trace(f\"API response to {url} failed with status code {status_code}: {response_text}\")\n            self._api_request_failures += 1\n            if self._api_request_failures &gt;= self.api_failure_abort_threshold:\n                self.set_error_state(\n                    f\"Setting error state due to {self._api_request_failures:,} failed HTTP requests\"\n                )\n            else:\n                # sleep for a bit if we're being rate limited\n                if status_code == 429:\n                    self.verbose(\n                        f\"Sleeping for {self._429_sleep_interval:,} seconds due to rate limit (HTTP status: 429)\"\n          
          )\n                    await asyncio.sleep(self._429_sleep_interval)\n                elif self._api_keys:\n                    # if request failed, cycle API keys and try again\n                    self.cycle_api_key()\n                continue\n        break\n\n    return r\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.cleanup","title":"cleanup  <code>async</code>","text":"<pre><code>cleanup()\n</code></pre> <p>Asynchronously performs final cleanup operations after the scan is complete.</p> <p>This method can be overridden to implement custom cleanup logic. It is called only once per scan and may not raise events.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>This method is called only once per scan and may not raise events.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def cleanup(self):\n    \"\"\"Asynchronously performs final cleanup operations after the scan is complete.\n\n    This method can be overridden to implement custom cleanup logic. It is called only once per scan and may not raise events.\n\n    Returns:\n        None\n\n    Note:\n        This method is called only once per scan and may not raise events.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.critical","title":"critical","text":"<pre><code>critical(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.critical(\"This is a critical message\")\n&gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def critical(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.critical(\"This is a critical message\")\n        &gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n    \"\"\"\n    self.log.critical(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.debug","title":"debug","text":"<pre><code>debug(*args, trace=False, **kwargs)\n</code></pre> <p>Logs debug messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.debug(\"This is a debug message\")\n&gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def debug(self, *args, trace=False, **kwargs):\n    \"\"\"Logs debug messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.debug(\"This is a debug message\")\n        &gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n    \"\"\"\n    self.log.debug(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.emit_event","title":"emit_event  <code>async</code>","text":"<pre><code>emit_event(*args, **kwargs)\n</code></pre> <p>Emit an event to the event queue and distribute it to interested modules.</p> <p>This is how modules \"return\" data.</p> <p>The method first creates an event object by calling <code>self.make_event()</code> with the provided arguments. Then, the event is queued for outgoing distribution using <code>self.queue_outgoing_event()</code>.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to <code>self.make_event()</code> for event creation.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to be passed for event creation or configuration of the emit action. 
<pre><code>- on_success_callback: Optional callback function to execute upon successful event emission.\n- abort_if: Optional condition under which the event emission should be aborted.\n- quick: Optional flag to indicate whether the event should be processed quickly.\n</code></pre></p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n</code></pre> <pre><code>&gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n&gt;&gt;&gt; await self.emit_event(new_event)\n</code></pre> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If the event cannot be validated (handled in <code>self.make_event()</code>).</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def emit_event(self, *args, **kwargs):\n    \"\"\"Emit an event to the event queue and distribute it to interested modules.\n\n    This is how modules \"return\" data.\n\n    The method first creates an event object by calling `self.make_event()` with the provided arguments.\n    Then, the event is queued for outgoing distribution using `self.queue_outgoing_event()`.\n\n    Args:\n        *args: Positional arguments to be passed to `self.make_event()` for event creation.\n        **kwargs: Keyword arguments to be passed for event creation or configuration of the emit action.\n            ```markdown\n            - on_success_callback: Optional callback function to execute upon successful event emission.\n            - abort_if: Optional condition under which the event emission should be aborted.\n            - quick: Optional flag to indicate whether the event should be processed quickly.\n            ```\n\n    Examples:\n        &gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n\n        &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n        &gt;&gt;&gt; await self.emit_event(new_event)\n\n    Returns:\n        None\n\n    Raises:\n        ValidationError: If the event cannot be validated (handled in `self.make_event()`).\n    \"\"\"\n    event_kwargs = dict(kwargs)\n    emit_kwargs = {}\n    for o in (\"on_success_callback\", \"abort_if\", \"quick\"):\n        v = event_kwargs.pop(o, None)\n        if v is not None:\n            emit_kwargs[o] = v\n    event = self.make_event(*args, **event_kwargs)\n    if event:\n        await self.queue_outgoing_event(event, **emit_kwargs)\n    return event\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.error","title":"error","text":"<pre><code>error(*args, trace=True, **kwargs)\n</code></pre> <p>Logs an error message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.error(\"This is an error message\")\n&gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def error(self, *args, trace=True, **kwargs):\n    \"\"\"Logs an error message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.error(\"This is an error message\")\n        &gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n    \"\"\"\n    self.log.error(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.filter_event","title":"filter_event  <code>async</code>","text":"<pre><code>filter_event(event)\n</code></pre> <p>Asynchronously filters incoming events based on custom criteria.</p> <p>Override this method for more granular control over which events are accepted by your module. This method is called automatically before <code>handle_event()</code> for each incoming event that matches any in <code>watched_events</code>.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The incoming Event object to be filtered.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns <code>(True, None)</code> to indicate acceptance without reason.</p> </li> </ul> Note <p>This method should be overridden if the module requires custom logic for event filtering.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def filter_event(self, event):\n    \"\"\"Asynchronously filters incoming events based on custom criteria.\n\n    Override this method for more granular control over which events are accepted by your module. This method is called automatically before `handle_event()` for each incoming event that matches any in `watched_events`.\n\n    Args:\n        event (Event): The incoming Event object to be filtered.\n\n    Returns:\n        tuple: A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns `(True, None)` to indicate acceptance without reason.\n\n    Note:\n        This method should be overridden if the module requires custom logic for event filtering.\n    \"\"\"\n    return True\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.finish","title":"finish  <code>async</code>","text":"<pre><code>finish()\n</code></pre> <p>Asynchronously performs final tasks as the scan nears completion.</p> <p>This method can be overridden to execute any necessary finalization logic. 
For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def finish(self):\n    \"\"\"Asynchronously performs final tasks as the scan nears completion.\n\n    This method can be overridden to execute any necessary finalization logic. For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.\n\n    Returns:\n        None\n\n    Warnings:\n        This method may be called multiple times since it can raise events, which may re-trigger the \"finish\" phase of the scan. Optional to override.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_domain_hash","title":"get_per_domain_hash","text":"<pre><code>get_per_domain_hash(event)\n</code></pre> <p>Computes a per-domain hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>Events with the same root domain will receive the same hash value.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host, port, or parsed URL information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the domain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n&gt;&gt;&gt; self.get_per_domain_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_domain_hash(self, event):\n    \"\"\"\n    Computes a per-domain hash value for a given event. This method may be optionally overridden in subclasses.\n\n    Events with the same root domain will receive the same hash value.\n\n    Args:\n        event (Event): The event object containing host, port, or parsed URL information.\n\n    Returns:\n        int: The hash value computed for the domain.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n        &gt;&gt;&gt; self.get_per_domain_hash(event)\n    \"\"\"\n    _, domain = self.helpers.split_domain(event.host)\n    return hash(domain)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_host_hash","title":"get_per_host_hash","text":"<pre><code>get_per_host_hash(event)\n</code></pre> <p>Computes a per-host hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>The function uses the event's <code>host</code> to create a string to be hashed.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the host.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n&gt;&gt;&gt; self.get_per_host_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_host_hash(self, event):\n    \"\"\"\n    Computes a per-host hash value for a given event. 
This method may be optionally overridden in subclasses.\n\n    The function uses the event's `host` to create a string to be hashed.\n\n    Args:\n        event (Event): The event object containing host information.\n\n    Returns:\n        int: The hash value computed for the host.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n        &gt;&gt;&gt; self.get_per_host_hash(event)\n    \"\"\"\n    return hash(event.host)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_hostport_hash","title":"get_per_hostport_hash","text":"<pre><code>get_per_hostport_hash(event)\n</code></pre> <p>Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>The function uses the event's <code>host</code>, <code>port</code>, and <code>scheme</code> (for URLs) to create a string to be hashed. The hash value is used for distinguishing events related to the same host.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host, port, or parsed URL information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the host.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n&gt;&gt;&gt; self.get_per_hostport_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_hostport_hash(self, event):\n    \"\"\"\n    Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.\n\n    The function uses the event's `host`, `port`, and `scheme` (for URLs) to create a string to be hashed.\n    The hash value is used for distinguishing events related to the same host.\n\n    Args:\n        event (Event): The event object containing host, port, or parsed URL information.\n\n    Returns:\n        int: The hash value computed for the host.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n        &gt;&gt;&gt; self.get_per_hostport_hash(event)\n    \"\"\"\n    parsed = getattr(event, \"parsed_url\", None)\n    if parsed is None:\n        to_hash = self.helpers.make_netloc(event.host, event.port)\n    else:\n        to_hash = f\"{parsed.scheme}://{parsed.netloc}/\"\n    return hash(to_hash)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_watched_events","title":"get_watched_events","text":"<pre><code>get_watched_events()\n</code></pre> <p>Retrieve the set of events that the module is interested in observing.</p> <p>Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.</p> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>The set of event types that this module will handle.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_watched_events(self):\n    \"\"\"Retrieve the set of events that the module is interested in observing.\n\n    Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.\n\n    Returns:\n        set: The set of event types that this module will handle.\n    \"\"\"\n    if self._watched_events is None:\n        self._watched_events 
= set(self.watched_events)\n    return self._watched_events\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.handle_batch","title":"handle_batch  <code>async</code>","text":"<pre><code>handle_batch(*events)\n</code></pre> <p>Handles incoming events in batches for optimized processing.</p> <p>This method is automatically called when multiple events that match any in <code>watched_events</code> are encountered and the <code>batch_size</code> attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.</p> <p>Parameters:</p> <ul> <li> <code>*events</code>               (<code>Event</code>, default:                   <code>()</code> )           \u2013            <p>A variable number of Event objects to be processed in a batch.</p> </li> </ul> Note <p>This method should be overridden if the <code>batch_size</code> attribute of the module is set to a value greater than 1.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def handle_batch(self, *events):\n    \"\"\"Handles incoming events in batches for optimized processing.\n\n    This method is automatically called when multiple events that match any in `watched_events` are encountered and the `batch_size` attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.\n\n    Args:\n        *events (Event): A variable number of Event objects to be processed in a batch.\n\n    Note:\n        This method should be overridden if the `batch_size` attribute of the module is set to a value greater than 1.\n\n    Returns:\n        None\n    \"\"\"\n    pass\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.handle_event","title":"handle_event  <code>async</code>","text":"<pre><code>handle_event(event)\n</code></pre> <p>Asynchronously handles incoming events that the module is configured to watch.</p> <p>This method is automatically invoked when an event that matches any in <code>watched_events</code> is encountered during a scan. Override this method to implement custom event-handling logic for your module.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing details about the incoming event.</p> </li> </ul> Note <p>This method should be overridden if the <code>batch_size</code> attribute of the module is set to 1.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def handle_event(self, event):\n    \"\"\"Asynchronously handles incoming events that the module is configured to watch.\n\n    This method is automatically invoked when an event that matches any in `watched_events` is encountered during a scan. 
Override this method to implement custom event-handling logic for your module.\n\n    Args:\n        event (Event): The event object containing details about the incoming event.\n\n    Note:\n        This method should be overridden if the `batch_size` attribute of the module is set to 1.\n\n    Returns:\n        None\n    \"\"\"\n    pass\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugeinfo","title":"hugeinfo","text":"<pre><code>hugeinfo(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n&gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugeinfo(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n        &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugeinfo(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugesuccess","title":"hugesuccess","text":"<pre><code>hugesuccess(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n&gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugesuccess(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n        &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugesuccess(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugeverbose","title":"hugeverbose","text":"<pre><code>hugeverbose(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n&gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugeverbose(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n        &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugeverbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugewarning","title":"hugewarning","text":"<pre><code>hugewarning(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n&gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugewarning(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n        &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n    \"\"\"\n    self.log.hugewarning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.info","title":"info","text":"<pre><code>info(*args, trace=False, **kwargs)\n</code></pre> <p>Logs informational messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.info(\"This is an informational message\")\n&gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def info(self, *args, trace=False, **kwargs):\n    \"\"\"Logs informational messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.info(\"This is an informational message\")\n        &gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n    \"\"\"\n    self.log.info(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.log_table","title":"log_table","text":"<pre><code>log_table(*args, **kwargs)\n</code></pre> <p>Logs a table to the console and optionally writes it to a file.</p> <p>This function generates a table using <code>self.helpers.make_table</code>, then logs each line of the table as an info-level log. If a table_name is provided, it also writes the table to a file.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable length argument list to be passed to <code>self.helpers.make_table</code>.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments. If 'table_name' is specified, the table will be written to a file.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The generated table as a string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def log_table(self, *args, **kwargs):\n    \"\"\"Logs a table to the console and optionally writes it to a file.\n\n    This function generates a table using `self.helpers.make_table`, then logs each line\n    of the table as an info-level log. If a table_name is provided, it also writes the table to a file.\n\n    Args:\n        *args: Variable length argument list to be passed to `self.helpers.make_table`.\n        **kwargs: Arbitrary keyword arguments. 
If 'table_name' is specified, the table will be written to a file.\n\n    Returns:\n        str: The generated table as a string.\n\n    Examples:\n        &gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n    \"\"\"\n    table_name = kwargs.pop(\"table_name\", None)\n    max_log_entries = kwargs.pop(\"max_log_entries\", None)\n    table = self.helpers.make_table(*args, **kwargs)\n    lines_logged = 0\n    for line in table.splitlines():\n        if max_log_entries is not None and lines_logged &gt; max_log_entries:\n            break\n        self.info(line)\n        lines_logged += 1\n    if table_name is not None:\n        date = self.helpers.make_date()\n        filename = self.scan.home / f\"{self.helpers.tagify(table_name)}-table-{date}.txt\"\n        with open(filename, \"w\") as f:\n            f.write(table)\n        self.verbose(f\"Wrote {table_name} to {filename}\")\n    return table\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.make_event","title":"make_event","text":"<pre><code>make_event(*args, **kwargs)\n</code></pre> <p>Create an event for the scan.</p> <p>Raises a validation error if the event could not be created, unless raise_error is set to False.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to the scan's make_event method.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to be passed to the scan's make_event method.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise a validation error if the event could not be created. Defaults to False.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n&gt;&gt;&gt; await self.emit_event(new_event)\n</code></pre> <p>Returns:</p> <ul> <li>           \u2013            <p>Event or None: The created event, or None if a validation error occurred and raise_error was False.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If the event could not be validated and raise_error is True.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def make_event(self, *args, **kwargs):\n    \"\"\"Create an event for the scan.\n\n    Raises a validation error if the event could not be created, unless raise_error is set to False.\n\n    Args:\n        *args: Positional arguments to be passed to the scan's make_event method.\n        **kwargs: Keyword arguments to be passed to the scan's make_event method.\n        raise_error (bool, optional): Whether to raise a validation error if the event could not be created. 
Defaults to False.\n\n    Examples:\n        &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n        &gt;&gt;&gt; await self.emit_event(new_event)\n\n    Returns:\n        Event or None: The created event, or None if a validation error occurred and raise_error was False.\n\n    Raises:\n        ValidationError: If the event could not be validated and raise_error is True.\n    \"\"\"\n    raise_error = kwargs.pop(\"raise_error\", False)\n    module = kwargs.pop(\"module\", None)\n    if module is None:\n        if (not args) or getattr(args[0], \"module\", None) is None:\n            kwargs[\"module\"] = self\n    try:\n        event = self.scan.make_event(*args, **kwargs)\n    except ValidationError as e:\n        if raise_error:\n            raise\n        self.warning(f\"{e}\")\n        return\n    return event\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.ping","title":"ping  <code>async</code>","text":"<pre><code>ping(url=None)\n</code></pre> <p>Asynchronously checks the health of the configured API.</p> <p>This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.</p> <p>The method uses the <code>ping_url</code> attribute if defined, or falls back to a provided URL. If neither is available, no request is made.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>A specific URL to use for the ping request. If not provided, the method will use the <code>ping_url</code> attribute.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the API response is not successful (status code != 200).</p> </li> </ul> Example Usage <p>To use this method, simply define the <code>ping_url</code> attribute in your module:</p> <p>class MyModule(BaseModule):     ping_url = \"https://api.example.com/ping\"</p> <p>Alternatively, you can override this method for more complex health checks:</p> <p>async def ping(self):     r = await self.api_request(f\"{self.base_url}/complex-health-check\")     if r.status_code != 200 or r.json().get('status') != 'healthy':         raise ValueError(f\"API unhealthy: {r.text}\")</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def ping(self, url=None):\n    \"\"\"Asynchronously checks the health of the configured API.\n\n    This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.\n\n    The method uses the `ping_url` attribute if defined, or falls back to a provided URL. If neither is available, no request is made.\n\n    Args:\n        url (str, optional): A specific URL to use for the ping request. 
If not provided, the method will use the `ping_url` attribute.\n\n    Returns:\n        None\n\n    Raises:\n        ValueError: If the API response is not successful (status code != 200).\n\n    Example Usage:\n        To use this method, simply define the `ping_url` attribute in your module:\n\n        class MyModule(BaseModule):\n            ping_url = \"https://api.example.com/ping\"\n\n        Alternatively, you can override this method for more complex health checks:\n\n        async def ping(self):\n            r = await self.api_request(f\"{self.base_url}/complex-health-check\")\n            if r.status_code != 200 or r.json().get('status') != 'healthy':\n                raise ValueError(f\"API unhealthy: {r.text}\")\n    \"\"\"\n    if url is None:\n        url = getattr(self, \"ping_url\", \"\")\n    if url:\n        r = await self.api_request(url)\n        if getattr(r, \"status_code\", 0) != 200:\n            response_text = getattr(r, \"text\", \"no response from server\")\n            raise ValueError(response_text)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.prepare_api_request","title":"prepare_api_request","text":"<pre><code>prepare_api_request(url, kwargs)\n</code></pre> <p>Prepare an API request by adding the necessary authentication - header, bearer token, etc.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>def prepare_api_request(self, url, kwargs):\n    \"\"\"\n    Prepare an API request by adding the necessary authentication - header, bearer token, etc.\n    \"\"\"\n    if self.api_key:\n        url = url.format(api_key=self.api_key)\n        if not \"headers\" in kwargs:\n            kwargs[\"headers\"] = {}\n        kwargs[\"headers\"][\"Authorization\"] = f\"Bearer {self.api_key}\"\n    return url, kwargs\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.queue_event","title":"queue_event  <code>async</code>","text":"<pre><code>queue_event(event)\n</code></pre> <p>Asynchronously queues an incoming event to the module's event queue for further processing.</p> <p>The function performs an initial check to see if the event is acceptable for queuing. 
If the event passes the check, it is put into the <code>incoming_event_queue</code>.</p> <p>Parameters:</p> <ul> <li> <code>event</code>           \u2013            <p>The event object to be queued.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but modifies the state of the <code>incoming_event_queue</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.queue_event(some_event)\n</code></pre> <p>Raises:</p> <ul> <li> <code>AttributeError</code>             \u2013            <p>If the module is not in an acceptable state to queue incoming events.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def queue_event(self, event):\n    \"\"\"\n    Asynchronously queues an incoming event to the module's event queue for further processing.\n\n    The function performs an initial check to see if the event is acceptable for queuing.\n    If the event passes the check, it is put into the `incoming_event_queue`.\n\n    Args:\n        event: The event object to be queued.\n\n    Returns:\n        None: The function doesn't return anything but modifies the state of the `incoming_event_queue`.\n\n    Examples:\n        &gt;&gt;&gt; await self.queue_event(some_event)\n\n    Raises:\n        AttributeError: If the module is not in an acceptable state to queue incoming events.\n    \"\"\"\n    async with self._task_counter.count(\"queue_event()\", _log=False):\n        if self.incoming_event_queue is False:\n            self.debug(f\"Not in an acceptable state to queue incoming event\")\n            return\n        acceptable, reason = self._event_precheck(event)\n        if not acceptable:\n            if reason and reason != \"its type is not in watched_events\":\n                self.debug(f\"Not queueing {event} because {reason}\")\n            return\n        else:\n            self.debug(f\"Queueing {event} because {reason}\")\n        try:\n            self.incoming_event_queue.put_nowait(event)\n            async with self._event_received:\n                self._event_received.notify()\n            if event.type != \"FINISHED\":\n                self.scan._new_activity = True\n        except AttributeError:\n            self.debug(f\"Not in an acceptable state to queue incoming event\")\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.queue_outgoing_event","title":"queue_outgoing_event  <code>async</code>","text":"<pre><code>queue_outgoing_event(event, **kwargs)\n</code></pre> <p>Queues an outgoing event to the module's outgoing event queue for further processing.</p> <p>The function attempts to put the event into the <code>outgoing_event_queue</code> immediately. 
If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.</p> <p>Parameters:</p> <ul> <li> <code>event</code>           \u2013            <p>The event object to be queued.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to be associated with the event.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but modifies the state of the <code>outgoing_event_queue</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n</code></pre> <p>Raises:</p> <ul> <li> <code>AttributeError</code>             \u2013            <p>If the module is not in an acceptable state to queue outgoing events.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def queue_outgoing_event(self, event, **kwargs):\n    \"\"\"\n    Queues an outgoing event to the module's outgoing event queue for further processing.\n\n    The function attempts to put the event into the `outgoing_event_queue` immediately.\n    If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.\n\n    Args:\n        event: The event object to be queued.\n        **kwargs: Additional keyword arguments to be associated with the event.\n\n    Returns:\n        None: The function doesn't return anything but modifies the state of the `outgoing_event_queue`.\n\n    Examples:\n        &gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n\n    Raises:\n        AttributeError: If the module is not in an acceptable state to queue outgoing events.\n    \"\"\"\n    try:\n        await self.outgoing_event_queue.put((event, kwargs))\n    except AttributeError:\n        self.debug(f\"Not in an acceptable state to queue outgoing event\")\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.report","title":"report  <code>async</code>","text":"<pre><code>report()\n</code></pre> <p>Asynchronously executes a final task after the scan is complete but before cleanup.</p> <p>This method can be overridden to aggregate data and raise summary events at the end of the scan.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>This method is called only once per scan.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def report(self):\n    \"\"\"Asynchronously executes a final task after the scan is complete but before cleanup.\n\n    This method can be overridden to aggregate data and raise summary events at the end of the scan.\n\n    Returns:\n        None\n\n    Note:\n        This method is called only once per scan.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.require_api_key","title":"require_api_key  <code>async</code>","text":"<pre><code>require_api_key()\n</code></pre> <p>Asynchronously checks if an API key is required and valid.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>bool or tuple: Returns True if API key is valid and ready.           
Returns a tuple (None, \"error message\") otherwise.</p> </li> </ul> Notes <ul> <li>Fetches the API key from the configuration.</li> <li>Calls the 'ping()' method to test API accessibility.</li> <li>Sets the API key readiness status accordingly.</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def require_api_key(self):\n    \"\"\"\n    Asynchronously checks if an API key is required and valid.\n\n    Args:\n        None\n\n    Returns:\n        bool or tuple: Returns True if API key is valid and ready.\n                      Returns a tuple (None, \"error message\") otherwise.\n\n    Notes:\n        - Fetches the API key from the configuration.\n        - Calls the 'ping()' method to test API accessibility.\n        - Sets the API key readiness status accordingly.\n    \"\"\"\n    self.api_key = self.config.get(\"api_key\", \"\")\n    if self.auth_secret:\n        try:\n            await self.ping()\n            self.hugesuccess(f\"API is ready\")\n            return True, \"\"\n        except Exception as e:\n            self.trace(traceback.format_exc())\n            return None, f\"Error with API ({str(e).strip()})\"\n    else:\n        return None, \"No API key set\"\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.set_error_state","title":"set_error_state","text":"<pre><code>set_error_state(message=None, clear_outgoing_queue=False, critical=False)\n</code></pre> <p>Puts the module into an errored state where it cannot accept new events. Optionally logs a warning message.</p> <p>The function sets the module's <code>errored</code> attribute to True and logs a warning with the optional message. It also clears the incoming event queue to prevent further processing and updates its status to False.</p> <p>Parameters:</p> <ul> <li> <code>message</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Additional message to be logged along with the warning.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but updates the <code>errored</code> state and clears the incoming event queue.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.set_error_state()\n&gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n</code></pre> Notes <ul> <li>The function sets <code>self._incoming_event_queue</code> to False to prevent its further use.</li> <li>If the module was already in an errored state, the function will not reset the error state or the queue.</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def set_error_state(self, message=None, clear_outgoing_queue=False, critical=False):\n    \"\"\"\n    Puts the module into an errored state where it cannot accept new events. 
Optionally logs a warning message.\n\n    The function sets the module's `errored` attribute to True and logs a warning with the optional message.\n    It also clears the incoming event queue to prevent further processing and updates its status to False.\n\n    Args:\n        message (str, optional): Additional message to be logged along with the warning.\n\n    Returns:\n        None: The function doesn't return anything but updates the `errored` state and clears the incoming event queue.\n\n    Examples:\n        &gt;&gt;&gt; self.set_error_state()\n        &gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n\n    Notes:\n        - The function sets `self._incoming_event_queue` to False to prevent its further use.\n        - If the module was already in an errored state, the function will not reset the error state or the queue.\n    \"\"\"\n    if not self.errored:\n        log_msg = \"Setting error state\"\n        if message is not None:\n            log_msg += f\": {message}\"\n        if critical:\n            log_fn = self.error\n        else:\n            log_fn = self.warning\n        log_fn(log_msg)\n        self.errored = True\n        # clear incoming queue\n        if self.incoming_event_queue is not False:\n            self.debug(f\"Emptying event_queue\")\n            with suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    self.incoming_event_queue.get_nowait()\n            # set queue to None to prevent its use\n            # if there are leftover objects in the queue, the scan will hang.\n            self._incoming_event_queue = False\n\n        if clear_outgoing_queue:\n            with suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    self.outgoing_event_queue.get_nowait()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.setup","title":"setup  <code>async</code>","text":"<pre><code>setup()\n</code></pre> <p>Performs one-time setup tasks for the module.</p> <p>This method is responsible for preparing the module for its operation, which may include tasks such as downloading necessary resources, validating configuration parameters, or other preliminary checks.</p> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <ul> <li>bool or None: A status indicating the outcome of the setup process. 
Returns <code>True</code> if the setup was successful, <code>None</code> for a soft-fail where the module setup did not succeed but the scan will continue with the module disabled, and <code>False</code> for a hard-fail where the setup failure causes the scan to abort.</li> <li>str, optional: A reason for the setup failure, provided only when the setup does not succeed (i.e., returns <code>None</code> or <code>False</code>).</li> </ul> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n&gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n&gt;&gt;&gt;         return None, \"No API key specified\"\n</code></pre> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     try:\n&gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n&gt;&gt;&gt;     except WordlistError as e:\n&gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n&gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n</code></pre> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n&gt;&gt;&gt;     # Success: Setup completed without issues\n&gt;&gt;&gt;     return True\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>async def setup(self):\n    \"\"\"\n    Performs one-time setup tasks for the module.\n\n    This method is responsible for preparing the module for its operation, which may include tasks\n    such as downloading necessary resources, validating configuration parameters, or other preliminary\n    checks.\n\n    Returns:\n        tuple:\n            - bool or None: A status indicating the outcome of the setup process. 
Returns `True` if\n            the setup was successful, `None` for a soft-fail where the module setup did not succeed\n            but the scan will continue with the module disabled, and `False` for a hard-fail where\n            the setup failure causes the scan to abort.\n            - str, optional: A reason for the setup failure, provided only when the setup does not\n            succeed (i.e., returns `None` or `False`).\n\n    Examples:\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n        &gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n        &gt;&gt;&gt;         return None, \"No API key specified\"\n\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     try:\n        &gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n        &gt;&gt;&gt;     except WordlistError as e:\n        &gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n        &gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n        &gt;&gt;&gt;     # Success: Setup completed without issues\n        &gt;&gt;&gt;     return True\n    \"\"\"\n\n    return True\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.success","title":"success","text":"<pre><code>success(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a success message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.success(\"Operation completed successfully\")\n&gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def success(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a success message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.success(\"Operation completed successfully\")\n        &gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n    \"\"\"\n    self.log.success(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.trace","title":"trace","text":"<pre><code>trace(msg=None)\n</code></pre> <p>Logs the stack trace of the most recently caught exception.</p> <p>This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. 
It is typically used for debugging purposes.</p> <p>Anything logged using this method will always be written to the scan's <code>debug.log</code>, even if debugging is not enabled.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n&gt;&gt;&gt;     1 / 0\n&gt;&gt;&gt; except ZeroDivisionError:\n&gt;&gt;&gt;     self.trace()\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def trace(self, msg=None):\n    \"\"\"Logs the stack trace of the most recently caught exception.\n\n    This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. It is typically used for debugging purposes.\n\n    Anything logged using this method will always be written to the scan's `debug.log`, even if debugging is not enabled.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        &gt;&gt;&gt;     1 / 0\n        &gt;&gt;&gt; except ZeroDivisionError:\n        &gt;&gt;&gt;     self.trace()\n    \"\"\"\n    if msg is None:\n        e_type, e_val, e_traceback = exc_info()\n        if e_type is not None:\n            self.log.trace(traceback.format_exc())\n    else:\n        self.log.trace(msg)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.verbose","title":"verbose","text":"<pre><code>verbose(*args, trace=False, **kwargs)\n</code></pre> <p>Logs messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n&gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def verbose(self, *args, trace=False, **kwargs):\n    \"\"\"Logs messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n        &gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n    \"\"\"\n    self.log.verbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.warning","title":"warning","text":"<pre><code>warning(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a warning message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.warning(\"This is a warning message\")\n&gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def warning(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a warning message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.warning(\"This is a warning message\")\n        &gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n    \"\"\"\n    self.log.warning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/core/","title":"BBOTCore","text":""},{"location":"dev/core/#bbot.core.core.BBOTCore","title":"BBOTCore","text":"<p>This is the first thing that loads when you import BBOT.</p> <p>Unlike a Preset, BBOTCore holds only the config, not scan-specific stuff like targets, flags, modules, etc.</p> <p>Its main jobs are:</p> <ul> <li>set up logging</li> <li>keep separation between the <code>default</code> and <code>custom</code> config (this allows presets to only display the config options that have changed)</li> <li>allow for easy merging of configs</li> <li>load quickly</li> </ul> Source code in <code>bbot/core/core.py</code> <pre><code>class BBOTCore:\n    \"\"\"\n    This is the first thing that loads when you import BBOT.\n\n    Unlike a Preset, BBOTCore holds only the config, not scan-specific stuff like targets, flags, modules, etc.\n\n    Its main jobs are:\n\n    - set up logging\n    - keep separation between the `default` and `custom` config (this allows presets to only display the config options that have changed)\n    - allow for easy merging of configs\n    - load quickly\n    \"\"\"\n\n    # used for filtering out sensitive config values\n    secrets_strings = [\"api_key\", \"username\", \"password\", \"token\", \"secret\", \"_id\"]\n    # don't filter/remove entries under this key\n    secrets_exclude_keys = [\"modules\"]\n\n    def __init__(self):\n        self._logger = None\n        self._files_config = None\n\n        self._config = None\n        self._custom_config = None\n\n        # bare minimum == logging\n        self.logger\n        self.log = logging.getLogger(\"bbot.core\")\n\n        self._prep_multiprocessing()\n\n    def _prep_multiprocessing(self):\n        import multiprocessing\n        from .helpers.process import BBOTProcess\n\n        if SHARED_INTERPRETER_STATE.is_main_process:\n            # if this is the main bbot process, set the logger and queue for the first time\n            from functools import partialmethod\n\n            BBOTProcess.__init__ = partialmethod(\n                BBOTProcess.__init__, log_level=self.logger.log_level, log_queue=self.logger.queue\n            )\n\n        # this makes our process class the default for process pools, etc.\n        mp_context = multiprocessing.get_context(\"spawn\")\n        mp_context.Process = BBOTProcess\n\n    @property\n    def home(self):\n        return 
Path(self.config[\"home\"]).expanduser().resolve()\n\n    @property\n    def cache_dir(self):\n        return self.home / \"cache\"\n\n    @property\n    def tools_dir(self):\n        return self.home / \"tools\"\n\n    @property\n    def temp_dir(self):\n        return self.home / \"temp\"\n\n    @property\n    def lib_dir(self):\n        return self.home / \"lib\"\n\n    @property\n    def scans_dir(self):\n        return self.home / \"scans\"\n\n    @property\n    def config(self):\n        \"\"\"\n        .config is just .default_config + .custom_config merged together\n\n        any new values should be added to custom_config.\n        \"\"\"\n        if self._config is None:\n            self._config = OmegaConf.merge(self.default_config, self.custom_config)\n            # set read-only flag (change .custom_config instead)\n            OmegaConf.set_readonly(self._config, True)\n        return self._config\n\n    @property\n    def default_config(self):\n        \"\"\"\n        The default BBOT config (from `defaults.yml`). Read-only.\n        \"\"\"\n        global DEFAULT_CONFIG\n        if DEFAULT_CONFIG is None:\n            self.default_config = self.files_config.get_default_config()\n            # ensure bbot home dir\n            if not \"home\" in self.default_config:\n                self.default_config[\"home\"] = \"~/.bbot\"\n        return DEFAULT_CONFIG\n\n    @default_config.setter\n    def default_config(self, value):\n        # we temporarily clear out the config so it can be refreshed if/when default_config changes\n        global DEFAULT_CONFIG\n        self._config = None\n        DEFAULT_CONFIG = value\n        # set read-only flag (change .custom_config instead)\n        OmegaConf.set_readonly(DEFAULT_CONFIG, True)\n\n    @property\n    def custom_config(self):\n        \"\"\"\n        Custom BBOT config (from `~/.config/bbot/bbot.yml`)\n        \"\"\"\n        # we temporarily clear out the config so it can be refreshed if/when custom_config changes\n        self._config = None\n        if self._custom_config is None:\n            self.custom_config = self.files_config.get_custom_config()\n        return self._custom_config\n\n    @custom_config.setter\n    def custom_config(self, value):\n        # we temporarily clear out the config so it can be refreshed if/when custom_config changes\n        self._config = None\n        # ensure the modules key is always a dictionary\n        modules_entry = value.get(\"modules\", None)\n        if modules_entry is not None and not OmegaConf.is_dict(modules_entry):\n            value[\"modules\"] = {}\n        self._custom_config = value\n\n    def no_secrets_config(self, config):\n        from .helpers.misc import clean_dict\n\n        with suppress(ValueError):\n            config = OmegaConf.to_object(config)\n\n        return clean_dict(\n            config,\n            *self.secrets_strings,\n            fuzzy=True,\n            exclude_keys=self.secrets_exclude_keys,\n        )\n\n    def secrets_only_config(self, config):\n        from .helpers.misc import filter_dict\n\n        with suppress(ValueError):\n            config = OmegaConf.to_object(config)\n\n        return filter_dict(\n            config,\n            *self.secrets_strings,\n            fuzzy=True,\n            exclude_keys=self.secrets_exclude_keys,\n        )\n\n    def merge_custom(self, config):\n        \"\"\"\n        Merge a config into the custom config.\n        \"\"\"\n        self.custom_config = OmegaConf.merge(self.custom_config, 
OmegaConf.create(config))\n\n    def merge_default(self, config):\n        \"\"\"\n        Merge a config into the default config.\n        \"\"\"\n        self.default_config = OmegaConf.merge(self.default_config, OmegaConf.create(config))\n\n    def copy(self):\n        \"\"\"\n        Return a semi-shallow copy of self. (`custom_config` is copied, but `default_config` stays the same)\n        \"\"\"\n        core_copy = copy(self)\n        core_copy._custom_config = self._custom_config.copy()\n        return core_copy\n\n    @property\n    def files_config(self):\n        \"\"\"\n        Get the configs from `bbot.yml` and `defaults.yml`\n        \"\"\"\n        if self._files_config is None:\n            from .config import files\n\n            self.files = files\n            self._files_config = files.BBOTConfigFiles(self)\n        return self._files_config\n\n    def create_process(self, *args, **kwargs):\n        if os.environ.get(\"BBOT_TESTING\", \"\") == \"True\":\n            process = self.create_thread(*args, **kwargs)\n        else:\n            if SHARED_INTERPRETER_STATE.is_scan_process:\n                from .helpers.process import BBOTProcess\n\n                process = BBOTProcess(*args, **kwargs)\n            else:\n                import multiprocessing\n\n                raise BBOTError(f\"Tried to start server from process {multiprocessing.current_process().name}\")\n        process.daemon = True\n        return process\n\n    def create_thread(self, *args, **kwargs):\n        from .helpers.process import BBOTThread\n\n        return BBOTThread(*args, **kwargs)\n\n    @property\n    def logger(self):\n        self.config\n        if self._logger is None:\n            from .config.logger import BBOTLogger\n\n            self._logger = BBOTLogger(self)\n        return self._logger\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.config","title":"config  <code>property</code>","text":"<pre><code>config\n</code></pre> <p>.config is just .default_config + .custom_config merged together</p> <p>any new values should be added to custom_config.</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.custom_config","title":"custom_config  <code>property</code> <code>writable</code>","text":"<pre><code>custom_config\n</code></pre> <p>Custom BBOT config (from <code>~/.config/bbot/bbot.yml</code>)</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.default_config","title":"default_config  <code>property</code> <code>writable</code>","text":"<pre><code>default_config\n</code></pre> <p>The default BBOT config (from <code>defaults.yml</code>). Read-only.</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.files_config","title":"files_config  <code>property</code>","text":"<pre><code>files_config\n</code></pre> <p>Get the configs from <code>bbot.yml</code> and <code>defaults.yml</code></p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.copy","title":"copy","text":"<pre><code>copy()\n</code></pre> <p>Return a semi-shallow copy of self. (<code>custom_config</code> is copied, but <code>default_config</code> stays the same)</p> Source code in <code>bbot/core/core.py</code> <pre><code>def copy(self):\n    \"\"\"\n    Return a semi-shallow copy of self. 
(`custom_config` is copied, but `default_config` stays the same)\n    \"\"\"\n    core_copy = copy(self)\n    core_copy._custom_config = self._custom_config.copy()\n    return core_copy\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.merge_custom","title":"merge_custom","text":"<pre><code>merge_custom(config)\n</code></pre> <p>Merge a config into the custom config.</p> Source code in <code>bbot/core/core.py</code> <pre><code>def merge_custom(self, config):\n    \"\"\"\n    Merge a config into the custom config.\n    \"\"\"\n    self.custom_config = OmegaConf.merge(self.custom_config, OmegaConf.create(config))\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.merge_default","title":"merge_default","text":"<pre><code>merge_default(config)\n</code></pre> <p>Merge a config into the default config.</p> Source code in <code>bbot/core/core.py</code> <pre><code>def merge_default(self, config):\n    \"\"\"\n    Merge a config into the default config.\n    \"\"\"\n    self.default_config = OmegaConf.merge(self.default_config, OmegaConf.create(config))\n</code></pre>"},{"location":"dev/dev_environment/","title":"Setting Up a Dev Environment","text":"<p>The following will show you how to set up a fully functioning Python environment for devving on BBOT.</p>"},{"location":"dev/dev_environment/#installation-poetry","title":"Installation (Poetry)","text":"<p>Poetry is the recommended method of installation if you want to dev on BBOT. To set up a dev environment with Poetry, you can follow these steps:</p> <ul> <li>Fork BBOT on GitHub</li> <li>Clone your fork and set up a development environment with Poetry:</li> </ul> <pre><code># clone your forked repo and cd into it\ngit clone git@github.com:&lt;username&gt;/bbot.git\ncd bbot\n\n# install poetry\ncurl -sSL https://install.python-poetry.org | python3 -\n\n# install pip dependencies\npoetry install\n# install pre-commit hooks, etc.\npoetry run pre-commit install\n\n# enter virtual environment\npoetry shell\n\nbbot --help\n</code></pre> <ul> <li>Now, any changes you make in the code will be reflected in the <code>bbot</code> command.</li> <li>After making your changes, run the tests locally to ensure they pass.</li> </ul> <pre><code># auto-format code indentation, etc.\nblack .\n\n# run tests\n./bbot/test/run_tests.sh\n</code></pre> <ul> <li>Finally, commit and push your changes, and create a pull request to the <code>dev</code> branch of the main BBOT repo.</li> </ul>"},{"location":"dev/discord_bot/","title":"Discord Bot Example","text":"<p>Below is a simple Discord bot designed to run BBOT scans.</p> examples/discord_bot.py<pre><code>import discord\nfrom discord.ext import commands\n\nfrom bbot.scanner import Scanner\nfrom bbot.modules.output.discord import Discord\n\n\nclass BBOTDiscordBot(commands.Cog):\n    \"\"\"\n    A simple Discord bot capable of running a BBOT scan.\n\n    To set up:\n        1. Go to Discord Developer Portal (https://discord.com/developers)\n        2. Create a new application\n        3. Create an invite link for the bot, visit the link to invite it to your server\n            - Your Application --&gt; OAuth2 --&gt; URL Generator\n                - For Scopes, select \"bot\"\n                - For Bot Permissions, select:\n                    - Read Messages/View Channels\n                    - Send Messages\n        4. Turn on \"Message Content Intent\"\n            - Your Application --&gt; Bot --&gt; Privileged Gateway Intents --&gt; Message Content Intent\n        5. 
Copy your Discord Bot Token and put it at the top of this file\n            - Your Application --&gt; Bot --&gt; Reset Token\n        6. Run this script\n\n    To scan evilcorp.com, you would type:\n\n        /scan evilcorp.com\n\n    Results will be output to the same channel.\n    \"\"\"\n\n    def __init__(self):\n        self.current_scan = None\n\n    @commands.command(name=\"scan\", description=\"Scan a target with BBOT.\")\n    async def scan(self, ctx, target: str):\n        if self.current_scan is not None:\n            self.current_scan.stop()\n        await ctx.send(f\"Starting scan against {target}.\")\n\n        # creates scan instance\n        self.current_scan = Scanner(target, flags=\"subdomain-enum\")\n        discord_module = Discord(self.current_scan)\n\n        seen = set()\n        num_events = 0\n        # start scan and iterate through results\n        async for event in self.current_scan.async_start():\n            if hash(event) in seen:\n                continue\n            seen.add(hash(event))\n            await ctx.send(discord_module.format_message(event))\n            num_events += 1\n\n        await ctx.send(f\"Finished scan against {target}. {num_events:,} results.\")\n        self.current_scan = None\n\n\nif __name__ == \"__main__\":\n    intents = discord.Intents.default()\n    intents.message_content = True\n    bot = commands.Bot(command_prefix=\"/\", intents=intents)\n\n    @bot.event\n    async def on_ready():\n        print(f\"We have logged in as {bot.user}\")\n        await bot.add_cog(BBOTDiscordBot())\n\n    bot.run(\"DISCORD_BOT_TOKEN_HERE\")\n</code></pre>"},{"location":"dev/engine/","title":"Engine","text":""},{"location":"dev/engine/#bbot.core.engine.EngineBase","title":"EngineBase","text":"<p>Base Engine class for Server and Client.</p> <p>An Engine is a simple and lightweight RPC implementation that allows offloading async tasks to a separate process. It leverages ZeroMQ in a ROUTER-DEALER configuration.</p> <p>BBOT makes use of this by spawning a dedicated engine for DNS and HTTP tasks. This offloads I/O and helps free up the main event loop for other tasks.</p> <p>To use Engine, you must subclass both EngineClient and EngineServer.</p> <p>See the respective EngineClient and EngineServer classes for usage examples.</p> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineBase:\n    \"\"\"\n    Base Engine class for Server and Client.\n\n    An Engine is a simple and lightweight RPC implementation that allows offloading async tasks\n    to a separate process. 
It leverages ZeroMQ in a ROUTER-DEALER configuration.\n\n    BBOT makes use of this by spawning a dedicated engine for DNS and HTTP tasks.\n    This offloads I/O and helps free up the main event loop for other tasks.\n\n    To use Engine, you must subclass both EngineClient and EngineServer.\n\n    See the respective EngineClient and EngineServer classes for usage examples.\n    \"\"\"\n\n    ERROR_CLASS = BBOTEngineError\n\n    def __init__(self, debug=False):\n        self._shutdown_status = False\n        self.log = logging.getLogger(f\"bbot.core.{self.__class__.__name__.lower()}\")\n        self._engine_debug = debug\n\n    def pickle(self, obj):\n        try:\n            return pickle.dumps(obj)\n        except Exception as e:\n            self.log.error(f\"Error serializing object: {obj}: {e}\")\n            self.log.trace(traceback.format_exc())\n        return error_sentinel\n\n    def unpickle(self, binary):\n        try:\n            return pickle.loads(binary)\n        except Exception as e:\n            self.log.error(f\"Error deserializing binary: {e}\")\n            self.log.trace(f\"Offending binary: {binary}\")\n            self.log.trace(traceback.format_exc())\n        return error_sentinel\n\n    async def _infinite_retry(self, callback, *args, **kwargs):\n        interval = kwargs.pop(\"_interval\", 300)\n        context = kwargs.pop(\"_context\", \"\")\n        # default overall timeout of 10 minutes (300 second interval * 2 iterations)\n        max_retries = kwargs.pop(\"_max_retries\", 1)\n        if not context:\n            context = f\"{callback.__name__}({args}, {kwargs})\"\n        retries = 0\n        while not self._shutdown_status:\n            try:\n                return await asyncio.wait_for(callback(*args, **kwargs), timeout=interval)\n            except (TimeoutError, asyncio.exceptions.TimeoutError):\n                self.log.debug(f\"{self.name}: Timeout after {interval:,} seconds {context}, retrying...\")\n                retries += 1\n                if max_retries is not None and retries &gt; max_retries:\n                    raise TimeoutError(f\"Timed out after {(max_retries+1)*interval:,} seconds {context}\")\n\n    def engine_debug(self, *args, **kwargs):\n        if self._engine_debug:\n            self.log.trace(*args, **kwargs)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineClient","title":"EngineClient","text":"<p>               Bases: <code>EngineBase</code></p> <p>The client portion of BBOT's RPC Engine.</p> <p>To create an engine, you must create a subclass of this class and also define methods for each of your desired functions.</p> <p>Note that this only supports async functions. 
If you need to offload a synchronous function to another CPU, use BBOT's multiprocessing pool instead.</p> <p>Any CPU- or I/O-intensive logic should be implemented in the EngineServer.</p> <p>These functions are typically stubs whose only job is to forward the arguments to the server.</p> <p>Functions with the same names should be defined on the EngineServer.</p> <p>The EngineClient must specify its associated server class via the <code>SERVER_CLASS</code> variable.</p> <p>Depending on whether your function is a generator, you will use either <code>run_and_return()</code> or <code>run_and_yield()</code>.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from bbot.core.engine import EngineClient\n&gt;&gt;&gt;\n&gt;&gt;&gt; class MyClient(EngineClient):\n&gt;&gt;&gt;     SERVER_CLASS = MyServer\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_function(self, **kwargs):\n&gt;&gt;&gt;         return await self.run_and_return(\"my_function\", **kwargs)\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_generator(self, **kwargs):\n&gt;&gt;&gt;         async for _ in self.run_and_yield(\"my_generator\", **kwargs):\n&gt;&gt;&gt;             yield _\n</code></pre> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineClient(EngineBase):\n    \"\"\"\n    The client portion of BBOT's RPC Engine.\n\n    To create an engine, you must create a subclass of this class and also\n    define methods for each of your desired functions.\n\n    Note that this only supports async functions. If you need to offload a synchronous function to another CPU, use BBOT's multiprocessing pool instead.\n\n    Any CPU- or I/O-intensive logic should be implemented in the EngineServer.\n\n    These functions are typically stubs whose only job is to forward the arguments to the server.\n\n    Functions with the same names should be defined on the EngineServer.\n\n    The EngineClient must specify its associated server class via the `SERVER_CLASS` variable.\n\n    Depending on whether your function is a generator, you will use either `run_and_return()` or `run_and_yield()`.\n\n    Examples:\n        &gt;&gt;&gt; from bbot.core.engine import EngineClient\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; class MyClient(EngineClient):\n        &gt;&gt;&gt;     SERVER_CLASS = MyServer\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_function(self, **kwargs):\n        &gt;&gt;&gt;         return await self.run_and_return(\"my_function\", **kwargs)\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_generator(self, **kwargs):\n        &gt;&gt;&gt;         async for _ in self.run_and_yield(\"my_generator\", **kwargs):\n        &gt;&gt;&gt;             yield _\n    \"\"\"\n\n    SERVER_CLASS = None\n\n    def __init__(self, debug=False, **kwargs):\n        self.name = f\"EngineClient {self.__class__.__name__}\"\n        super().__init__(debug=debug)\n        self.process = None\n        if self.SERVER_CLASS is None:\n            raise ValueError(f\"Must set EngineClient SERVER_CLASS, {self.SERVER_CLASS}\")\n        self.CMDS = dict(self.SERVER_CLASS.CMDS)\n        for k, v in list(self.CMDS.items()):\n            self.CMDS[v] = k\n        self.socket_address = f\"zmq_{rand_string(8)}.sock\"\n        self.socket_path = Path(tempfile.gettempdir()) / self.socket_address\n        self.server_kwargs = kwargs.pop(\"server_kwargs\", {})\n        self._server_process = None\n        self.context = zmq.asyncio.Context()\n        self.context.setsockopt(zmq.LINGER, 0)\n        self.sockets = set()\n\n    def check_error(self, message):\n   
     if isinstance(message, dict) and len(message) == 1 and \"_e\" in message:\n            self.engine_debug(f\"{self.name}: got error message: {message}\")\n            error, trace = message[\"_e\"]\n            error = self.ERROR_CLASS(error)\n            error.engine_traceback = trace\n            self.engine_debug(f\"{self.name}: raising {error.__class__.__name__}\")\n            raise error\n        return False\n\n    async def run_and_return(self, command, *args, **kwargs):\n        fn_str = f\"{command}({args}, {kwargs})\"\n        self.engine_debug(f\"{self.name}: executing run-and-return {fn_str}\")\n        if self._shutdown_status and not command == \"_shutdown\":\n            self.log.verbose(f\"{self.name} has been shut down and is not accepting new tasks\")\n            return\n        async with self.new_socket() as socket:\n            try:\n                message = self.make_message(command, args=args, kwargs=kwargs)\n                if message is error_sentinel:\n                    return\n                await socket.send(message)\n                binary = await self._infinite_retry(socket.recv, _context=f\"waiting for return value from {fn_str}\")\n            except BaseException:\n                try:\n                    await self.send_cancel_message(socket, fn_str)\n                except Exception:\n                    self.log.debug(f\"{self.name}: {fn_str} failed to send cancel message after exception\")\n                    self.log.trace(traceback.format_exc())\n                raise\n        # self.log.debug(f\"{self.name}.{command}({kwargs}) got binary: {binary}\")\n        message = self.unpickle(binary)\n        self.engine_debug(f\"{self.name}: {fn_str} got return value: {message}\")\n        # error handling\n        if self.check_error(message):\n            return\n        return message\n\n    async def run_and_yield(self, command, *args, **kwargs):\n        fn_str = f\"{command}({args}, {kwargs})\"\n        self.engine_debug(f\"{self.name}: executing run-and-yield {fn_str}\")\n        if self._shutdown_status:\n            self.log.verbose(\"Engine has been shut down and is not accepting new tasks\")\n            return\n        message = self.make_message(command, args=args, kwargs=kwargs)\n        if message is error_sentinel:\n            return\n        async with self.new_socket() as socket:\n            # TODO: synchronize server-side generator by limiting qsize\n            # socket.setsockopt(zmq.RCVHWM, 1)\n            # socket.setsockopt(zmq.SNDHWM, 1)\n            await socket.send(message)\n            while 1:\n                try:\n                    binary = await self._infinite_retry(\n                        socket.recv, _context=f\"waiting for new iteration from {fn_str}\"\n                    )\n                    # self.log.debug(f\"{self.name}.{command}({kwargs}) got binary: {binary}\")\n                    message = self.unpickle(binary)\n                    self.engine_debug(f\"{self.name}: {fn_str} got iteration: {message}\")\n                    # error handling\n                    if self.check_error(message) or self.check_stop(message):\n                        break\n                    yield message\n                except (StopAsyncIteration, GeneratorExit) as e:\n                    exc_name = e.__class__.__name__\n                    self.engine_debug(f\"{self.name}.{command} got {exc_name}\")\n                    try:\n                        await self.send_cancel_message(socket, fn_str)\n                    
except Exception:\n                        self.engine_debug(f\"{self.name}.{command} failed to send cancel message after {exc_name}\")\n                        self.log.trace(traceback.format_exc())\n                    break\n\n    async def send_cancel_message(self, socket, context):\n        \"\"\"\n        Send a cancel message and wait for confirmation from the server\n        \"\"\"\n        # -1 == special \"cancel\" signal\n        message = pickle.dumps({\"c\": -1})\n        await self._infinite_retry(socket.send, message)\n        while 1:\n            response = await self._infinite_retry(\n                socket.recv, _context=f\"waiting for CANCEL_OK from {context}\", _max_retries=4\n            )\n            response = pickle.loads(response)\n            if isinstance(response, dict):\n                response = response.get(\"m\", \"\")\n                if response == \"CANCEL_OK\":\n                    break\n\n    async def send_shutdown_message(self):\n        async with self.new_socket() as socket:\n            # -99 == special shutdown message\n            message = pickle.dumps({\"c\": -99})\n            with suppress(TimeoutError, asyncio.exceptions.TimeoutError):\n                await asyncio.wait_for(socket.send(message), 0.5)\n            with suppress(TimeoutError, asyncio.exceptions.TimeoutError):\n                while 1:\n                    response = await asyncio.wait_for(socket.recv(), 0.5)\n                    response = pickle.loads(response)\n                    if isinstance(response, dict):\n                        response = response.get(\"m\", \"\")\n                        if response == \"SHUTDOWN_OK\":\n                            break\n\n    def check_stop(self, message):\n        if isinstance(message, dict) and len(message) == 1 and \"_s\" in message:\n            return True\n        return False\n\n    def make_message(self, command, args=None, kwargs=None):\n        try:\n            cmd_id = self.CMDS[command]\n        except KeyError:\n            raise KeyError(f'Command \"{command}\" not found. Available commands: {\",\".join(self.available_commands)}')\n        message = {\"c\": cmd_id}\n        if args:\n            message[\"a\"] = args\n        if kwargs:\n            message[\"k\"] = kwargs\n        return pickle.dumps(message)\n\n    @property\n    def available_commands(self):\n        return [s for s in self.CMDS if isinstance(s, str)]\n\n    def start_server(self):\n        process_name = multiprocessing.current_process().name\n        if SHARED_INTERPRETER_STATE.is_scan_process:\n            kwargs = dict(self.server_kwargs)\n            # if we're in tests, we use a single event loop to avoid weird race conditions\n            # this allows us to more easily mock http, etc.\n            if os.environ.get(\"BBOT_TESTING\", \"\") == \"True\":\n                kwargs[\"_loop\"] = get_event_loop()\n            kwargs[\"debug\"] = self._engine_debug\n            self.process = CORE.create_process(\n                target=self.server_process,\n                args=(\n                    self.SERVER_CLASS,\n                    self.socket_path,\n                ),\n                kwargs=kwargs,\n                custom_name=f\"BBOT {self.__class__.__name__}\",\n            )\n            self.process.start()\n            return self.process\n        else:\n            raise BBOTEngineError(\n                f\"Tried to start server from process {process_name}. 
Did you forget \\\"if __name__ == '__main__'?\\\"\"\n            )\n\n    @staticmethod\n    def server_process(server_class, socket_path, **kwargs):\n        try:\n            loop = kwargs.pop(\"_loop\", None)\n            engine_server = server_class(socket_path, **kwargs)\n            if loop is not None:\n                future = asyncio.run_coroutine_threadsafe(engine_server.worker(), loop)\n                future.result()\n            else:\n                asyncio.run(engine_server.worker())\n        except (asyncio.CancelledError, KeyboardInterrupt, CancelledError):\n            return\n        except Exception:\n            import traceback\n\n            log = logging.getLogger(\"bbot.core.engine.server\")\n            log.critical(f\"Unhandled error in {server_class.__name__} server process: {traceback.format_exc()}\")\n\n    @asynccontextmanager\n    async def new_socket(self):\n        if self._server_process is None:\n            self._server_process = self.start_server()\n            while not self.socket_path.exists():\n                self.engine_debug(f\"{self.name}: waiting for server process to start...\")\n                await asyncio.sleep(0.1)\n        socket = self.context.socket(zmq.DEALER)\n        socket.setsockopt(zmq.LINGER, 0)  # Discard pending messages immediately disconnect() or close()\n        socket.setsockopt(zmq.SNDHWM, 0)  # Unlimited send buffer\n        socket.setsockopt(zmq.RCVHWM, 0)  # Unlimited receive buffer\n        socket.connect(f\"ipc://{self.socket_path}\")\n        self.sockets.add(socket)\n        try:\n            yield socket\n        finally:\n            self.sockets.remove(socket)\n            with suppress(Exception):\n                socket.close()\n\n    async def shutdown(self):\n        if not self._shutdown_status:\n            self._shutdown_status = True\n            self.log.verbose(f\"{self.name}: shutting down...\")\n            # send shutdown signal\n            await self.send_shutdown_message()\n            # then terminate context\n            try:\n                self.context.destroy(linger=0)\n            except Exception:\n                print(traceback.format_exc(), file=sys.stderr)\n            try:\n                self.context.term()\n            except Exception:\n                print(traceback.format_exc(), file=sys.stderr)\n            # delete socket file on exit\n            self.socket_path.unlink(missing_ok=True)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineClient.send_cancel_message","title":"send_cancel_message  <code>async</code>","text":"<pre><code>send_cancel_message(socket, context)\n</code></pre> <p>Send a cancel message and wait for confirmation from the server</p> Source code in <code>bbot/core/engine.py</code> <pre><code>async def send_cancel_message(self, socket, context):\n    \"\"\"\n    Send a cancel message and wait for confirmation from the server\n    \"\"\"\n    # -1 == special \"cancel\" signal\n    message = pickle.dumps({\"c\": -1})\n    await self._infinite_retry(socket.send, message)\n    while 1:\n        response = await self._infinite_retry(\n            socket.recv, _context=f\"waiting for CANCEL_OK from {context}\", _max_retries=4\n        )\n        response = pickle.loads(response)\n        if isinstance(response, dict):\n            response = response.get(\"m\", \"\")\n            if response == \"CANCEL_OK\":\n                break\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer","title":"EngineServer","text":"<p>          
     Bases: <code>EngineBase</code></p> <p>The server portion of BBOT's RPC Engine.</p> <p>Methods defined here must match the methods in your EngineClient.</p> <p>To use the functions, you must create mappings for them in the CMDS attribute, as shown below.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from bbot.core.engine import EngineServer\n&gt;&gt;&gt;\n&gt;&gt;&gt; class MyServer(EngineServer):\n&gt;&gt;&gt;     CMDS = {\n&gt;&gt;&gt;         0: \"my_function\",\n&gt;&gt;&gt;         1: \"my_generator\",\n&gt;&gt;&gt;     }\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_function(self, arg1=None):\n&gt;&gt;&gt;         await asyncio.sleep(1)\n&gt;&gt;&gt;         return str(arg1)\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_generator(self):\n&gt;&gt;&gt;         for i in range(10):\n&gt;&gt;&gt;             await asyncio.sleep(1)\n&gt;&gt;&gt;             yield i\n</code></pre> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineServer(EngineBase):\n    \"\"\"\n    The server portion of BBOT's RPC Engine.\n\n    Methods defined here must match the methods in your EngineClient.\n\n    To use the functions, you must create mappings for them in the CMDS attribute, as shown below.\n\n    Examples:\n        &gt;&gt;&gt; from bbot.core.engine import EngineServer\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; class MyServer(EngineServer):\n        &gt;&gt;&gt;     CMDS = {\n        &gt;&gt;&gt;         0: \"my_function\",\n        &gt;&gt;&gt;         1: \"my_generator\",\n        &gt;&gt;&gt;     }\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_function(self, arg1=None):\n        &gt;&gt;&gt;         await asyncio.sleep(1)\n        &gt;&gt;&gt;         return str(arg1)\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_generator(self):\n        &gt;&gt;&gt;         for i in range(10):\n        &gt;&gt;&gt;             await asyncio.sleep(1)\n        &gt;&gt;&gt;             yield i\n    \"\"\"\n\n    CMDS = {}\n\n    def __init__(self, socket_path, debug=False):\n        self.name = f\"EngineServer {self.__class__.__name__}\"\n        super().__init__(debug=debug)\n        self.engine_debug(f\"{self.name}: finished setup 1 (_debug={self._engine_debug})\")\n        self.socket_path = socket_path\n        self.client_id_var = contextvars.ContextVar(\"client_id\", default=None)\n        # task &lt;--&gt; client id mapping\n        self.tasks = {}\n        # child tasks spawned by main tasks\n        self.child_tasks = {}\n        self.engine_debug(f\"{self.name}: finished setup 2 (_debug={self._engine_debug})\")\n        if self.socket_path is not None:\n            # create ZeroMQ context\n            self.context = zmq.asyncio.Context()\n            # ROUTER socket can handle multiple concurrent requests\n            self.socket = self.context.socket(zmq.ROUTER)\n            self.socket.setsockopt(zmq.LINGER, 0)  # Discard pending messages immediately disconnect() or close()\n            self.socket.setsockopt(zmq.SNDHWM, 0)  # Unlimited send buffer\n            self.socket.setsockopt(zmq.RCVHWM, 0)  # Unlimited receive buffer\n            # create socket file\n            self.socket.bind(f\"ipc://{self.socket_path}\")\n        self.engine_debug(f\"{self.name}: finished setup 3 (_debug={self._engine_debug})\")\n\n    @contextlib.contextmanager\n    def client_id_context(self, value):\n        token = self.client_id_var.set(value)\n        try:\n            yield\n        finally:\n            self.client_id_var.reset(token)\n\n    async def run_and_return(self, client_id, 
command_fn, *args, **kwargs):\n        fn_str = f\"{command_fn.__name__}({args}, {kwargs})\"\n        self.engine_debug(fn_str)\n        with self.client_id_context(client_id):\n            try:\n                self.engine_debug(f\"{self.name}: starting run-and-return {fn_str}\")\n                try:\n                    result = await command_fn(*args, **kwargs)\n                except BaseException as e:\n                    if in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        log_fn = self.log.debug\n                    else:\n                        log_fn = self.log.error\n                    error = f\"{self.name}: error in {fn_str}: {e}\"\n                    trace = traceback.format_exc()\n                    log_fn(error)\n                    self.log.trace(trace)\n                    result = {\"_e\": (error, trace)}\n                finally:\n                    self.tasks.pop(client_id, None)\n                    self.engine_debug(f\"{self.name}: sending response to {fn_str}: {result}\")\n                    await self.send_socket_multipart(client_id, result)\n            except BaseException as e:\n                self.log.critical(\n                    f\"Unhandled exception in {self.name}.run_and_return({client_id}, {command_fn}, {args}, {kwargs}): {e}\"\n                )\n                self.log.critical(traceback.format_exc())\n            finally:\n                self.engine_debug(f\"{self.name} finished run-and-return {fn_str}\")\n\n    async def run_and_yield(self, client_id, command_fn, *args, **kwargs):\n        fn_str = f\"{command_fn.__name__}({args}, {kwargs})\"\n        with self.client_id_context(client_id):\n            try:\n                self.engine_debug(f\"{self.name}: starting run-and-yield {fn_str}\")\n                try:\n                    async for _ in command_fn(*args, **kwargs):\n                        self.engine_debug(f\"{self.name}: sending iteration for {fn_str}: {_}\")\n                        await self.send_socket_multipart(client_id, _)\n                except BaseException as e:\n                    if in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        log_fn = self.log.debug\n                    else:\n                        log_fn = self.log.error\n                    error = f\"{self.name}: error in {fn_str}: {e}\"\n                    trace = traceback.format_exc()\n                    log_fn(error)\n                    self.log.trace(trace)\n                    result = {\"_e\": (error, trace)}\n                    await self.send_socket_multipart(client_id, result)\n                finally:\n                    self.engine_debug(f\"{self.name}: reached end of run-and-yield iteration for {fn_str}\")\n                    # _s == special signal that means StopIteration\n                    await self.send_socket_multipart(client_id, {\"_s\": None})\n                    self.tasks.pop(client_id, None)\n            except BaseException as e:\n                self.log.critical(\n                    f\"Unhandled exception in {self.name}.run_and_yield({client_id}, {command_fn}, {args}, {kwargs}): {e}\"\n                )\n                self.log.critical(traceback.format_exc())\n            finally:\n                self.engine_debug(f\"{self.name}: finished run-and-yield {fn_str}\")\n\n    async def send_socket_multipart(self, client_id, message):\n        try:\n            message = pickle.dumps(message)\n            await 
self._infinite_retry(self.socket.send_multipart, [client_id, message])\n        except Exception as e:\n            self.log.verbose(f\"{self.name}: error sending ZMQ message: {e}\")\n            self.log.trace(traceback.format_exc())\n\n    def check_error(self, message):\n        if message is error_sentinel:\n            return True\n\n    async def worker(self):\n        self.engine_debug(f\"{self.name}: starting worker\")\n        try:\n            while 1:\n                client_id, binary = await self.socket.recv_multipart()\n                message = self.unpickle(binary)\n                self.engine_debug(f\"{self.name} got message: {message}\")\n                if self.check_error(message):\n                    continue\n\n                cmd = message.get(\"c\", None)\n                if not isinstance(cmd, int):\n                    self.log.warning(f\"{self.name}: no command sent in message: {message}\")\n                    continue\n\n                # -1 == cancel task\n                if cmd == -1:\n                    self.engine_debug(f\"{self.name} got cancel signal\")\n                    await self.send_socket_multipart(client_id, {\"m\": \"CANCEL_OK\"})\n                    await self.cancel_task(client_id)\n                    continue\n\n                # -99 == shutdown task\n                if cmd == -99:\n                    self.log.verbose(f\"{self.name} got shutdown signal\")\n                    await self.send_socket_multipart(client_id, {\"m\": \"SHUTDOWN_OK\"})\n                    await self._shutdown()\n                    return\n\n                args = message.get(\"a\", ())\n                if not isinstance(args, tuple):\n                    self.log.warning(f\"{self.name}: received invalid args of type {type(args)}, should be tuple\")\n                    continue\n                kwargs = message.get(\"k\", {})\n                if not isinstance(kwargs, dict):\n                    self.log.warning(f\"{self.name}: received invalid kwargs of type {type(kwargs)}, should be dict\")\n                    continue\n\n                command_name = self.CMDS[cmd]\n                command_fn = getattr(self, command_name, None)\n\n                if command_fn is None:\n                    self.log.warning(f'{self.name} has no function named \"{command_fn}\"')\n                    continue\n\n                if inspect.isasyncgenfunction(command_fn):\n                    self.engine_debug(f\"{self.name}: creating run-and-yield coroutine for {command_name}()\")\n                    coroutine = self.run_and_yield(client_id, command_fn, *args, **kwargs)\n                else:\n                    self.engine_debug(f\"{self.name}: creating run-and-return coroutine for {command_name}()\")\n                    coroutine = self.run_and_return(client_id, command_fn, *args, **kwargs)\n\n                self.engine_debug(f\"{self.name}: creating task for {command_name}() coroutine\")\n                task = asyncio.create_task(coroutine)\n                self.tasks[client_id] = task, command_fn, args, kwargs\n                self.engine_debug(f\"{self.name}: finished creating task for {command_name}() coroutine\")\n        except BaseException as e:\n            await self._shutdown()\n            if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                self.log.error(f\"{self.name}: error in EngineServer worker: {e}\")\n                self.log.trace(traceback.format_exc())\n        finally:\n            
self.engine_debug(f\"{self.name}: finished worker()\")\n\n    async def _shutdown(self):\n        if not self._shutdown_status:\n            self.log.verbose(f\"{self.name}: shutting down...\")\n            self._shutdown_status = True\n            await self.cancel_all_tasks()\n            context = getattr(self, \"context\", None)\n            if context is not None:\n                try:\n                    context.destroy(linger=0)\n                except Exception:\n                    self.log.trace(traceback.format_exc())\n                try:\n                    context.term()\n                except Exception:\n                    self.log.trace(traceback.format_exc())\n            self.log.verbose(f\"{self.name}: finished shutting down\")\n\n    async def task_pool(self, fn, args_kwargs, threads=10, timeout=300, global_kwargs=None):\n        if global_kwargs is None:\n            global_kwargs = {}\n\n        tasks = {}\n        args_kwargs = list(args_kwargs)\n\n        def new_task():\n            if args_kwargs:\n                kwargs = {}\n                tracker = None\n                args = args_kwargs.pop(0)\n                if isinstance(args, (list, tuple)):\n                    # you can specify a custom tracker value if you want\n                    # this helps with correlating results\n                    with suppress(ValueError):\n                        args, kwargs, tracker = args\n                    # or you can just specify args/kwargs\n                    with suppress(ValueError):\n                        args, kwargs = args\n\n                if not isinstance(kwargs, dict):\n                    raise ValueError(f\"kwargs must be dict (got: {kwargs})\")\n                if not isinstance(args, (list, tuple)):\n                    args = [args]\n\n                task = self.new_child_task(fn(*args, **kwargs, **global_kwargs))\n                tasks[task] = (args, kwargs, tracker)\n\n        for _ in range(threads):  # Start initial batch of tasks\n            new_task()\n\n        while tasks:  # While there are tasks pending\n            # Wait for the first task to complete\n            finished = await self.finished_tasks(tasks, timeout=timeout)\n            for task in finished:\n                result = task.result()\n                (args, kwargs, tracker) = tasks.pop(task)\n                yield (args, kwargs, tracker), result\n                new_task()\n\n    def new_child_task(self, coro):\n        \"\"\"\n        Create a new asyncio task, making sure to track it based on the client id.\n\n        This allows the task to be automatically cancelled if its parent is cancelled.\n        \"\"\"\n        client_id = self.client_id_var.get()\n        task = asyncio.create_task(coro)\n\n        if client_id:\n\n            def remove_task(t):\n                tasks = self.child_tasks.get(client_id, set())\n                tasks.discard(t)\n                if not tasks:\n                    self.child_tasks.pop(client_id, None)\n\n            task.add_done_callback(remove_task)\n\n            try:\n                self.child_tasks[client_id].add(task)\n            except KeyError:\n                self.child_tasks[client_id] = {task}\n\n        return task\n\n    async def finished_tasks(self, tasks, timeout=None):\n        \"\"\"\n        Given a list of asyncio tasks, return the ones that are finished with an optional timeout\n        \"\"\"\n        if tasks:\n            try:\n                done, pending = await asyncio.wait(tasks, 
return_when=asyncio.FIRST_COMPLETED, timeout=timeout)\n                return done\n            except BaseException as e:\n                if isinstance(e, (TimeoutError, asyncio.exceptions.TimeoutError)):\n                    self.log.warning(f\"{self.name}: Timeout after {timeout:,} seconds in finished_tasks({tasks})\")\n                    for task in list(tasks):\n                        task.cancel()\n                        self._await_cancelled_task(task)\n                else:\n                    if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        self.log.error(f\"{self.name}: Unhandled exception in finished_tasks({tasks}): {e}\")\n                        self.log.trace(traceback.format_exc())\n                    raise\n        return set()\n\n    async def cancel_task(self, client_id):\n        parent_task = self.tasks.pop(client_id, None)\n        if parent_task is None:\n            return\n        parent_task, _cmd, _args, _kwargs = parent_task\n        self.engine_debug(f\"{self.name}: Cancelling client id {client_id} (task: {parent_task})\")\n        parent_task.cancel()\n        child_tasks = self.child_tasks.pop(client_id, set())\n        if child_tasks:\n            self.engine_debug(f\"{self.name}: Cancelling {len(child_tasks):,} child tasks for client id {client_id}\")\n            for child_task in child_tasks:\n                child_task.cancel()\n\n        for task in [parent_task] + list(child_tasks):\n            await self._await_cancelled_task(task)\n\n    async def _await_cancelled_task(self, task):\n        try:\n            await asyncio.wait_for(task, timeout=10)\n        except (TimeoutError, asyncio.exceptions.TimeoutError):\n            self.log.trace(f\"{self.name}: Timeout cancelling task: {task}\")\n            return\n        except (KeyboardInterrupt, asyncio.CancelledError):\n            return\n        except BaseException as e:\n            self.log.error(f\"Unhandled error in {task.get_coro().__name__}(): {e}\")\n            self.log.trace(traceback.format_exc())\n\n    async def cancel_all_tasks(self):\n        for client_id in list(self.tasks):\n            await self.cancel_task(client_id)\n        for client_id, tasks in self.child_tasks.items():\n            for task in list(tasks):\n                await self._await_cancelled_task(task)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer.finished_tasks","title":"finished_tasks  <code>async</code>","text":"<pre><code>finished_tasks(tasks, timeout=None)\n</code></pre> <p>Given a list of asyncio tasks, return the ones that are finished with an optional timeout</p> Source code in <code>bbot/core/engine.py</code> <pre><code>async def finished_tasks(self, tasks, timeout=None):\n    \"\"\"\n    Given a list of asyncio tasks, return the ones that are finished with an optional timeout\n    \"\"\"\n    if tasks:\n        try:\n            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED, timeout=timeout)\n            return done\n        except BaseException as e:\n            if isinstance(e, (TimeoutError, asyncio.exceptions.TimeoutError)):\n                self.log.warning(f\"{self.name}: Timeout after {timeout:,} seconds in finished_tasks({tasks})\")\n                for task in list(tasks):\n                    task.cancel()\n                    self._await_cancelled_task(task)\n            else:\n                if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n           
         self.log.error(f\"{self.name}: Unhandled exception in finished_tasks({tasks}): {e}\")\n                    self.log.trace(traceback.format_exc())\n                raise\n    return set()\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer.new_child_task","title":"new_child_task","text":"<pre><code>new_child_task(coro)\n</code></pre> <p>Create a new asyncio task, making sure to track it based on the client id.</p> <p>This allows the task to be automatically cancelled if its parent is cancelled.</p> Source code in <code>bbot/core/engine.py</code> <pre><code>def new_child_task(self, coro):\n    \"\"\"\n    Create a new asyncio task, making sure to track it based on the client id.\n\n    This allows the task to be automatically cancelled if its parent is cancelled.\n    \"\"\"\n    client_id = self.client_id_var.get()\n    task = asyncio.create_task(coro)\n\n    if client_id:\n\n        def remove_task(t):\n            tasks = self.child_tasks.get(client_id, set())\n            tasks.discard(t)\n            if not tasks:\n                self.child_tasks.pop(client_id, None)\n\n        task.add_done_callback(remove_task)\n\n        try:\n            self.child_tasks[client_id].add(task)\n        except KeyError:\n            self.child_tasks[client_id] = {task}\n\n    return task\n</code></pre>"},{"location":"dev/event/","title":"Event","text":"<p>This is a developer reference. For a high-level description of BBOT events including a full list of event types, see Events</p>"},{"location":"dev/event/#bbot.core.event.base.make_event","title":"make_event","text":"<pre><code>make_event(data, event_type=None, parent=None, context=None, module=None, scan=None, scans=None, tags=None, confidence=100, dummy=False, internal=None)\n</code></pre> <p>Creates and returns a new event object or modifies an existing one.</p> <p>This function serves as a factory for creating new event objects, either by generating a new <code>Event</code> object or by updating an existing event with additional metadata. If <code>data</code> is already an event, it updates the event based on the additional parameters provided.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>Union[str, dict, BaseEvent]</code>)           \u2013            <p>The primary data for the event or an existing event object.</p> </li> <li> <code>event_type</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Type of the event, e.g., 'IP_ADDRESS'. 
Auto-detected if not provided.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>, default:                   <code>None</code> )           \u2013            <p>Parent event leading to this event's discovery.</p> </li> <li> <code>context</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Description of circumstances leading to event's discovery.</p> </li> <li> <code>module</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Module that discovered the event.</p> </li> <li> <code>scan</code>               (<code>Scan</code>, default:                   <code>None</code> )           \u2013            <p>BBOT Scan object associated with the event.</p> </li> <li> <code>scans</code>               (<code>List[Scan]</code>, default:                   <code>None</code> )           \u2013            <p>Multiple BBOT Scan objects, primarily used for unserialization.</p> </li> <li> <code>tags</code>               (<code>Union[str, List[str]]</code>, default:                   <code>None</code> )           \u2013            <p>Descriptive tags for the event, as a list or a single string.</p> </li> <li> <code>confidence</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>Confidence level for the event, on a scale of 1-100. Defaults to 100.</p> </li> <li> <code>dummy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Disables data validations if set to True. Defaults to False.</p> </li> <li> <code>internal</code>               (<code>Any</code>, default:                   <code>None</code> )           \u2013            <p>Makes the event internal if set to True. Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>BaseEvent</code>          \u2013            <p>A new or updated event object.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>Raised when there's an error in event data or type sanitization.</p> </li> </ul> <p>Examples:</p> <p>If inside a module, e.g. 
from within its <code>handle_event()</code>:</p> <pre><code>&gt;&gt;&gt; self.make_event(\"1.2.3.4\", parent=event)\nIP_ADDRESS(\"1.2.3.4\", module=portscan, tags={'ipv4', 'distance-1'})\n</code></pre> <p>If you're outside a module but you have a scan object:</p> <pre><code>&gt;&gt;&gt; scan.make_event(\"1.2.3.4\", parent=scan.root_event)\nIP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4', 'distance-1'})\n</code></pre> <p>If you're outside a scan and just messing around:</p> <pre><code>&gt;&gt;&gt; from bbot.core.event.base import make_event\n&gt;&gt;&gt; make_event(\"1.2.3.4\", dummy=True)\nIP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4'})\n</code></pre> Note <p>When working within a module's <code>handle_event()</code>, use the instance method <code>self.make_event()</code> instead of calling this function directly.</p> Source code in <code>bbot/core/event/base.py</code> <pre><code>def make_event(\n    data,\n    event_type=None,\n    parent=None,\n    context=None,\n    module=None,\n    scan=None,\n    scans=None,\n    tags=None,\n    confidence=100,\n    dummy=False,\n    internal=None,\n):\n    \"\"\"\n    Creates and returns a new event object or modifies an existing one.\n\n    This function serves as a factory for creating new event objects, either by generating a new `Event`\n    object or by updating an existing event with additional metadata. If `data` is already an event,\n    it updates the event based on the additional parameters provided.\n\n    Parameters:\n        data (Union[str, dict, BaseEvent]): The primary data for the event or an existing event object.\n        event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'. Auto-detected if not provided.\n        parent (BaseEvent, optional): Parent event leading to this event's discovery.\n        context (str, optional): Description of circumstances leading to event's discovery.\n        module (str, optional): Module that discovered the event.\n        scan (Scan, optional): BBOT Scan object associated with the event.\n        scans (List[Scan], optional): Multiple BBOT Scan objects, primarily used for unserialization.\n        tags (Union[str, List[str]], optional): Descriptive tags for the event, as a list or a single string.\n        confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n        dummy (bool, optional): Disables data validations if set to True. Defaults to False.\n        internal (Any, optional): Makes the event internal if set to True. Defaults to None.\n\n    Returns:\n        BaseEvent: A new or updated event object.\n\n    Raises:\n        ValidationError: Raised when there's an error in event data or type sanitization.\n\n    Examples:\n        If inside a module, e.g. 
from within its `handle_event()`:\n        &gt;&gt;&gt; self.make_event(\"1.2.3.4\", parent=event)\n        IP_ADDRESS(\"1.2.3.4\", module=portscan, tags={'ipv4', 'distance-1'})\n\n        If you're outside a module but you have a scan object:\n        &gt;&gt;&gt; scan.make_event(\"1.2.3.4\", parent=scan.root_event)\n        IP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4', 'distance-1'})\n\n        If you're outside a scan and just messing around:\n        &gt;&gt;&gt; from bbot.core.event.base import make_event\n        &gt;&gt;&gt; make_event(\"1.2.3.4\", dummy=True)\n        IP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4'})\n\n    Note:\n        When working within a module's `handle_event()`, use the instance method\n        `self.make_event()` instead of calling this function directly.\n    \"\"\"\n\n    # allow tags to be either a string or an array\n    if not tags:\n        tags = []\n    elif isinstance(tags, str):\n        tags = [tags]\n    tags = set(tags)\n\n    if is_event(data):\n        event = copy(data)\n        if scan is not None and not event.scan:\n            event.scan = scan\n        if scans is not None and not event.scans:\n            event.scans = scans\n        if module is not None:\n            event.module = module\n        if parent is not None:\n            event.parent = parent\n        if context is not None:\n            event.discovery_context = context\n        if internal == True:\n            event.internal = True\n        if tags:\n            event.tags = tags.union(event.tags)\n        event_type = data.type\n        return event\n    else:\n        if event_type is None:\n            event_type, data = get_event_type(data)\n            if not dummy:\n                log.debug(f'Autodetected event type \"{event_type}\" based on data: \"{data}\"')\n\n        event_type = str(event_type).strip().upper()\n\n        # Catch these common whoopsies\n        if event_type in (\"DNS_NAME\", \"IP_ADDRESS\"):\n            # DNS_NAME &lt;--&gt; EMAIL_ADDRESS confusion\n            if validators.soft_validate(data, \"email\"):\n                event_type = \"EMAIL_ADDRESS\"\n            else:\n                # DNS_NAME &lt;--&gt; IP_ADDRESS confusion\n                try:\n                    data = validators.validate_host(data)\n                except Exception as e:\n                    log.trace(traceback.format_exc())\n                    raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{event_type}\": {e}')\n                data_is_ip = is_ip(data)\n                if event_type == \"DNS_NAME\" and data_is_ip:\n                    event_type = \"IP_ADDRESS\"\n                elif event_type == \"IP_ADDRESS\" and not data_is_ip:\n                    event_type = \"DNS_NAME\"\n        # USERNAME &lt;--&gt; EMAIL_ADDRESS confusion\n        if event_type == \"USERNAME\" and validators.soft_validate(data, \"email\"):\n            event_type = \"EMAIL_ADDRESS\"\n            tags.add(\"affiliate\")\n        # Convert single-host IP_RANGE to IP_ADDRESS\n        if event_type == \"IP_RANGE\":\n            with suppress(Exception):\n                net = ipaddress.ip_network(data, strict=False)\n                if net.prefixlen == net.max_prefixlen:\n                    event_type = \"IP_ADDRESS\"\n                    data = net.network_address\n\n        event_class = globals().get(event_type, DefaultEvent)\n\n        return event_class(\n            data,\n            event_type=event_type,\n            parent=parent,\n          
  context=context,\n            module=module,\n            scan=scan,\n            scans=scans,\n            tags=tags,\n            confidence=confidence,\n            _dummy=dummy,\n            _internal=internal,\n        )\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.event_from_json","title":"event_from_json","text":"<pre><code>event_from_json(j, siem_friendly=False)\n</code></pre> <p>Creates an event object from a JSON dictionary.</p> <p>This function deserializes a JSON dictionary to create a new event object, using the <code>make_event</code> function for the actual object creation. It sets additional attributes such as the timestamp and scope distance based on the input JSON.</p> <p>Parameters:</p> <ul> <li> <code>j</code>               (<code>Dict</code>)           \u2013            <p>JSON dictionary containing the event attributes.       Must include keys \"data\" and \"type\".</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>BaseEvent</code>          \u2013            <p>A new event object initialized with attributes from the JSON dictionary.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>Raised when the JSON dictionary is missing required fields.</p> </li> </ul> Note <p>The function assumes that the input JSON dictionary is valid and may raise exceptions if required keys are missing. Make sure to validate the JSON input beforehand.</p> Source code in <code>bbot/core/event/base.py</code> <pre><code>def event_from_json(j, siem_friendly=False):\n    \"\"\"\n    Creates an event object from a JSON dictionary.\n\n    This function deserializes a JSON dictionary to create a new event object, using the `make_event` function\n    for the actual object creation. It sets additional attributes such as the timestamp and scope distance\n    based on the input JSON.\n\n    Parameters:\n        j (Dict): JSON dictionary containing the event attributes.\n                  Must include keys \"data\" and \"type\".\n\n    Returns:\n        BaseEvent: A new event object initialized with attributes from the JSON dictionary.\n\n    Raises:\n        ValidationError: Raised when the JSON dictionary is missing required fields.\n\n    Note:\n        The function assumes that the input JSON dictionary is valid and may raise exceptions\n        if required keys are missing. 
Make sure to validate the JSON input beforehand.\n    \"\"\"\n    try:\n        event_type = j[\"type\"]\n        kwargs = {\n            \"event_type\": event_type,\n            \"scans\": j.get(\"scans\", []),\n            \"tags\": j.get(\"tags\", []),\n            \"confidence\": j.get(\"confidence\", 100),\n            \"context\": j.get(\"discovery_context\", None),\n            \"dummy\": True,\n        }\n        if siem_friendly:\n            data = j[\"data\"][event_type]\n        else:\n            data = j[\"data\"]\n        kwargs[\"data\"] = data\n        event = make_event(**kwargs)\n        event_uuid = j.get(\"uuid\", None)\n        if event_uuid is not None:\n            event._uuid = uuid.UUID(event_uuid.split(\":\")[-1])\n\n        resolved_hosts = j.get(\"resolved_hosts\", [])\n        event._resolved_hosts = set(resolved_hosts)\n\n        event.timestamp = datetime.datetime.fromisoformat(j[\"timestamp\"])\n        event.scope_distance = j[\"scope_distance\"]\n        parent_id = j.get(\"parent\", None)\n        if parent_id is not None:\n            event._parent_id = parent_id\n        parent_uuid = j.get(\"parent_uuid\", None)\n        if parent_uuid is not None:\n            parent_type, parent_uuid = parent_uuid.split(\":\", 1)\n            event._parent_uuid = parent_type + \":\" + str(uuid.UUID(parent_uuid))\n        return event\n    except KeyError as e:\n        raise ValidationError(f\"Event missing required field: {e}\")\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent","title":"BaseEvent","text":"<p>Represents a piece of data discovered during a BBOT scan.</p> <p>An Event contains various attributes that provide metadata about the discovered data. The attributes assist in understanding the context of the Event and facilitate further filtering and querying. Events are integral in the construction of visual graphs and are the cornerstone of data exchange between BBOT modules.</p> <p>You can inherit from this class when creating a new event type. However, it's not always necessary. You only need to subclass if you want to layer additional functionality on top of the base class.</p> <p>Attributes:</p> <ul> <li> <code>type</code>               (<code>str</code>)           \u2013            <p>Specifies the type of the event, e.g., <code>IP_ADDRESS</code>, <code>DNS_NAME</code>.</p> </li> <li> <code>id</code>               (<code>str</code>)           \u2013            <p>An identifier for the event (event type + sha1 hash of data). NOT universally unique.</p> </li> <li> <code>uuid</code>               (<code>UUID</code>)           \u2013            <p>A universally unique identifier for the event.</p> </li> <li> <code>data</code>               (<code>str or dict</code>)           \u2013            <p>The main data for the event, e.g., a URL or IP address.</p> </li> <li> <code>data_graph</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> for graph nodes (e.g. 
Neo4j).</p> </li> <li> <code>data_human</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> for human output.</p> </li> <li> <code>data_id</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> used to calculate the event's ID (and ultimately its hash, which is used for deduplication)</p> </li> <li> <code>data_json</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> to be used in JSON serialization.</p> </li> <li> <code>host</code>               (<code>str, IPvXAddress, or IPvXNetwork</code>)           \u2013            <p>The associated IP address or hostname for the event</p> </li> <li> <code>host_stem</code>               (<code>str</code>)           \u2013            <p>An abbreviated representation of hostname that removes the TLD, e.g. \"www.evilcorp\". Used by the word cloud.</p> </li> <li> <code>port</code>               (<code>int or None</code>)           \u2013            <p>The port associated with the event, if applicable, else None.</p> </li> <li> <code>words</code>               (<code>set</code>)           \u2013            <p>A list of relevant keywords extracted from the event. Used by the word cloud.</p> </li> <li> <code>scope_distance</code>               (<code>int</code>)           \u2013            <p>Indicates how many hops the event is from the main scope; 0 means in-scope.</p> </li> <li> <code>web_spider_distance</code>               (<code>int</code>)           \u2013            <p>The spider distance from the web root, specific to web crawling.</p> </li> <li> <code>scan</code>               (<code>Scanner</code>)           \u2013            <p>The scan object that generated the event.</p> </li> <li> <code>timestamp</code>               (<code>datetime</code>)           \u2013            <p>The time at which the data was discovered.</p> </li> <li> <code>resolved_hosts</code>               (<code>list of str</code>)           \u2013            <p>List of hosts to which the event data resolves, applicable for URLs and DNS names.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>)           \u2013            <p>The parent event that led to the discovery of this event.</p> </li> <li> <code>parent_id</code>               (<code>str</code>)           \u2013            <p>The <code>id</code> attribute of the parent event.</p> </li> <li> <code>parent_uuid</code>               (<code>str</code>)           \u2013            <p>The <code>uuid</code> attribute of the parent event.</p> </li> <li> <code>tags</code>               (<code>set of str</code>)           \u2013            <p>Descriptive tags for the event, e.g., <code>mx-record</code>, <code>in-scope</code>.</p> </li> <li> <code>module</code>               (<code>BaseModule</code>)           \u2013            <p>The module that discovered the event.</p> </li> <li> <code>module_sequence</code>               (<code>str</code>)           \u2013            <p>The sequence of modules that participated in the discovery.</p> </li> </ul> <p>Examples:</p> <pre><code>{\n    \"type\": \"URL\",\n    \"id\": \"URL:017ec8e5dc158c0fd46f07169f8577fb4b45e89a\",\n    \"data\": \"http://www.blacklanternsecurity.com/\",\n    \"web_spider_distance\": 0,\n    \"scope_distance\": 0,\n    \"scan\": \"SCAN:4d786912dbc97be199da13074699c318e2067a7f\",\n    \"timestamp\": 1688526222.723366,\n    \"resolved_hosts\": [\"185.199.108.153\"],\n    
\"parent\": \"OPEN_TCP_PORT:cf7e6a937b161217eaed99f0c566eae045d094c7\",\n    \"tags\": [\"in-scope\", \"distance-0\", \"dir\", \"ip-185-199-108-153\", \"status-301\", \"http-title-301-moved-permanently\"],\n    \"module\": \"httpx\",\n    \"module_sequence\": \"httpx\"\n}\n</code></pre> Source code in <code>bbot/core/event/base.py</code> <pre><code>class BaseEvent:\n    \"\"\"\n    Represents a piece of data discovered during a BBOT scan.\n\n    An Event contains various attributes that provide metadata about the discovered data.\n    The attributes assist in understanding the context of the Event and facilitate further\n    filtering and querying. Events are integral in the construction of visual graphs and\n    are the cornerstone of data exchange between BBOT modules.\n\n    You can inherit from this class when creating a new event type. However, it's not always\n    necessary. You only need to subclass if you want to layer additional functionality on\n    top of the base class.\n\n    Attributes:\n        type (str): Specifies the type of the event, e.g., `IP_ADDRESS`, `DNS_NAME`.\n        id (str): An identifier for the event (event type + sha1 hash of data). NOT universally unique.\n        uuid (UUID): A universally unique identifier for the event.\n        data (str or dict): The main data for the event, e.g., a URL or IP address.\n        data_graph (str): Representation of `self.data` for graph nodes (e.g. Neo4j).\n        data_human (str): Representation of `self.data` for human output.\n        data_id (str): Representation of `self.data` used to calculate the event's ID (and ultimately its hash, which is used for deduplication)\n        data_json (str): Representation of `self.data` to be used in JSON serialization.\n        host (str, IPvXAddress, or IPvXNetwork): The associated IP address or hostname for the event\n        host_stem (str): An abbreviated representation of hostname that removes the TLD, e.g. \"www.evilcorp\". Used by the word cloud.\n        port (int or None): The port associated with the event, if applicable, else None.\n        words (set): A list of relevant keywords extracted from the event. 
Used by the word cloud.\n        scope_distance (int): Indicates how many hops the event is from the main scope; 0 means in-scope.\n        web_spider_distance (int): The spider distance from the web root, specific to web crawling.\n        scan (Scanner): The scan object that generated the event.\n        timestamp (datetime.datetime): The time at which the data was discovered.\n        resolved_hosts (list of str): List of hosts to which the event data resolves, applicable for URLs and DNS names.\n        parent (BaseEvent): The parent event that led to the discovery of this event.\n        parent_id (str): The `id` attribute of the parent event.\n        parent_uuid (str): The `uuid` attribute of the parent event.\n        tags (set of str): Descriptive tags for the event, e.g., `mx-record`, `in-scope`.\n        module (BaseModule): The module that discovered the event.\n        module_sequence (str): The sequence of modules that participated in the discovery.\n\n    Examples:\n        ```json\n        {\n            \"type\": \"URL\",\n            \"id\": \"URL:017ec8e5dc158c0fd46f07169f8577fb4b45e89a\",\n            \"data\": \"http://www.blacklanternsecurity.com/\",\n            \"web_spider_distance\": 0,\n            \"scope_distance\": 0,\n            \"scan\": \"SCAN:4d786912dbc97be199da13074699c318e2067a7f\",\n            \"timestamp\": 1688526222.723366,\n            \"resolved_hosts\": [\"185.199.108.153\"],\n            \"parent\": \"OPEN_TCP_PORT:cf7e6a937b161217eaed99f0c566eae045d094c7\",\n            \"tags\": [\"in-scope\", \"distance-0\", \"dir\", \"ip-185-199-108-153\", \"status-301\", \"http-title-301-moved-permanently\"],\n            \"module\": \"httpx\",\n            \"module_sequence\": \"httpx\"\n        }\n        ```\n    \"\"\"\n\n    # Always emit this event type even if it's not in scope\n    _always_emit = False\n    # Always emit events with these tags even if they're not in scope\n    _always_emit_tags = [\"affiliate\", \"target\"]\n    # Bypass scope checking and dns resolution, distribute immediately to modules\n    # This is useful for \"end-of-line\" events like FINDING and VULNERABILITY\n    _quick_emit = False\n    # Whether this event has been retroactively marked as part of an important discovery chain\n    _graph_important = False\n    # Disables certain data validations\n    _dummy = False\n    # Data validation, if data is a dictionary\n    _data_validator = None\n    # Whether to increment scope distance if the child and parent hosts are the same\n    _scope_distance_increment_same_host = False\n    # Don't allow duplicates to occur within a parent chain\n    # In other words, don't emit the event if the same one already exists in its discovery context\n    _suppress_chain_dupes = False\n\n    def __init__(\n        self,\n        data,\n        event_type,\n        parent=None,\n        context=None,\n        module=None,\n        scan=None,\n        scans=None,\n        tags=None,\n        confidence=100,\n        timestamp=None,\n        _dummy=False,\n        _internal=None,\n    ):\n        \"\"\"\n        Initializes an Event object with the given parameters.\n\n        In most cases, you should use `make_event()` instead of instantiating this class directly.\n        `make_event()` is much friendlier, and can auto-detect the event type for you.\n\n        Attributes:\n            data (str, dict): The primary data for the event.\n            event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'.\n            parent 
(BaseEvent, optional): Parent event that led to this event's discovery. Defaults to None.\n            module (str, optional): Module that discovered the event. Defaults to None.\n            scan (Scan, optional): BBOT Scan object. Required unless _dummy is True. Defaults to None.\n            scans (list of Scan, optional): BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.\n            tags (list of str, optional): Descriptive tags for the event. Defaults to None.\n            confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n            timestamp (datetime, optional): Time of event discovery. Defaults to current UTC time.\n            _dummy (bool, optional): If True, disables certain data validations. Defaults to False.\n            _internal (Any, optional): If specified, makes the event internal. Defaults to None.\n\n        Raises:\n            ValidationError: If either `scan` or `parent` are not specified and `_dummy` is False.\n        \"\"\"\n        self._uuid = uuid.uuid4()\n        self._id = None\n        self._hash = None\n        self._data = None\n        self.__host = None\n        self._tags = set()\n        self._port = None\n        self._omit = False\n        self.__words = None\n        self._parent = None\n        self._priority = None\n        self._parent_id = None\n        self._parent_uuid = None\n        self._host_original = None\n        self._scope_distance = None\n        self._module_priority = None\n        self._resolved_hosts = set()\n        self.dns_children = dict()\n        self.raw_dns_records = dict()\n        self._discovery_context = \"\"\n        self._discovery_context_regex = re.compile(r\"\\{(?:event|module)[^}]*\\}\")\n        self.web_spider_distance = 0\n\n        # for creating one-off events without enforcing parent requirement\n        self._dummy = _dummy\n        self.module = module\n        self._type = event_type\n\n        # keep track of whether this event has been recorded by the scan\n        self._stats_recorded = False\n\n        if timestamp is not None:\n            self.timestamp = timestamp\n        else:\n            try:\n                self.timestamp = datetime.datetime.now(datetime.UTC)\n            except AttributeError:\n                self.timestamp = datetime.datetime.utcnow()\n\n        self.confidence = int(confidence)\n        self._internal = False\n\n        # self.scan holds the instantiated scan object (for helpers, etc.)\n        self.scan = scan\n        if (not self.scan) and (not self._dummy):\n            raise ValidationError(f\"Must specify scan\")\n        # self.scans holds a list of scan IDs from scans that encountered this event\n        self.scans = []\n        if scans is not None:\n            self.scans = scans\n        if self.scan:\n            self.scans = list(set([self.scan.id] + self.scans))\n\n        try:\n            self.data = self._sanitize_data(data)\n        except Exception as e:\n            log.trace(traceback.format_exc())\n            raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{self.type}\": {e}')\n\n        if not self.data:\n            raise ValidationError(f'Invalid event data \"{data}\" for type \"{self.type}\"')\n\n        self.parent = parent\n        if (not self.parent) and (not self._dummy):\n            raise ValidationError(f\"Must specify event parent\")\n\n        if tags is not None:\n            for tag in tags:\n               
 self.add_tag(tag)\n\n        # internal events are not ingested by output modules\n        if not self._dummy:\n            # removed this second part because it was making certain sslcert events internal\n            if _internal:  # or parent._internal:\n                self.internal = True\n\n        if not context:\n            context = getattr(self.module, \"default_discovery_context\", \"\")\n        if context:\n            self.discovery_context = context\n\n    @property\n    def data(self):\n        return self._data\n\n    @property\n    def confidence(self):\n        return self._confidence\n\n    @confidence.setter\n    def confidence(self, confidence):\n        self._confidence = min(100, max(1, int(confidence)))\n\n    @property\n    def cumulative_confidence(self):\n        \"\"\"\n        Considers the confidence of parent events. This is useful for filtering out speculative/unreliable events.\n\n        E.g. an event with a confidence of 50 whose parent is also 50 would have a cumulative confidence of 25.\n\n        A confidence of 100 will reset the cumulative confidence to 100.\n        \"\"\"\n        if self._confidence == 100 or self.parent is None or self.parent is self:\n            return self._confidence\n        return int(self._confidence * self.parent.cumulative_confidence / 100)\n\n    @property\n    def resolved_hosts(self):\n        if is_ip(self.host):\n            return {\n                self.host,\n            }\n        return self._resolved_hosts\n\n    @data.setter\n    def data(self, data):\n        self._hash = None\n        self._data_hash = None\n        self._id = None\n        self.__host = None\n        self._port = None\n        self._data = data\n\n    @property\n    def internal(self):\n        return self._internal\n\n    @internal.setter\n    def internal(self, value):\n        \"\"\"\n        Marks the event as internal, excluding it from output but allowing normal exchange between scan modules.\n\n        Internal events are typically speculative and may not be interesting by themselves but can lead to\n        the discovery of interesting events. This method sets the `_internal` attribute to True and adds the\n        \"internal\" tag.\n\n        Examples of internal events include `OPEN_TCP_PORT`s from the `speculate` module,\n        `IP_ADDRESS`es from the `ipneighbor` module, or out-of-scope `DNS_NAME`s that originate\n        from DNS resolutions.\n\n        The purpose of internal events is to enable speculative/explorative discovery without cluttering\n        the console with irrelevant or uninteresting events.\n        \"\"\"\n        if not value in (True, False):\n            raise ValueError(f'\"internal\" must be boolean, not {type(value)}')\n        if value == True:\n            self.add_tag(\"internal\")\n        else:\n            self.remove_tag(\"internal\")\n        self._internal = value\n\n    @property\n    def host(self):\n        \"\"\"\n        An abbreviated representation of the data that allows comparison with other events.\n        For host types, this is a hostname.\n        This allows comparison of an email or a URL with a domain, and vice versa\n            bob@evilcorp.com        --&gt; evilcorp.com\n            https://evilcorp.com    --&gt; evilcorp.com\n            evilcorp.com:80         --&gt; evilcorp.com\n\n        For IP_* types, this is an instantiated object representing the event's data\n        E.g. 
for IP_ADDRESS, it could be an ipaddress.IPv4Address() or IPv6Address() object\n        \"\"\"\n        if self.__host is None:\n            self.host = self._host()\n        return self.__host\n\n    @host.setter\n    def host(self, host):\n        if self._host_original is None:\n            self._host_original = host\n        self.__host = host\n\n    @property\n    def host_original(self):\n        \"\"\"\n        Original host data, in case it was changed due to a wildcard DNS, etc.\n        \"\"\"\n        if self._host_original is None:\n            return self.host\n        return self._host_original\n\n    @property\n    def host_filterable(self):\n        \"\"\"\n        A string version of the event that's used for regex-based blacklisting.\n\n        For example, the user can specify \"REGEX:.*.evilcorp.com\" in their blacklist, and this regex\n        will be applied against this property.\n        \"\"\"\n        parsed_url = getattr(self, \"parsed_url\", None)\n        if parsed_url is not None:\n            return parsed_url.geturl()\n        if self.host is not None:\n            return str(self.host)\n        return \"\"\n\n    @property\n    def port(self):\n        self.host\n        if getattr(self, \"parsed_url\", None):\n            if self.parsed_url.port is not None:\n                return self.parsed_url.port\n            elif self.parsed_url.scheme == \"https\":\n                return 443\n            elif self.parsed_url.scheme == \"http\":\n                return 80\n        return self._port\n\n    @property\n    def netloc(self):\n        if self.host and is_ip_type(self.host, network=False):\n            return make_netloc(self.host, self.port)\n        return None\n\n    @property\n    def host_stem(self):\n        \"\"\"\n        An abbreviated representation of hostname that removes the TLD\n            E.g. 
www.evilcorp.com --&gt; www.evilcorp\n        \"\"\"\n        if self.host and type(self.host) == str:\n            return domain_stem(self.host)\n        else:\n            return f\"{self.host}\"\n\n    @property\n    def discovery_context(self):\n        return self._discovery_context\n\n    @discovery_context.setter\n    def discovery_context(self, context):\n        def replace(match):\n            s = match.group()\n            return s.format(module=self.module, event=self)\n\n        try:\n            self._discovery_context = self._discovery_context_regex.sub(replace, context)\n        except Exception as e:\n            log.trace(f\"Error formatting discovery context for {self}: {e} (context: '{context}')\")\n            self._discovery_context = context\n\n    @property\n    def discovery_path(self):\n        \"\"\"\n        This event's full discovery context, including those of all its parents\n        \"\"\"\n        discovery_path = []\n        if self.parent is not None and self.parent is not self:\n            discovery_path = self.parent.discovery_path\n        return discovery_path + [self.discovery_context]\n\n    @property\n    def parent_chain(self):\n        \"\"\"\n        This event's full discovery context, including those of all its parents\n        \"\"\"\n        parent_chain = []\n        if self.parent is not None and self.parent is not self:\n            parent_chain = self.parent.parent_chain\n        return parent_chain + [str(self.uuid)]\n\n    @property\n    def words(self):\n        if self.__words is None:\n            self.__words = set(self._words())\n        return self.__words\n\n    def _words(self):\n        return set()\n\n    @property\n    def tags(self):\n        return self._tags\n\n    @tags.setter\n    def tags(self, tags):\n        self._tags = set()\n        if isinstance(tags, str):\n            tags = (tags,)\n        for tag in tags:\n            self.add_tag(tag)\n\n    def add_tag(self, tag):\n        self._tags.add(tagify(tag))\n\n    def add_tags(self, tags):\n        for tag in set(tags):\n            self.add_tag(tag)\n\n    def remove_tag(self, tag):\n        with suppress(KeyError):\n            self._tags.remove(tagify(tag))\n\n    @property\n    def always_emit(self):\n        \"\"\"\n        If this returns True, the event will always be distributed to output modules regardless of scope distance\n        \"\"\"\n        always_emit_tags = any(t in self.tags for t in self._always_emit_tags)\n        no_host_information = not bool(self.host)\n        return self._always_emit or always_emit_tags or no_host_information\n\n    @property\n    def id(self):\n        \"\"\"\n        A uniquely identifiable hash of the event from the event type + a SHA1 of its data\n        \"\"\"\n        if self._id is None:\n            self._id = f\"{self.type}:{self.data_hash.hex()}\"\n        return self._id\n\n    @property\n    def uuid(self):\n        \"\"\"\n        A universally unique identifier for the event\n        \"\"\"\n        return f\"{self.type}:{self._uuid}\"\n\n    @property\n    def data_hash(self):\n        \"\"\"\n        A raw byte hash of the event's data\n        \"\"\"\n        if self._data_hash is None:\n            self._data_hash = sha1(self.data_id).digest()\n        return self._data_hash\n\n    @property\n    def scope_distance(self):\n        return self._scope_distance\n\n    @scope_distance.setter\n    def scope_distance(self, scope_distance):\n        \"\"\"\n        Setter for the scope_distance attribute, 
ensuring it only decreases.\n\n        The scope_distance attribute is designed to never increase; it can only be set to smaller values than\n        the current one. If a larger value is provided, it is ignored. The setter also updates the event's\n        tags to reflect the new scope distance.\n\n        Parameters:\n            scope_distance (int): The new scope distance to set, must be a non-negative integer.\n\n        Note:\n            The method will automatically update the relevant 'distance-' tags associated with the event.\n        \"\"\"\n        if scope_distance &lt; 0:\n            raise ValueError(f\"Invalid scope distance: {scope_distance}\")\n        # ensure scope distance does not increase (only allow setting to smaller values)\n        if self.scope_distance is None:\n            new_scope_distance = scope_distance\n        else:\n            new_scope_distance = min(self.scope_distance, scope_distance)\n        if self._scope_distance != new_scope_distance:\n            # remove old scope distance tags\n            for t in list(self.tags):\n                if t.startswith(\"distance-\"):\n                    self.remove_tag(t)\n            if self.host:\n                if scope_distance == 0:\n                    self.add_tag(\"in-scope\")\n                    self.remove_tag(\"affiliate\")\n                else:\n                    self.remove_tag(\"in-scope\")\n                    self.add_tag(f\"distance-{new_scope_distance}\")\n            self._scope_distance = new_scope_distance\n            # apply recursively to parent events\n            parent_scope_distance = getattr(self.parent, \"scope_distance\", None)\n            if parent_scope_distance is not None and self.parent is not self:\n                self.parent.scope_distance = new_scope_distance + 1\n\n    @property\n    def scope_description(self):\n        \"\"\"\n        Returns a single word describing the scope of the event.\n\n        \"in-scope\" if the event is in scope, \"affiliate\" if it's an affiliate, otherwise \"distance-{scope_distance}\"\n        \"\"\"\n        if self.scope_distance == 0:\n            return \"in-scope\"\n        elif \"affiliate\" in self.tags:\n            return \"affiliate\"\n        return f\"distance-{self.scope_distance}\"\n\n    @property\n    def parent(self):\n        return self._parent\n\n    @parent.setter\n    def parent(self, parent):\n        \"\"\"\n        Setter for the parent attribute, ensuring it's a valid event and updating scope distance.\n\n        Sets the parent of the event and automatically adjusts the scope distance based on the parent event's\n        scope distance. The scope distance is incremented by 1 if the host of the parent event is different\n        from the current event's host.\n\n        Parameters:\n            parent (BaseEvent): The new parent event to set. 
Must be a valid event object.\n\n        Note:\n            If an invalid parent is provided and the event is not a dummy, a warning will be logged.\n        \"\"\"\n        if is_event(parent):\n            self._parent = parent\n            hosts_are_same = (self.host and parent.host) and (self.host == parent.host)\n            new_scope_distance = int(parent.scope_distance)\n            if self.host and parent.scope_distance is not None:\n                # only increment the scope distance if the host changes\n                if self._scope_distance_increment_same_host or not hosts_are_same:\n                    new_scope_distance += 1\n            self.scope_distance = new_scope_distance\n            # inherit certain tags\n            if hosts_are_same:\n                # inherit web spider distance from parent\n                self.web_spider_distance = getattr(parent, \"web_spider_distance\", 0)\n                event_has_url = getattr(self, \"parsed_url\", None) is not None\n                for t in parent.tags:\n                    if t in (\"affiliate\",):\n                        self.add_tag(t)\n                    elif t.startswith(\"mutation-\"):\n                        self.add_tag(t)\n                    # only add these tags if the event has a URL\n                    if event_has_url:\n                        if t in (\"spider-danger\", \"spider-max\"):\n                            self.add_tag(t)\n        elif not self._dummy:\n            log.warning(f\"Tried to set invalid parent on {self}: (got: {parent})\")\n\n    @property\n    def parent_id(self):\n        parent_id = getattr(self.get_parent(), \"id\", None)\n        if parent_id is not None:\n            return parent_id\n        return self._parent_id\n\n    @property\n    def parent_uuid(self):\n        parent_uuid = getattr(self.get_parent(), \"uuid\", None)\n        if parent_uuid is not None:\n            return parent_uuid\n        return self._parent_uuid\n\n    @property\n    def validators(self):\n        \"\"\"\n        Depending on whether the scan attribute is accessible, return either a config-aware or non-config-aware validator\n\n        This exists to prevent a chicken-and-egg scenario during the creation of certain events such as URLs,\n        whose sanitization behavior is different depending on the config.\n\n        However, thanks to this property, validation can still work in the absence of a config.\n        \"\"\"\n        if self.scan is not None:\n            return self.scan.helpers.config_aware_validators\n        return validators\n\n    def get_parent(self):\n        \"\"\"\n        Takes into account events with the _omit flag\n        \"\"\"\n        if getattr(self.parent, \"_omit\", False):\n            return self.parent.get_parent()\n        return self.parent\n\n    def get_parents(self, omit=False, include_self=False):\n        parents = []\n        e = self\n        if include_self:\n            parents.append(self)\n        while 1:\n            if omit:\n                parent = e.get_parent()\n            else:\n                parent = e.parent\n            if parent is None:\n                break\n            if e == parent:\n                break\n            parents.append(parent)\n            e = parent\n        return parents\n\n    def _host(self):\n        return \"\"\n\n    def _sanitize_data(self, data):\n        \"\"\"\n        Validates and sanitizes the event's data during instantiation.\n\n        By default, uses the '_data_load' method to pre-process the 
data and then applies the '_data_validator'\n        to validate and create a sanitized dictionary. Raises a ValidationError if any of the validations fail.\n        Subclasses can override this method to provide custom validation logic.\n\n        Returns:\n            Any: The sanitized data.\n\n        Raises:\n            ValidationError: If the data fails to validate.\n        \"\"\"\n        data = self._data_load(data)\n        if self._data_validator is not None:\n            if not isinstance(data, dict):\n                raise ValidationError(f\"data is not of type dict: {data}\")\n            data = self._data_validator(**data).model_dump(exclude_none=True)\n        return self.sanitize_data(data)\n\n    def sanitize_data(self, data):\n        return data\n\n    @property\n    def data_human(self):\n        \"\"\"\n        Human representation of event.data\n        \"\"\"\n        return self._data_human()\n\n    def _data_human(self):\n        if isinstance(self.data, (dict, list)):\n            with suppress(Exception):\n                return json.dumps(self.data, sort_keys=True)\n        return smart_decode(self.data)\n\n    def _data_load(self, data):\n        \"\"\"\n        How to load the event data (JSON-decode it, etc.)\n        \"\"\"\n        return data\n\n    @property\n    def data_id(self):\n        \"\"\"\n        Representation of the event.data used to calculate the event's ID\n        \"\"\"\n        return self._data_id()\n\n    def _data_id(self):\n        return self.data\n\n    @property\n    def pretty_string(self):\n        \"\"\"\n        A human-friendly representation of the event's data. Used for graph representation.\n\n        If the event's data is a dictionary, the function will try to return a JSON-formatted string.\n        Otherwise, it will use smart_decode to convert the data into a string representation.\n\n        Override if necessary.\n\n        Returns:\n            str: The graphical representation of the event's data.\n        \"\"\"\n        return self._pretty_string()\n\n    def _pretty_string(self):\n        return self._data_human()\n\n    @property\n    def data_graph(self):\n        \"\"\"\n        Representation of event.data for neo4j graph nodes\n        \"\"\"\n        return self.pretty_string\n\n    @property\n    def data_json(self):\n        \"\"\"\n        JSON representation of event.data\n        \"\"\"\n        return self.data\n\n    def __contains__(self, other):\n        \"\"\"\n        Allows events to be compared using the \"in\" operator:\n        E.g.:\n            if some_event in other_event:\n                ...\n        \"\"\"\n        try:\n            other = make_event(other, dummy=True)\n        except ValidationError:\n            return False\n        # if hashes match\n        if other == self:\n            return True\n        # if hosts match\n        if self.host and other.host:\n            if self.host == other.host:\n                return True\n            # hostnames and IPs\n            radixtarget = RadixTarget()\n            radixtarget.insert(self.host)\n            return bool(radixtarget.search(other.host))\n        return False\n\n    def json(self, mode=\"json\", siem_friendly=False):\n        \"\"\"\n        Serializes the event object to a JSON-compatible dictionary.\n\n        By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present.\n        Additional specific attributes can be serialized based on the mode specified.\n\n   
     Parameters:\n            mode (str): Specifies the data serialization mode. Default is \"json\". Other options include \"graph\", \"human\", and \"id\".\n            siem_friendly (bool): Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).\n\n        Returns:\n            dict: JSON-serializable dictionary representation of the event object.\n        \"\"\"\n        j = dict()\n        # type, ID, scope description\n        for i in (\"type\", \"id\", \"uuid\", \"scope_description\", \"netloc\"):\n            v = getattr(self, i, \"\")\n            if v:\n                j.update({i: str(v)})\n        # event data\n        data_attr = getattr(self, f\"data_{mode}\", None)\n        if data_attr is not None:\n            data = data_attr\n        else:\n            data = smart_decode(self.data)\n        if siem_friendly:\n            j[\"data\"] = {self.type: data}\n        else:\n            j[\"data\"] = data\n        # host, dns children\n        if self.host:\n            j[\"host\"] = str(self.host)\n            j[\"resolved_hosts\"] = sorted(str(h) for h in self.resolved_hosts)\n            j[\"dns_children\"] = {k: list(v) for k, v in self.dns_children.items()}\n        if isinstance(self.port, int):\n            j[\"port\"] = self.port\n        # web spider distance\n        web_spider_distance = getattr(self, \"web_spider_distance\", None)\n        if web_spider_distance is not None:\n            j[\"web_spider_distance\"] = web_spider_distance\n        # scope distance\n        j[\"scope_distance\"] = self.scope_distance\n        # scan\n        if self.scan:\n            j[\"scan\"] = self.scan.id\n        # timestamp\n        j[\"timestamp\"] = self.timestamp.isoformat()\n        # parent event\n        parent_id = self.parent_id\n        if parent_id:\n            j[\"parent\"] = parent_id\n        parent_uuid = self.parent_uuid\n        if parent_uuid:\n            j[\"parent_uuid\"] = parent_uuid\n        # tags\n        if self.tags:\n            j.update({\"tags\": list(self.tags)})\n        # parent module\n        if self.module:\n            j.update({\"module\": str(self.module)})\n        # sequence of modules that led to discovery\n        if self.module_sequence:\n            j.update({\"module_sequence\": str(self.module_sequence)})\n        # discovery context\n        j[\"discovery_context\"] = self.discovery_context\n        j[\"discovery_path\"] = self.discovery_path\n        j[\"parent_chain\"] = self.parent_chain\n\n        # normalize non-primitive python objects\n        for k, v in list(j.items()):\n            if k == \"data\":\n                continue\n            if type(v) not in (str, int, float, bool, list, dict, type(None)):\n                try:\n                    j[k] = json.dumps(v, sort_keys=True)\n                except Exception:\n                    j[k] = smart_decode(v)\n        return j\n\n    @staticmethod\n    def from_json(j):\n        \"\"\"\n        Convenience shortcut to create an Event object from a JSON-compatible dictionary.\n\n        Calls the `event_from_json()` function to deserialize the event.\n\n        Parameters:\n            j (dict): The JSON-compatible dictionary containing event data.\n\n        Returns:\n            Event: The deserialized Event object.\n        \"\"\"\n        return event_from_json(j)\n\n    @property\n    def module_sequence(self):\n        \"\"\"\n        Get a 
human-friendly string that represents the sequence of modules responsible for generating this event.\n\n        Includes the names of omitted parent events to provide a complete view of the module sequence leading to this event.\n\n        Returns:\n            str: The module sequence in human-friendly format.\n        \"\"\"\n        module_name = getattr(self.module, \"name\", \"\")\n        if getattr(self.parent, \"_omit\", False):\n            module_name = f\"{self.parent.module_sequence}-&gt;{module_name}\"\n        return module_name\n\n    @property\n    def module_priority(self):\n        if self._module_priority is None:\n            module = getattr(self, \"module\", None)\n            self._module_priority = int(max(1, min(5, getattr(module, \"priority\", 3))))\n        return self._module_priority\n\n    @module_priority.setter\n    def module_priority(self, priority):\n        self._module_priority = int(max(1, min(5, priority)))\n\n    @property\n    def priority(self):\n        if self._priority is None:\n            timestamp = self.timestamp.timestamp()\n            if self.parent.timestamp == self.timestamp:\n                self._priority = (timestamp,)\n            else:\n                self._priority = getattr(self.parent, \"priority\", ()) + (timestamp,)\n\n        return self._priority\n\n    @property\n    def type(self):\n        return self._type\n\n    @type.setter\n    def type(self, val):\n        self._type = val\n        self._hash = None\n        self._id = None\n\n    @property\n    def _host_size(self):\n        \"\"\"\n        Used for sorting events by their host size, so that parent ones (e.g. IP subnets) come first\n        \"\"\"\n        if self.host:\n            if isinstance(self.host, str):\n                # smaller domains should come first\n                return len(self.host)\n            else:\n                try:\n                    # bigger IP subnets should come first\n                    return -self.host.num_addresses\n                except AttributeError:\n                    # IP addresses default to 1\n                    return 1\n        return 0\n\n    def __iter__(self):\n        \"\"\"\n        For dict(event)\n        \"\"\"\n        yield from self.json().items()\n\n    def __lt__(self, other):\n        \"\"\"\n        For queue sorting\n        \"\"\"\n        return self.priority &lt; getattr(other, \"priority\", (0,))\n\n    def __gt__(self, other):\n        \"\"\"\n        For queue sorting\n        \"\"\"\n        return self.priority &gt; getattr(other, \"priority\", (0,))\n\n    def __eq__(self, other):\n        try:\n            other = make_event(other, dummy=True)\n        except ValidationError:\n            return False\n        return hash(self) == hash(other)\n\n    def __hash__(self):\n        if self._hash is None:\n            self._hash = hash(self.id)\n        return self._hash\n\n    def __str__(self):\n        max_event_len = 80\n        d = str(self.data).replace(\"\\n\", \"\\\\n\")\n        return f'{self.type}(\"{d[:max_event_len]}{(\"...\" if len(d) &gt; max_event_len else \"\")}\", module={self.module}, tags={self.tags})'\n\n    def __repr__(self):\n        return str(self)\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.pretty_string","title":"pretty_string  <code>property</code>","text":"<pre><code>pretty_string\n</code></pre> <p>A human-friendly representation of the event's data. 
Used for graph representation.</p> <p>If the event's data is a dictionary, the function will try to return a JSON-formatted string. Otherwise, it will use smart_decode to convert the data into a string representation.</p> <p>Override if necessary.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The graphical representation of the event's data.</p> </li> </ul>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.module_sequence","title":"module_sequence  <code>property</code>","text":"<pre><code>module_sequence\n</code></pre> <p>Get a human-friendly string that represents the sequence of modules responsible for generating this event.</p> <p>Includes the names of omitted parent events to provide a complete view of the module sequence leading to this event.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The module sequence in human-friendly format.</p> </li> </ul>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.__init__","title":"__init__","text":"<pre><code>__init__(data, event_type, parent=None, context=None, module=None, scan=None, scans=None, tags=None, confidence=100, timestamp=None, _dummy=False, _internal=None)\n</code></pre> <p>Initializes an Event object with the given parameters.</p> <p>In most cases, you should use <code>make_event()</code> instead of instantiating this class directly. <code>make_event()</code> is much friendlier, and can auto-detect the event type for you.</p> <p>Attributes:</p> <ul> <li> <code>data</code>               (<code>(str, dict)</code>)           \u2013            <p>The primary data for the event.</p> </li> <li> <code>event_type</code>               (<code>str</code>)           \u2013            <p>Type of the event, e.g., 'IP_ADDRESS'.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>)           \u2013            <p>Parent event that led to this event's discovery. Defaults to None.</p> </li> <li> <code>module</code>               (<code>str</code>)           \u2013            <p>Module that discovered the event. Defaults to None.</p> </li> <li> <code>scan</code>               (<code>Scan</code>)           \u2013            <p>BBOT Scan object. Required unless _dummy is True. Defaults to None.</p> </li> <li> <code>scans</code>               (<code>list of Scan</code>)           \u2013            <p>BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.</p> </li> <li> <code>tags</code>               (<code>list of str</code>)           \u2013            <p>Descriptive tags for the event. Defaults to None.</p> </li> <li> <code>confidence</code>               (<code>int</code>)           \u2013            <p>Confidence level for the event, on a scale of 1-100. Defaults to 100.</p> </li> <li> <code>timestamp</code>               (<code>datetime</code>)           \u2013            <p>Time of event discovery. Defaults to current UTC time.</p> </li> <li> <code>_dummy</code>               (<code>bool</code>)           \u2013            <p>If True, disables certain data validations. Defaults to False.</p> </li> <li> <code>_internal</code>               (<code>Any</code>)           \u2013            <p>If specified, makes the event internal. 
Defaults to None.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If either <code>scan</code> or <code>parent</code> are not specified and <code>_dummy</code> is False.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>def __init__(\n    self,\n    data,\n    event_type,\n    parent=None,\n    context=None,\n    module=None,\n    scan=None,\n    scans=None,\n    tags=None,\n    confidence=100,\n    timestamp=None,\n    _dummy=False,\n    _internal=None,\n):\n    \"\"\"\n    Initializes an Event object with the given parameters.\n\n    In most cases, you should use `make_event()` instead of instantiating this class directly.\n    `make_event()` is much friendlier, and can auto-detect the event type for you.\n\n    Attributes:\n        data (str, dict): The primary data for the event.\n        event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'.\n        parent (BaseEvent, optional): Parent event that led to this event's discovery. Defaults to None.\n        module (str, optional): Module that discovered the event. Defaults to None.\n        scan (Scan, optional): BBOT Scan object. Required unless _dummy is True. Defaults to None.\n        scans (list of Scan, optional): BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.\n        tags (list of str, optional): Descriptive tags for the event. Defaults to None.\n        confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n        timestamp (datetime, optional): Time of event discovery. Defaults to current UTC time.\n        _dummy (bool, optional): If True, disables certain data validations. Defaults to False.\n        _internal (Any, optional): If specified, makes the event internal. 
Defaults to None.\n\n    Raises:\n        ValidationError: If either `scan` or `parent` are not specified and `_dummy` is False.\n    \"\"\"\n    self._uuid = uuid.uuid4()\n    self._id = None\n    self._hash = None\n    self._data = None\n    self.__host = None\n    self._tags = set()\n    self._port = None\n    self._omit = False\n    self.__words = None\n    self._parent = None\n    self._priority = None\n    self._parent_id = None\n    self._parent_uuid = None\n    self._host_original = None\n    self._scope_distance = None\n    self._module_priority = None\n    self._resolved_hosts = set()\n    self.dns_children = dict()\n    self.raw_dns_records = dict()\n    self._discovery_context = \"\"\n    self._discovery_context_regex = re.compile(r\"\\{(?:event|module)[^}]*\\}\")\n    self.web_spider_distance = 0\n\n    # for creating one-off events without enforcing parent requirement\n    self._dummy = _dummy\n    self.module = module\n    self._type = event_type\n\n    # keep track of whether this event has been recorded by the scan\n    self._stats_recorded = False\n\n    if timestamp is not None:\n        self.timestamp = timestamp\n    else:\n        try:\n            self.timestamp = datetime.datetime.now(datetime.UTC)\n        except AttributeError:\n            self.timestamp = datetime.datetime.utcnow()\n\n    self.confidence = int(confidence)\n    self._internal = False\n\n    # self.scan holds the instantiated scan object (for helpers, etc.)\n    self.scan = scan\n    if (not self.scan) and (not self._dummy):\n        raise ValidationError(f\"Must specify scan\")\n    # self.scans holds a list of scan IDs from scans that encountered this event\n    self.scans = []\n    if scans is not None:\n        self.scans = scans\n    if self.scan:\n        self.scans = list(set([self.scan.id] + self.scans))\n\n    try:\n        self.data = self._sanitize_data(data)\n    except Exception as e:\n        log.trace(traceback.format_exc())\n        raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{self.type}\": {e}')\n\n    if not self.data:\n        raise ValidationError(f'Invalid event data \"{data}\" for type \"{self.type}\"')\n\n    self.parent = parent\n    if (not self.parent) and (not self._dummy):\n        raise ValidationError(f\"Must specify event parent\")\n\n    if tags is not None:\n        for tag in tags:\n            self.add_tag(tag)\n\n    # internal events are not ingested by output modules\n    if not self._dummy:\n        # removed this second part because it was making certain sslcert events internal\n        if _internal:  # or parent._internal:\n            self.internal = True\n\n    if not context:\n        context = getattr(self.module, \"default_discovery_context\", \"\")\n    if context:\n        self.discovery_context = context\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.json","title":"json","text":"<pre><code>json(mode='json', siem_friendly=False)\n</code></pre> <p>Serializes the event object to a JSON-compatible dictionary.</p> <p>By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present. Additional specific attributes can be serialized based on the mode specified.</p> <p>Parameters:</p> <ul> <li> <code>mode</code>               (<code>str</code>, default:                   <code>'json'</code> )           \u2013            <p>Specifies the data serialization mode. Default is \"json\". 
Other options include \"graph\", \"human\", and \"id\".</p> </li> <li> <code>siem_friendly</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>JSON-serializable dictionary representation of the event object.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>def json(self, mode=\"json\", siem_friendly=False):\n    \"\"\"\n    Serializes the event object to a JSON-compatible dictionary.\n\n    By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present.\n    Additional specific attributes can be serialized based on the mode specified.\n\n    Parameters:\n        mode (str): Specifies the data serialization mode. Default is \"json\". Other options include \"graph\", \"human\", and \"id\".\n        siem_friendly (bool): Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).\n\n    Returns:\n        dict: JSON-serializable dictionary representation of the event object.\n    \"\"\"\n    j = dict()\n    # type, ID, scope description\n    for i in (\"type\", \"id\", \"uuid\", \"scope_description\", \"netloc\"):\n        v = getattr(self, i, \"\")\n        if v:\n            j.update({i: str(v)})\n    # event data\n    data_attr = getattr(self, f\"data_{mode}\", None)\n    if data_attr is not None:\n        data = data_attr\n    else:\n        data = smart_decode(self.data)\n    if siem_friendly:\n        j[\"data\"] = {self.type: data}\n    else:\n        j[\"data\"] = data\n    # host, dns children\n    if self.host:\n        j[\"host\"] = str(self.host)\n        j[\"resolved_hosts\"] = sorted(str(h) for h in self.resolved_hosts)\n        j[\"dns_children\"] = {k: list(v) for k, v in self.dns_children.items()}\n    if isinstance(self.port, int):\n        j[\"port\"] = self.port\n    # web spider distance\n    web_spider_distance = getattr(self, \"web_spider_distance\", None)\n    if web_spider_distance is not None:\n        j[\"web_spider_distance\"] = web_spider_distance\n    # scope distance\n    j[\"scope_distance\"] = self.scope_distance\n    # scan\n    if self.scan:\n        j[\"scan\"] = self.scan.id\n    # timestamp\n    j[\"timestamp\"] = self.timestamp.isoformat()\n    # parent event\n    parent_id = self.parent_id\n    if parent_id:\n        j[\"parent\"] = parent_id\n    parent_uuid = self.parent_uuid\n    if parent_uuid:\n        j[\"parent_uuid\"] = parent_uuid\n    # tags\n    if self.tags:\n        j.update({\"tags\": list(self.tags)})\n    # parent module\n    if self.module:\n        j.update({\"module\": str(self.module)})\n    # sequence of modules that led to discovery\n    if self.module_sequence:\n        j.update({\"module_sequence\": str(self.module_sequence)})\n    # discovery context\n    j[\"discovery_context\"] = self.discovery_context\n    j[\"discovery_path\"] = self.discovery_path\n    j[\"parent_chain\"] = self.parent_chain\n\n    # normalize non-primitive python objects\n    for k, v in list(j.items()):\n        if k == \"data\":\n            continue\n        if type(v) not in (str, int, float, bool, list, dict, type(None)):\n            try:\n      
          j[k] = json.dumps(v, sort_keys=True)\n            except Exception:\n                j[k] = smart_decode(v)\n    return j\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.from_json","title":"from_json  <code>staticmethod</code>","text":"<pre><code>from_json(j)\n</code></pre> <p>Convenience shortcut to create an Event object from a JSON-compatible dictionary.</p> <p>Calls the <code>event_from_json()</code> function to deserialize the event.</p> <p>Parameters:</p> <ul> <li> <code>j</code>               (<code>dict</code>)           \u2013            <p>The JSON-compatible dictionary containing event data.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Event</code>          \u2013            <p>The deserialized Event object.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>@staticmethod\ndef from_json(j):\n    \"\"\"\n    Convenience shortcut to create an Event object from a JSON-compatible dictionary.\n\n    Calls the `event_from_json()` function to deserialize the event.\n\n    Parameters:\n        j (dict): The JSON-compatible dictionary containing event data.\n\n    Returns:\n        Event: The deserialized Event object.\n    \"\"\"\n    return event_from_json(j)\n</code></pre>"},{"location":"dev/module_howto/","title":"How to Write a BBOT Module","text":"<p>Here we'll go over a basic example of writing a custom BBOT module.</p>"},{"location":"dev/module_howto/#create-the-python-file","title":"Create the python file","text":"<ol> <li>Create a new <code>.py</code> file in <code>bbot/modules</code> (or in a custom module directory)</li> <li>At the top of the file, import <code>BaseModule</code></li> <li>Declare a class that inherits from <code>BaseModule</code></li> <li>the class must have the same name as your file (case-insensitive)</li> <li>Define in <code>watched_events</code> what type of data your module will consume</li> <li>Define in <code>produced_events</code> what type of data your module will produce</li> <li>Define (via <code>flags</code>) whether your module is <code>active</code> or <code>passive</code>, and whether it's <code>safe</code> or <code>aggressive</code></li> <li>Put your main logic in <code>.handle_event()</code></li> </ol> <p>Here is an example of a simple module that performs whois lookups:</p> bbot/modules/whois.py<pre><code>from bbot.modules.base import BaseModule\n\nclass whois(BaseModule):\n    watched_events = [\"DNS_NAME\"] # watch for DNS_NAME events\n    produced_events = [\"WHOIS\"] # we produce WHOIS events\n    flags = [\"passive\", \"safe\"]\n    meta = {\"description\": \"Query WhoisXMLAPI for WHOIS data\"}\n    options = {\"api_key\": \"\"} # module config options\n    options_desc = {\"api_key\": \"WhoisXMLAPI Key\"}\n    per_domain_only = True # only run once per domain\n\n    base_url = \"https://www.whoisxmlapi.com/whoisserver/WhoisService\"\n\n    # one-time setup - runs at the beginning of the scan\n    async def setup(self):\n        self.api_key = self.config.get(\"api_key\")\n        if not self.api_key:\n            # soft-fail if no API key is set\n            return None, \"Must set API key\"\n\n    async def handle_event(self, event):\n        self.hugesuccess(f\"Got {event} (event.data: {event.data})\")\n        _, domain = self.helpers.split_domain(event.data)\n        url = f\"{self.base_url}?apiKey={self.api_key}&amp;domainName={domain}&amp;outputFormat=JSON\"\n        self.hugeinfo(f\"Visiting {url}\")\n        response = await self.helpers.request(url)\n        if 
response is not None:\n            await self.emit_event(response.json(), \"WHOIS\", parent=event)\n</code></pre>"},{"location":"dev/module_howto/#test-your-new-module","title":"Test your new module","text":"<p>After saving the module, you can run it with <code>-m</code>:</p> <pre><code># run a scan enabling the module in bbot/modules/mymodule.py\nbbot -t evilcorp.com -m whois\n</code></pre>"},{"location":"dev/module_howto/#debugging-your-module","title":"Debugging Your Module","text":"<p>BBOT has a variety of colorful logging functions like <code>self.hugesuccess()</code> that can be useful for debugging.</p> <p>BBOT log levels:</p> <ul> <li><code>critical</code>: bright red</li> <li><code>hugesuccess</code>: bright green</li> <li><code>hugewarning</code>: bright orange</li> <li><code>hugeinfo</code>: bright blue</li> <li><code>error</code>: red</li> <li><code>warning</code>: orange</li> <li><code>info</code>: blue</li> <li><code>verbose</code>: grey (must enable <code>-v</code> to see)</li> <li><code>debug</code>: grey (must enable <code>-d</code> to see)</li> </ul> <p>For details on how tests are written, see Unit Tests.</p>"},{"location":"dev/module_howto/#handle_event-and-emit_event","title":"<code>handle_event()</code> and <code>emit_event()</code>","text":"<p>The <code>handle_event()</code> method is the most important part of the module. By overriding this method, you control what the module does. During a scan, when an event from your <code>watched_events</code> is encountered (a <code>DNS_NAME</code> in this example), <code>handle_event()</code> is automatically called with that event as its argument.</p> <p>The <code>emit_event()</code> method is how modules return data. When you call <code>emit_event()</code>, it creates an event and outputs it, sending it any modules that are interested in that data type.</p>"},{"location":"dev/module_howto/#setup","title":"<code>setup()</code>","text":"<p>A module's <code>setup()</code> method is used for performing one-time setup at the start of the scan, like downloading a wordlist or checking to make sure an API key is valid. It needs to return either:</p> <ol> <li><code>True</code> - module setup succeeded</li> <li><code>None</code> - module setup soft-failed (scan will continue but module will be disabled)</li> <li><code>False</code> - module setup hard-failed (scan will abort)</li> </ol> <p>Optionally, it can also return a reason. Here are some examples:</p> <pre><code>async def setup(self):\n    if not self.config.get(\"api_key\"):\n        # soft-fail\n        return None, \"No API key specified\"\n\nasync def setup(self):\n    try:\n        wordlist = self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n    except WordlistError as e:\n        # hard-fail\n        return False, f\"Error downloading wordlist: {e}\"\n\nasync def setup(self):\n    self.timeout = self.config.get(\"timeout\", 5)\n    # success\n    return True\n</code></pre>"},{"location":"dev/module_howto/#module-config-options","title":"Module Config Options","text":"<p>Each module can have its own set of config options. These live in the <code>options</code> and <code>options_desc</code> attributes on your class. Both are dictionaries; <code>options</code> is for defaults and <code>options_desc</code> is for descriptions. 
Here is a typical example:</p> bbot/modules/nmap.py<pre><code>class nmap(BaseModule):\n    # ...\n    options = {\n        \"top_ports\": 100,\n        \"ports\": \"\",\n        \"timing\": \"T4\",\n        \"skip_host_discovery\": True,\n    }\n    options_desc = {\n        \"top_ports\": \"Top ports to scan (default 100) (to override, specify 'ports')\",\n        \"ports\": \"Ports to scan\",\n        \"timing\": \"-T&lt;0-5&gt;: Set timing template (higher is faster)\",\n        \"skip_host_discovery\": \"skip host discovery (-Pn)\",\n    }\n\n    async def setup(self):\n        self.ports = self.config.get(\"ports\", \"\")\n        self.timing = self.config.get(\"timing\", \"T4\")\n        self.top_ports = self.config.get(\"top_ports\", 100)\n        self.skip_host_discovery = self.config.get(\"skip_host_discovery\", True)\n        return True\n</code></pre> <p>Once you've defined these variables, you can pass the options via <code>-c</code>:</p> <pre><code>bbot -m nmap -c modules.nmap.top_ports=250\n</code></pre> <p>... or via the config:</p> ~/.config/bbot/bbot.yml<pre><code>modules:\n  nmap:\n    top_ports: 250\n</code></pre> <p>Inside the module, you access them via <code>self.config</code>, e.g.:</p> <pre><code>self.config.get(\"top_ports\")\n</code></pre>"},{"location":"dev/module_howto/#module-dependencies","title":"Module Dependencies","text":"<p>BBOT automates module dependencies with Ansible. If your module relies on a third-party binary, OS package, or python library, you can specify them in the <code>deps_*</code> attributes of your module.</p> <pre><code>class MyModule(BaseModule):\n    ...\n    deps_apt = [\"chromium-browser\"]\n    deps_ansible = [\n        {\n            \"name\": \"install dev tools\",\n            \"package\": {\"name\": [\"gcc\", \"git\", \"make\"], \"state\": \"present\"},\n            \"become\": True,\n            \"ignore_errors\": True,\n        },\n        {\n            \"name\": \"Download massdns source code\",\n            \"git\": {\n                \"repo\": \"https://github.com/blechschmidt/massdns.git\",\n                \"dest\": \"#{BBOT_TEMP}/massdns\",\n                \"single_branch\": True,\n                \"version\": \"master\",\n            },\n        },\n        {\n            \"name\": \"Build massdns\",\n            \"command\": {\"chdir\": \"#{BBOT_TEMP}/massdns\", \"cmd\": \"make\", \"creates\": \"#{BBOT_TEMP}/massdns/bin/massdns\"},\n        },\n        {\n            \"name\": \"Install massdns\",\n            \"copy\": {\"src\": \"#{BBOT_TEMP}/massdns/bin/massdns\", \"dest\": \"#{BBOT_TOOLS}/\", \"mode\": \"u+x,g+x,o+x\"},\n        },\n    ]\n</code></pre>"},{"location":"dev/module_howto/#load-modules-from-custom-locations","title":"Load Modules from Custom Locations","text":"<p>If you have a custom module and you want to use it with BBOT, you can add its parent folder to <code>module_dirs</code>. This saves you from having to copy it into the BBOT install location. To add a custom module directory, add it to <code>module_dirs</code> in your preset:</p> my_preset.yml<pre><code># load BBOT modules from these additional paths\nmodule_dirs:\n  - /home/user/my_modules\n</code></pre>"},{"location":"dev/presets/","title":"Presets","text":""},{"location":"dev/presets/#bbot.scanner.Preset","title":"Preset","text":"<p>A preset is the central config for a BBOT scan. 
It contains everything a scan needs to run --     targets, modules, flags, config options like API keys, etc.</p> <p>You can create a preset manually and pass it into <code>Scanner(preset=preset)</code>.     Or, you can pass <code>Preset</code>'s kwargs into <code>Scanner()</code> and it will create the preset for you implicitly.</p> <p>Presets can include other presets (which can in turn include other presets, and so on).     This works by merging each preset in turn using <code>Preset.merge()</code>.     The order matters. In case of a conflict, the last preset to be merged wins priority.</p> <p>Presets can be loaded from or saved to YAML. BBOT has a number of ready-made presets for common tasks like subdomain enumeration, web spidering, dirbusting, etc.</p> <p>Presets are highly customizable via <code>conditions</code>, which use the Jinja2 templating engine.     Using <code>conditions</code>, you can define custom logic to inspect the final preset before the scan starts, and change it if need be.     Based on the state of the preset, you can print a warning message, abort the scan, enable/disable modules, etc..</p> <p>Attributes:</p> <ul> <li> <code>target</code>               (<code>Target</code>)           \u2013            <p>Target(s) of scan.</p> </li> <li> <code>whitelist</code>               (<code>Target</code>)           \u2013            <p>Scan whitelist (by default this is the same as <code>target</code>).</p> </li> <li> <code>blacklist</code>               (<code>Target</code>)           \u2013            <p>Scan blacklist (this takes ultimate precedence).</p> </li> <li> <code>helpers</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>Helper containing various reusable functions, regexes, etc.</p> </li> <li> <code>output_dir</code>               (<code>Path</code>)           \u2013            <p>Output directory for scan.</p> </li> <li> <code>scan_name</code>               (<code>str</code>)           \u2013            <p>Name of scan. Defaults to random value, e.g. \"demonic_jimmy\".</p> </li> <li> <code>name</code>               (<code>str</code>)           \u2013            <p>Human-friendly name of preset. Used mainly for logging purposes.</p> </li> <li> <code>description</code>               (<code>str</code>)           \u2013            <p>Description of preset.</p> </li> <li> <code>modules</code>               (<code>set</code>)           \u2013            <p>Combined modules to enable for the scan. Includes scan modules, internal modules, and output modules.</p> </li> <li> <code>scan_modules</code>               (<code>set</code>)           \u2013            <p>Modules to enable for the scan.</p> </li> <li> <code>output_modules</code>               (<code>set</code>)           \u2013            <p>Output modules to enable for the scan. (note: if no output modules are specified, this is not populated until .bake())</p> </li> <li> <code>internal_modules</code>               (<code>set</code>)           \u2013            <p>Internal modules for the scan. (note: not populated until .bake())</p> </li> <li> <code>exclude_modules</code>               (<code>set</code>)           \u2013            <p>Modules to exclude from the scan. When set, automatically removes excluded modules.</p> </li> <li> <code>flags</code>               (<code>set</code>)           \u2013            <p>Flags to enable for the scan. 
When set, automatically enables modules.</p> </li> <li> <code>require_flags</code>               (<code>set</code>)           \u2013            <p>Require modules to have these flags. When set, automatically removes offending modules.</p> </li> <li> <code>exclude_flags</code>               (<code>set</code>)           \u2013            <p>Exclude modules that have any of these flags. When set, automatically removes offending modules.</p> </li> <li> <code>module_dirs</code>               (<code>set</code>)           \u2013            <p>Custom directories from which to load modules (alias to <code>self.module_loader.module_dirs</code>). When set, automatically preloads contained modules.</p> </li> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>BBOT config (alias to <code>core.config</code>)</p> </li> <li> <code>core</code>               (<code>BBOTCore</code>)           \u2013            <p>Local copy of BBOTCore object.</p> </li> <li> <code>verbose</code>               (<code>bool</code>)           \u2013            <p>Whether log level is currently set to verbose. When set, updates log level for all BBOT log handlers.</p> </li> <li> <code>debug</code>               (<code>bool</code>)           \u2013            <p>Whether log level is currently set to debug. When set, updates log level for all BBOT log handlers.</p> </li> <li> <code>silent</code>               (<code>bool</code>)           \u2013            <p>Whether logging is currently disabled. When set to True, silences all stderr.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(\n        \"evilcorp.com\",\n        \"1.2.3.0/24\",\n        flags=[\"subdomain-enum\"],\n        modules=[\"nuclei\"],\n        config={\"web\": {\"http_proxy\": \"http://127.0.0.1\"}}\n    )\n&gt;&gt;&gt; scan = Scanner(preset=preset)\n</code></pre> <pre><code>&gt;&gt;&gt; preset = Preset.from_yaml_file(\"my_preset.yml\")\n&gt;&gt;&gt; scan = Scanner(preset=preset)\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>class Preset:\n    \"\"\"\n    A preset is the central config for a BBOT scan. It contains everything a scan needs to run --\n        targets, modules, flags, config options like API keys, etc.\n\n    You can create a preset manually and pass it into `Scanner(preset=preset)`.\n        Or, you can pass `Preset`'s kwargs into `Scanner()` and it will create the preset for you implicitly.\n\n    Presets can include other presets (which can in turn include other presets, and so on).\n        This works by merging each preset in turn using `Preset.merge()`.\n        The order matters. In case of a conflict, the last preset to be merged wins priority.\n\n    Presets can be loaded from or saved to YAML. 
BBOT has a number of ready-made presets for common tasks like\n    subdomain enumeration, web spidering, dirbusting, etc.\n\n    Presets are highly customizable via `conditions`, which use the Jinja2 templating engine.\n        Using `conditions`, you can define custom logic to inspect the final preset before the scan starts, and change it if need be.\n        Based on the state of the preset, you can print a warning message, abort the scan, enable/disable modules, etc..\n\n    Attributes:\n        target (Target): Target(s) of scan.\n        whitelist (Target): Scan whitelist (by default this is the same as `target`).\n        blacklist (Target): Scan blacklist (this takes ultimate precedence).\n        helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc.\n        output_dir (pathlib.Path): Output directory for scan.\n        scan_name (str): Name of scan. Defaults to random value, e.g. \"demonic_jimmy\".\n        name (str): Human-friendly name of preset. Used mainly for logging purposes.\n        description (str): Description of preset.\n        modules (set): Combined modules to enable for the scan. Includes scan modules, internal modules, and output modules.\n        scan_modules (set): Modules to enable for the scan.\n        output_modules (set): Output modules to enable for the scan. (note: if no output modules are specified, this is not populated until .bake())\n        internal_modules (set): Internal modules for the scan. (note: not populated until .bake())\n        exclude_modules (set): Modules to exclude from the scan. When set, automatically removes excluded modules.\n        flags (set): Flags to enable for the scan. When set, automatically enables modules.\n        require_flags (set): Require modules to have these flags. When set, automatically removes offending modules.\n        exclude_flags (set): Exclude modules that have any of these flags. When set, automatically removes offending modules.\n        module_dirs (set): Custom directories from which to load modules (alias to `self.module_loader.module_dirs`). When set, automatically preloads contained modules.\n        config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `core.config`)\n        core (BBOTCore): Local copy of BBOTCore object.\n        verbose (bool): Whether log level is currently set to verbose. When set, updates log level for all BBOT log handlers.\n        debug (bool): Whether log level is currently set to debug. When set, updates log level for all BBOT log handlers.\n        silent (bool): Whether logging is currently disabled. 
When set to True, silences all stderr.\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(\n                \"evilcorp.com\",\n                \"1.2.3.0/24\",\n                flags=[\"subdomain-enum\"],\n                modules=[\"nuclei\"],\n                config={\"web\": {\"http_proxy\": \"http://127.0.0.1\"}}\n            )\n        &gt;&gt;&gt; scan = Scanner(preset=preset)\n\n        &gt;&gt;&gt; preset = Preset.from_yaml_file(\"my_preset.yml\")\n        &gt;&gt;&gt; scan = Scanner(preset=preset)\n    \"\"\"\n\n    def __init__(\n        self,\n        *targets,\n        whitelist=None,\n        blacklist=None,\n        modules=None,\n        output_modules=None,\n        exclude_modules=None,\n        flags=None,\n        require_flags=None,\n        exclude_flags=None,\n        config=None,\n        module_dirs=None,\n        include=None,\n        presets=None,\n        output_dir=None,\n        scan_name=None,\n        name=None,\n        description=None,\n        conditions=None,\n        force_start=False,\n        verbose=False,\n        debug=False,\n        silent=False,\n        _exclude=None,\n        _log=True,\n    ):\n        \"\"\"\n        Initializes the Preset class.\n\n        Args:\n            *targets (str): Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.\n            whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.\n            blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.\n            modules (list[str], optional): List of scan modules to enable for the scan. Defaults to empty list.\n            output_modules (list[str], optional): List of output modules to use. Defaults to csv, human, and json.\n            exclude_modules (list[str], optional): List of modules to exclude from the scan.\n            require_flags (list[str], optional): Only enable modules if they have these flags.\n            exclude_flags (list[str], optional): Don't enable modules if they have any of these flags.\n            module_dirs (list[str], optional): additional directories to load modules from.\n            config (dict, optional): Additional scan configuration settings.\n            include (list[str], optional): names or filenames of other presets to include.\n            presets (list[str], optional): an alias for `include`.\n            output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).\n            scan_name (str, optional): Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".\n            name (str, optional): Human-readable name of the preset. Used mainly for logging.\n            description (str, optional): Description of the preset.\n            conditions (list[str], optional): Custom conditions to be executed before scan start. Written in Jinja2.\n            force_start (bool, optional): If True, ignore conditional aborts and failed module setups. Just run the scan!\n            verbose (bool, optional): Set the BBOT logger to verbose mode.\n            debug (bool, optional): Set the BBOT logger to debug mode.\n            silent (bool, optional): Silence all stderr (effectively disables the BBOT logger).\n            _exclude (list[Path], optional): Preset filenames to exclude from inclusion. 
Used internally to prevent infinite recursion in circular or self-referencing presets.\n            _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n        \"\"\"\n        # internal variables\n        self._cli = False\n        self._log = _log\n        self.scan = None\n        self._args = None\n        self._environ = None\n        self._helpers = None\n        self._module_loader = None\n        self._yaml_str = \"\"\n        self._baked = False\n\n        self._default_output_modules = None\n        self._default_internal_modules = None\n\n        # modules / flags\n        self.modules = set()\n        self.exclude_modules = set()\n        self.flags = set()\n        self.exclude_flags = set()\n        self.require_flags = set()\n\n        # modules + flags\n        if modules is None:\n            modules = []\n        if isinstance(modules, str):\n            modules = [modules]\n        if output_modules is None:\n            output_modules = []\n        if isinstance(output_modules, str):\n            output_modules = [output_modules]\n        if exclude_modules is None:\n            exclude_modules = []\n        if isinstance(exclude_modules, str):\n            exclude_modules = [exclude_modules]\n        if flags is None:\n            flags = []\n        if isinstance(flags, str):\n            flags = [flags]\n        if exclude_flags is None:\n            exclude_flags = []\n        if isinstance(exclude_flags, str):\n            exclude_flags = [exclude_flags]\n        if require_flags is None:\n            require_flags = []\n        if isinstance(require_flags, str):\n            require_flags = [require_flags]\n\n        # these are used only for preserving the modules as specified in the original preset\n        # this is to ensure the preset looks the same when reserialized\n        self.explicit_scan_modules = set() if modules is None else set(modules)\n        self.explicit_output_modules = set() if output_modules is None else set(output_modules)\n\n        # whether to force-start the scan (ignoring conditional aborts and failed module setups)\n        self.force_start = force_start\n\n        # scan output directory\n        self.output_dir = output_dir\n        # name of scan\n        self.scan_name = scan_name\n\n        # name of preset, default blank\n        self.name = name or \"\"\n        # preset description, default blank\n        self.description = description or \"\"\n\n        # custom conditions, evaluated during .bake()\n        self.conditions = []\n        if conditions is not None:\n            for condition in conditions:\n                self.conditions.append((self.name, condition))\n\n        # keeps track of loaded preset files to prevent infinite circular inclusions\n        self._preset_files_loaded = set()\n        if _exclude is not None:\n            for _filename in _exclude:\n                self._preset_files_loaded.add(Path(_filename).resolve())\n\n        # bbot core config\n        self.core = CORE.copy()\n        if config is None:\n            config = omegaconf.OmegaConf.create({})\n        # merge custom configs if specified by the user\n        self.core.merge_custom(config)\n\n        # log verbosity\n        # actual log verbosity isn't set until .bake()\n        self.verbose = verbose\n        self.debug = debug\n        self.silent = silent\n\n        # custom module directories\n        self._module_dirs = set()\n        self.module_dirs = 
module_dirs\n\n        # target / whitelist / blacklist\n        # these are temporary receptacles until they all get .baked() together\n        self._seeds = set(targets if targets else [])\n        self._whitelist = set(whitelist) if whitelist else whitelist\n        self._blacklist = set(blacklist if blacklist else [])\n\n        self._target = None\n\n        # \"presets\" is alias to \"include\"\n        if presets and include:\n            raise ValueError(\n                'Cannot use both \"presets\" and \"include\" args at the same time (presets is an alias to include). Please pick one or the other :)'\n            )\n        if presets and not include:\n            include = presets\n        # include other presets\n        if include and not isinstance(include, (list, tuple, set)):\n            include = [include]\n        if include:\n            for included_preset in include:\n                self.include_preset(included_preset)\n\n        # we don't fill self.modules yet (that happens in .bake())\n        self.explicit_scan_modules.update(set(modules))\n        self.explicit_output_modules.update(set(output_modules))\n        self.exclude_modules.update(set(exclude_modules))\n        self.flags.update(set(flags))\n        self.exclude_flags.update(set(exclude_flags))\n        self.require_flags.update(set(require_flags))\n\n    @property\n    def bbot_home(self):\n        return Path(self.config.get(\"home\", \"~/.bbot\")).expanduser().resolve()\n\n    @property\n    def target(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access target before preset is baked (use ._seeds instead)\")\n        return self._target\n\n    @property\n    def seeds(self):\n        if self._seeds is None:\n            raise ValueError(\"Cannot access target before preset is baked (use ._seeds instead)\")\n        return self.target.seeds\n\n    @property\n    def whitelist(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access whitelist before preset is baked (use ._whitelist instead)\")\n        return self.target.whitelist\n\n    @property\n    def blacklist(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access blacklist before preset is baked (use ._blacklist instead)\")\n        return self.target.blacklist\n\n    @property\n    def preset_dir(self):\n        return self.bbot_home / \"presets\"\n\n    @property\n    def default_output_modules(self):\n        if self._default_output_modules is not None:\n            output_modules = self._default_output_modules\n        else:\n            output_modules = [\"python\", \"csv\", \"txt\", \"json\"]\n            if self._cli:\n                output_modules.append(\"stdout\")\n        return output_modules\n\n    @property\n    def default_internal_modules(self):\n        preloaded_internal = self.module_loader.preloaded(type=\"internal\")\n        if self._default_internal_modules is not None:\n            internal_modules = self._default_internal_modules\n        else:\n            internal_modules = list(preloaded_internal)\n        return {k: preloaded_internal[k] for k in internal_modules}\n\n    def merge(self, other):\n        \"\"\"\n        Merge another preset into this one.\n\n        If there are any config conflicts, `other` will win over `self`.\n\n        Args:\n            other (Preset): The preset to merge into this one.\n\n        Examples:\n            &gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n            &gt;&gt;&gt; 
preset1.scan_modules\n            ['portscan']\n            &gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n            &gt;&gt;&gt; preset2.scan_modules\n            ['sslcert']\n            &gt;&gt;&gt; preset1.merge(preset2)\n            &gt;&gt;&gt; preset1.scan_modules\n            ['portscan', 'sslcert']\n        \"\"\"\n        self.log_debug(f'Merging preset \"{other.name}\" into \"{self.name}\"')\n        # config\n        self.core.merge_custom(other.core.custom_config)\n        self.module_loader.core = self.core\n        # module dirs\n        # modules + flags\n        # establish requirements / exclusions first\n        self.exclude_modules.update(other.exclude_modules)\n        self.require_flags.update(other.require_flags)\n        self.exclude_flags.update(other.exclude_flags)\n        # then it's okay to start enabling modules\n        self.explicit_scan_modules.update(other.explicit_scan_modules)\n        self.explicit_output_modules.update(other.explicit_output_modules)\n        self.flags.update(other.flags)\n\n        # target / scope\n        self._seeds.update(other._seeds)\n        # leave whitelist as None until we encounter one\n        if other._whitelist is not None:\n            if self._whitelist is None:\n                self._whitelist = set(other._whitelist)\n            else:\n                self._whitelist.update(other._whitelist)\n        self._blacklist.update(other._blacklist)\n\n        # module dirs\n        self.module_dirs = self.module_dirs.union(other.module_dirs)\n\n        # log verbosity\n        if other.silent:\n            self.silent = other.silent\n        if other.verbose:\n            self.verbose = other.verbose\n        if other.debug:\n            self.debug = other.debug\n        # scan name\n        if other.scan_name is not None:\n            self.scan_name = other.scan_name\n        if other.output_dir is not None:\n            self.output_dir = other.output_dir\n        # conditions\n        if other.conditions:\n            self.conditions.extend(other.conditions)\n        # misc\n        self.force_start = self.force_start | other.force_start\n        self._cli = self._cli | other._cli\n        # transfer args\n        if other._args is not None:\n            self._args = other._args\n\n    def bake(self, scan=None):\n        \"\"\"\n        Return a \"baked\" copy of this preset, ready for use by a BBOT scan.\n\n        Baking a preset finalizes it by populating `preset.modules` based on flags,\n        performing final validations, and substituting environment variables in preloaded modules.\n        It also evaluates custom `conditions` as specified in the preset.\n\n        This function is automatically called in Scanner.__init__(). There is no need to call it manually.\n        \"\"\"\n        self.log_debug(\"Getting baked\")\n        # create a copy of self\n        baked_preset = copy(self)\n        baked_preset.scan = scan\n        # copy core\n        baked_preset.core = self.core.copy()\n        # copy module loader\n        baked_preset._module_loader = self.module_loader.copy()\n        # prepare os environment\n        os_environ = baked_preset.environ.prepare()\n        # find and replace preloaded modules with os environ\n        # this is different from the config variable substitution because it modifies\n        #  the preloaded modules, i.e. 
their ansible playbooks\n        baked_preset.module_loader.find_and_replace(**os_environ)\n        # update os environ\n        os.environ.clear()\n        os.environ.update(os_environ)\n\n        # validate flags, config options\n        baked_preset.validate()\n\n        # validate log level options\n        baked_preset.apply_log_level(apply_core=scan is not None)\n\n        # assign baked preset to our scan\n        if scan is not None:\n            scan.preset = baked_preset\n\n        # now that our requirements / exclusions are validated, we can start enabling modules\n        # enable scan modules\n        for module in baked_preset.explicit_scan_modules:\n            baked_preset.add_module(module, module_type=\"scan\")\n\n        # enable output modules\n        output_modules_to_enable = set(baked_preset.explicit_output_modules)\n        default_output_modules = self.default_output_modules\n        output_module_override = any(m in default_output_modules for m in output_modules_to_enable)\n        # if none of the default output modules have been explicitly specified, enable them all\n        if not output_module_override:\n            output_modules_to_enable.update(self.default_output_modules)\n        for module in output_modules_to_enable:\n            baked_preset.add_module(module, module_type=\"output\", raise_error=False)\n\n        # enable internal modules\n        for internal_module, preloaded in self.default_internal_modules.items():\n            is_enabled = baked_preset.config.get(internal_module, True)\n            is_excluded = internal_module in baked_preset.exclude_modules\n            if is_enabled and not is_excluded:\n                baked_preset.add_module(internal_module, module_type=\"internal\", raise_error=False)\n\n        # disable internal modules if requested\n        for internal_module in baked_preset.internal_modules:\n            if baked_preset.config.get(internal_module, True) == False:\n                baked_preset.exclude_modules.add(internal_module)\n\n        # enable modules by flag\n        for flag in baked_preset.flags:\n            for module, preloaded in baked_preset.module_loader.preloaded().items():\n                module_flags = preloaded.get(\"flags\", [])\n                module_type = preloaded.get(\"type\", \"scan\")\n                if flag in module_flags:\n                    self.log_debug(f'Enabling module \"{module}\" because it has flag \"{flag}\"')\n                    baked_preset.add_module(module, module_type, raise_error=False)\n\n        # ensure we have output modules\n        if not baked_preset.output_modules:\n            for output_module in self.default_output_modules:\n                baked_preset.add_module(output_module, module_type=\"output\", raise_error=False)\n\n        # create target object\n        from bbot.scanner.target import BBOTTarget\n\n        baked_preset._target = BBOTTarget(\n            *list(self._seeds),\n            whitelist=self._whitelist,\n            blacklist=self._blacklist,\n            strict_scope=self.strict_scope,\n            scan=scan,\n        )\n\n        # evaluate conditions\n        if baked_preset.conditions:\n            from .conditions import ConditionEvaluator\n\n            evaluator = ConditionEvaluator(baked_preset)\n            evaluator.evaluate()\n\n        self._baked = True\n        return baked_preset\n\n    def parse_args(self):\n        \"\"\"\n        Parse CLI arguments, and merge them into this preset.\n\n        Used in `cli.py`.\n        
\"\"\"\n        self._cli = True\n        self.merge(self.args.preset_from_args())\n\n    @property\n    def module_dirs(self):\n        return self.module_loader.module_dirs\n\n    @module_dirs.setter\n    def module_dirs(self, module_dirs):\n        if module_dirs:\n            if isinstance(module_dirs, str):\n                module_dirs = [module_dirs]\n            for m in module_dirs:\n                self.module_loader.add_module_dir(m)\n                self._module_dirs.add(m)\n\n    @property\n    def scan_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"scan\"]\n\n    @property\n    def output_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"output\"]\n\n    @property\n    def internal_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"internal\"]\n\n    def add_module(self, module_name, module_type=\"scan\", raise_error=True):\n        self.log_debug(f'Adding module \"{module_name}\" of type \"{module_type}\"')\n        is_valid, reason, preloaded = self._is_valid_module(module_name, module_type, raise_error=raise_error)\n        if not is_valid:\n            self.log_debug(f'Unable to add {module_type} module \"{module_name}\": {reason}')\n            return\n        self.modules.add(module_name)\n        for module_dep in preloaded.get(\"deps\", {}).get(\"modules\", []):\n            if module_dep != module_name and module_dep not in self.modules:\n                self.log_verbose(f'Adding module \"{module_dep}\" because {module_name} depends on it')\n                self.add_module(module_dep, raise_error=False)\n\n    def preloaded_module(self, module):\n        return self.module_loader.preloaded()[module]\n\n    @property\n    def config(self):\n        return self.core.config\n\n    @property\n    def web_config(self):\n        return self.core.config.get(\"web\", {})\n\n    @property\n    def scope_config(self):\n        return self.config.get(\"scope\", {})\n\n    @property\n    def strict_scope(self):\n        return self.scope_config.get(\"strict\", False)\n\n    def apply_log_level(self, apply_core=False):\n        # silent takes precedence\n        if self.silent:\n            self.verbose = False\n            self.debug = False\n            if apply_core:\n                self.core.logger.log_level = \"CRITICAL\"\n                for key in (\"verbose\", \"debug\"):\n                    with suppress(omegaconf.errors.ConfigKeyError):\n                        del self.core.custom_config[key]\n        else:\n            # then debug\n            if self.debug:\n                self.verbose = False\n                if apply_core:\n                    self.core.logger.log_level = \"DEBUG\"\n                    with suppress(omegaconf.errors.ConfigKeyError):\n                        del self.core.custom_config[\"verbose\"]\n            else:\n                # finally verbose\n                if self.verbose and apply_core:\n                    self.core.logger.log_level = \"VERBOSE\"\n\n    @property\n    def helpers(self):\n        if self._helpers is None:\n            from bbot.core.helpers.helper import ConfigAwareHelper\n\n            self._helpers = ConfigAwareHelper(preset=self)\n        return self._helpers\n\n    @property\n    def module_loader(self):\n        self.environ\n        if self._module_loader is None:\n            from bbot.core.modules import MODULE_LOADER\n\n 
           self._module_loader = MODULE_LOADER\n            self._module_loader.ensure_config_files()\n\n        return self._module_loader\n\n    @property\n    def environ(self):\n        if self._environ is None:\n            from .environ import BBOTEnviron\n\n            self._environ = BBOTEnviron(self)\n        return self._environ\n\n    @property\n    def args(self):\n        if self._args is None:\n            from .args import BBOTArgs\n\n            self._args = BBOTArgs(self)\n        return self._args\n\n    def in_scope(self, host):\n        return self.target.in_scope(host)\n\n    def blacklisted(self, host):\n        return self.target.blacklisted(host)\n\n    def whitelisted(self, host):\n        return self.target.whitelisted(host)\n\n    @classmethod\n    def from_dict(cls, preset_dict, name=None, _exclude=None, _log=False):\n        \"\"\"\n        Create a preset from a Python dictionary object.\n\n        Args:\n            preset_dict (dict): Preset in dictionary form\n            name (str, optional): Name of preset\n            _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n            _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n\n        Returns:\n            Preset: The loaded preset\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n        \"\"\"\n        new_preset = cls(\n            *preset_dict.get(\"target\", []),\n            whitelist=preset_dict.get(\"whitelist\"),\n            blacklist=preset_dict.get(\"blacklist\"),\n            modules=preset_dict.get(\"modules\"),\n            output_modules=preset_dict.get(\"output_modules\"),\n            exclude_modules=preset_dict.get(\"exclude_modules\"),\n            flags=preset_dict.get(\"flags\"),\n            require_flags=preset_dict.get(\"require_flags\"),\n            exclude_flags=preset_dict.get(\"exclude_flags\"),\n            verbose=preset_dict.get(\"verbose\", False),\n            debug=preset_dict.get(\"debug\", False),\n            silent=preset_dict.get(\"silent\", False),\n            config=preset_dict.get(\"config\"),\n            module_dirs=preset_dict.get(\"module_dirs\", []),\n            include=list(preset_dict.get(\"include\", [])),\n            scan_name=preset_dict.get(\"scan_name\"),\n            output_dir=preset_dict.get(\"output_dir\"),\n            name=preset_dict.get(\"name\", name),\n            description=preset_dict.get(\"description\"),\n            conditions=preset_dict.get(\"conditions\", []),\n            _exclude=_exclude,\n            _log=_log,\n        )\n        return new_preset\n\n    def include_preset(self, filename):\n        \"\"\"\n        Load a preset from a yaml file and merge it into this one.\n\n        If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Args:\n            filename (Path): The preset YAML file to merge\n\n        Examples:\n            &gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n        \"\"\"\n        self.log_debug(f'Including preset \"{filename}\"')\n        preset_filename = PRESET_PATH.find(filename)\n        preset_from_yaml = self.from_yaml_file(preset_filename, _exclude=self._preset_files_loaded)\n        if preset_from_yaml is not False:\n     
       self.merge(preset_from_yaml)\n            self._preset_files_loaded.add(preset_filename)\n\n    @classmethod\n    def from_yaml_file(cls, filename, _exclude=None, _log=False):\n        \"\"\"\n        Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n        \"\"\"\n        filename = Path(filename).resolve()\n        try:\n            return _preset_cache[filename]\n        except KeyError:\n            if _exclude is None:\n                _exclude = set()\n            if _exclude is not None and filename in _exclude:\n                log.debug(f\"Not loading {filename} because it was already loaded {_exclude}\")\n                return False\n            log.debug(f\"Loading {filename} because it's not in excluded list ({_exclude})\")\n            _exclude = set(_exclude)\n            _exclude.add(filename)\n            try:\n                yaml_str = open(filename).read()\n            except FileNotFoundError:\n                raise PresetNotFoundError(f'Could not find preset at \"{filename}\" - file does not exist')\n            preset = cls.from_dict(\n                omegaconf.OmegaConf.create(yaml_str), name=filename.stem, _exclude=_exclude, _log=_log\n            )\n            preset._yaml_str = yaml_str\n            _preset_cache[filename] = preset\n            return preset\n\n    @classmethod\n    def from_yaml_string(cls, yaml_preset):\n        \"\"\"\n        Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Examples:\n            &gt;&gt;&gt; yaml_string = '''\n            &gt;&gt;&gt; target:\n            &gt;&gt;&gt; - evilcorp.com\n            &gt;&gt;&gt; modules:\n            &gt;&gt;&gt; - portscan'''\n            &gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n        \"\"\"\n        return cls.from_dict(omegaconf.OmegaConf.create(yaml_preset))\n\n    def to_dict(self, include_target=False, full_config=False, redact_secrets=False):\n        \"\"\"\n        Convert this preset into a Python dictionary.\n\n        Args:\n            include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n            full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n\n        Returns:\n            dict: The preset in dictionary form\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n            &gt;&gt;&gt; preset.to_dict()\n            {\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n        \"\"\"\n        preset_dict = {}\n\n        if self.description:\n            preset_dict[\"description\"] = self.description\n\n        # config\n        if full_config:\n            config = self.core.config\n        else:\n            config = self.core.custom_config\n        config = omegaconf.OmegaConf.to_object(config)\n        if redact_secrets:\n            config = self.core.no_secrets_config(config)\n        if config:\n            preset_dict[\"config\"] = config\n\n        # scope\n        if include_target:\n            target = sorted(self.target.seeds.inputs)\n            whitelist = []\n            if self.target.whitelist is not None:\n             
   whitelist = sorted(self.target.whitelist.inputs)\n            blacklist = sorted(self.target.blacklist.inputs)\n            if target:\n                preset_dict[\"target\"] = target\n            if whitelist and whitelist != target:\n                preset_dict[\"whitelist\"] = whitelist\n            if blacklist:\n                preset_dict[\"blacklist\"] = blacklist\n\n        # flags + modules\n        if self.require_flags:\n            preset_dict[\"require_flags\"] = sorted(self.require_flags)\n        if self.exclude_flags:\n            preset_dict[\"exclude_flags\"] = sorted(self.exclude_flags)\n        if self.exclude_modules:\n            preset_dict[\"exclude_modules\"] = sorted(self.exclude_modules)\n        if self.flags:\n            preset_dict[\"flags\"] = sorted(self.flags)\n        if self.explicit_scan_modules:\n            preset_dict[\"modules\"] = sorted(self.explicit_scan_modules)\n        if self.explicit_output_modules:\n            preset_dict[\"output_modules\"] = sorted(self.explicit_output_modules)\n\n        # log verbosity\n        if self.verbose:\n            preset_dict[\"verbose\"] = True\n        if self.debug:\n            preset_dict[\"debug\"] = True\n        if self.silent:\n            preset_dict[\"silent\"] = True\n\n        # misc scan options\n        if self.scan_name:\n            preset_dict[\"scan_name\"] = self.scan_name\n        if self.scan_name:\n            preset_dict[\"output_dir\"] = self.output_dir\n\n        # conditions\n        if self.conditions:\n            preset_dict[\"conditions\"] = [c[-1] for c in self.conditions]\n\n        return preset_dict\n\n    def to_yaml(self, include_target=False, full_config=False, sort_keys=False):\n        \"\"\"\n        Return the preset in the form of a YAML string.\n\n        Args:\n            include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n            full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n            sort_keys (bool, optional): If True, sort YAML keys alphabetically\n\n        Returns:\n            str: The preset in the form of a YAML string\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n            &gt;&gt;&gt; print(preset.to_yaml())\n            flags:\n            - subdomain-enum\n            modules:\n            - portscan\n        \"\"\"\n        preset_dict = self.to_dict(include_target=include_target, full_config=full_config)\n        return yaml.dump(preset_dict, sort_keys=sort_keys)\n\n    def _is_valid_module(self, module, module_type, name_only=False, raise_error=True):\n        if module_type == \"scan\":\n            module_choices = self.module_loader.scan_module_choices\n        elif module_type == \"output\":\n            module_choices = self.module_loader.output_module_choices\n        elif module_type == \"internal\":\n            module_choices = self.module_loader.internal_module_choices\n        else:\n            raise ValidationError(f'Unknown module type \"{module}\"')\n\n        if not module in module_choices:\n            raise ValidationError(get_closest_match(module, module_choices, msg=f\"{module_type} module\"))\n\n        try:\n            preloaded = self.module_loader.preloaded()[module]\n        except KeyError:\n            raise ValidationError(f'Unknown module \"{module}\"')\n\n        if name_only:\n            return True, \"\", preloaded\n\n    
    if module in self.exclude_modules:\n            reason = \"the module has been excluded\"\n            return False, reason, {}\n\n        module_flags = preloaded.get(\"flags\", [])\n        _module_type = preloaded.get(\"type\", \"scan\")\n        if module_type:\n            if _module_type != module_type:\n                reason = f'its type ({_module_type}) is not \"{module_type}\"'\n                if raise_error:\n                    raise ValidationError(f'Unable to add {module_type} module \"{module}\" because {reason}')\n                return False, reason, preloaded\n\n        if _module_type == \"scan\":\n            if self.exclude_flags:\n                for f in module_flags:\n                    if f in self.exclude_flags:\n                        return False, f'it has excluded flag, \"{f}\"', preloaded\n            if self.require_flags and not all(f in module_flags for f in self.require_flags):\n                return False, f'it doesn\\'t have the required flags ({\",\".join(self.require_flags)})', preloaded\n\n        return True, \"\", preloaded\n\n    def validate(self):\n        \"\"\"\n        Validate module/flag exclusions/requirements, and CLI config options if applicable.\n        \"\"\"\n        if self._cli:\n            self.args.validate()\n\n        # validate excluded modules\n        for excluded_module in self.exclude_modules:\n            if not excluded_module in self.module_loader.all_module_choices:\n                raise ValidationError(\n                    get_closest_match(excluded_module, self.module_loader.all_module_choices, msg=\"module\")\n                )\n        # validate excluded flags\n        for excluded_flag in self.exclude_flags:\n            if not excluded_flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(excluded_flag, self.module_loader.flag_choices, msg=\"flag\"))\n        # validate required flags\n        for required_flag in self.require_flags:\n            if not required_flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(required_flag, self.module_loader.flag_choices, msg=\"flag\"))\n        # validate flags\n        for flag in self.flags:\n            if not flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(flag, self.module_loader.flag_choices, msg=\"flag\"))\n\n    @property\n    def all_presets(self):\n        \"\"\"\n        Recursively find all the presets and return them as a dictionary\n        \"\"\"\n        preset_dir = self.preset_dir\n        home_dir = Path.home()\n\n        # first, add local preset dir to PRESET_PATH\n        PRESET_PATH.add_path(self.preset_dir)\n\n        # ensure local preset directory exists\n        mkdir(preset_dir)\n\n        global DEFAULT_PRESETS\n        if DEFAULT_PRESETS is None:\n            presets = dict()\n            for ext in (\"yml\", \"yaml\"):\n                for preset_path in PRESET_PATH:\n                    # for every yaml file\n                    for original_filename in preset_path.rglob(f\"**/*.{ext}\"):\n                        # not including symlinks\n                        if original_filename.is_symlink():\n                            continue\n\n                        # try to load it as a preset\n                        try:\n                            loaded_preset = self.from_yaml_file(original_filename, _log=True)\n                            if loaded_preset is False:\n                            
    continue\n                        except Exception as e:\n                            log.warning(f'Failed to load preset at \"{original_filename}\": {e}')\n                            log.trace(traceback.format_exc())\n                            continue\n\n                        # category is the parent folder(s), if any\n                        category = str(original_filename.relative_to(preset_path).parent)\n                        if category == \".\":\n                            category = \"\"\n\n                        local_preset = original_filename\n                        # populate symlinks in local preset dir\n                        if not original_filename.is_relative_to(preset_dir):\n                            relative_preset = original_filename.relative_to(preset_path)\n                            local_preset = preset_dir / relative_preset\n                            mkdir(local_preset.parent, check_writable=False)\n                            if not local_preset.exists():\n                                local_preset.symlink_to(original_filename)\n\n                        # collapse home directory into \"~\"\n                        if local_preset.is_relative_to(home_dir):\n                            local_preset = Path(\"~\") / local_preset.relative_to(home_dir)\n\n                        presets[local_preset] = (loaded_preset, category, preset_path, original_filename)\n\n            # sort by name\n            DEFAULT_PRESETS = dict(sorted(presets.items(), key=lambda x: x[-1][0].name))\n        return DEFAULT_PRESETS\n\n    def presets_table(self, include_modules=True):\n        \"\"\"\n        Return a table of all the presets in the form of a string\n        \"\"\"\n        table = []\n        header = [\"Preset\", \"Category\", \"Description\", \"# Modules\"]\n        if include_modules:\n            header.append(\"Modules\")\n        for yaml_file, (loaded_preset, category, preset_path, original_file) in self.all_presets.items():\n            loaded_preset = loaded_preset.bake()\n            num_modules = f\"{len(loaded_preset.scan_modules):,}\"\n            row = [loaded_preset.name, category, loaded_preset.description, num_modules]\n            if include_modules:\n                row.append(\", \".join(sorted(loaded_preset.scan_modules)))\n            table.append(row)\n        return make_table(table, header)\n\n    def log_verbose(self, msg):\n        if self._log:\n            log.verbose(f\"Preset {self.name}: {msg}\")\n\n    def log_debug(self, msg):\n        if self._log:\n            log.debug(f\"Preset {self.name}: {msg}\")\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.all_presets","title":"all_presets  <code>property</code>","text":"<pre><code>all_presets\n</code></pre> <p>Recursively find all the presets and return them as a dictionary</p>"},{"location":"dev/presets/#bbot.scanner.Preset.__init__","title":"__init__","text":"<pre><code>__init__(*targets, whitelist=None, blacklist=None, modules=None, output_modules=None, exclude_modules=None, flags=None, require_flags=None, exclude_flags=None, config=None, module_dirs=None, include=None, presets=None, output_dir=None, scan_name=None, name=None, description=None, conditions=None, force_start=False, verbose=False, debug=False, silent=False, _exclude=None, _log=True)\n</code></pre> <p>Initializes the Preset class.</p> <p>Parameters:</p> <ul> <li> <code>*targets</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            
<p>Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.</p> </li> <li> <code>whitelist</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>Whitelisted target(s) to scan. Defaults to the same as <code>targets</code>.</p> </li> <li> <code>blacklist</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>Blacklisted target(s). Takes ultimate precedence. Defaults to empty.</p> </li> <li> <code>modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of scan modules to enable for the scan. Defaults to empty list.</p> </li> <li> <code>output_modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of output modules to use. Defaults to csv, human, and json.</p> </li> <li> <code>exclude_modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of modules to exclude from the scan.</p> </li> <li> <code>require_flags</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Only enable modules if they have these flags.</p> </li> <li> <code>exclude_flags</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Don't enable modules if they have any of these flags.</p> </li> <li> <code>module_dirs</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>additional directories to load modules from.</p> </li> <li> <code>config</code>               (<code>dict</code>, default:                   <code>None</code> )           \u2013            <p>Additional scan configuration settings.</p> </li> <li> <code>include</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>names or filenames of other presets to include.</p> </li> <li> <code>presets</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>an alias for <code>include</code>.</p> </li> <li> <code>output_dir</code>               (<code>str or Path</code>, default:                   <code>None</code> )           \u2013            <p>Directory to store scan output. Defaults to BBOT home directory (<code>~/.bbot</code>).</p> </li> <li> <code>scan_name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".</p> </li> <li> <code>name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Human-readable name of the preset. Used mainly for logging.</p> </li> <li> <code>description</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Description of the preset.</p> </li> <li> <code>conditions</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Custom conditions to be executed before scan start. 
Written in Jinja2.</p> </li> <li> <code>force_start</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, ignore conditional aborts and failed module setups. Just run the scan!</p> </li> <li> <code>verbose</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Set the BBOT logger to verbose mode.</p> </li> <li> <code>debug</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Set the BBOT logger to debug mode.</p> </li> <li> <code>silent</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Silence all stderr (effectively disables the BBOT logger).</p> </li> <li> <code>_exclude</code>               (<code>list[Path]</code>, default:                   <code>None</code> )           \u2013            <p>Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.</p> </li> <li> <code>_log</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.</p> </li> </ul> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def __init__(\n    self,\n    *targets,\n    whitelist=None,\n    blacklist=None,\n    modules=None,\n    output_modules=None,\n    exclude_modules=None,\n    flags=None,\n    require_flags=None,\n    exclude_flags=None,\n    config=None,\n    module_dirs=None,\n    include=None,\n    presets=None,\n    output_dir=None,\n    scan_name=None,\n    name=None,\n    description=None,\n    conditions=None,\n    force_start=False,\n    verbose=False,\n    debug=False,\n    silent=False,\n    _exclude=None,\n    _log=True,\n):\n    \"\"\"\n    Initializes the Preset class.\n\n    Args:\n        *targets (str): Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.\n        whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.\n        blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.\n        modules (list[str], optional): List of scan modules to enable for the scan. Defaults to empty list.\n        output_modules (list[str], optional): List of output modules to use. Defaults to csv, human, and json.\n        exclude_modules (list[str], optional): List of modules to exclude from the scan.\n        require_flags (list[str], optional): Only enable modules if they have these flags.\n        exclude_flags (list[str], optional): Don't enable modules if they have any of these flags.\n        module_dirs (list[str], optional): additional directories to load modules from.\n        config (dict, optional): Additional scan configuration settings.\n        include (list[str], optional): names or filenames of other presets to include.\n        presets (list[str], optional): an alias for `include`.\n        output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).\n        scan_name (str, optional): Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".\n        name (str, optional): Human-readable name of the preset. 
Used mainly for logging.\n        description (str, optional): Description of the preset.\n        conditions (list[str], optional): Custom conditions to be executed before scan start. Written in Jinja2.\n        force_start (bool, optional): If True, ignore conditional aborts and failed module setups. Just run the scan!\n        verbose (bool, optional): Set the BBOT logger to verbose mode.\n        debug (bool, optional): Set the BBOT logger to debug mode.\n        silent (bool, optional): Silence all stderr (effectively disables the BBOT logger).\n        _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n        _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n    \"\"\"\n    # internal variables\n    self._cli = False\n    self._log = _log\n    self.scan = None\n    self._args = None\n    self._environ = None\n    self._helpers = None\n    self._module_loader = None\n    self._yaml_str = \"\"\n    self._baked = False\n\n    self._default_output_modules = None\n    self._default_internal_modules = None\n\n    # modules / flags\n    self.modules = set()\n    self.exclude_modules = set()\n    self.flags = set()\n    self.exclude_flags = set()\n    self.require_flags = set()\n\n    # modules + flags\n    if modules is None:\n        modules = []\n    if isinstance(modules, str):\n        modules = [modules]\n    if output_modules is None:\n        output_modules = []\n    if isinstance(output_modules, str):\n        output_modules = [output_modules]\n    if exclude_modules is None:\n        exclude_modules = []\n    if isinstance(exclude_modules, str):\n        exclude_modules = [exclude_modules]\n    if flags is None:\n        flags = []\n    if isinstance(flags, str):\n        flags = [flags]\n    if exclude_flags is None:\n        exclude_flags = []\n    if isinstance(exclude_flags, str):\n        exclude_flags = [exclude_flags]\n    if require_flags is None:\n        require_flags = []\n    if isinstance(require_flags, str):\n        require_flags = [require_flags]\n\n    # these are used only for preserving the modules as specified in the original preset\n    # this is to ensure the preset looks the same when reserialized\n    self.explicit_scan_modules = set() if modules is None else set(modules)\n    self.explicit_output_modules = set() if output_modules is None else set(output_modules)\n\n    # whether to force-start the scan (ignoring conditional aborts and failed module setups)\n    self.force_start = force_start\n\n    # scan output directory\n    self.output_dir = output_dir\n    # name of scan\n    self.scan_name = scan_name\n\n    # name of preset, default blank\n    self.name = name or \"\"\n    # preset description, default blank\n    self.description = description or \"\"\n\n    # custom conditions, evaluated during .bake()\n    self.conditions = []\n    if conditions is not None:\n        for condition in conditions:\n            self.conditions.append((self.name, condition))\n\n    # keeps track of loaded preset files to prevent infinite circular inclusions\n    self._preset_files_loaded = set()\n    if _exclude is not None:\n        for _filename in _exclude:\n            self._preset_files_loaded.add(Path(_filename).resolve())\n\n    # bbot core config\n    self.core = CORE.copy()\n    if config is None:\n        config = omegaconf.OmegaConf.create({})\n    # merge custom configs if 
specified by the user\n    self.core.merge_custom(config)\n\n    # log verbosity\n    # actual log verbosity isn't set until .bake()\n    self.verbose = verbose\n    self.debug = debug\n    self.silent = silent\n\n    # custom module directories\n    self._module_dirs = set()\n    self.module_dirs = module_dirs\n\n    # target / whitelist / blacklist\n    # these are temporary receptacles until they all get .baked() together\n    self._seeds = set(targets if targets else [])\n    self._whitelist = set(whitelist) if whitelist else whitelist\n    self._blacklist = set(blacklist if blacklist else [])\n\n    self._target = None\n\n    # \"presets\" is alias to \"include\"\n    if presets and include:\n        raise ValueError(\n            'Cannot use both \"presets\" and \"include\" args at the same time (presets is an alias to include). Please pick one or the other :)'\n        )\n    if presets and not include:\n        include = presets\n    # include other presets\n    if include and not isinstance(include, (list, tuple, set)):\n        include = [include]\n    if include:\n        for included_preset in include:\n            self.include_preset(included_preset)\n\n    # we don't fill self.modules yet (that happens in .bake())\n    self.explicit_scan_modules.update(set(modules))\n    self.explicit_output_modules.update(set(output_modules))\n    self.exclude_modules.update(set(exclude_modules))\n    self.flags.update(set(flags))\n    self.exclude_flags.update(set(exclude_flags))\n    self.require_flags.update(set(require_flags))\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.bake","title":"bake","text":"<pre><code>bake(scan=None)\n</code></pre> <p>Return a \"baked\" copy of this preset, ready for use by a BBOT scan.</p> <p>Baking a preset finalizes it by populating <code>preset.modules</code> based on flags, performing final validations, and substituting environment variables in preloaded modules. It also evaluates custom <code>conditions</code> as specified in the preset.</p> <p>This function is automatically called in Scanner.init(). There is no need to call it manually.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def bake(self, scan=None):\n    \"\"\"\n    Return a \"baked\" copy of this preset, ready for use by a BBOT scan.\n\n    Baking a preset finalizes it by populating `preset.modules` based on flags,\n    performing final validations, and substituting environment variables in preloaded modules.\n    It also evaluates custom `conditions` as specified in the preset.\n\n    This function is automatically called in Scanner.__init__(). There is no need to call it manually.\n    \"\"\"\n    self.log_debug(\"Getting baked\")\n    # create a copy of self\n    baked_preset = copy(self)\n    baked_preset.scan = scan\n    # copy core\n    baked_preset.core = self.core.copy()\n    # copy module loader\n    baked_preset._module_loader = self.module_loader.copy()\n    # prepare os environment\n    os_environ = baked_preset.environ.prepare()\n    # find and replace preloaded modules with os environ\n    # this is different from the config variable substitution because it modifies\n    #  the preloaded modules, i.e. 
their ansible playbooks\n    baked_preset.module_loader.find_and_replace(**os_environ)\n    # update os environ\n    os.environ.clear()\n    os.environ.update(os_environ)\n\n    # validate flags, config options\n    baked_preset.validate()\n\n    # validate log level options\n    baked_preset.apply_log_level(apply_core=scan is not None)\n\n    # assign baked preset to our scan\n    if scan is not None:\n        scan.preset = baked_preset\n\n    # now that our requirements / exclusions are validated, we can start enabling modules\n    # enable scan modules\n    for module in baked_preset.explicit_scan_modules:\n        baked_preset.add_module(module, module_type=\"scan\")\n\n    # enable output modules\n    output_modules_to_enable = set(baked_preset.explicit_output_modules)\n    default_output_modules = self.default_output_modules\n    output_module_override = any(m in default_output_modules for m in output_modules_to_enable)\n    # if none of the default output modules have been explicitly specified, enable them all\n    if not output_module_override:\n        output_modules_to_enable.update(self.default_output_modules)\n    for module in output_modules_to_enable:\n        baked_preset.add_module(module, module_type=\"output\", raise_error=False)\n\n    # enable internal modules\n    for internal_module, preloaded in self.default_internal_modules.items():\n        is_enabled = baked_preset.config.get(internal_module, True)\n        is_excluded = internal_module in baked_preset.exclude_modules\n        if is_enabled and not is_excluded:\n            baked_preset.add_module(internal_module, module_type=\"internal\", raise_error=False)\n\n    # disable internal modules if requested\n    for internal_module in baked_preset.internal_modules:\n        if baked_preset.config.get(internal_module, True) == False:\n            baked_preset.exclude_modules.add(internal_module)\n\n    # enable modules by flag\n    for flag in baked_preset.flags:\n        for module, preloaded in baked_preset.module_loader.preloaded().items():\n            module_flags = preloaded.get(\"flags\", [])\n            module_type = preloaded.get(\"type\", \"scan\")\n            if flag in module_flags:\n                self.log_debug(f'Enabling module \"{module}\" because it has flag \"{flag}\"')\n                baked_preset.add_module(module, module_type, raise_error=False)\n\n    # ensure we have output modules\n    if not baked_preset.output_modules:\n        for output_module in self.default_output_modules:\n            baked_preset.add_module(output_module, module_type=\"output\", raise_error=False)\n\n    # create target object\n    from bbot.scanner.target import BBOTTarget\n\n    baked_preset._target = BBOTTarget(\n        *list(self._seeds),\n        whitelist=self._whitelist,\n        blacklist=self._blacklist,\n        strict_scope=self.strict_scope,\n        scan=scan,\n    )\n\n    # evaluate conditions\n    if baked_preset.conditions:\n        from .conditions import ConditionEvaluator\n\n        evaluator = ConditionEvaluator(baked_preset)\n        evaluator.evaluate()\n\n    self._baked = True\n    return baked_preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_dict","title":"from_dict  <code>classmethod</code>","text":"<pre><code>from_dict(preset_dict, name=None, _exclude=None, _log=False)\n</code></pre> <p>Create a preset from a Python dictionary object.</p> <p>Parameters:</p> <ul> <li> <code>preset_dict</code>               (<code>dict</code>)           \u2013            <p>Preset 
in dictionary form</p> </li> <li> <code>name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Name of preset</p> </li> <li> <code>_exclude</code>               (<code>list[Path]</code>, default:                   <code>None</code> )           \u2013            <p>Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.</p> </li> <li> <code>_log</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Preset</code>          \u2013            <p>The loaded preset</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_dict(cls, preset_dict, name=None, _exclude=None, _log=False):\n    \"\"\"\n    Create a preset from a Python dictionary object.\n\n    Args:\n        preset_dict (dict): Preset in dictionary form\n        name (str, optional): Name of preset\n        _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n        _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n\n    Returns:\n        Preset: The loaded preset\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n    \"\"\"\n    new_preset = cls(\n        *preset_dict.get(\"target\", []),\n        whitelist=preset_dict.get(\"whitelist\"),\n        blacklist=preset_dict.get(\"blacklist\"),\n        modules=preset_dict.get(\"modules\"),\n        output_modules=preset_dict.get(\"output_modules\"),\n        exclude_modules=preset_dict.get(\"exclude_modules\"),\n        flags=preset_dict.get(\"flags\"),\n        require_flags=preset_dict.get(\"require_flags\"),\n        exclude_flags=preset_dict.get(\"exclude_flags\"),\n        verbose=preset_dict.get(\"verbose\", False),\n        debug=preset_dict.get(\"debug\", False),\n        silent=preset_dict.get(\"silent\", False),\n        config=preset_dict.get(\"config\"),\n        module_dirs=preset_dict.get(\"module_dirs\", []),\n        include=list(preset_dict.get(\"include\", [])),\n        scan_name=preset_dict.get(\"scan_name\"),\n        output_dir=preset_dict.get(\"output_dir\"),\n        name=preset_dict.get(\"name\", name),\n        description=preset_dict.get(\"description\"),\n        conditions=preset_dict.get(\"conditions\", []),\n        _exclude=_exclude,\n        _log=_log,\n    )\n    return new_preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_yaml_file","title":"from_yaml_file  <code>classmethod</code>","text":"<pre><code>from_yaml_file(filename, _exclude=None, _log=False)\n</code></pre> <p>Create a preset from a YAML file. 
If the full path is not specified, BBOT will look in all the usual places for it.</p> <p>The file extension is optional.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_yaml_file(cls, filename, _exclude=None, _log=False):\n    \"\"\"\n    Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n    The file extension is optional.\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n    \"\"\"\n    filename = Path(filename).resolve()\n    try:\n        return _preset_cache[filename]\n    except KeyError:\n        if _exclude is None:\n            _exclude = set()\n        if _exclude is not None and filename in _exclude:\n            log.debug(f\"Not loading {filename} because it was already loaded {_exclude}\")\n            return False\n        log.debug(f\"Loading {filename} because it's not in excluded list ({_exclude})\")\n        _exclude = set(_exclude)\n        _exclude.add(filename)\n        try:\n            yaml_str = open(filename).read()\n        except FileNotFoundError:\n            raise PresetNotFoundError(f'Could not find preset at \"{filename}\" - file does not exist')\n        preset = cls.from_dict(\n            omegaconf.OmegaConf.create(yaml_str), name=filename.stem, _exclude=_exclude, _log=_log\n        )\n        preset._yaml_str = yaml_str\n        _preset_cache[filename] = preset\n        return preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_yaml_string","title":"from_yaml_string  <code>classmethod</code>","text":"<pre><code>from_yaml_string(yaml_preset)\n</code></pre> <p>Create a preset from a YAML string.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; yaml_string = '''\n&gt;&gt;&gt; target:\n&gt;&gt;&gt; - evilcorp.com\n&gt;&gt;&gt; modules:\n&gt;&gt;&gt; - portscan'''\n&gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_yaml_string(cls, yaml_preset):\n    \"\"\"\n    Create a preset from a YAML string.\n\n    Examples:\n        &gt;&gt;&gt; yaml_string = '''\n        &gt;&gt;&gt; target:\n        &gt;&gt;&gt; - evilcorp.com\n        &gt;&gt;&gt; modules:\n        &gt;&gt;&gt; - portscan'''\n        &gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n    \"\"\"\n    return cls.from_dict(omegaconf.OmegaConf.create(yaml_preset))\n</code></pre>
"},{"location":"dev/presets/#bbot.scanner.Preset.include_preset","title":"include_preset","text":"<pre><code>include_preset(filename)\n</code></pre> <p>Load a preset from a yaml file and merge it into this one.</p> <p>If the full path is not specified, BBOT will look in all the usual places for it.</p> <p>The file extension is optional.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>Path</code>)           \u2013            <p>The preset YAML file to merge</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def include_preset(self, filename):\n    \"\"\"\n    Load a preset from a yaml file and merge it into this one.\n\n    If the full path is not specified, BBOT will look in all the usual places for it.\n\n    The file extension is optional.\n\n    Args:\n        filename (Path): The preset YAML file to merge\n\n    Examples:\n        &gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n    \"\"\"\n    self.log_debug(f'Including preset \"{filename}\"')\n    preset_filename = PRESET_PATH.find(filename)\n    preset_from_yaml = self.from_yaml_file(preset_filename, _exclude=self._preset_files_loaded)\n    if preset_from_yaml is not False:\n        self.merge(preset_from_yaml)\n        self._preset_files_loaded.add(preset_filename)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.merge","title":"merge","text":"<pre><code>merge(other)\n</code></pre> <p>Merge another preset into this one.</p> <p>If there are any config conflicts, <code>other</code> will win over <code>self</code>.</p> <p>Parameters:</p> <ul> <li> <code>other</code>               (<code>Preset</code>)           \u2013            <p>The preset to merge into this one.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n&gt;&gt;&gt; preset1.scan_modules\n['portscan']\n&gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n&gt;&gt;&gt; preset2.scan_modules\n['sslcert']\n&gt;&gt;&gt; preset1.merge(preset2)\n&gt;&gt;&gt; preset1.scan_modules\n['portscan', 'sslcert']\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def merge(self, other):\n    \"\"\"\n    Merge another preset into this one.\n\n    If there are any config conflicts, `other` will win over `self`.\n\n    Args:\n        other (Preset): The preset to merge into this one.\n\n    Examples:\n        &gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n        &gt;&gt;&gt; preset1.scan_modules\n        ['portscan']\n        &gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n        &gt;&gt;&gt; preset2.scan_modules\n        ['sslcert']\n        &gt;&gt;&gt; preset1.merge(preset2)\n        &gt;&gt;&gt; preset1.scan_modules\n        ['portscan', 'sslcert']\n    \"\"\"\n    self.log_debug(f'Merging preset \"{other.name}\" into \"{self.name}\"')\n    # config\n    self.core.merge_custom(other.core.custom_config)\n    self.module_loader.core = 
self.core\n    # module dirs\n    # modules + flags\n    # establish requirements / exclusions first\n    self.exclude_modules.update(other.exclude_modules)\n    self.require_flags.update(other.require_flags)\n    self.exclude_flags.update(other.exclude_flags)\n    # then it's okay to start enabling modules\n    self.explicit_scan_modules.update(other.explicit_scan_modules)\n    self.explicit_output_modules.update(other.explicit_output_modules)\n    self.flags.update(other.flags)\n\n    # target / scope\n    self._seeds.update(other._seeds)\n    # leave whitelist as None until we encounter one\n    if other._whitelist is not None:\n        if self._whitelist is None:\n            self._whitelist = set(other._whitelist)\n        else:\n            self._whitelist.update(other._whitelist)\n    self._blacklist.update(other._blacklist)\n\n    # module dirs\n    self.module_dirs = self.module_dirs.union(other.module_dirs)\n\n    # log verbosity\n    if other.silent:\n        self.silent = other.silent\n    if other.verbose:\n        self.verbose = other.verbose\n    if other.debug:\n        self.debug = other.debug\n    # scan name\n    if other.scan_name is not None:\n        self.scan_name = other.scan_name\n    if other.output_dir is not None:\n        self.output_dir = other.output_dir\n    # conditions\n    if other.conditions:\n        self.conditions.extend(other.conditions)\n    # misc\n    self.force_start = self.force_start | other.force_start\n    self._cli = self._cli | other._cli\n    # transfer args\n    if other._args is not None:\n        self._args = other._args\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.parse_args","title":"parse_args","text":"<pre><code>parse_args()\n</code></pre> <p>Parse CLI arguments, and merge them into this preset.</p> <p>Used in <code>cli.py</code>.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def parse_args(self):\n    \"\"\"\n    Parse CLI arguments, and merge them into this preset.\n\n    Used in `cli.py`.\n    \"\"\"\n    self._cli = True\n    self.merge(self.args.preset_from_args())\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.presets_table","title":"presets_table","text":"<pre><code>presets_table(include_modules=True)\n</code></pre> <p>Return a table of all the presets in the form of a string</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def presets_table(self, include_modules=True):\n    \"\"\"\n    Return a table of all the presets in the form of a string\n    \"\"\"\n    table = []\n    header = [\"Preset\", \"Category\", \"Description\", \"# Modules\"]\n    if include_modules:\n        header.append(\"Modules\")\n    for yaml_file, (loaded_preset, category, preset_path, original_file) in self.all_presets.items():\n        loaded_preset = loaded_preset.bake()\n        num_modules = f\"{len(loaded_preset.scan_modules):,}\"\n        row = [loaded_preset.name, category, loaded_preset.description, num_modules]\n        if include_modules:\n            row.append(\", \".join(sorted(loaded_preset.scan_modules)))\n        table.append(row)\n    return make_table(table, header)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.to_dict","title":"to_dict","text":"<pre><code>to_dict(include_target=False, full_config=False, redact_secrets=False)\n</code></pre> <p>Convert this preset into a Python dictionary.</p> <p>Parameters:</p> <ul> <li> <code>include_target</code>               (<code>bool</code>, default:                   <code>False</code> )     
      \u2013            <p>If True, include target, whitelist, and blacklist in the dictionary</p> </li> <li> <code>full_config</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include the entire config, not just what's changed from the defaults.</p> </li> <li> <code>redact_secrets</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, redact secrets from the config</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>The preset in dictionary form</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n&gt;&gt;&gt; preset.to_dict()\n{\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def to_dict(self, include_target=False, full_config=False, redact_secrets=False):\n    \"\"\"\n    Convert this preset into a Python dictionary.\n\n    Args:\n        include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n        full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n        redact_secrets (bool, optional): If True, redact secrets from the config\n\n    Returns:\n        dict: The preset in dictionary form\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n        &gt;&gt;&gt; preset.to_dict()\n        {\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n    \"\"\"\n    preset_dict = {}\n\n    if self.description:\n        preset_dict[\"description\"] = self.description\n\n    # config\n    if full_config:\n        config = self.core.config\n    else:\n        config = self.core.custom_config\n    config = omegaconf.OmegaConf.to_object(config)\n    if redact_secrets:\n        config = self.core.no_secrets_config(config)\n    if config:\n        preset_dict[\"config\"] = config\n\n    # scope\n    if include_target:\n        target = sorted(self.target.seeds.inputs)\n        whitelist = []\n        if self.target.whitelist is not None:\n            whitelist = sorted(self.target.whitelist.inputs)\n        blacklist = sorted(self.target.blacklist.inputs)\n        if target:\n            preset_dict[\"target\"] = target\n        if whitelist and whitelist != target:\n            preset_dict[\"whitelist\"] = whitelist\n        if blacklist:\n            preset_dict[\"blacklist\"] = blacklist\n\n    # flags + modules\n    if self.require_flags:\n        preset_dict[\"require_flags\"] = sorted(self.require_flags)\n    if self.exclude_flags:\n        preset_dict[\"exclude_flags\"] = sorted(self.exclude_flags)\n    if self.exclude_modules:\n        preset_dict[\"exclude_modules\"] = sorted(self.exclude_modules)\n    if self.flags:\n        preset_dict[\"flags\"] = sorted(self.flags)\n    if self.explicit_scan_modules:\n        preset_dict[\"modules\"] = sorted(self.explicit_scan_modules)\n    if self.explicit_output_modules:\n        preset_dict[\"output_modules\"] = sorted(self.explicit_output_modules)\n\n    # log verbosity\n    if self.verbose:\n        preset_dict[\"verbose\"] = True\n    if self.debug:\n        preset_dict[\"debug\"] = True\n    if self.silent:\n        preset_dict[\"silent\"] = True\n\n    # misc scan options\n    if self.scan_name:\n        preset_dict[\"scan_name\"] = self.scan_name\n    if self.output_dir:\n        preset_dict[\"output_dir\"] = self.output_dir\n\n    # conditions\n    if self.conditions:\n        preset_dict[\"conditions\"] = [c[-1] for c in self.conditions]\n\n    return 
preset_dict\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.to_yaml","title":"to_yaml","text":"<pre><code>to_yaml(include_target=False, full_config=False, sort_keys=False)\n</code></pre> <p>Return the preset in the form of a YAML string.</p> <p>Parameters:</p> <ul> <li> <code>include_target</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include target, whitelist, and blacklist in the dictionary</p> </li> <li> <code>full_config</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include the entire config, not just what's changed from the defaults.</p> </li> <li> <code>sort_keys</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, sort YAML keys alphabetically</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The preset in the form of a YAML string</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n&gt;&gt;&gt; print(preset.to_yaml())\nflags:\n- subdomain-enum\nmodules:\n- portscan\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def to_yaml(self, include_target=False, full_config=False, sort_keys=False):\n    \"\"\"\n    Return the preset in the form of a YAML string.\n\n    Args:\n        include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n        full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n        sort_keys (bool, optional): If True, sort YAML keys alphabetically\n\n    Returns:\n        str: The preset in the form of a YAML string\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n        &gt;&gt;&gt; print(preset.to_yaml())\n        flags:\n        - subdomain-enum\n        modules:\n        - portscan\n    \"\"\"\n    preset_dict = self.to_dict(include_target=include_target, full_config=full_config)\n    return yaml.dump(preset_dict, sort_keys=sort_keys)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.validate","title":"validate","text":"<pre><code>validate()\n</code></pre> <p>Validate module/flag exclusions/requirements, and CLI config options if applicable.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def validate(self):\n    \"\"\"\n    Validate module/flag exclusions/requirements, and CLI config options if applicable.\n    \"\"\"\n    if self._cli:\n        self.args.validate()\n\n    # validate excluded modules\n    for excluded_module in self.exclude_modules:\n        if not excluded_module in self.module_loader.all_module_choices:\n            raise ValidationError(\n                get_closest_match(excluded_module, self.module_loader.all_module_choices, msg=\"module\")\n            )\n    # validate excluded flags\n    for excluded_flag in self.exclude_flags:\n        if not excluded_flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(excluded_flag, self.module_loader.flag_choices, msg=\"flag\"))\n    # validate required flags\n    for required_flag in self.require_flags:\n        if not required_flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(required_flag, 
self.module_loader.flag_choices, msg=\"flag\"))\n    # validate flags\n    for flag in self.flags:\n        if not flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(flag, self.module_loader.flag_choices, msg=\"flag\"))\n</code></pre>"},{"location":"dev/scanner/","title":"Scanner","text":""},{"location":"dev/scanner/#bbot.scanner.Scanner","title":"Scanner","text":"<p>A class representing a single BBOT scan</p> <p>Examples:</p> <p>Create scan with multiple targets:</p> <pre><code>&gt;&gt;&gt; my_scan = Scanner(\"evilcorp.com\", \"1.2.3.0/24\", modules=[\"portscan\", \"sslcert\", \"httpx\"])\n</code></pre> <p>Create scan with custom config:</p> <pre><code>&gt;&gt;&gt; config = {\"http_proxy\": \"http://127.0.0.1:8080\", \"modules\": {\"portscan\": {\"top_ports\": 2000}}}\n&gt;&gt;&gt; my_scan = Scanner(\"www.evilcorp.com\", modules=[\"portscan\", \"httpx\"], config=config)\n</code></pre> <p>Start the scan, iterating over events as they're discovered (synchronous):</p> <pre><code>&gt;&gt;&gt; for event in my_scan.start():\n&gt;&gt;&gt;     print(event)\n</code></pre> <p>Start the scan, iterating over events as they're discovered (asynchronous):</p> <pre><code>&gt;&gt;&gt; async for event in my_scan.async_start():\n&gt;&gt;&gt;     print(event)\n</code></pre> <p>Start the scan without consuming events (synchronous):</p> <pre><code>&gt;&gt;&gt; my_scan.start_without_generator()\n</code></pre> <p>Start the scan without consuming events (asynchronous):</p> <pre><code>&gt;&gt;&gt; await my_scan.async_start_without_generator()\n</code></pre> <p>Attributes:</p> <ul> <li> <code>status</code>               (<code>str</code>)           \u2013            <p>Status of scan, representing its current state. It can take on the following string values, each of which is mapped to an integer code in <code>_status_codes</code>: <pre><code>- \"NOT_STARTED\" (0): Initial status before the scan starts.\n- \"STARTING\" (1): Status when the scan is initializing.\n- \"RUNNING\" (2): Status when the scan is in progress.\n- \"FINISHING\" (3): Status when the scan is in the process of finalizing.\n- \"CLEANING_UP\" (4): Status when the scan is cleaning up resources.\n- \"ABORTING\" (5): Status when the scan is in the process of being aborted.\n- \"ABORTED\" (6): Status when the scan has been aborted.\n- \"FAILED\" (7): Status when the scan has encountered a failure.\n- \"FINISHED\" (8): Status when the scan has successfully completed.\n</code></pre></p> </li> <li> <code>_status_code</code>               (<code>int</code>)           \u2013            <p>The numerical representation of the current scan status, stored for internal use. 
It is mapped according to the values in <code>_status_codes</code>.</p> </li> <li> <code>target</code>               (<code>Target</code>)           \u2013            <p>Target of scan (alias to <code>self.preset.target</code>).</p> </li> <li> <code>preset</code>               (<code>Preset</code>)           \u2013            <p>The main scan Preset in its baked form.</p> </li> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>BBOT config (alias to <code>self.preset.config</code>).</p> </li> <li> <code>whitelist</code>               (<code>Target</code>)           \u2013            <p>Scan whitelist (by default this is the same as <code>target</code>) (alias to <code>self.preset.whitelist</code>).</p> </li> <li> <code>blacklist</code>               (<code>Target</code>)           \u2013            <p>Scan blacklist (this takes ultimate precedence) (alias to <code>self.preset.blacklist</code>).</p> </li> <li> <code>helpers</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>Helper containing various reusable functions, regexes, etc. (alias to <code>self.preset.helpers</code>).</p> </li> <li> <code>output_dir</code>               (<code>Path</code>)           \u2013            <p>Output directory for scan (alias to <code>self.preset.output_dir</code>).</p> </li> <li> <code>name</code>               (<code>str</code>)           \u2013            <p>Name of scan (alias to <code>self.preset.scan_name</code>).</p> </li> <li> <code>dispatcher</code>               (<code>Dispatcher</code>)           \u2013            <p>Triggers certain events when the scan <code>status</code> changes.</p> </li> <li> <code>modules</code>               (<code>dict</code>)           \u2013            <p>Holds all loaded modules in this format: <code>{\"module_name\": Module()}</code>.</p> </li> <li> <code>stats</code>               (<code>ScanStats</code>)           \u2013            <p>Holds high-level scan statistics such as how many events have been produced and consumed by each module.</p> </li> <li> <code>home</code>               (<code>Path</code>)           \u2013            <p>Base output directory of the scan (default: <code>~/.bbot/scans/&lt;scan_name&gt;</code>).</p> </li> <li> <code>running</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently running.</p> </li> <li> <code>stopping</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently stopping.</p> </li> <li> <code>stopped</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently stopped.</p> </li> <li> <code>aborting</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is aborted or currently aborting.</p> </li> </ul> Notes <ul> <li>The status is read-only once set to \"ABORTING\" until it transitions to \"ABORTED.\"</li> <li>Invalid statuses are logged but not applied.</li> <li>Setting a status will trigger the <code>on_status</code> event in the dispatcher.</li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>class Scanner:\n    \"\"\"A class representing a single BBOT scan\n\n    Examples:\n        Create scan with multiple targets:\n        &gt;&gt;&gt; my_scan = Scanner(\"evilcorp.com\", \"1.2.3.0/24\", modules=[\"portscan\", \"sslcert\", \"httpx\"])\n\n        Create scan with custom config:\n        &gt;&gt;&gt; config = {\"http_proxy\": \"http://127.0.0.1:8080\", 
\"modules\": {\"portscan\": {\"top_ports\": 2000}}}\n        &gt;&gt;&gt; my_scan = Scanner(\"www.evilcorp.com\", modules=[\"portscan\", \"httpx\"], config=config)\n\n        Start the scan, iterating over events as they're discovered (synchronous):\n        &gt;&gt;&gt; for event in my_scan.start():\n        &gt;&gt;&gt;     print(event)\n\n        Start the scan, iterating over events as they're discovered (asynchronous):\n        &gt;&gt;&gt; async for event in my_scan.async_start():\n        &gt;&gt;&gt;     print(event)\n\n        Start the scan without consuming events (synchronous):\n        &gt;&gt;&gt; my_scan.start_without_generator()\n\n        Start the scan without consuming events (asynchronous):\n        &gt;&gt;&gt; await my_scan.async_start_without_generator()\n\n    Attributes:\n        status (str): Status of scan, representing its current state. It can take on the following string values, each of which is mapped to an integer code in `_status_codes`:\n            ```markdown\n            - \"NOT_STARTED\" (0): Initial status before the scan starts.\n            - \"STARTING\" (1): Status when the scan is initializing.\n            - \"RUNNING\" (2): Status when the scan is in progress.\n            - \"FINISHING\" (3): Status when the scan is in the process of finalizing.\n            - \"CLEANING_UP\" (4): Status when the scan is cleaning up resources.\n            - \"ABORTING\" (5): Status when the scan is in the process of being aborted.\n            - \"ABORTED\" (6): Status when the scan has been aborted.\n            - \"FAILED\" (7): Status when the scan has encountered a failure.\n            - \"FINISHED\" (8): Status when the scan has successfully completed.\n            ```\n        _status_code (int): The numerical representation of the current scan status, stored for internal use. It is mapped according to the values in `_status_codes`.\n        target (Target): Target of scan (alias to `self.preset.target`).\n        preset (Preset): The main scan Preset in its baked form.\n        config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `self.preset.config`).\n        whitelist (Target): Scan whitelist (by default this is the same as `target`) (alias to `self.preset.whitelist`).\n        blacklist (Target): Scan blacklist (this takes ultimate precedence) (alias to `self.preset.blacklist`).\n        helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc. 
(alias to `self.preset.helpers`).\n        output_dir (pathlib.Path): Output directory for scan (alias to `self.preset.output_dir`).\n        name (str): Name of scan (alias to `self.preset.scan_name`).\n        dispatcher (Dispatcher): Triggers certain events when the scan `status` changes.\n        modules (dict): Holds all loaded modules in this format: `{\"module_name\": Module()}`.\n        stats (ScanStats): Holds high-level scan statistics such as how many events have been produced and consumed by each module.\n        home (pathlib.Path): Base output directory of the scan (default: `~/.bbot/scans/&lt;scan_name&gt;`).\n        running (bool): Whether the scan is currently running.\n        stopping (bool): Whether the scan is currently stopping.\n        stopped (bool): Whether the scan is currently stopped.\n        aborting (bool): Whether the scan is aborted or currently aborting.\n\n    Notes:\n        - The status is read-only once set to \"ABORTING\" until it transitions to \"ABORTED.\"\n        - Invalid statuses are logged but not applied.\n        - Setting a status will trigger the `on_status` event in the dispatcher.\n    \"\"\"\n\n    _status_codes = {\n        \"NOT_STARTED\": 0,\n        \"STARTING\": 1,\n        \"RUNNING\": 2,\n        \"FINISHING\": 3,\n        \"CLEANING_UP\": 4,\n        \"ABORTING\": 5,\n        \"ABORTED\": 6,\n        \"FAILED\": 7,\n        \"FINISHED\": 8,\n    }\n\n    def __init__(\n        self,\n        *targets,\n        scan_id=None,\n        dispatcher=None,\n        **kwargs,\n    ):\n        \"\"\"\n        Initializes the Scanner class.\n\n        If a premade `preset` is specified, it will be used for the scan.\n        Otherwise, `Scan` accepts the same arguments as `Preset`, which are passed through and used to create a new preset.\n\n        Args:\n            *targets (list[str], optional): Scan targets (passed through to `Preset`).\n            preset (Preset, optional): Preset to use for the scan.\n            scan_id (str, optional): Unique identifier for the scan. Auto-generates if None.\n            dispatcher (Dispatcher, optional): Dispatcher object to use. 
Defaults to new Dispatcher.\n            **kwargs (list[str], optional): Additional keyword arguments (passed through to `Preset`).\n        \"\"\"\n        self._root_event = None\n        self._finish_event = None\n        self.start_time = None\n        self.end_time = None\n        self.duration = None\n        self.duration_human = None\n        self.duration_seconds = None\n\n        self._success = False\n\n        if scan_id is not None:\n            self.id = str(scan_id)\n        else:\n            self.id = f\"SCAN:{sha1(rand_string(20)).hexdigest()}\"\n\n        custom_preset = kwargs.pop(\"preset\", None)\n        kwargs[\"_log\"] = True\n\n        from .preset import Preset\n\n        base_preset = Preset(*targets, **kwargs)\n\n        if custom_preset is not None:\n            if not isinstance(custom_preset, Preset):\n                raise ValidationError(f'Preset must be of type Preset, not \"{type(custom_preset).__name__}\"')\n            base_preset.merge(custom_preset)\n\n        self.preset = base_preset.bake(self)\n\n        # scan name\n        if self.preset.scan_name is None:\n            tries = 0\n            while 1:\n                if tries &gt; 5:\n                    scan_name = f\"{rand_string(4)}_{rand_string(4)}\"\n                    break\n                scan_name = random_name()\n                if self.preset.output_dir is not None:\n                    home_path = Path(self.preset.output_dir).resolve() / scan_name\n                else:\n                    home_path = self.preset.bbot_home / \"scans\" / scan_name\n                if not home_path.exists():\n                    break\n                tries += 1\n        else:\n            scan_name = str(self.preset.scan_name)\n        self.name = scan_name.replace(\"/\", \"_\")\n\n        # make sure the preset has a description\n        if not self.preset.description:\n            self.preset.description = self.name\n\n        # scan output dir\n        if self.preset.output_dir is not None:\n            self.home = Path(self.preset.output_dir).resolve() / self.name\n        else:\n            self.home = self.preset.bbot_home / \"scans\" / self.name\n\n        self._status = \"NOT_STARTED\"\n        self._status_code = 0\n\n        self.modules = OrderedDict({})\n        self._modules_loaded = False\n        self.dummy_modules = {}\n\n        if dispatcher is None:\n            from .dispatcher import Dispatcher\n\n            self.dispatcher = Dispatcher()\n        else:\n            self.dispatcher = dispatcher\n        self.dispatcher.set_scan(self)\n\n        # scope distance\n        self.scope_config = self.config.get(\"scope\", {})\n        self.scope_search_distance = max(0, int(self.scope_config.get(\"search_distance\", 0)))\n        self.scope_report_distance = int(self.scope_config.get(\"report_distance\", 1))\n\n        # web config\n        self.web_config = self.config.get(\"web\", {})\n        self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n        self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n        self.web_spider_links_per_page = self.web_config.get(\"spider_links_per_page\", 20)\n        max_redirects = self.web_config.get(\"http_max_redirects\", 5)\n        self.web_max_redirects = max(max_redirects, self.web_spider_distance)\n        self.http_proxy = self.web_config.get(\"http_proxy\", \"\")\n        self.http_timeout = self.web_config.get(\"http_timeout\", 10)\n        self.httpx_timeout = self.web_config.get(\"httpx_timeout\", 5)\n   
     self.http_retries = self.web_config.get(\"http_retries\", 1)\n        self.httpx_retries = self.web_config.get(\"httpx_retries\", 1)\n        self.useragent = self.web_config.get(\"user_agent\", \"BBOT\")\n        # custom HTTP headers warning\n        self.custom_http_headers = self.web_config.get(\"http_headers\", {})\n        if self.custom_http_headers:\n            self.warning(\n                \"You have enabled custom HTTP headers. These will be attached to all in-scope requests and all requests made by httpx.\"\n            )\n\n        # url file extensions\n        self.url_extension_blacklist = set(e.lower() for e in self.config.get(\"url_extension_blacklist\", []))\n        self.url_extension_httpx_only = set(e.lower() for e in self.config.get(\"url_extension_httpx_only\", []))\n\n        # url querystring behavior\n        self.url_querystring_remove = self.config.get(\"url_querystring_remove\", True)\n\n        # blob inclusion\n        self._file_blobs = self.config.get(\"file_blobs\", False)\n        self._folder_blobs = self.config.get(\"folder_blobs\", False)\n\n        # how often to print scan status\n        self.status_frequency = self.config.get(\"status_frequency\", 15)\n\n        from .stats import ScanStats\n\n        self.stats = ScanStats(self)\n\n        self._prepped = False\n        self._finished_init = False\n        self._new_activity = False\n        self._cleanedup = False\n        self._omitted_event_types = None\n\n        self.__loop = None\n        self._manager_worker_loop_tasks = []\n        self.init_events_task = None\n        self.ticker_task = None\n        self.dispatcher_tasks = []\n\n        self._stopping = False\n\n        self._dns_strings = None\n        self._dns_regexes = None\n        self._dns_regexes_yara = None\n        self._dns_yara_rules_uncompiled = None\n        self._dns_yara_rules = None\n\n        self.__log_handlers = None\n        self._log_handler_backup = []\n\n    async def _prep(self):\n        \"\"\"\n        Creates the scan's output folder, loads its modules, and calls their .setup() methods.\n        \"\"\"\n\n        # update the master PID\n        SHARED_INTERPRETER_STATE.update_scan_pid()\n\n        self.helpers.mkdir(self.home)\n        if not self._prepped:\n            # save scan preset\n            with open(self.home / \"preset.yml\", \"w\") as f:\n                f.write(self.preset.to_yaml())\n\n            # log scan overview\n            start_msg = f\"Scan seeded with {len(self.seeds):,} targets\"\n            details = []\n            if self.whitelist != self.target:\n                details.append(f\"{len(self.whitelist):,} in whitelist\")\n            if self.blacklist:\n                details.append(f\"{len(self.blacklist):,} in blacklist\")\n            if details:\n                start_msg += f\" ({', '.join(details)})\"\n            self.hugeinfo(start_msg)\n\n            # load scan modules (this imports and instantiates them)\n            # up to this point they were only preloaded\n            await self.load_modules()\n\n            # run each module's .setup() method\n            succeeded, hard_failed, soft_failed = await self.setup_modules()\n\n            # intercept modules get sewn together like human centipede\n            self.intercept_modules = [m for m in self.modules.values() if m._intercept]\n            for i, intercept_module in enumerate(self.intercept_modules[1:]):\n                prev_intercept_module = self.intercept_modules[i]\n                self.debug(\n   
                 f\"Setting intercept module {intercept_module.name}._incoming_event_queue to previous intercept module {prev_intercept_module.name}.outgoing_event_queue\"\n                )\n                interqueue = asyncio.Queue()\n                intercept_module._incoming_event_queue = interqueue\n                prev_intercept_module._outgoing_event_queue = interqueue\n\n            # abort if there are no output modules\n            num_output_modules = len([m for m in self.modules.values() if m._type == \"output\"])\n            if num_output_modules &lt; 1:\n                raise ScanError(\"Failed to load output modules. Aborting.\")\n            # abort if any of the module .setup()s hard-failed (i.e. they errored or returned False)\n            total_failed = len(hard_failed + soft_failed)\n            if hard_failed:\n                msg = f\"Setup hard-failed for {len(hard_failed):,} modules ({','.join(hard_failed)})\"\n                self._fail_setup(msg)\n\n            total_modules = total_failed + len(self.modules)\n            success_msg = f\"Setup succeeded for {len(self.modules):,}/{total_modules:,} modules.\"\n\n            self.success(success_msg)\n            self._prepped = True\n\n    def start(self):\n        for event in async_to_sync_gen(self.async_start()):\n            yield event\n\n    def start_without_generator(self):\n        for event in async_to_sync_gen(self.async_start()):\n            pass\n\n    async def async_start_without_generator(self):\n        async for event in self.async_start():\n            pass\n\n    async def async_start(self):\n        \"\"\" \"\"\"\n        self.start_time = datetime.now()\n        self.root_event.data[\"started_at\"] = self.start_time.isoformat()\n        try:\n            await self._prep()\n\n            self._start_log_handlers()\n            self.trace(f'Ran BBOT {__version__} at {self.start_time}, command: {\" \".join(sys.argv)}')\n            self.trace(f\"Target: {self.preset.target.json}\")\n            self.trace(f\"Preset: {self.preset.to_dict(redact_secrets=True)}\")\n\n            if not self.target:\n                self.warning(f\"No scan targets specified\")\n\n            # start status ticker\n            self.ticker_task = asyncio.create_task(\n                self._status_ticker(self.status_frequency), name=f\"{self.name}._status_ticker()\"\n            )\n\n            self.status = \"STARTING\"\n\n            if not self.modules:\n                self.error(f\"No modules loaded\")\n                self.status = \"FAILED\"\n                return\n            else:\n                self.hugesuccess(f\"Starting scan {self.name}\")\n\n            await self.dispatcher.on_start(self)\n\n            self.status = \"RUNNING\"\n            self._start_modules()\n            self.verbose(f\"{len(self.modules):,} modules started\")\n\n            # distribute seed events\n            self.init_events_task = asyncio.create_task(\n                self.ingress_module.init_events(self.target.seeds.events),\n                name=f\"{self.name}.ingress_module.init_events()\",\n            )\n\n            # main scan loop\n            while 1:\n                # abort if we're aborting\n                if self.aborting:\n                    self._drain_queues()\n                    break\n\n                # yield events as they come (async for event in scan.async_start())\n                if \"python\" in self.modules:\n                    events, finish = await 
self.modules[\"python\"]._events_waiting(batch_size=-1)\n                    for e in events:\n                        yield e\n                    if events:\n                        continue\n\n                # break if initialization finished and the scan is no longer active\n                if self._finished_init and self.modules_finished:\n                    new_activity = await self.finish()\n                    if not new_activity:\n                        self._success = True\n                        scan_finish_event = await self._mark_finished()\n                        yield scan_finish_event\n                        break\n\n                await asyncio.sleep(0.1)\n\n            self._success = True\n\n        except BaseException as e:\n            if self.helpers.in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                self.stop()\n                self._success = True\n            else:\n                try:\n                    raise\n                except ScanError as e:\n                    self.error(f\"{e}\")\n\n                except BBOTError as e:\n                    self.critical(f\"Error during scan: {e}\")\n\n                except Exception:\n                    self.critical(f\"Unexpected error during scan:\\n{traceback.format_exc()}\")\n\n        finally:\n            tasks = self._cancel_tasks()\n            self.debug(f\"Awaiting {len(tasks):,} tasks\")\n            for task in tasks:\n                # self.debug(f\"Awaiting {task}\")\n                with contextlib.suppress(BaseException):\n                    await asyncio.wait_for(task, timeout=0.1)\n            self.debug(f\"Awaited {len(tasks):,} tasks\")\n            await self._report()\n            await self._cleanup()\n\n            await self.dispatcher.on_finish(self)\n\n            self._stop_log_handlers()\n\n    async def _mark_finished(self):\n        log_fn = self.hugesuccess\n        if self.status == \"ABORTING\":\n            status = \"ABORTED\"\n            log_fn = self.hugewarning\n        elif not self._success:\n            status = \"FAILED\"\n            log_fn = self.critical\n        else:\n            status = \"FINISHED\"\n\n        self.end_time = datetime.now()\n        self.duration = self.end_time - self.start_time\n        self.duration_seconds = self.duration.total_seconds()\n        self.duration_human = self.helpers.human_timedelta(self.duration)\n\n        status_message = f\"Scan {self.name} completed in {self.duration_human} with status {status}\"\n\n        scan_finish_event = self.finish_event(status_message, status)\n\n        # queue final scan event with output modules\n        output_modules = [m for m in self.modules.values() if m._type == \"output\" and m.name != \"python\"]\n        for m in output_modules:\n            await m.queue_event(scan_finish_event)\n        # wait until output modules are flushed\n        while 1:\n            modules_finished = all([m.finished for m in output_modules])\n            if modules_finished:\n                break\n            await asyncio.sleep(0.05)\n\n        self.status = status\n        log_fn(status_message)\n        return scan_finish_event\n\n    def _start_modules(self):\n        self.verbose(f\"Starting module worker loops\")\n        for module in self.modules.values():\n            module.start()\n\n    async def setup_modules(self, remove_failed=True):\n        \"\"\"Asynchronously initializes all loaded modules by invoking their `setup()` methods.\n\n        Args:\n       
     remove_failed (bool): Flag indicating whether to remove modules that fail setup.\n\n        Returns:\n            tuple:\n                succeeded - List of modules that successfully set up.\n                hard_failed - List of modules that encountered a hard failure during setup.\n                soft_failed - List of modules that encountered a soft failure during setup.\n\n        Raises:\n            ScanError: If no output modules could be loaded.\n\n        Notes:\n            Hard-failed modules are set to an error state and removed if `remove_failed` is True.\n            Soft-failed modules are not set to an error state but are also removed if `remove_failed` is True.\n        \"\"\"\n        await self.load_modules()\n        self.verbose(f\"Setting up modules\")\n        succeeded = []\n        hard_failed = []\n        soft_failed = []\n\n        async for task in self.helpers.as_completed([m._setup() for m in self.modules.values()]):\n            module, status, msg = await task\n            if status == True:\n                self.debug(f\"Setup succeeded for {module.name} ({msg})\")\n                succeeded.append(module.name)\n            elif status == False:\n                self.warning(f\"Setup hard-failed for {module.name}: {msg}\")\n                self.modules[module.name].set_error_state()\n                hard_failed.append(module.name)\n            else:\n                self.info(f\"Setup soft-failed for {module.name}: {msg}\")\n                soft_failed.append(module.name)\n            if (not status) and (module._intercept or remove_failed):\n                # if a intercept module fails setup, we always remove it\n                self.modules.pop(module.name)\n\n        return succeeded, hard_failed, soft_failed\n\n    async def load_modules(self):\n        \"\"\"Asynchronously import and instantiate all scan modules, including internal and output modules.\n\n        This method is automatically invoked by `setup_modules()`. It performs several key tasks in the following sequence:\n\n        1. Install dependencies for each module via `self.helpers.depsinstaller.install()`.\n        2. Load scan modules and updates the `modules` dictionary.\n        3. Load internal modules and updates the `modules` dictionary.\n        4. Load output modules and updates the `modules` dictionary.\n        5. 
Sorts modules based on their `_priority` attribute.\n\n        If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is True).\n\n        Attributes:\n            succeeded, failed (tuple): A tuple containing lists of modules that succeeded or failed during the dependency installation.\n            loaded_modules, loaded_internal_modules, loaded_output_modules (dict): Dictionaries of successfully loaded modules.\n            failed, failed_internal, failed_output (list): Lists of module names that failed to load.\n\n        Raises:\n            ScanError: If any module dependencies fail to install or modules fail to load, and if `self.force_start` is False.\n\n        Returns:\n            None\n\n        Note:\n            After all modules are loaded, they are sorted by `_priority` and stored in the `modules` dictionary.\n        \"\"\"\n        if not self._modules_loaded:\n            if not self.preset.modules:\n                self.warning(f\"No modules to load\")\n                return\n\n            if not self.preset.scan_modules:\n                self.warning(f\"No scan modules to load\")\n\n            # install module dependencies\n            succeeded, failed = await self.helpers.depsinstaller.install(*self.preset.modules)\n            if failed:\n                msg = f\"Failed to install dependencies for {len(failed):,} modules: {','.join(failed)}\"\n                self._fail_setup(msg)\n            modules = sorted([m for m in self.preset.scan_modules if m in succeeded])\n            output_modules = sorted([m for m in self.preset.output_modules if m in succeeded])\n            internal_modules = sorted([m for m in self.preset.internal_modules if m in succeeded])\n\n            # Load scan modules\n            self.verbose(f\"Loading {len(modules):,} scan modules: {','.join(modules)}\")\n            loaded_modules, failed = self._load_modules(modules)\n            self.modules.update(loaded_modules)\n            if len(failed) &gt; 0:\n                msg = f\"Failed to load {len(failed):,} scan modules: {','.join(failed)}\"\n                self._fail_setup(msg)\n            if loaded_modules:\n                self.info(\n                    f\"Loaded {len(loaded_modules):,}/{len(self.preset.scan_modules):,} scan modules ({','.join(loaded_modules)})\"\n                )\n\n            # Load internal modules\n            self.verbose(f\"Loading {len(internal_modules):,} internal modules: {','.join(internal_modules)}\")\n            loaded_internal_modules, failed_internal = self._load_modules(internal_modules)\n            self.modules.update(loaded_internal_modules)\n            if len(failed_internal) &gt; 0:\n                msg = f\"Failed to load {len(loaded_internal_modules):,} internal modules: {','.join(loaded_internal_modules)}\"\n                self._fail_setup(msg)\n            if loaded_internal_modules:\n                self.info(\n                    f\"Loaded {len(loaded_internal_modules):,}/{len(self.preset.internal_modules):,} internal modules ({','.join(loaded_internal_modules)})\"\n                )\n\n            # Load output modules\n            self.verbose(f\"Loading {len(output_modules):,} output modules: {','.join(output_modules)}\")\n            loaded_output_modules, failed_output = self._load_modules(output_modules)\n            self.modules.update(loaded_output_modules)\n            if len(failed_output) &gt; 0:\n                msg = f\"Failed to load 
{len(failed_output):,} output modules: {','.join(failed_output)}\"\n                self._fail_setup(msg)\n            if loaded_output_modules:\n                self.info(\n                    f\"Loaded {len(loaded_output_modules):,}/{len(self.preset.output_modules):,} output modules, ({','.join(loaded_output_modules)})\"\n                )\n\n            # builtin intercept modules\n            self.ingress_module = ScanIngress(self)\n            self.egress_module = ScanEgress(self)\n            self.modules[self.ingress_module.name] = self.ingress_module\n            self.modules[self.egress_module.name] = self.egress_module\n\n            # sort modules by priority\n            self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], \"priority\", 3)))\n\n            self._modules_loaded = True\n\n    @property\n    def modules_finished(self):\n        finished_modules = [m.finished for m in self.modules.values()]\n        return all(finished_modules)\n\n    def kill_module(self, module_name, message=None):\n        from signal import SIGINT\n\n        module = self.modules[module_name]\n        if module._intercept:\n            self.warning(f'Cannot kill module \"{module_name}\" because it is critical to the scan')\n            return\n        module.set_error_state(message=message, clear_outgoing_queue=True)\n        for proc in module._proc_tracker:\n            with contextlib.suppress(Exception):\n                proc.send_signal(SIGINT)\n        self.helpers.cancel_tasks_sync(module._tasks)\n\n    @property\n    def incoming_event_queues(self):\n        return self.ingress_module.incoming_queues\n\n    @property\n    def num_queued_events(self):\n        total = 0\n        for q in self.incoming_event_queues:\n            total += len(q._queue)\n        return total\n\n    def modules_status(self, _log=False):\n        finished = True\n        status = {\"modules\": {}}\n\n        sorted_modules = []\n        for module_name, module in self.modules.items():\n            if module_name.startswith(\"_\"):\n                continue\n            sorted_modules.append(module)\n            mod_status = module.status\n            if mod_status[\"running\"]:\n                finished = False\n            status[\"modules\"][module_name] = mod_status\n\n        # sort modules by name\n        sorted_modules.sort(key=lambda m: m.name)\n\n        status[\"finished\"] = finished\n\n        modules_errored = [m for m, s in status[\"modules\"].items() if s[\"errored\"]]\n\n        max_mem_percent = 90\n        mem_status = self.helpers.memory_status()\n        # abort if we don't have the memory\n        mem_percent = mem_status.percent\n        if mem_percent &gt; max_mem_percent:\n            free_memory = mem_status.available\n            free_memory_human = self.helpers.bytes_to_human(free_memory)\n            self.warning(f\"System memory is at {mem_percent:.1f}% ({free_memory_human} remaining)\")\n\n        if _log:\n            modules_status = []\n            for m, s in status[\"modules\"].items():\n                running = s[\"running\"]\n                incoming = s[\"events\"][\"incoming\"]\n                outgoing = s[\"events\"][\"outgoing\"]\n                tasks = s[\"tasks\"]\n                total = sum([incoming, outgoing, tasks])\n                if running or total &gt; 0:\n                    modules_status.append((m, running, incoming, outgoing, tasks, total))\n            modules_status.sort(key=lambda x: x[-1], reverse=True)\n\n         
   if modules_status:\n                modules_status_str = \", \".join([f\"{m}({i:,}:{t:,}:{o:,})\" for m, r, i, o, t, _ in modules_status])\n                self.info(f\"{self.name}: Modules running (incoming:processing:outgoing) {modules_status_str}\")\n            else:\n                self.info(f\"{self.name}: No modules running\")\n            event_type_summary = sorted(self.stats.events_emitted_by_type.items(), key=lambda x: x[-1], reverse=True)\n            if event_type_summary:\n                self.info(\n                    f'{self.name}: Events produced so far: {\", \".join([f\"{k}: {v}\" for k,v in event_type_summary])}'\n                )\n            else:\n                self.info(f\"{self.name}: No events produced yet\")\n\n            if modules_errored:\n                self.verbose(\n                    f'{self.name}: Modules errored: {len(modules_errored):,} ({\", \".join([m for m in modules_errored])})'\n                )\n\n            num_queued_events = self.num_queued_events\n            if num_queued_events:\n                self.info(\n                    f\"{self.name}: {num_queued_events:,} events in queue ({self.stats.speedometer.speed:,} processed in the past {self.status_frequency} seconds)\"\n                )\n            else:\n                self.info(\n                    f\"{self.name}: No events in queue ({self.stats.speedometer.speed:,} processed in the past {self.status_frequency} seconds)\"\n                )\n\n            if self.log_level &lt;= logging.DEBUG:\n                # status debugging\n                scan_active_status = []\n                scan_active_status.append(f\"scan._finished_init: {self._finished_init}\")\n                scan_active_status.append(f\"scan.modules_finished: {self.modules_finished}\")\n                for m in sorted_modules:\n                    running = m.running\n                    scan_active_status.append(f\"    {m}:\")\n                    # scan_active_status.append(f\"        running: {running}\")\n                    if running:\n                        # scan_active_status.append(f\"        tasks:\")\n                        for task in list(m._task_counter.tasks.values()):\n                            scan_active_status.append(f\"        - {task}:\")\n                    # scan_active_status.append(f\"        incoming_queue_size: {m.num_incoming_events}\")\n                    # scan_active_status.append(f\"        outgoing_queue_size: {m.outgoing_event_queue.qsize()}\")\n                for line in scan_active_status:\n                    self.debug(line)\n\n                # log module memory usage\n                module_memory_usage = []\n                for module in sorted_modules:\n                    memory_usage = module.memory_usage\n                    module_memory_usage.append((module.name, memory_usage))\n                module_memory_usage.sort(key=lambda x: x[-1], reverse=True)\n                self.debug(f\"MODULE MEMORY USAGE:\")\n                for module_name, usage in module_memory_usage:\n                    self.debug(f\"    - {module_name}: {self.helpers.bytes_to_human(usage)}\")\n\n        status.update({\"modules_errored\": len(modules_errored)})\n\n        return status\n\n    def stop(self):\n        \"\"\"Stops the in-progress scan and performs necessary cleanup.\n\n        This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. 
It also kills child processes spawned during the scan.\n\n        Returns:\n            None\n        \"\"\"\n        if not self._stopping:\n            self._stopping = True\n            self.status = \"ABORTING\"\n            self.hugewarning(\"Aborting scan\")\n            self.trace()\n            self._cancel_tasks()\n            self._drain_queues()\n            self.helpers.kill_children()\n            self._drain_queues()\n            self.helpers.kill_children()\n            self.debug(\"Finished aborting scan\")\n\n    async def finish(self):\n        \"\"\"Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected.\n\n        The method is idempotent and will return False if no new activity has been recorded since the last invocation.\n\n        Returns:\n            bool: True if new activity has been detected and the `finished()` method is invoked on all modules.\n                  False if no new activity has been detected since the last invocation.\n\n        Notes:\n            This method alters the scan's status to \"FINISHING\" if new activity is detected.\n        \"\"\"\n        # if new events were generated since last time we were here\n        if self._new_activity:\n            self._new_activity = False\n            self.status = \"FINISHING\"\n            # Trigger .finished() on every module and start over\n            log.info(\"Finishing scan\")\n            for module in self.modules.values():\n                finished_event = self.make_event(f\"FINISHED\", \"FINISHED\", dummy=True, tags={module.name})\n                await module.queue_event(finished_event)\n            self.verbose(\"Completed finish()\")\n            return True\n        self.verbose(\"Completed final finish()\")\n        # Return False if no new events were generated since last time\n        return False\n\n    def _drain_queues(self):\n        \"\"\"Empties all the event queues for each loaded module and the manager's incoming event queue.\n\n        This method iteratively empties both the incoming and outgoing event queues of each module, as well as the incoming event queue of the scan manager.\n\n        Returns:\n            None\n        \"\"\"\n        self.debug(\"Draining queues\")\n        for module in self.modules.values():\n            with contextlib.suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    if module.incoming_event_queue not in (None, False):\n                        module.incoming_event_queue.get_nowait()\n            with contextlib.suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    if module.outgoing_event_queue not in (None, False):\n                        module.outgoing_event_queue.get_nowait()\n        self.debug(\"Finished draining queues\")\n\n    def _cancel_tasks(self):\n        \"\"\"Cancels all asynchronous tasks and shuts down the process pool.\n\n        This method collects all pending tasks from each module, the dispatcher,\n        and the scan manager. After collecting these tasks, it cancels them synchronously\n        using a helper function. 
Finally, it shuts down the process pool, canceling any\n        pending futures.\n\n        Returns:\n            None\n        \"\"\"\n        self.debug(\"Cancelling all scan tasks\")\n        tasks = []\n        # module workers\n        for m in self.modules.values():\n            tasks += getattr(m, \"_tasks\", [])\n        # init events\n        if self.init_events_task:\n            tasks.append(self.init_events_task)\n        # ticker\n        if self.ticker_task:\n            tasks.append(self.ticker_task)\n        # dispatcher\n        tasks += self.dispatcher_tasks\n        # manager worker loops\n        tasks += self._manager_worker_loop_tasks\n        self.helpers.cancel_tasks_sync(tasks)\n        # process pool\n        self.helpers.process_pool.shutdown(cancel_futures=True)\n        self.debug(\"Finished cancelling all scan tasks\")\n        return tasks\n\n    async def _report(self):\n        \"\"\"Asynchronously executes the `report()` method for each module in the scan.\n\n        This method is called once at the end of each scan and is responsible for\n        triggering the `report()` function for each module. It executes irrespective\n        of whether the scan was aborted or completed successfully. The method makes\n        use of an asynchronous context manager (`_acatch`) to handle exceptions and\n        a task counter to keep track of the task's context.\n\n        Returns:\n            None\n        \"\"\"\n        for mod in self.modules.values():\n            context = f\"{mod.name}.report()\"\n            async with self._acatch(context), mod._task_counter.count(context):\n                await mod.report()\n\n    async def _cleanup(self):\n        \"\"\"Asynchronously executes the `cleanup()` method for each module in the scan.\n\n        This method is called once at the end of the scan to perform resource cleanup\n        tasks. It is executed regardless of whether the scan was aborted or completed\n        successfully. 
The scan status is set to \"CLEANING_UP\" during the execution.\n        After calling the `cleanup()` method for each module, it performs additional\n        cleanup tasks such as removing the scan's home directory if empty and cleaning\n        old scans.\n\n        Returns:\n            None\n        \"\"\"\n        # clean up self\n        if not self._cleanedup:\n            self._cleanedup = True\n            self.status = \"CLEANING_UP\"\n            # clean up dns engine\n            if self.helpers._dns is not None:\n                await self.helpers.dns.shutdown()\n            # clean up web engine\n            if self.helpers._web is not None:\n                await self.helpers.web.shutdown()\n            # clean up modules\n            for mod in self.modules.values():\n                await mod._cleanup()\n            with contextlib.suppress(Exception):\n                self.home.rmdir()\n            self.helpers.clean_old_scans()\n\n    def in_scope(self, *args, **kwargs):\n        return self.preset.in_scope(*args, **kwargs)\n\n    def whitelisted(self, *args, **kwargs):\n        return self.preset.whitelisted(*args, **kwargs)\n\n    def blacklisted(self, *args, **kwargs):\n        return self.preset.blacklisted(*args, **kwargs)\n\n    @property\n    def core(self):\n        return self.preset.core\n\n    @property\n    def config(self):\n        return self.preset.core.config\n\n    @property\n    def target(self):\n        return self.preset.target\n\n    @property\n    def seeds(self):\n        return self.preset.seeds\n\n    @property\n    def whitelist(self):\n        return self.preset.whitelist\n\n    @property\n    def blacklist(self):\n        return self.preset.blacklist\n\n    @property\n    def helpers(self):\n        return self.preset.helpers\n\n    @property\n    def force_start(self):\n        return self.preset.force_start\n\n    @property\n    def word_cloud(self):\n        return self.helpers.word_cloud\n\n    @property\n    def stopping(self):\n        return not self.running\n\n    @property\n    def stopped(self):\n        return self._status_code &gt; 5\n\n    @property\n    def running(self):\n        return 0 &lt; self._status_code &lt; 4\n\n    @property\n    def aborting(self):\n        return 5 &lt;= self._status_code &lt;= 6\n\n    @property\n    def status(self):\n        return self._status\n\n    @property\n    def omitted_event_types(self):\n        if self._omitted_event_types is None:\n            self._omitted_event_types = self.config.get(\"omit_event_types\", [])\n        return self._omitted_event_types\n\n    @status.setter\n    def status(self, status):\n        \"\"\"\n        Block setting after status has been aborted\n        \"\"\"\n        status = str(status).strip().upper()\n        if status in self._status_codes:\n            if self.status == \"ABORTING\" and not status == \"ABORTED\":\n                self.debug(f'Attempt to set invalid status \"{status}\" on aborted scan')\n            else:\n                if status != self._status:\n                    self._status = status\n                    self._status_code = self._status_codes[status]\n                    self.dispatcher_tasks.append(\n                        asyncio.create_task(\n                            self.dispatcher.catch(self.dispatcher.on_status, self._status, self.id),\n                            name=f\"{self.name}.dispatcher.on_status({status})\",\n                        )\n                    )\n                else:\n                    
self.debug(f'Scan status is already \"{status}\"')\n        else:\n            self.debug(f'Attempt to set invalid status \"{status}\" on scan')\n\n    def make_event(self, *args, **kwargs):\n        kwargs[\"scan\"] = self\n        event = make_event(*args, **kwargs)\n        return event\n\n    @property\n    def root_event(self):\n        \"\"\"\n        The root scan event, e.g.:\n            ```json\n            {\n              \"type\": \"SCAN\",\n              \"id\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"data\": \"pixilated_kathryn (SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54)\",\n              \"scope_distance\": 0,\n              \"scan\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"timestamp\": 1694548779.616255,\n              \"parent\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"tags\": [\n                \"distance-0\"\n              ],\n              \"module\": \"TARGET\",\n              \"module_sequence\": \"TARGET\"\n            }\n            ```\n        \"\"\"\n        if self._root_event is None:\n            self._root_event = self.make_root_event(f\"Scan {self.name} started at {self.start_time}\")\n        self._root_event.data[\"status\"] = self.status\n        return self._root_event\n\n    def finish_event(self, context=None, status=None):\n        if self._finish_event is None:\n            if context is None or status is None:\n                raise ValueError(\"Must specify context and status\")\n            self._finish_event = self.make_root_event(context)\n            self._finish_event.data[\"status\"] = status\n        return self._finish_event\n\n    def make_root_event(self, context):\n        root_event = self.make_event(data=self.json, event_type=\"SCAN\", dummy=True, context=context)\n        root_event._id = self.id\n        root_event.scope_distance = 0\n        root_event.parent = root_event\n        root_event.module = self._make_dummy_module(name=\"TARGET\", _type=\"TARGET\")\n        return root_event\n\n    @property\n    def dns_strings(self):\n        \"\"\"\n        A list of DNS hostname strings generated from the scan target\n        \"\"\"\n        if self._dns_strings is None:\n            dns_whitelist = set(t.host for t in self.whitelist if t.host and isinstance(t.host, str))\n            dns_whitelist = sorted(dns_whitelist, key=len)\n            dns_whitelist_set = set()\n            dns_strings = []\n            for t in dns_whitelist:\n                if not any(x in dns_whitelist_set for x in self.helpers.domain_parents(t, include_self=True)):\n                    dns_whitelist_set.add(t)\n                    dns_strings.append(t)\n            self._dns_strings = dns_strings\n        return self._dns_strings\n\n    def _generate_dns_regexes(self, pattern):\n        \"\"\"\n        Generates a list of compiled DNS hostname regexes based on the provided pattern.\n        This method centralizes the regex compilation to avoid redundancy in the dns_regexes and dns_regexes_yara methods.\n\n        Args:\n            pattern (str):\n        Returns:\n            list[re.Pattern]: A list of compiled regex patterns if enabled, otherwise an empty list.\n        \"\"\"\n\n        dns_regexes = []\n        for t in self.dns_strings:\n            regex_pattern = re.compile(f\"{pattern}{re.escape(t)})\", re.I)\n            log.debug(f\"Generated Regex [{regex_pattern.pattern}] for domain {t}\")\n            dns_regexes.append(regex_pattern)\n        return 
dns_regexes\n\n    @property\n    def dns_regexes(self):\n        \"\"\"\n        A list of DNS hostname regexes generated from the scan target\n        For the purpose of extracting hostnames\n\n        Examples:\n            Extract hostnames from text:\n            &gt;&gt;&gt; for regex in scan.dns_regexes:\n            ...     for match in regex.finditer(response.text):\n            ...         hostname = match.group().lower()\n        \"\"\"\n        if self._dns_regexes is None:\n            self._dns_regexes = self._generate_dns_regexes(r\"((?:(?:[\\w-]+)\\.)+\")\n        return self._dns_regexes\n\n    @property\n    def dns_regexes_yara(self):\n        \"\"\"\n        Returns a list of DNS hostname regexes formatted specifically for compatibility with YARA rules.\n        \"\"\"\n        if self._dns_regexes_yara is None:\n            self._dns_regexes_yara = self._generate_dns_regexes(r\"(([a-z0-9-]+\\.)*\")\n        return self._dns_regexes_yara\n\n    @property\n    def dns_yara_rules_uncompiled(self):\n        if self._dns_yara_rules_uncompiled is None:\n            regexes_component_list = []\n            for i, r in enumerate(self.dns_regexes_yara):\n                regexes_component_list.append(rf\"$dns_name_{i} = /\\b{r.pattern}/ nocase\")\n            if regexes_component_list:\n                regexes_component = \" \".join(regexes_component_list)\n                self._dns_yara_rules_uncompiled = f'rule hostname_extraction {{meta: description = \"matches DNS hostname pattern derived from target(s)\" strings: {regexes_component} condition: any of them}}'\n        return self._dns_yara_rules_uncompiled\n\n    async def dns_yara_rules(self):\n        if self._dns_yara_rules is None:\n            if self.dns_yara_rules_uncompiled is not None:\n                import yara\n\n                self._dns_yara_rules = await self.helpers.run_in_executor(\n                    yara.compile, source=self.dns_yara_rules_uncompiled\n                )\n        return self._dns_yara_rules\n\n    async def extract_in_scope_hostnames(self, s):\n        \"\"\"\n        Given a string, uses yara to extract hostnames matching scan targets\n\n        Examples:\n            &gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n            ... 
{\"www.evilcorp.com\"}\n        \"\"\"\n        matches = set()\n        dns_yara_rules = await self.dns_yara_rules()\n        if dns_yara_rules is not None:\n            for match in await self.helpers.run_in_executor(dns_yara_rules.match, data=s):\n                for string in match.strings:\n                    for instance in string.instances:\n                        matches.add(str(instance))\n        return matches\n\n    @property\n    def json(self):\n        \"\"\"\n        A dictionary representation of the scan including its name, ID, targets, whitelist, blacklist, and modules\n        \"\"\"\n        j = dict()\n        for i in (\"id\", \"name\"):\n            v = getattr(self, i, \"\")\n            if v:\n                j.update({i: v})\n        j[\"target\"] = self.preset.target.json\n        j[\"preset\"] = self.preset.to_dict(redact_secrets=True)\n        if self.start_time is not None:\n            j[\"started_at\"] = self.start_time.isoformat()\n        if self.end_time is not None:\n            j[\"finished_at\"] = self.end_time.isoformat()\n        if self.duration is not None:\n            j[\"duration_seconds\"] = self.duration_seconds\n        if self.duration_human is not None:\n            j[\"duration\"] = self.duration_human\n        return j\n\n    def debug(self, *args, trace=False, **kwargs):\n        log.debug(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def verbose(self, *args, trace=False, **kwargs):\n        log.verbose(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeverbose(self, *args, trace=False, **kwargs):\n        log.hugeverbose(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def info(self, *args, trace=False, **kwargs):\n        log.info(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeinfo(self, *args, trace=False, **kwargs):\n        log.hugeinfo(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def success(self, *args, trace=False, **kwargs):\n        log.success(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugesuccess(self, *args, trace=False, **kwargs):\n        log.hugesuccess(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def warning(self, *args, trace=True, **kwargs):\n        log.warning(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugewarning(self, *args, trace=True, **kwargs):\n        log.hugewarning(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def error(self, *args, trace=True, **kwargs):\n        log.error(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def trace(self, msg=None):\n        if msg is None:\n            e_type, e_val, e_traceback = exc_info()\n            if e_type is not None:\n                log.trace(traceback.format_exc())\n        else:\n            log.trace(msg)\n\n    def critical(self, *args, trace=True, **kwargs):\n        log.critical(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    @property\n    def log_level(self):\n        \"\"\"\n        Return the current log level, e.g. 
logging.INFO\n        \"\"\"\n        return self.core.logger.log_level\n\n    @property\n    def _log_handlers(self):\n        if self.__log_handlers is None:\n            self.helpers.mkdir(self.home)\n            main_handler = logging.handlers.TimedRotatingFileHandler(\n                str(self.home / \"scan.log\"), when=\"d\", interval=1, backupCount=14\n            )\n            main_handler.addFilter(lambda x: x.levelno != logging.TRACE and x.levelno &gt;= logging.VERBOSE)\n            debug_handler = logging.handlers.TimedRotatingFileHandler(\n                str(self.home / \"debug.log\"), when=\"d\", interval=1, backupCount=14\n            )\n            debug_handler.addFilter(lambda x: x.levelno &gt;= logging.DEBUG)\n            self.__log_handlers = [main_handler, debug_handler]\n        return self.__log_handlers\n\n    def _start_log_handlers(self):\n        # add log handlers\n        for handler in self._log_handlers:\n            self.core.logger.add_log_handler(handler)\n        # temporarily disable main ones\n        for handler_name in (\"file_main\", \"file_debug\"):\n            handler = self.core.logger.log_handlers.get(handler_name, None)\n            if handler is not None and handler not in self._log_handler_backup:\n                self._log_handler_backup.append(handler)\n                self.core.logger.remove_log_handler(handler)\n\n    def _stop_log_handlers(self):\n        # remove log handlers\n        for handler in self._log_handlers:\n            self.core.logger.remove_log_handler(handler)\n        # restore main ones\n        for handler in self._log_handler_backup:\n            self.core.logger.add_log_handler(handler)\n\n    def _fail_setup(self, msg):\n        msg = str(msg)\n        if self.force_start:\n            self.error(msg)\n        else:\n            msg += \" (--force to run module anyway)\"\n            raise ScanError(msg)\n\n    def _load_modules(self, modules):\n        modules = [str(m) for m in modules]\n        loaded_modules = {}\n        failed = set()\n        for module_name, module_class in self.preset.module_loader.load_modules(modules).items():\n            if module_class:\n                try:\n                    loaded_modules[module_name] = module_class(self)\n                    self.verbose(f'Loaded module \"{module_name}\"')\n                    continue\n                except Exception:\n                    self.warning(f\"Failed to load module {module_class}\")\n            else:\n                self.warning(f'Failed to load unknown module \"{module_name}\"')\n            failed.add(module_name)\n        return loaded_modules, failed\n\n    async def _status_ticker(self, interval=15):\n        async with self._acatch():\n            while 1:\n                await asyncio.sleep(interval)\n                self.modules_status(_log=True)\n\n    @contextlib.asynccontextmanager\n    async def _acatch(self, context=\"scan\", finally_callback=None, unhandled_is_critical=False):\n        \"\"\"\n        Async version of catch()\n\n        async with catch():\n            await do_stuff()\n        \"\"\"\n        try:\n            yield\n        except BaseException as e:\n            self._handle_exception(e, context=context, unhandled_is_critical=unhandled_is_critical)\n\n    def _handle_exception(self, e, context=\"scan\", finally_callback=None, unhandled_is_critical=False):\n        if callable(context):\n            context = f\"{context.__qualname__}()\"\n        filename, lineno, funcname = 
self.helpers.get_traceback_details(e)\n        if self.helpers.in_exception_chain(e, (KeyboardInterrupt,)):\n            log.debug(f\"Interrupted\")\n            self.stop()\n        elif isinstance(e, BrokenPipeError):\n            log.debug(f\"BrokenPipeError in {filename}:{lineno}:{funcname}(): {e}\")\n        elif isinstance(e, asyncio.CancelledError):\n            raise\n        elif isinstance(e, Exception):\n            traceback_str = getattr(e, \"engine_traceback\", None)\n            if traceback_str is None:\n                traceback_str = traceback.format_exc()\n            if unhandled_is_critical:\n                log.critical(f\"Error in {context}: {filename}:{lineno}:{funcname}(): {e}\")\n                log.critical(traceback_str)\n            else:\n                log.error(f\"Error in {context}: {filename}:{lineno}:{funcname}(): {e}\")\n                log.trace(traceback_str)\n        if callable(finally_callback):\n            finally_callback(e)\n\n    def _make_dummy_module(self, name, _type=\"scan\"):\n        \"\"\"\n        Construct a dummy module, for attachment to events\n        \"\"\"\n        try:\n            return self.dummy_modules[name]\n        except KeyError:\n            dummy = DummyModule(scan=self, name=name, _type=_type)\n            self.dummy_modules[name] = dummy\n            return dummy\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_regexes","title":"dns_regexes  <code>property</code>","text":"<pre><code>dns_regexes\n</code></pre> <p>A list of DNS hostname regexes generated from the scan target For the purpose of extracting hostnames</p> <p>Examples:</p> <p>Extract hostnames from text:</p> <pre><code>&gt;&gt;&gt; for regex in scan.dns_regexes:\n...     for match in regex.finditer(response.text):\n...         hostname = match.group().lower()\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_regexes_yara","title":"dns_regexes_yara  <code>property</code>","text":"<pre><code>dns_regexes_yara\n</code></pre> <p>Returns a list of DNS hostname regexes formatted specifically for compatibility with YARA rules.</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_strings","title":"dns_strings  <code>property</code>","text":"<pre><code>dns_strings\n</code></pre> <p>A list of DNS hostname strings generated from the scan target</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.json","title":"json  <code>property</code>","text":"<pre><code>json\n</code></pre> <p>A dictionary representation of the scan including its name, ID, targets, whitelist, blacklist, and modules</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.log_level","title":"log_level  <code>property</code>","text":"<pre><code>log_level\n</code></pre> <p>Return the current log level, e.g. 
logging.INFO</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.root_event","title":"root_event  <code>property</code>","text":"<pre><code>root_event\n</code></pre> <p>The root scan event, e.g.:     <pre><code>{\n  \"type\": \"SCAN\",\n  \"id\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"data\": \"pixilated_kathryn (SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54)\",\n  \"scope_distance\": 0,\n  \"scan\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"timestamp\": 1694548779.616255,\n  \"parent\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"tags\": [\n    \"distance-0\"\n  ],\n  \"module\": \"TARGET\",\n  \"module_sequence\": \"TARGET\"\n}\n</code></pre></p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.__init__","title":"__init__","text":"<pre><code>__init__(*targets, scan_id=None, dispatcher=None, **kwargs)\n</code></pre> <p>Initializes the Scanner class.</p> <p>If a premade <code>preset</code> is specified, it will be used for the scan. Otherwise, <code>Scan</code> accepts the same arguments as <code>Preset</code>, which are passed through and used to create a new preset.</p> <p>Parameters:</p> <ul> <li> <code>*targets</code>               (<code>list[str]</code>, default:                   <code>()</code> )           \u2013            <p>Scan targets (passed through to <code>Preset</code>).</p> </li> <li> <code>preset</code>               (<code>Preset</code>)           \u2013            <p>Preset to use for the scan.</p> </li> <li> <code>scan_id</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Unique identifier for the scan. Auto-generates if None.</p> </li> <li> <code>dispatcher</code>               (<code>Dispatcher</code>, default:                   <code>None</code> )           \u2013            <p>Dispatcher object to use. Defaults to new Dispatcher.</p> </li> <li> <code>**kwargs</code>               (<code>list[str]</code>, default:                   <code>{}</code> )           \u2013            <p>Additional keyword arguments (passed through to <code>Preset</code>).</p> </li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>def __init__(\n    self,\n    *targets,\n    scan_id=None,\n    dispatcher=None,\n    **kwargs,\n):\n    \"\"\"\n    Initializes the Scanner class.\n\n    If a premade `preset` is specified, it will be used for the scan.\n    Otherwise, `Scan` accepts the same arguments as `Preset`, which are passed through and used to create a new preset.\n\n    Args:\n        *targets (list[str], optional): Scan targets (passed through to `Preset`).\n        preset (Preset, optional): Preset to use for the scan.\n        scan_id (str, optional): Unique identifier for the scan. Auto-generates if None.\n        dispatcher (Dispatcher, optional): Dispatcher object to use. 
Defaults to new Dispatcher.\n        **kwargs (list[str], optional): Additional keyword arguments (passed through to `Preset`).\n    \"\"\"\n    self._root_event = None\n    self._finish_event = None\n    self.start_time = None\n    self.end_time = None\n    self.duration = None\n    self.duration_human = None\n    self.duration_seconds = None\n\n    self._success = False\n\n    if scan_id is not None:\n        self.id = str(id)\n    else:\n        self.id = f\"SCAN:{sha1(rand_string(20)).hexdigest()}\"\n\n    custom_preset = kwargs.pop(\"preset\", None)\n    kwargs[\"_log\"] = True\n\n    from .preset import Preset\n\n    base_preset = Preset(*targets, **kwargs)\n\n    if custom_preset is not None:\n        if not isinstance(custom_preset, Preset):\n            raise ValidationError(f'Preset must be of type Preset, not \"{type(custom_preset).__name__}\"')\n        base_preset.merge(custom_preset)\n\n    self.preset = base_preset.bake(self)\n\n    # scan name\n    if self.preset.scan_name is None:\n        tries = 0\n        while 1:\n            if tries &gt; 5:\n                scan_name = f\"{rand_string(4)}_{rand_string(4)}\"\n                break\n            scan_name = random_name()\n            if self.preset.output_dir is not None:\n                home_path = Path(self.preset.output_dir).resolve() / scan_name\n            else:\n                home_path = self.preset.bbot_home / \"scans\" / scan_name\n            if not home_path.exists():\n                break\n            tries += 1\n    else:\n        scan_name = str(self.preset.scan_name)\n    self.name = scan_name.replace(\"/\", \"_\")\n\n    # make sure the preset has a description\n    if not self.preset.description:\n        self.preset.description = self.name\n\n    # scan output dir\n    if self.preset.output_dir is not None:\n        self.home = Path(self.preset.output_dir).resolve() / self.name\n    else:\n        self.home = self.preset.bbot_home / \"scans\" / self.name\n\n    self._status = \"NOT_STARTED\"\n    self._status_code = 0\n\n    self.modules = OrderedDict({})\n    self._modules_loaded = False\n    self.dummy_modules = {}\n\n    if dispatcher is None:\n        from .dispatcher import Dispatcher\n\n        self.dispatcher = Dispatcher()\n    else:\n        self.dispatcher = dispatcher\n    self.dispatcher.set_scan(self)\n\n    # scope distance\n    self.scope_config = self.config.get(\"scope\", {})\n    self.scope_search_distance = max(0, int(self.scope_config.get(\"search_distance\", 0)))\n    self.scope_report_distance = int(self.scope_config.get(\"report_distance\", 1))\n\n    # web config\n    self.web_config = self.config.get(\"web\", {})\n    self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n    self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n    self.web_spider_links_per_page = self.web_config.get(\"spider_links_per_page\", 20)\n    max_redirects = self.web_config.get(\"http_max_redirects\", 5)\n    self.web_max_redirects = max(max_redirects, self.web_spider_distance)\n    self.http_proxy = self.web_config.get(\"http_proxy\", \"\")\n    self.http_timeout = self.web_config.get(\"http_timeout\", 10)\n    self.httpx_timeout = self.web_config.get(\"httpx_timeout\", 5)\n    self.http_retries = self.web_config.get(\"http_retries\", 1)\n    self.httpx_retries = self.web_config.get(\"httpx_retries\", 1)\n    self.useragent = self.web_config.get(\"user_agent\", \"BBOT\")\n    # custom HTTP headers warning\n    self.custom_http_headers = 
self.web_config.get(\"http_headers\", {})\n    if self.custom_http_headers:\n        self.warning(\n            \"You have enabled custom HTTP headers. These will be attached to all in-scope requests and all requests made by httpx.\"\n        )\n\n    # url file extensions\n    self.url_extension_blacklist = set(e.lower() for e in self.config.get(\"url_extension_blacklist\", []))\n    self.url_extension_httpx_only = set(e.lower() for e in self.config.get(\"url_extension_httpx_only\", []))\n\n    # url querystring behavior\n    self.url_querystring_remove = self.config.get(\"url_querystring_remove\", True)\n\n    # blob inclusion\n    self._file_blobs = self.config.get(\"file_blobs\", False)\n    self._folder_blobs = self.config.get(\"folder_blobs\", False)\n\n    # how often to print scan status\n    self.status_frequency = self.config.get(\"status_frequency\", 15)\n\n    from .stats import ScanStats\n\n    self.stats = ScanStats(self)\n\n    self._prepped = False\n    self._finished_init = False\n    self._new_activity = False\n    self._cleanedup = False\n    self._omitted_event_types = None\n\n    self.__loop = None\n    self._manager_worker_loop_tasks = []\n    self.init_events_task = None\n    self.ticker_task = None\n    self.dispatcher_tasks = []\n\n    self._stopping = False\n\n    self._dns_strings = None\n    self._dns_regexes = None\n    self._dns_regexes_yara = None\n    self._dns_yara_rules_uncompiled = None\n    self._dns_yara_rules = None\n\n    self.__log_handlers = None\n    self._log_handler_backup = []\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.async_start","title":"async_start  <code>async</code>","text":"<pre><code>async_start()\n</code></pre> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def async_start(self):\n    \"\"\" \"\"\"\n    self.start_time = datetime.now()\n    self.root_event.data[\"started_at\"] = self.start_time.isoformat()\n    try:\n        await self._prep()\n\n        self._start_log_handlers()\n        self.trace(f'Ran BBOT {__version__} at {self.start_time}, command: {\" \".join(sys.argv)}')\n        self.trace(f\"Target: {self.preset.target.json}\")\n        self.trace(f\"Preset: {self.preset.to_dict(redact_secrets=True)}\")\n\n        if not self.target:\n            self.warning(f\"No scan targets specified\")\n\n        # start status ticker\n        self.ticker_task = asyncio.create_task(\n            self._status_ticker(self.status_frequency), name=f\"{self.name}._status_ticker()\"\n        )\n\n        self.status = \"STARTING\"\n\n        if not self.modules:\n            self.error(f\"No modules loaded\")\n            self.status = \"FAILED\"\n            return\n        else:\n            self.hugesuccess(f\"Starting scan {self.name}\")\n\n        await self.dispatcher.on_start(self)\n\n        self.status = \"RUNNING\"\n        self._start_modules()\n        self.verbose(f\"{len(self.modules):,} modules started\")\n\n        # distribute seed events\n        self.init_events_task = asyncio.create_task(\n            self.ingress_module.init_events(self.target.seeds.events),\n            name=f\"{self.name}.ingress_module.init_events()\",\n        )\n\n        # main scan loop\n        while 1:\n            # abort if we're aborting\n            if self.aborting:\n                self._drain_queues()\n                break\n\n            # yield events as they come (async for event in scan.async_start())\n            if \"python\" in self.modules:\n                events, finish = await 
self.modules[\"python\"]._events_waiting(batch_size=-1)\n                for e in events:\n                    yield e\n                if events:\n                    continue\n\n            # break if initialization finished and the scan is no longer active\n            if self._finished_init and self.modules_finished:\n                new_activity = await self.finish()\n                if not new_activity:\n                    self._success = True\n                    scan_finish_event = await self._mark_finished()\n                    yield scan_finish_event\n                    break\n\n            await asyncio.sleep(0.1)\n\n        self._success = True\n\n    except BaseException as e:\n        if self.helpers.in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n            self.stop()\n            self._success = True\n        else:\n            try:\n                raise\n            except ScanError as e:\n                self.error(f\"{e}\")\n\n            except BBOTError as e:\n                self.critical(f\"Error during scan: {e}\")\n\n            except Exception:\n                self.critical(f\"Unexpected error during scan:\\n{traceback.format_exc()}\")\n\n    finally:\n        tasks = self._cancel_tasks()\n        self.debug(f\"Awaiting {len(tasks):,} tasks\")\n        for task in tasks:\n            # self.debug(f\"Awaiting {task}\")\n            with contextlib.suppress(BaseException):\n                await asyncio.wait_for(task, timeout=0.1)\n        self.debug(f\"Awaited {len(tasks):,} tasks\")\n        await self._report()\n        await self._cleanup()\n\n        await self.dispatcher.on_finish(self)\n\n        self._stop_log_handlers()\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.extract_in_scope_hostnames","title":"extract_in_scope_hostnames  <code>async</code>","text":"<pre><code>extract_in_scope_hostnames(s)\n</code></pre> <p>Given a string, uses yara to extract hostnames matching scan targets</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n... {\"www.evilcorp.com\"}\n</code></pre> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def extract_in_scope_hostnames(self, s):\n    \"\"\"\n    Given a string, uses yara to extract hostnames matching scan targets\n\n    Examples:\n        &gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n        ... {\"www.evilcorp.com\"}\n    \"\"\"\n    matches = set()\n    dns_yara_rules = await self.dns_yara_rules()\n    if dns_yara_rules is not None:\n        for match in await self.helpers.run_in_executor(dns_yara_rules.match, data=s):\n            for string in match.strings:\n                for instance in string.instances:\n                    matches.add(str(instance))\n    return matches\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.finish","title":"finish  <code>async</code>","text":"<pre><code>finish()\n</code></pre> <p>Finalizes the scan by invoking the <code>finished()</code> method on all active modules if new activity is detected.</p> <p>The method is idempotent and will return False if no new activity has been recorded since the last invocation.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if new activity has been detected and the <code>finished()</code> method is invoked on all modules.   
False if no new activity has been detected since the last invocation.</p> </li> </ul> Notes <p>This method alters the scan's status to \"FINISHING\" if new activity is detected.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def finish(self):\n    \"\"\"Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected.\n\n    The method is idempotent and will return False if no new activity has been recorded since the last invocation.\n\n    Returns:\n        bool: True if new activity has been detected and the `finished()` method is invoked on all modules.\n              False if no new activity has been detected since the last invocation.\n\n    Notes:\n        This method alters the scan's status to \"FINISHING\" if new activity is detected.\n    \"\"\"\n    # if new events were generated since last time we were here\n    if self._new_activity:\n        self._new_activity = False\n        self.status = \"FINISHING\"\n        # Trigger .finished() on every module and start over\n        log.info(\"Finishing scan\")\n        for module in self.modules.values():\n            finished_event = self.make_event(f\"FINISHED\", \"FINISHED\", dummy=True, tags={module.name})\n            await module.queue_event(finished_event)\n        self.verbose(\"Completed finish()\")\n        return True\n    self.verbose(\"Completed final finish()\")\n    # Return False if no new events were generated since last time\n    return False\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.load_modules","title":"load_modules  <code>async</code>","text":"<pre><code>load_modules()\n</code></pre> <p>Asynchronously import and instantiate all scan modules, including internal and output modules.</p> <p>This method is automatically invoked by <code>setup_modules()</code>. 
It performs several key tasks in the following sequence:</p> <ol> <li>Install dependencies for each module via <code>self.helpers.depsinstaller.install()</code>.</li> <li>Load scan modules and updates the <code>modules</code> dictionary.</li> <li>Load internal modules and updates the <code>modules</code> dictionary.</li> <li>Load output modules and updates the <code>modules</code> dictionary.</li> <li>Sorts modules based on their <code>_priority</code> attribute.</li> </ol> <p>If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless <code>self.force_start</code> is True).</p> <p>Attributes:</p> <ul> <li> <code>succeeded,</code>               (<code>failed (tuple</code>)           \u2013            <p>A tuple containing lists of modules that succeeded or failed during the dependency installation.</p> </li> <li> <code>loaded_modules,</code>               (<code>loaded_internal_modules, loaded_output_modules (dict</code>)           \u2013            <p>Dictionaries of successfully loaded modules.</p> </li> <li> <code>failed,</code>               (<code>failed_internal, failed_output (list</code>)           \u2013            <p>Lists of module names that failed to load.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ScanError</code>             \u2013            <p>If any module dependencies fail to install or modules fail to load, and if <code>self.force_start</code> is False.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>After all modules are loaded, they are sorted by <code>_priority</code> and stored in the <code>modules</code> dictionary.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def load_modules(self):\n    \"\"\"Asynchronously import and instantiate all scan modules, including internal and output modules.\n\n    This method is automatically invoked by `setup_modules()`. It performs several key tasks in the following sequence:\n\n    1. Install dependencies for each module via `self.helpers.depsinstaller.install()`.\n    2. Load scan modules and updates the `modules` dictionary.\n    3. Load internal modules and updates the `modules` dictionary.\n    4. Load output modules and updates the `modules` dictionary.\n    5. 
Sorts modules based on their `_priority` attribute.\n\n    If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is True).\n\n    Attributes:\n        succeeded, failed (tuple): A tuple containing lists of modules that succeeded or failed during the dependency installation.\n        loaded_modules, loaded_internal_modules, loaded_output_modules (dict): Dictionaries of successfully loaded modules.\n        failed, failed_internal, failed_output (list): Lists of module names that failed to load.\n\n    Raises:\n        ScanError: If any module dependencies fail to install or modules fail to load, and if `self.force_start` is False.\n\n    Returns:\n        None\n\n    Note:\n        After all modules are loaded, they are sorted by `_priority` and stored in the `modules` dictionary.\n    \"\"\"\n    if not self._modules_loaded:\n        if not self.preset.modules:\n            self.warning(f\"No modules to load\")\n            return\n\n        if not self.preset.scan_modules:\n            self.warning(f\"No scan modules to load\")\n\n        # install module dependencies\n        succeeded, failed = await self.helpers.depsinstaller.install(*self.preset.modules)\n        if failed:\n            msg = f\"Failed to install dependencies for {len(failed):,} modules: {','.join(failed)}\"\n            self._fail_setup(msg)\n        modules = sorted([m for m in self.preset.scan_modules if m in succeeded])\n        output_modules = sorted([m for m in self.preset.output_modules if m in succeeded])\n        internal_modules = sorted([m for m in self.preset.internal_modules if m in succeeded])\n\n        # Load scan modules\n        self.verbose(f\"Loading {len(modules):,} scan modules: {','.join(modules)}\")\n        loaded_modules, failed = self._load_modules(modules)\n        self.modules.update(loaded_modules)\n        if len(failed) &gt; 0:\n            msg = f\"Failed to load {len(failed):,} scan modules: {','.join(failed)}\"\n            self._fail_setup(msg)\n        if loaded_modules:\n            self.info(\n                f\"Loaded {len(loaded_modules):,}/{len(self.preset.scan_modules):,} scan modules ({','.join(loaded_modules)})\"\n            )\n\n        # Load internal modules\n        self.verbose(f\"Loading {len(internal_modules):,} internal modules: {','.join(internal_modules)}\")\n        loaded_internal_modules, failed_internal = self._load_modules(internal_modules)\n        self.modules.update(loaded_internal_modules)\n        if len(failed_internal) &gt; 0:\n            msg = f\"Failed to load {len(loaded_internal_modules):,} internal modules: {','.join(loaded_internal_modules)}\"\n            self._fail_setup(msg)\n        if loaded_internal_modules:\n            self.info(\n                f\"Loaded {len(loaded_internal_modules):,}/{len(self.preset.internal_modules):,} internal modules ({','.join(loaded_internal_modules)})\"\n            )\n\n        # Load output modules\n        self.verbose(f\"Loading {len(output_modules):,} output modules: {','.join(output_modules)}\")\n        loaded_output_modules, failed_output = self._load_modules(output_modules)\n        self.modules.update(loaded_output_modules)\n        if len(failed_output) &gt; 0:\n            msg = f\"Failed to load {len(failed_output):,} output modules: {','.join(failed_output)}\"\n            self._fail_setup(msg)\n        if loaded_output_modules:\n            self.info(\n                f\"Loaded 
{len(loaded_output_modules):,}/{len(self.preset.output_modules):,} output modules, ({','.join(loaded_output_modules)})\"\n            )\n\n        # builtin intercept modules\n        self.ingress_module = ScanIngress(self)\n        self.egress_module = ScanEgress(self)\n        self.modules[self.ingress_module.name] = self.ingress_module\n        self.modules[self.egress_module.name] = self.egress_module\n\n        # sort modules by priority\n        self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], \"priority\", 3)))\n\n        self._modules_loaded = True\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.setup_modules","title":"setup_modules  <code>async</code>","text":"<pre><code>setup_modules(remove_failed=True)\n</code></pre> <p>Asynchronously initializes all loaded modules by invoking their <code>setup()</code> methods.</p> <p>Parameters:</p> <ul> <li> <code>remove_failed</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Flag indicating whether to remove modules that fail setup.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>succeeded - List of modules that successfully set up. hard_failed - List of modules that encountered a hard failure during setup. soft_failed - List of modules that encountered a soft failure during setup.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ScanError</code>             \u2013            <p>If no output modules could be loaded.</p> </li> </ul> Notes <p>Hard-failed modules are set to an error state and removed if <code>remove_failed</code> is True. Soft-failed modules are not set to an error state but are also removed if <code>remove_failed</code> is True.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def setup_modules(self, remove_failed=True):\n    \"\"\"Asynchronously initializes all loaded modules by invoking their `setup()` methods.\n\n    Args:\n        remove_failed (bool): Flag indicating whether to remove modules that fail setup.\n\n    Returns:\n        tuple:\n            succeeded - List of modules that successfully set up.\n            hard_failed - List of modules that encountered a hard failure during setup.\n            soft_failed - List of modules that encountered a soft failure during setup.\n\n    Raises:\n        ScanError: If no output modules could be loaded.\n\n    Notes:\n        Hard-failed modules are set to an error state and removed if `remove_failed` is True.\n        Soft-failed modules are not set to an error state but are also removed if `remove_failed` is True.\n    \"\"\"\n    await self.load_modules()\n    self.verbose(f\"Setting up modules\")\n    succeeded = []\n    hard_failed = []\n    soft_failed = []\n\n    async for task in self.helpers.as_completed([m._setup() for m in self.modules.values()]):\n        module, status, msg = await task\n        if status == True:\n            self.debug(f\"Setup succeeded for {module.name} ({msg})\")\n            succeeded.append(module.name)\n        elif status == False:\n            self.warning(f\"Setup hard-failed for {module.name}: {msg}\")\n            self.modules[module.name].set_error_state()\n            hard_failed.append(module.name)\n        else:\n            self.info(f\"Setup soft-failed for {module.name}: {msg}\")\n            soft_failed.append(module.name)\n        if (not status) and (module._intercept or remove_failed):\n            # if a intercept module fails setup, 
we always remove it\n            self.modules.pop(module.name)\n\n    return succeeded, hard_failed, soft_failed\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.stop","title":"stop","text":"<pre><code>stop()\n</code></pre> <p>Stops the in-progress scan and performs necessary cleanup.</p> <p>This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. It also kills child processes spawned during the scan.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>def stop(self):\n    \"\"\"Stops the in-progress scan and performs necessary cleanup.\n\n    This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. It also kills child processes spawned during the scan.\n\n    Returns:\n        None\n    \"\"\"\n    if not self._stopping:\n        self._stopping = True\n        self.status = \"ABORTING\"\n        self.hugewarning(\"Aborting scan\")\n        self.trace()\n        self._cancel_tasks()\n        self._drain_queues()\n        self.helpers.kill_children()\n        self._drain_queues()\n        self.helpers.kill_children()\n        self.debug(\"Finished aborting scan\")\n</code></pre>"},{"location":"dev/target/","title":"Target","text":""},{"location":"dev/target/#bbot.scanner.target.BaseTarget","title":"BaseTarget","text":"<p>               Bases: <code>RadixTarget</code></p> <p>A collection of BBOT events that represent a scan target.</p> <p>Based on radixtarget, which allows extremely fast IP and DNS lookups.</p> This class is inherited by all three components of the BBOT target <ul> <li>Whitelist</li> <li>Blacklist</li> <li>Seeds</li> </ul> Source code in <code>bbot/scanner/target.py</code> <pre><code>class BaseTarget(RadixTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan target.\n\n    Based on radixtarget, which allows extremely fast IP and DNS lookups.\n\n    This class is inherited by all three components of the BBOT target:\n        - Whitelist\n        - Blacklist\n        - Seeds\n    \"\"\"\n\n    special_target_types = {\n        # regex-callback pairs for handling special target types\n        # these aren't defined explicitly; instead they are decorated with @special_target_type\n        # the function must return a list of events\n    }\n    tags = []\n\n    def __init__(self, *targets, scan=None, **kwargs):\n        self.scan = scan\n        self.events = set()\n        self.inputs = set()\n        # Register decorated methods\n        for method in dir(self):\n            if callable(getattr(self, method, None)):\n                func = getattr(self, method)\n                if hasattr(func, \"_regex\"):\n                    self.special_target_types[func._regex] = func\n\n        super().__init__(*targets, **kwargs)\n\n    def get(self, event, **kwargs):\n        \"\"\"\n        Override default .get() to accept events\n        \"\"\"\n        if is_event(event):\n            host = event.host\n        # save resources by checking if the event is an IP or DNS name\n        elif is_ip(event, include_network=True) or is_dns_name(event):\n            host = event\n        elif isinstance(event, str):\n            event = self.make_event(event)\n            host = event.host\n        else:\n            raise ValueError(f\"Invalid host/event: {event} ({type(event)})\")\n        if not host:\n            if kwargs.get(\"raise_error\", False):\n                
raise KeyError(f\"Host not found: '{event}'\")\n            return None\n        results = super().get(host, **kwargs)\n        return results\n\n    def make_event(self, *args, **kwargs):\n        # if it's already an event, return it\n        if args and is_event(args[0]):\n            return args[0]\n        # otherwise make a new one\n        if not \"tags\" in kwargs:\n            kwargs[\"tags\"] = set()\n        kwargs[\"tags\"].update(self.tags)\n        return make_event(*args, dummy=True, scan=self.scan, **kwargs)\n\n    def add(self, targets):\n        if not isinstance(targets, (list, set, tuple)):\n            targets = [targets]\n        events = set()\n        for target in targets:\n            _events = []\n            special_target_type, _events = self.check_special_target_types(str(target))\n            if special_target_type:\n                self.inputs.add(str(target))\n            else:\n                event = self.make_event(target)\n                if event:\n                    _events = [event]\n            for event in _events:\n                self.inputs.add(event.data)\n                events.add(event)\n\n        # sort by host size to ensure consistency\n        events = sorted(events, key=lambda e: (0 if not e.host else host_size_key(e.host)))\n        for event in events:\n            self.events.add(event)\n            self._add(event.host, data=event)\n\n    def check_special_target_types(self, target):\n        for regex, callback in self.special_target_types.items():\n            match = regex.match(target)\n            if match:\n                return True, callback(match)\n        return False, []\n\n    def __iter__(self):\n        yield from self.events\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BaseTarget.get","title":"get","text":"<pre><code>get(event, **kwargs)\n</code></pre> <p>Override default .get() to accept events</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>def get(self, event, **kwargs):\n    \"\"\"\n    Override default .get() to accept events\n    \"\"\"\n    if is_event(event):\n        host = event.host\n    # save resources by checking if the event is an IP or DNS name\n    elif is_ip(event, include_network=True) or is_dns_name(event):\n        host = event\n    elif isinstance(event, str):\n        event = self.make_event(event)\n        host = event.host\n    else:\n        raise ValueError(f\"Invalid host/event: {event} ({type(event)})\")\n    if not host:\n        if kwargs.get(\"raise_error\", False):\n            raise KeyError(f\"Host not found: '{event}'\")\n        return None\n    results = super().get(host, **kwargs)\n    return results\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanSeeds","title":"ScanSeeds","text":"<p>               Bases: <code>BaseTarget</code></p> <p>Initial events used to seed a scan.</p> <p>These are the targets specified by the user, e.g. via <code>-t</code> on the CLI.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanSeeds(BaseTarget):\n    \"\"\"\n    Initial events used to seed a scan.\n\n    These are the targets specified by the user, e.g. 
via `-t` on the CLI.\n    \"\"\"\n\n    tags = [\"target\"]\n\n    @special_target_type(r\"^(?:ORG|ORG_STUB):(.*)\")\n    def handle_org_stub(self, match):\n        org_stub_event = self.make_event(match.group(1), event_type=\"ORG_STUB\")\n        if org_stub_event:\n            return [org_stub_event]\n        return []\n\n    @special_target_type(r\"^(?:USER|USERNAME):(.*)\")\n    def handle_username(self, match):\n        username_event = self.make_event(match.group(1), event_type=\"USERNAME\")\n        if username_event:\n            return [username_event]\n        return []\n\n    def get(self, event, single=True, **kwargs):\n        results = super().get(event, **kwargs)\n        if results and single:\n            return next(iter(results))\n        return results\n\n    def _add(self, host, data):\n        \"\"\"\n        Overrides the base method to enable having multiple events for the same host.\n\n        The \"data\" attribute of the node is now a set of events.\n        \"\"\"\n        if host:\n            try:\n                event_set = self.get(host, raise_error=True, single=False)\n                event_set.add(data)\n            except KeyError:\n                event_set = {data}\n            super()._add(host, data=event_set)\n\n    def _hash_value(self):\n        # seeds get hashed by event data\n        return sorted(str(e.data).encode() for e in self.events)\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanWhitelist","title":"ScanWhitelist","text":"<p>               Bases: <code>ACLTarget</code></p> <p>A collection of BBOT events that represent a scan's whitelist.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanWhitelist(ACLTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan's whitelist.\n    \"\"\"\n\n    pass\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanBlacklist","title":"ScanBlacklist","text":"<p>               Bases: <code>ACLTarget</code></p> <p>A collection of BBOT events that represent a scan's blacklist.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanBlacklist(ACLTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan's blacklist.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        self.blacklist_regexes = set()\n        super().__init__(*args, **kwargs)\n\n    @special_target_type(r\"^(?:RE|REGEX):(.*)\")\n    def handle_regex(self, match):\n        pattern = match.group(1)\n        log.info(f\"Blacklisting by custom regex: {pattern}\")\n        blacklist_regex = re.compile(pattern, re.IGNORECASE)\n        self.blacklist_regexes.add(blacklist_regex)\n        return []\n\n    def get(self, event, **kwargs):\n        \"\"\"\n        Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user\n        \"\"\"\n        event = self.make_event(event)\n        # first, check event's host against blacklist\n        try:\n            event_result = super().get(event, raise_error=True)\n        except KeyError:\n            event_result = None\n        if event_result is not None:\n            return event_result\n        # next, check event's host against regexes\n        host_or_url = event.host_filterable\n        if host_or_url:\n            for regex in self.blacklist_regexes:\n                if regex.search(str(host_or_url)):\n                    return event\n        if kwargs.get(\"raise_error\", False):\n            raise KeyError(f\"Host not found: 
'{event.data}'\")\n        return None\n\n    def _hash_value(self):\n        # regexes are included in blacklist hash\n        regex_patterns = [str(r.pattern).encode() for r in self.blacklist_regexes]\n        hosts = [str(h).encode() for h in self.sorted_hosts]\n        return hosts + regex_patterns\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanBlacklist.get","title":"get","text":"<pre><code>get(event, **kwargs)\n</code></pre> <p>Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>def get(self, event, **kwargs):\n    \"\"\"\n    Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user\n    \"\"\"\n    event = self.make_event(event)\n    # first, check event's host against blacklist\n    try:\n        event_result = super().get(event, raise_error=True)\n    except KeyError:\n        event_result = None\n    if event_result is not None:\n        return event_result\n    # next, check event's host against regexes\n    host_or_url = event.host_filterable\n    if host_or_url:\n        for regex in self.blacklist_regexes:\n            if regex.search(str(host_or_url)):\n                return event\n    if kwargs.get(\"raise_error\", False):\n        raise KeyError(f\"Host not found: '{event.data}'\")\n    return None\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget","title":"BBOTTarget","text":"A convenient abstraction of a scan target that contains three subtargets <ul> <li>seeds</li> <li>whitelist</li> <li>blacklist</li> </ul> <p>Provides high-level functions like in_scope(), which includes both whitelist and blacklist checks.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class BBOTTarget:\n    \"\"\"\n    A convenient abstraction of a scan target that contains three subtargets:\n        - seeds\n        - whitelist\n        - blacklist\n\n    Provides high-level functions like in_scope(), which includes both whitelist and blacklist checks.\n    \"\"\"\n\n    def __init__(self, *seeds, whitelist=None, blacklist=None, strict_scope=False, scan=None):\n        self.scan = scan\n        self.strict_scope = strict_scope\n        self.seeds = ScanSeeds(*seeds, strict_dns_scope=strict_scope, scan=scan)\n        if whitelist is None:\n            whitelist = self.seeds.hosts\n        self.whitelist = ScanWhitelist(*whitelist, strict_dns_scope=strict_scope, scan=scan)\n        if blacklist is None:\n            blacklist = []\n        self.blacklist = ScanBlacklist(*blacklist, scan=scan)\n\n    @property\n    def json(self):\n        return {\n            \"seeds\": sorted([e.data for e in self.seeds]),\n            \"whitelist\": sorted([e.data for e in self.whitelist]),\n            \"blacklist\": sorted([e.data for e in self.blacklist]),\n            \"strict_scope\": self.strict_scope,\n            \"hash\": self.hash.hex(),\n            \"seed_hash\": self.seeds.hash.hex(),\n            \"whitelist_hash\": self.whitelist.hash.hex(),\n            \"blacklist_hash\": self.blacklist.hash.hex(),\n            \"scope_hash\": self.scope_hash.hex(),\n        }\n\n    @property\n    def hash(self):\n        sha1_hash = sha1()\n        for target_hash in [t.hash for t in (self.seeds, self.whitelist, self.blacklist)]:\n            sha1_hash.update(target_hash)\n        return sha1_hash.digest()\n\n    @property\n    def scope_hash(self):\n        sha1_hash = 
sha1()\n        # Consider only the hash values of the whitelist and blacklist\n        for target_hash in [t.hash for t in (self.whitelist, self.blacklist)]:\n            sha1_hash.update(target_hash)\n        return sha1_hash.digest()\n\n    def in_scope(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is in scope.\n        Accepts either events or string data.\n\n        Checks whitelist and blacklist.\n        If `host` is an event and its scope distance is zero, it will automatically be considered in-scope.\n\n        Examples:\n            Check if a URL is in scope:\n            &gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        try:\n            e = make_event(host, dummy=True)\n        except ValidationError:\n            return False\n        in_scope = e.scope_distance == 0 or self.whitelisted(e)\n        return in_scope and not self.blacklisted(e)\n\n    def blacklisted(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is blacklisted.\n\n        Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n        Args:\n            host (str or IPAddress or Event): The host to check against the blacklist\n\n        Examples:\n            Check if a URL's host is blacklisted:\n            &gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        return host in self.blacklist\n\n    def whitelisted(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is whitelisted.\n\n        Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n        Args:\n            host (str or IPAddress or Event): The host to check against the whitelist\n\n        Examples:\n            Check if a URL's host is whitelisted:\n            &gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        return host in self.whitelist\n\n    @property\n    def minimal(self):\n        \"\"\"\n        A slimmer, serializable version of the target designed for simple scope checks\n\n        This version doesn't have the events, only their hosts. This allows it to be passed across process boundaries.\n        \"\"\"\n        return self.__class__(\n            whitelist=self.whitelist.inputs,\n            blacklist=self.blacklist.inputs,\n            strict_scope=self.strict_scope,\n        )\n\n    def __eq__(self, other):\n        return self.hash == other.hash\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.minimal","title":"minimal  <code>property</code>","text":"<pre><code>minimal\n</code></pre> <p>A slimmer, serializable version of the target designed for simple scope checks</p> <p>This version doesn't have the events, only their hosts. This allows it to be passed across process boundaries.</p>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.blacklisted","title":"blacklisted","text":"<pre><code>blacklisted(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. 
is blacklisted.</p> <p>Note that <code>host</code> can be a hostname, IP address, CIDR, email address, or any BBOT <code>Event</code> with the <code>host</code> attribute.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str or IPAddress or Event</code>)           \u2013            <p>The host to check against the blacklist</p> </li> </ul> <p>Examples:</p> <p>Check if a URL's host is blacklisted:</p> <pre><code>&gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def blacklisted(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. is blacklisted.\n\n    Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n    Args:\n        host (str or IPAddress or Event): The host to check against the blacklist\n\n    Examples:\n        Check if a URL's host is blacklisted:\n        &gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    return host in self.blacklist\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.in_scope","title":"in_scope","text":"<pre><code>in_scope(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. is in scope. Accepts either events or string data.</p> <p>Checks whitelist and blacklist. If <code>host</code> is an event and its scope distance is zero, it will automatically be considered in-scope.</p> <p>Examples:</p> <p>Check if a URL is in scope:</p> <pre><code>&gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def in_scope(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. is in scope.\n    Accepts either events or string data.\n\n    Checks whitelist and blacklist.\n    If `host` is an event and its scope distance is zero, it will automatically be considered in-scope.\n\n    Examples:\n        Check if a URL is in scope:\n        &gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    try:\n        e = make_event(host, dummy=True)\n    except ValidationError:\n        return False\n    in_scope = e.scope_distance == 0 or self.whitelisted(e)\n    return in_scope and not self.blacklisted(e)\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.whitelisted","title":"whitelisted","text":"<pre><code>whitelisted(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. is whitelisted.</p> <p>Note that <code>host</code> can be a hostname, IP address, CIDR, email address, or any BBOT <code>Event</code> with the <code>host</code> attribute.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str or IPAddress or Event</code>)           \u2013            <p>The host to check against the whitelist</p> </li> </ul> <p>Examples:</p> <p>Check if a URL's host is whitelisted:</p> <pre><code>&gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def whitelisted(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. 
is whitelisted.\n\n    Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n    Args:\n        host (str or IPAddress or Event): The host to check against the whitelist\n\n    Examples:\n        Check if a URL's host is whitelisted:\n        &gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    return host in self.whitelist\n</code></pre>"},{"location":"dev/tests/","title":"Unit Tests","text":"<p>BBOT takes tests seriously. Every module must have a custom-written test that actually tests its functionality. Don't worry if you want to contribute but you aren't used to writing tests. If you open a draft PR, we will help write them :)</p> <p>We use black for code formatting, flake8 for linting, and pytest for tests.</p>"},{"location":"dev/tests/#running-tests-locally","title":"Running tests locally","text":"<p>We have GitHub Actions that automatically run tests whenever you open a Pull Request. However, you can also run the tests locally with <code>pytest</code>:</p> <pre><code># format code with black\npoetry run black .\n\n# lint with flake8\npoetry run flake8\n\n# run all tests with pytest (takes roughly 30 minutes)\npoetry run pytest\n</code></pre>"},{"location":"dev/tests/#running-specific-tests","title":"Running specific tests","text":"<p>If you only want to run a single test, you can select it with <code>-k</code>:</p> <pre><code># run only the sslcert test\npoetry run pytest -k test_module_sslcert\n</code></pre> <p>You can also filter like this: <pre><code># run all the module tests except for sslcert\npoetry run pytest -k \"test_module_ and not test_module_sslcert\"\n</code></pre></p> <p>If you want to see the output of your module, you can enable <code>--log-cli-level</code>: <pre><code>poetry run pytest --log-cli-level=DEBUG\n</code></pre></p>"},{"location":"dev/tests/#example-writing-a-module-test","title":"Example: Writing a Module Test","text":"<p>To write a test for your module, create a new python file in <code>bbot/test/test_step_2/module_tests</code>. 
Your filename must be <code>test_module_&lt;module_name&gt;</code>:</p> test_module_mymodule.py<pre><code>from .base import ModuleTestBase\n\n\nclass TestMyModule(ModuleTestBase):\n    targets = [\"blacklanternsecurity.com\"]\n    config_overrides = {\"modules\": {\"mymodule\": {\"api_key\": \"deadbeef\"}}}\n\n    async def setup_after_prep(self, module_test):\n        # mock HTTP response\n        module_test.httpx_mock.add_response(\n            url=\"https://api.com/sudomains?apikey=deadbeef&amp;domain=blacklanternsecurity.com\",\n            json={\n                \"subdomains\": [\n                    \"www.blacklanternsecurity.com\",\n                    \"dev.blacklanternsecurity.com\"\n                ],\n            },\n        )\n        # mock DNS\n        await module_test.mock_dns(\n            {\n                \"blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n                \"www.blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n                \"dev.blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n            }\n        )\n\n    def check(self, module_test, events):\n        # here is where we check to make sure it worked\n        dns_names = [e.data for e in events if e.type == \"DNS_NAME\"]\n        # temporary log messages for debugging\n        for e in dns_names:\n            self.log.critical(e)\n        assert \"www.blacklanternsecurity.com\" in dns_names, \"failed to find subdomain #1\"\n        assert \"dev.blacklanternsecurity.com\" in dns_names, \"failed to find subdomain #2\"\n</code></pre>"},{"location":"dev/tests/#debugging-a-test","title":"Debugging a test","text":"<p>Similar to debugging from within a module, you can debug from within a test using <code>self.log.critical()</code>, etc:</p> <pre><code>    def check(self, module_test, events):\n        for e in events:\n            # bright red\n            self.log.critical(e.type)\n            # bright green\n            self.log.hugesuccess(e.data)\n            # bright orange\n            self.log.hugewarning(e.tags)\n            # bright blue\n            self.log.hugeinfo(e.parent)\n</code></pre>"},{"location":"dev/tests/#more-advanced-tests","title":"More advanced tests","text":"<p>If you have questions about tests or need to write a more advanced test, come talk to us on GitHub or Discord.</p> <p>It's also a good idea to look through our existing tests. BBOT has over a hundred of them, so you might find one that's similar to what you're trying to do.</p>"},{"location":"dev/helpers/","title":"BBOT Helpers","text":"<p>In this section are various helper functions that are designed to make your life easier when devving on BBOT. 
Whether you're extending BBOT by writing a module or working on its core engine, these functions are designed to act as useful machine parts to perform essential tasks, such as making a web request or executing a DNS query.</p> <p>The vast majority of these helpers can be accessed directly from the <code>.helpers</code> attribute of a scan or module, like so:</p> <pre><code>class MyModule(BaseModule):\n\n    ...\n\n    async def handle_event(self, event):\n        # Web Request\n        response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        # DNS query\n        for ip in await self.helpers.resolve(\"www.evilcorp.com\"):\n            self.hugesuccess(str(ip))\n\n        # Execute shell command\n        completed_process = await self.run_process(\"ls\", \"-l\")\n        self.hugesuccess(completed_process.stdout)\n\n        # Split a DNS name into subdomain / domain\n        self.helpers.split_domain(\"www.internal.evilcorp.co.uk\")\n        # (\"www.internal\", \"evilcorp.co.uk\")\n</code></pre> <p>Next Up: Command Helpers --&gt;</p>"},{"location":"dev/helpers/command/","title":"Command Helpers","text":"<p>These are helpers related to executing shell commands. They are used throughout BBOT and its modules for executing various binaries such as <code>masscan</code>, <code>nuclei</code>, etc.</p> <p>These helpers can be invoked directly from <code>self.helpers</code>, but inside a module you should always use <code>self.run_process()</code> or <code>self.run_process_live()</code>. These are light wrappers which ensure the running process is tracked by the module so that it can be easily terminated should the user need to kill the module:</p> <pre><code># simple subprocess\nls_result = await self.run_process(\"ls\", \"-l\")\nfor line in ls_result.stdout.splitlines():\n    # ...\n\n# iterate through each line in real time\nasync for line in self.run_process_live([\"grep\", \"-R\"]):\n    # ...\n</code></pre>"},{"location":"dev/helpers/command/#bbot.core.helpers.command.run","title":"run  <code>async</code>","text":"<pre><code>run(self, *command, check=False, text=True, idle_timeout=None, **kwargs)\n</code></pre> <p>Runs a command asynchronously and gets its output as a string.</p> <pre><code>This method is a simple helper for executing a command and capturing its output.\nIf an error occurs during execution, it can optionally raise an error or just log the stderr.\n\nArgs:\n    *command (str): The command to run as separate arguments.\n    check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                            Defaults to False.\n    text (bool, optional): If set to True, decodes the subprocess output to string. 
Defaults to True.\n    idle_timeout (int, optional): Sets a limit on the number of seconds the process can run before throwing a TimeoutError\n    **kwargs (dict): Additional keyword arguments for the subprocess.\n\nReturns:\n    CompletedProcess: A completed process object with attributes for the command, return code, stdout, and stderr.\n\nRaises:\n    CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\nExamples:\n    &gt;&gt;&gt; process = await run([\"ls\", \"/tmp\"])\n    &gt;&gt;&gt; process.stdout\n    \"file1.txt\n</code></pre> <p>file2.txt\"</p> Source code in <code>bbot/core/helpers/command.py</code> <pre><code>async def run(self, *command, check=False, text=True, idle_timeout=None, **kwargs):\n    \"\"\"Runs a command asynchronously and gets its output as a string.\n\n    This method is a simple helper for executing a command and capturing its output.\n    If an error occurs during execution, it can optionally raise an error or just log the stderr.\n\n    Args:\n        *command (str): The command to run as separate arguments.\n        check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                                Defaults to False.\n        text (bool, optional): If set to True, decodes the subprocess output to string. Defaults to True.\n        idle_timeout (int, optional): Sets a limit on the number of seconds the process can run before throwing a TimeoutError\n        **kwargs (dict): Additional keyword arguments for the subprocess.\n\n    Returns:\n        CompletedProcess: A completed process object with attributes for the command, return code, stdout, and stderr.\n\n    Raises:\n        CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\n    Examples:\n        &gt;&gt;&gt; process = await run([\"ls\", \"/tmp\"])\n        &gt;&gt;&gt; process.stdout\n        \"file1.txt\\nfile2.txt\"\n    \"\"\"\n    # proc_tracker optionally keeps track of which processes are running under which modules\n    # this allows for graceful SIGINTing of a module's processes in the case when it's killed\n    proc_tracker = kwargs.pop(\"_proc_tracker\", set())\n    log_stderr = kwargs.pop(\"_log_stderr\", True)\n    proc, _input, command = await self._spawn_proc(*command, **kwargs)\n    if proc is not None:\n        proc_tracker.add(proc)\n        try:\n            if _input is not None:\n                if isinstance(_input, (list, tuple)):\n                    _input = b\"\\n\".join(smart_encode(i) for i in _input) + b\"\\n\"\n                else:\n                    _input = smart_encode(_input)\n\n            try:\n                if idle_timeout is not None:\n                    stdout, stderr = await asyncio.wait_for(proc.communicate(_input), timeout=idle_timeout)\n                else:\n                    stdout, stderr = await proc.communicate(_input)\n            except asyncio.exceptions.TimeoutError:\n                proc.send_signal(SIGINT)\n                raise\n\n            # surface stderr\n            if text:\n                if stderr is not None:\n                    stderr = smart_decode(stderr)\n                if stdout is not None:\n                    stdout = smart_decode(stdout)\n            if proc.returncode:\n                if check:\n                    raise CalledProcessError(proc.returncode, command, output=stdout, stderr=stderr)\n                if stderr and log_stderr:\n                    command_str = \" \".join(command)\n     
               log.warning(f\"Stderr for run({command_str}):\\n\\t{stderr}\")\n\n            return CompletedProcess(command, proc.returncode, stdout, stderr)\n        finally:\n            proc_tracker.remove(proc)\n</code></pre>"},{"location":"dev/helpers/command/#bbot.core.helpers.command.run_live","title":"run_live  <code>async</code>","text":"<pre><code>run_live(self, *command, check=False, text=True, idle_timeout=None, **kwargs)\n</code></pre> <p>Runs a command asynchronously and iterates through its output line by line in realtime.</p> <p>This method is useful for executing a command and capturing its output on-the-fly, as it is generated. If an error occurs during execution, it can optionally raise an error or just log the stderr.</p> <p>Parameters:</p> <ul> <li> <code>*command</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            <p>The command to run as separate arguments.</p> </li> <li> <code>check</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If set to True, raises an error if the subprocess exits with a non-zero status.                     Defaults to False.</p> </li> <li> <code>text</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If set to True, decodes the subprocess output to string. Defaults to True.</p> </li> <li> <code>idle_timeout</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>Sets a limit on the number of seconds the process can remain idle (no lines sent to stdout) before throwing a TimeoutError</p> </li> <li> <code>**kwargs</code>               (<code>dict</code>, default:                   <code>{}</code> )           \u2013            <p>Additional keyword arguments for the subprocess.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>str or bytes: The output lines of the command, either as a decoded string (if <code>text=True</code>)           or as bytes (if <code>text=False</code>).</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>CalledProcessError</code>             \u2013            <p>If the subprocess exits with a non-zero status and <code>check=True</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async for line in run_live([\"tail\", \"-f\", \"/var/log/auth.log\"]):\n...     log.info(line)\n</code></pre> Source code in <code>bbot/core/helpers/command.py</code> <pre><code>async def run_live(self, *command, check=False, text=True, idle_timeout=None, **kwargs):\n    \"\"\"Runs a command asynchronously and iterates through its output line by line in realtime.\n\n    This method is useful for executing a command and capturing its output on-the-fly, as it is generated.\n    If an error occurs during execution, it can optionally raise an error or just log the stderr.\n\n    Args:\n        *command (str): The command to run as separate arguments.\n        check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                                Defaults to False.\n        text (bool, optional): If set to True, decodes the subprocess output to string. 
Defaults to True.\n        idle_timeout (int, optional): Sets a limit on the number of seconds the process can remain idle (no lines sent to stdout) before throwing a TimeoutError\n        **kwargs (dict): Additional keyword arguments for the subprocess.\n\n    Yields:\n        str or bytes: The output lines of the command, either as a decoded string (if `text=True`)\n                      or as bytes (if `text=False`).\n\n    Raises:\n        CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\n    Examples:\n        &gt;&gt;&gt; async for line in run_live([\"tail\", \"-f\", \"/var/log/auth.log\"]):\n        ...     log.info(line)\n    \"\"\"\n    # proc_tracker optionally keeps track of which processes are running under which modules\n    # this allows for graceful SIGINTing of a module's processes in the case when it's killed\n    proc_tracker = kwargs.pop(\"_proc_tracker\", set())\n    log_stderr = kwargs.pop(\"_log_stderr\", True)\n    proc, _input, command = await self._spawn_proc(*command, **kwargs)\n    if proc is not None:\n        proc_tracker.add(proc)\n        try:\n            input_task = None\n            if _input is not None:\n                input_task = asyncio.create_task(_write_stdin(proc, _input))\n\n            while 1:\n                try:\n                    if idle_timeout is not None:\n                        line = await asyncio.wait_for(proc.stdout.readline(), timeout=idle_timeout)\n                    else:\n                        line = await proc.stdout.readline()\n                except asyncio.exceptions.TimeoutError:\n                    proc.send_signal(SIGINT)\n                    raise\n                except ValueError as e:\n                    command_str = \" \".join([str(c) for c in command])\n                    log.warning(f\"Error executing command {command_str}: {e}\")\n                    log.trace(traceback.format_exc())\n                    continue\n                if not line:\n                    break\n                if text:\n                    line = smart_decode(line).rstrip(\"\\r\\n\")\n                else:\n                    line = line.rstrip(b\"\\r\\n\")\n                yield line\n\n            if input_task is not None:\n                try:\n                    await input_task\n                except ConnectionError:\n                    log.trace(f\"ConnectionError in command: {command}, kwargs={kwargs}\")\n                    log.trace(traceback.format_exc())\n            await proc.wait()\n\n            if proc.returncode:\n                stdout, stderr = await proc.communicate()\n                if text:\n                    if stderr is not None:\n                        stderr = smart_decode(stderr)\n                    if stdout is not None:\n                        stdout = smart_decode(stdout)\n                if check:\n                    raise CalledProcessError(proc.returncode, command, output=stdout, stderr=stderr)\n                # surface stderr\n                if stderr and log_stderr:\n                    command_str = \" \".join(command)\n                    log.warning(f\"Stderr for run_live({command_str}):\\n\\t{stderr}\")\n        finally:\n            proc_tracker.remove(proc)\n</code></pre>"},{"location":"dev/helpers/dns/","title":"DNS","text":"<p>These are helpers related to DNS resolution. 
They are used throughout BBOT and its modules for performing DNS lookups and detecting DNS wildcards, etc.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.resolve(\"evilcorp.com\")\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper","title":"DNSHelper","text":"<p>               Bases: <code>EngineClient</code></p> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>class DNSHelper(EngineClient):\n\n    SERVER_CLASS = DNSEngine\n    ERROR_CLASS = DNSError\n\n    \"\"\"Helper class for DNS-related operations within BBOT.\n\n    This class provides mechanisms for host resolution, wildcard domain detection, event tagging, and more.\n    It centralizes all DNS-related activities in BBOT, offering both synchronous and asynchronous methods\n    for DNS resolution, as well as various utilities for batch resolution and DNS query filtering.\n\n    Attributes:\n        parent_helper: A reference to the instantiated `ConfigAwareHelper` (typically `scan.helpers`).\n        resolver (BBOTAsyncResolver): An asynchronous DNS resolver tailored for BBOT with rate-limiting capabilities.\n        timeout (int): The timeout value for DNS queries. Defaults to 5 seconds.\n        retries (int): The number of retries for failed DNS queries. Defaults to 1.\n        abort_threshold (int): The threshold for aborting after consecutive failed queries. Defaults to 50.\n        runaway_limit (int): Maximum allowed distance for consecutive DNS resolutions. Defaults to 5.\n        all_rdtypes (list): A list of DNS record types to be considered during operations.\n        wildcard_ignore (tuple): Domains to be ignored during wildcard detection.\n        wildcard_tests (int): Number of tests to be run for wildcard detection. Defaults to 5.\n        _wildcard_cache (dict): Cache for wildcard detection results.\n        _dns_cache (LRUCache): Cache for DNS resolution results, limited in size.\n        resolver_file (Path): File containing system's current resolver nameservers.\n        filter_bad_ptrs (bool): Whether to filter out DNS names that appear to be auto-generated PTR records. 
Defaults to True.\n\n    Args:\n        parent_helper: The parent helper object with configuration details and utilities.\n\n    Raises:\n        DNSError: If an issue arises when creating the BBOTAsyncResolver instance.\n\n    Examples:\n        &gt;&gt;&gt; dns_helper = DNSHelper(parent_config)\n        &gt;&gt;&gt; resolved_host = dns_helper.resolver.resolve(\"example.com\")\n    \"\"\"\n\n    def __init__(self, parent_helper):\n        self.parent_helper = parent_helper\n        self.config = self.parent_helper.config\n        self.dns_config = self.config.get(\"dns\", {})\n        engine_debug = self.config.get(\"engine\", {}).get(\"debug\", False)\n        super().__init__(server_kwargs={\"config\": self.config}, debug=engine_debug)\n\n        # resolver\n        self.timeout = self.dns_config.get(\"timeout\", 5)\n        self.resolver = dns.asyncresolver.Resolver()\n        self.resolver.rotate = True\n        self.resolver.timeout = self.timeout\n        self.resolver.lifetime = self.timeout\n\n        self.runaway_limit = self.dns_config.get(\"runaway_limit\", 5)\n\n        # wildcard handling\n        self.wildcard_disable = self.dns_config.get(\"wildcard_disable\", False)\n        self.wildcard_ignore = RadixTarget()\n        for d in self.dns_config.get(\"wildcard_ignore\", []):\n            self.wildcard_ignore.insert(d)\n\n        # copy the system's current resolvers to a text file for tool use\n        self.system_resolvers = dns.resolver.Resolver().nameservers\n        # TODO: DNS server speed test (start in background task)\n        self.resolver_file = self.parent_helper.tempfile(self.system_resolvers, pipe=False)\n\n        # brute force helper\n        self._brute = None\n\n        self._is_wildcard_cache = LFUCache(maxsize=1000)\n        self._is_wildcard_domain_cache = LFUCache(maxsize=1000)\n\n    async def resolve(self, query, **kwargs):\n        return await self.run_and_return(\"resolve\", query=query, **kwargs)\n\n    async def resolve_raw(self, query, **kwargs):\n        return await self.run_and_return(\"resolve_raw\", query=query, **kwargs)\n\n    async def resolve_batch(self, queries, **kwargs):\n        agen = self.run_and_yield(\"resolve_batch\", queries=queries, **kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    async def resolve_raw_batch(self, queries):\n        agen = self.run_and_yield(\"resolve_raw_batch\", queries=queries)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    @property\n    def brute(self):\n        if self._brute is None:\n            from .brute import DNSBrute\n\n            self._brute = DNSBrute(self.parent_helper)\n        return self._brute\n\n    @async_cachedmethod(\n        lambda self: self._is_wildcard_cache,\n        key=lambda query, rdtypes, raw_dns_records: (query, tuple(sorted(rdtypes)), bool(raw_dns_records)),\n    )\n    async def is_wildcard(self, query, rdtypes, raw_dns_records=None):\n        \"\"\"\n        Use this method to check whether a *host* is a wildcard entry\n\n        This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.\n\n        If you want to know whether a domain is using wildcard DNS, use `is_wildcard_domain()` instead.\n\n        
Args:\n            query (str): The hostname to check for a wildcard entry.\n            ips (list, optional): List of IPs to compare against, typically obtained from a previous DNS resolution of the query.\n            rdtype (str, optional): The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.\n\n        Returns:\n            dict: A dictionary indicating if the query is a wildcard for each checked DNS record type.\n                Keys are DNS record types like \"A\", \"AAAA\", etc.\n                Values are tuples where the first element is a boolean indicating if the query is a wildcard,\n                and the second element is the wildcard parent if it's a wildcard.\n\n        Raises:\n            ValueError: If only one of `ips` or `rdtype` is specified or if no valid IPs are specified.\n\n        Examples:\n            &gt;&gt;&gt; is_wildcard(\"www.github.io\")\n            {\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n\n            &gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n            {\"A\": (False, \"evilcorp.com\")}\n\n        Note:\n            `is_wildcard` can be True, False, or None (indicating that wildcard detection was inconclusive)\n        \"\"\"\n        query = self._wildcard_prevalidation(query)\n        if not query:\n            return {}\n\n        # skip check if the query is a domain\n        if is_domain(query):\n            return {}\n\n        return await self.run_and_return(\"is_wildcard\", query=query, rdtypes=rdtypes, raw_dns_records=raw_dns_records)\n\n    @async_cachedmethod(\n        lambda self: self._is_wildcard_domain_cache, key=lambda domain, rdtypes: (domain, tuple(sorted(rdtypes)))\n    )\n    async def is_wildcard_domain(self, domain, rdtypes):\n        domain = self._wildcard_prevalidation(domain)\n        if not domain:\n            return {}\n\n        return await self.run_and_return(\"is_wildcard_domain\", domain=domain, rdtypes=rdtypes)\n\n    def _wildcard_prevalidation(self, host):\n        if self.wildcard_disable:\n            return False\n\n        host = clean_dns_record(host)\n        # skip check if it's an IP or a plain hostname\n        if is_ip(host) or not \".\" in host:\n            return False\n\n        # skip if query isn't a dns name\n        if not is_dns_name(host):\n            return False\n\n        # skip check if the query's parent domain is excluded in the config\n        wildcard_ignore = self.wildcard_ignore.search(host)\n        if wildcard_ignore:\n            log.debug(f\"Skipping wildcard detection on {host} because {wildcard_ignore} is excluded in the config\")\n            return False\n\n        return host\n\n    async def _mock_dns(self, mock_data, custom_lookup_fn=None):\n        from .mock import MockResolver\n\n        self.resolver = MockResolver(mock_data, custom_lookup_fn=custom_lookup_fn)\n        await self.run_and_return(\"_mock_dns\", mock_data=mock_data, custom_lookup_fn=custom_lookup_fn)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve","title":"resolve  <code>async</code>","text":"<pre><code>resolve(query, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve(self, query, **kwargs):\n    return await self.run_and_return(\"resolve\", query=query, **kwargs)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve_batch","title":"resolve_batch  
<code>async</code>","text":"<pre><code>resolve_batch(queries, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve_batch(self, queries, **kwargs):\n    agen = self.run_and_yield(\"resolve_batch\", queries=queries, **kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve_raw","title":"resolve_raw  <code>async</code>","text":"<pre><code>resolve_raw(query, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve_raw(self, query, **kwargs):\n    return await self.run_and_return(\"resolve_raw\", query=query, **kwargs)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.is_wildcard","title":"is_wildcard  <code>async</code>","text":"<pre><code>is_wildcard(query, rdtypes, raw_dns_records=None)\n</code></pre> <p>Use this method to check whether a host is a wildcard entry</p> <p>This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.</p> <p>If you want to know whether a domain is using wildcard DNS, use <code>is_wildcard_domain()</code> instead.</p> <p>Parameters:</p> <ul> <li> <code>query</code>               (<code>str</code>)           \u2013            <p>The hostname to check for a wildcard entry.</p> </li> <li> <code>ips</code>               (<code>list</code>)           \u2013            <p>List of IPs to compare against, typically obtained from a previous DNS resolution of the query.</p> </li> <li> <code>rdtype</code>               (<code>str</code>)           \u2013            <p>The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary indicating if the query is a wildcard for each checked DNS record type. Keys are DNS record types like \"A\", \"AAAA\", etc. 
Values are tuples where the first element is a boolean indicating if the query is a wildcard, and the second element is the wildcard parent if it's a wildcard.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If only one of <code>ips</code> or <code>rdtype</code> is specified or if no valid IPs are specified.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_wildcard(\"www.github.io\")\n{\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n</code></pre> <pre><code>&gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n{\"A\": (False, \"evilcorp.com\")}\n</code></pre> Note <p><code>is_wildcard</code> can be True, False, or None (indicating that wildcard detection was inconclusive)</p> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>@async_cachedmethod(\n    lambda self: self._is_wildcard_cache,\n    key=lambda query, rdtypes, raw_dns_records: (query, tuple(sorted(rdtypes)), bool(raw_dns_records)),\n)\nasync def is_wildcard(self, query, rdtypes, raw_dns_records=None):\n    \"\"\"\n    Use this method to check whether a *host* is a wildcard entry\n\n    This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.\n\n    If you want to know whether a domain is using wildcard DNS, use `is_wildcard_domain()` instead.\n\n    Args:\n        query (str): The hostname to check for a wildcard entry.\n        ips (list, optional): List of IPs to compare against, typically obtained from a previous DNS resolution of the query.\n        rdtype (str, optional): The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.\n\n    Returns:\n        dict: A dictionary indicating if the query is a wildcard for each checked DNS record type.\n            Keys are DNS record types like \"A\", \"AAAA\", etc.\n            Values are tuples where the first element is a boolean indicating if the query is a wildcard,\n            and the second element is the wildcard parent if it's a wildcard.\n\n    Raises:\n        ValueError: If only one of `ips` or `rdtype` is specified or if no valid IPs are specified.\n\n    Examples:\n        &gt;&gt;&gt; is_wildcard(\"www.github.io\")\n        {\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n\n        &gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n        {\"A\": (False, \"evilcorp.com\")}\n\n    Note:\n        `is_wildcard` can be True, False, or None (indicating that wildcard detection was inconclusive)\n    \"\"\"\n    query = self._wildcard_prevalidation(query)\n    if not query:\n        return {}\n\n    # skip check if the query is a domain\n    if is_domain(query):\n        return {}\n\n    return await self.run_and_return(\"is_wildcard\", query=query, rdtypes=rdtypes, raw_dns_records=raw_dns_records)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.is_wildcard_domain","title":"is_wildcard_domain  <code>async</code>","text":"<pre><code>is_wildcard_domain(domain, rdtypes)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>@async_cachedmethod(\n    lambda self: self._is_wildcard_domain_cache, key=lambda domain, rdtypes: (domain, tuple(sorted(rdtypes)))\n)\nasync def is_wildcard_domain(self, domain, rdtypes):\n    domain = self._wildcard_prevalidation(domain)\n    if not domain:\n        return {}\n\n    return await self.run_and_return(\"is_wildcard_domain\", domain=domain, 
rdtypes=rdtypes)\n</code></pre>"},{"location":"dev/helpers/interactsh/","title":"Interact.sh","text":"<p>A pure python implementation of ProjectDiscovery's interact.sh.</p> <p>\"Interactsh is an open-source tool for detecting out-of-band interactions. It is a tool designed to detect vulnerabilities that cause external interactions.\"</p> <ul> <li>https://app.interactsh.com</li> <li>https://github.com/projectdiscovery/interactsh</li> </ul> <p>This class facilitates interactions with the interact.sh service for out-of-band data exfiltration and vulnerability confirmation. It allows for customization by accepting server and token parameters from the configuration provided by <code>parent_helper</code>.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>An instance of a helper class containing configuration data.</p> </li> <li> <code>server</code>               (<code>str</code>)           \u2013            <p>The server to be used. If None (the default), a random server will be chosen from a predetermined list.</p> </li> <li> <code>correlation_id</code>               (<code>str</code>)           \u2013            <p>An identifier to correlate requests and responses. Default is None.</p> </li> <li> <code>custom_server</code>               (<code>str</code>)           \u2013            <p>Optional. A custom interact.sh server. Loaded from configuration.</p> </li> <li> <code>token</code>               (<code>str</code>)           \u2013            <p>Optional. A token for interact.sh API. Loaded from configuration.</p> </li> <li> <code>_poll_task</code>               (<code>AsyncTask</code>)           \u2013            <p>The task responsible for polling the interact.sh server.</p> </li> </ul> <p>Examples:</p> <pre><code># instantiate interact.sh client (no requests are sent yet)\n&gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n# register with an interact.sh server\n&gt;&gt;&gt; interactsh_domain = await interactsh_client.register()\n[INFO] Registering with interact.sh server: oast.me\n[INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n# simulate an out-of-band interaction\n&gt;&gt;&gt; await self.helpers.request(f\"https://{interactsh_domain}/test\")\n# wait for out-of-band interaction to be registered\n&gt;&gt;&gt; await asyncio.sleep(10)\n&gt;&gt;&gt; data_list = await interactsh_client.poll()\n&gt;&gt;&gt; print(data_list)\n[\n    {\n        \"protocol\": \"dns\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"q-type\": \"A\",\n        \"raw-request\": \"...\",\n        \"remote-address\": \"1.2.3.4\",\n        \"timestamp\": \"2023-09-15T21:09:23.187226851Z\"\n    },\n    {\n        \"protocol\": \"http\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"raw-request\": \"GET /test HTTP/1.1 ...\",\n        \"remote-address\": \"1.2.3.4\",\n        \"timestamp\": \"2023-09-15T21:09:24.155677967Z\"\n    }\n]\n# finally, shut down the client\n&gt;&gt;&gt; await interactsh_client.deregister()\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>class Interactsh:\n    \"\"\"\n    A pure python implementation of ProjectDiscovery's interact.sh.\n\n    *\"Interactsh is an open-source tool for detecting 
out-of-band interactions. It is a tool designed to detect vulnerabilities that cause external interactions.\"*\n\n    - https://app.interactsh.com\n    - https://github.com/projectdiscovery/interactsh\n\n    This class facilitates interactions with the interact.sh service for\n    out-of-band data exfiltration and vulnerability confirmation. It allows\n    for customization by accepting server and token parameters from the\n    configuration provided by `parent_helper`.\n\n    Attributes:\n        parent_helper (ConfigAwareHelper): An instance of a helper class containing configuration data.\n        server (str): The server to be used. If None (the default), a random server will be chosen from a predetermined list.\n        correlation_id (str): An identifier to correlate requests and responses. Default is None.\n        custom_server (str): Optional. A custom interact.sh server. Loaded from configuration.\n        token (str): Optional. A token for interact.sh API. Loaded from configuration.\n        _poll_task (AsyncTask): The task responsible for polling the interact.sh server.\n\n    Examples:\n        ```python\n        # instantiate interact.sh client (no requests are sent yet)\n        &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n        # register with an interact.sh server\n        &gt;&gt;&gt; interactsh_domain = await interactsh_client.register()\n        [INFO] Registering with interact.sh server: oast.me\n        [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n        # simulate an out-of-band interaction\n        &gt;&gt;&gt; await self.helpers.request(f\"https://{interactsh_domain}/test\")\n        # wait for out-of-band interaction to be registered\n        &gt;&gt;&gt; await asyncio.sleep(10)\n        &gt;&gt;&gt; data_list = await interactsh_client.poll()\n        &gt;&gt;&gt; print(data_list)\n        [\n            {\n                \"protocol\": \"dns\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"q-type\": \"A\",\n                \"raw-request\": \"...\",\n                \"remote-address\": \"1.2.3.4\",\n                \"timestamp\": \"2023-09-15T21:09:23.187226851Z\"\n            },\n            {\n                \"protocol\": \"http\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"raw-request\": \"GET /test HTTP/1.1 ...\",\n                \"remote-address\": \"1.2.3.4\",\n                \"timestamp\": \"2023-09-15T21:09:24.155677967Z\"\n            }\n        ]\n        # finally, shut down the client\n        &gt;&gt;&gt; await interactsh_client.deregister()\n        ```\n    \"\"\"\n\n    def __init__(self, parent_helper, poll_interval=10):\n        self.parent_helper = parent_helper\n        self.server = None\n        self.correlation_id = None\n        self.custom_server = self.parent_helper.config.get(\"interactsh_server\", None)\n        self.token = self.parent_helper.config.get(\"interactsh_token\", None)\n        self.poll_interval = poll_interval\n        self._poll_task = None\n\n    async def register(self, callback=None):\n        \"\"\"\n        Registers the instance with an interact.sh server and sets up polling.\n\n        Generates RSA keys for secure communication, builds a correlation ID,\n        and 
sends a POST request to an interact.sh server to register. Optionally,\n        starts an asynchronous polling task to listen for interactions.\n\n        Args:\n            callback (callable, optional): A function to be called each time new interactions are received.\n\n        Returns:\n            str: The registered domain for out-of-band interactions.\n\n        Raises:\n            InteractshError: If registration with an interact.sh server fails.\n\n        Examples:\n            &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n            &gt;&gt;&gt; registered_domain = await interactsh_client.register()\n            [INFO] Registering with interact.sh server: oast.me\n            [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n        \"\"\"\n        rsa = RSA.generate(1024)\n\n        self.public_key = rsa.publickey().exportKey()\n        self.private_key = rsa.exportKey()\n\n        encoded_public_key = base64.b64encode(self.public_key).decode(\"utf8\")\n\n        uuid = uuid4().hex.ljust(33, \"a\")\n        guid = \"\".join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in uuid)\n\n        self.correlation_id = guid[:20]\n        self.secret = str(uuid4())\n        headers = {}\n\n        if self.custom_server:\n            if not self.token:\n                log.verbose(\"Interact.sh token is not set\")\n            else:\n                headers[\"Authorization\"] = self.token\n            self.server_list = [str(self.custom_server)]\n        else:\n            self.server_list = random.sample(server_list, k=len(server_list))\n        for server in self.server_list:\n            log.info(f\"Registering with interact.sh server: {server}\")\n            data = {\n                \"public-key\": encoded_public_key,\n                \"secret-key\": self.secret,\n                \"correlation-id\": self.correlation_id,\n            }\n            r = await self.parent_helper.request(\n                f\"https://{server}/register\", headers=headers, json=data, method=\"POST\"\n            )\n            if r is None:\n                continue\n            try:\n                msg = r.json().get(\"message\", \"\")\n                assert \"registration successful\" in msg\n            except Exception:\n                log.debug(f\"Failed to register with interactsh server {self.server}\")\n                continue\n            self.server = server\n            self.domain = f\"{guid}.{self.server}\"\n            break\n\n        if not self.server:\n            raise InteractshError(f\"Failed to register with an interactsh server\")\n\n        log.info(\n            f\"Successfully registered to interactsh server {self.server} with correlation_id {self.correlation_id} [{self.domain}]\"\n        )\n\n        if callable(callback):\n            self._poll_task = asyncio.create_task(self.poll_loop(callback))\n\n        return self.domain\n\n    async def deregister(self):\n        \"\"\"\n        Deregisters the instance from the interact.sh server and cancels the polling task.\n\n        Sends a POST request to the server to deregister, using the correlation ID\n        and secret key generated during registration. 
Optionally, if a polling\n        task was started, it is cancelled.\n\n        Raises:\n            InteractshError: If required information is missing or if deregistration fails.\n\n        Examples:\n            &gt;&gt;&gt; await interactsh_client.deregister()\n        \"\"\"\n        if not self.server or not self.correlation_id or not self.secret:\n            raise InteractshError(f\"Missing required information to deregister\")\n\n        headers = {}\n        if self.token:\n            headers[\"Authorization\"] = self.token\n\n        data = {\"secret-key\": self.secret, \"correlation-id\": self.correlation_id}\n\n        r = await self.parent_helper.request(\n            f\"https://{self.server}/deregister\", headers=headers, json=data, method=\"POST\"\n        )\n\n        if self._poll_task is not None:\n            self._poll_task.cancel()\n\n        if \"success\" not in getattr(r, \"text\", \"\"):\n            raise InteractshError(f\"Failed to de-register with interactsh server {self.server}\")\n\n    async def poll(self):\n        \"\"\"\n        Polls the interact.sh server for interactions tied to the current instance.\n\n        Sends a GET request to the server to fetch interactions associated with the\n        current correlation_id and secret key. Returned interactions are decrypted\n        using an AES key provided by the server response.\n\n        Raises:\n            InteractshError: If required information for polling is missing.\n\n        Returns:\n            list: A list of decrypted interaction data dictionaries.\n\n        Examples:\n            &gt;&gt;&gt; data_list = await interactsh_client.poll()\n            &gt;&gt;&gt; print(data_list)\n            [\n                {\n                    \"protocol\": \"dns\",\n                    \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                    ...\n                },\n                ...\n            ]\n        \"\"\"\n        if not self.server or not self.correlation_id or not self.secret:\n            raise InteractshError(f\"Missing required information to poll\")\n\n        headers = {}\n        if self.token:\n            headers[\"Authorization\"] = self.token\n\n        try:\n            r = await self.parent_helper.request(\n                f\"https://{self.server}/poll?id={self.correlation_id}&amp;secret={self.secret}\", headers=headers\n            )\n            if r is None:\n                raise InteractshError(\"Error polling interact.sh: No response from server\")\n\n            ret = []\n            data_list = r.json().get(\"data\", None)\n            if data_list:\n                aes_key = r.json()[\"aes_key\"]\n\n                for data in data_list:\n                    decrypted_data = self._decrypt(aes_key, data)\n                    ret.append(decrypted_data)\n            return ret\n        except Exception as e:\n            raise InteractshError(f\"Error polling interact.sh: {e}\")\n\n    async def poll_loop(self, callback):\n        \"\"\"\n        Starts a polling loop to continuously check for interactions with the interact.sh server.\n\n        Continuously polls the interact.sh server for interactions tied to the current instance,\n        using the `poll` method. 
When interactions are received, it executes the given callback\n        function with each interaction data.\n\n        Parameters:\n            callback (callable): The function to be called for every interaction received from the server.\n\n        Returns:\n            awaitable: An awaitable object that executes the internal `_poll_loop` method.\n\n        Examples:\n            &gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n        \"\"\"\n        async with self.parent_helper.scan._acatch(context=self._poll_loop):\n            return await self._poll_loop(callback)\n\n    async def _poll_loop(self, callback):\n        while 1:\n            if self.parent_helper.scan.stopping:\n                await asyncio.sleep(1)\n                continue\n            data_list = []\n            try:\n                data_list = await self.poll()\n            except InteractshError as e:\n                log.warning(e)\n                log.trace(traceback.format_exc())\n            if not data_list:\n                await asyncio.sleep(self.poll_interval)\n                continue\n            for data in data_list:\n                if data:\n                    await self.parent_helper.execute_sync_or_async(callback, data)\n\n    def _decrypt(self, aes_key, data):\n        \"\"\"\n        Decrypts and returns the data received from the interact.sh server.\n\n        Uses RSA and AES for decrypting the data. RSA with PKCS1_OAEP and SHA256 is used to decrypt the AES key,\n        and then AES (CFB mode) is used to decrypt the actual data payload.\n\n        Parameters:\n            aes_key (str): The AES key for decryption, encrypted with RSA and base64 encoded.\n            data (str): The data payload to decrypt, which is base64 encoded and AES encrypted.\n\n        Returns:\n            dict: The decrypted data, loaded as a JSON object.\n\n        Examples:\n            &gt;&gt;&gt; decrypted_data = self._decrypt(aes_key, data)\n        \"\"\"\n        private_key = RSA.importKey(self.private_key)\n        cipher = PKCS1_OAEP.new(private_key, hashAlgo=SHA256)\n        aes_plain_key = cipher.decrypt(base64.b64decode(aes_key))\n        decode = base64.b64decode(data)\n        bs = AES.block_size\n        iv = decode[:bs]\n        cryptor = AES.new(key=aes_plain_key, mode=AES.MODE_CFB, IV=iv, segment_size=128)\n        plain_text = cryptor.decrypt(decode)\n        return json.loads(plain_text[16:])\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.deregister","title":"deregister  <code>async</code>","text":"<pre><code>deregister()\n</code></pre> <p>Deregisters the instance from the interact.sh server and cancels the polling task.</p> <p>Sends a POST request to the server to deregister, using the correlation ID and secret key generated during registration. Optionally, if a polling task was started, it is cancelled.</p> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If required information is missing or if deregistration fails.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await interactsh_client.deregister()\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def deregister(self):\n    \"\"\"\n    Deregisters the instance from the interact.sh server and cancels the polling task.\n\n    Sends a POST request to the server to deregister, using the correlation ID\n    and secret key generated during registration. 
Optionally, if a polling\n    task was started, it is cancelled.\n\n    Raises:\n        InteractshError: If required information is missing or if deregistration fails.\n\n    Examples:\n        &gt;&gt;&gt; await interactsh_client.deregister()\n    \"\"\"\n    if not self.server or not self.correlation_id or not self.secret:\n        raise InteractshError(f\"Missing required information to deregister\")\n\n    headers = {}\n    if self.token:\n        headers[\"Authorization\"] = self.token\n\n    data = {\"secret-key\": self.secret, \"correlation-id\": self.correlation_id}\n\n    r = await self.parent_helper.request(\n        f\"https://{self.server}/deregister\", headers=headers, json=data, method=\"POST\"\n    )\n\n    if self._poll_task is not None:\n        self._poll_task.cancel()\n\n    if \"success\" not in getattr(r, \"text\", \"\"):\n        raise InteractshError(f\"Failed to de-register with interactsh server {self.server}\")\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.poll","title":"poll  <code>async</code>","text":"<pre><code>poll()\n</code></pre> <p>Polls the interact.sh server for interactions tied to the current instance.</p> <p>Sends a GET request to the server to fetch interactions associated with the current correlation_id and secret key. Returned interactions are decrypted using an AES key provided by the server response.</p> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If required information for polling is missing.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of decrypted interaction data dictionaries.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; data_list = await interactsh_client.poll()\n&gt;&gt;&gt; print(data_list)\n[\n    {\n        \"protocol\": \"dns\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        ...\n    },\n    ...\n]\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def poll(self):\n    \"\"\"\n    Polls the interact.sh server for interactions tied to the current instance.\n\n    Sends a GET request to the server to fetch interactions associated with the\n    current correlation_id and secret key. 
Returned interactions are decrypted\n    using an AES key provided by the server response.\n\n    Raises:\n        InteractshError: If required information for polling is missing.\n\n    Returns:\n        list: A list of decrypted interaction data dictionaries.\n\n    Examples:\n        &gt;&gt;&gt; data_list = await interactsh_client.poll()\n        &gt;&gt;&gt; print(data_list)\n        [\n            {\n                \"protocol\": \"dns\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                ...\n            },\n            ...\n        ]\n    \"\"\"\n    if not self.server or not self.correlation_id or not self.secret:\n        raise InteractshError(f\"Missing required information to poll\")\n\n    headers = {}\n    if self.token:\n        headers[\"Authorization\"] = self.token\n\n    try:\n        r = await self.parent_helper.request(\n            f\"https://{self.server}/poll?id={self.correlation_id}&amp;secret={self.secret}\", headers=headers\n        )\n        if r is None:\n            raise InteractshError(\"Error polling interact.sh: No response from server\")\n\n        ret = []\n        data_list = r.json().get(\"data\", None)\n        if data_list:\n            aes_key = r.json()[\"aes_key\"]\n\n            for data in data_list:\n                decrypted_data = self._decrypt(aes_key, data)\n                ret.append(decrypted_data)\n        return ret\n    except Exception as e:\n        raise InteractshError(f\"Error polling interact.sh: {e}\")\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.poll_loop","title":"poll_loop  <code>async</code>","text":"<pre><code>poll_loop(callback)\n</code></pre> <p>Starts a polling loop to continuously check for interactions with the interact.sh server.</p> <p>Continuously polls the interact.sh server for interactions tied to the current instance, using the <code>poll</code> method. When interactions are received, it executes the given callback function with each interaction data.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>callable</code>)           \u2013            <p>The function to be called for every interaction received from the server.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>awaitable</code>          \u2013            <p>An awaitable object that executes the internal <code>_poll_loop</code> method.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def poll_loop(self, callback):\n    \"\"\"\n    Starts a polling loop to continuously check for interactions with the interact.sh server.\n\n    Continuously polls the interact.sh server for interactions tied to the current instance,\n    using the `poll` method. 
When interactions are received, it executes the given callback\n    function with each interaction data.\n\n    Parameters:\n        callback (callable): The function to be called for every interaction received from the server.\n\n    Returns:\n        awaitable: An awaitable object that executes the internal `_poll_loop` method.\n\n    Examples:\n        &gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n    \"\"\"\n    async with self.parent_helper.scan._acatch(context=self._poll_loop):\n        return await self._poll_loop(callback)\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.register","title":"register  <code>async</code>","text":"<pre><code>register(callback=None)\n</code></pre> <p>Registers the instance with an interact.sh server and sets up polling.</p> <p>Generates RSA keys for secure communication, builds a correlation ID, and sends a POST request to an interact.sh server to register. Optionally, starts an asynchronous polling task to listen for interactions.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>callable</code>, default:                   <code>None</code> )           \u2013            <p>A function to be called each time new interactions are received.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The registered domain for out-of-band interactions.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If registration with an interact.sh server fails.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n&gt;&gt;&gt; registered_domain = await interactsh_client.register()\n[INFO] Registering with interact.sh server: oast.me\n[INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def register(self, callback=None):\n    \"\"\"\n    Registers the instance with an interact.sh server and sets up polling.\n\n    Generates RSA keys for secure communication, builds a correlation ID,\n    and sends a POST request to an interact.sh server to register. 
Optionally,\n    starts an asynchronous polling task to listen for interactions.\n\n    Args:\n        callback (callable, optional): A function to be called each time new interactions are received.\n\n    Returns:\n        str: The registered domain for out-of-band interactions.\n\n    Raises:\n        InteractshError: If registration with an interact.sh server fails.\n\n    Examples:\n        &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n        &gt;&gt;&gt; registered_domain = await interactsh_client.register()\n        [INFO] Registering with interact.sh server: oast.me\n        [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n    \"\"\"\n    rsa = RSA.generate(1024)\n\n    self.public_key = rsa.publickey().exportKey()\n    self.private_key = rsa.exportKey()\n\n    encoded_public_key = base64.b64encode(self.public_key).decode(\"utf8\")\n\n    uuid = uuid4().hex.ljust(33, \"a\")\n    guid = \"\".join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in uuid)\n\n    self.correlation_id = guid[:20]\n    self.secret = str(uuid4())\n    headers = {}\n\n    if self.custom_server:\n        if not self.token:\n            log.verbose(\"Interact.sh token is not set\")\n        else:\n            headers[\"Authorization\"] = self.token\n        self.server_list = [str(self.custom_server)]\n    else:\n        self.server_list = random.sample(server_list, k=len(server_list))\n    for server in self.server_list:\n        log.info(f\"Registering with interact.sh server: {server}\")\n        data = {\n            \"public-key\": encoded_public_key,\n            \"secret-key\": self.secret,\n            \"correlation-id\": self.correlation_id,\n        }\n        r = await self.parent_helper.request(\n            f\"https://{server}/register\", headers=headers, json=data, method=\"POST\"\n        )\n        if r is None:\n            continue\n        try:\n            msg = r.json().get(\"message\", \"\")\n            assert \"registration successful\" in msg\n        except Exception:\n            log.debug(f\"Failed to register with interactsh server {self.server}\")\n            continue\n        self.server = server\n        self.domain = f\"{guid}.{self.server}\"\n        break\n\n    if not self.server:\n        raise InteractshError(f\"Failed to register with an interactsh server\")\n\n    log.info(\n        f\"Successfully registered to interactsh server {self.server} with correlation_id {self.correlation_id} [{self.domain}]\"\n    )\n\n    if callable(callback):\n        self._poll_task = asyncio.create_task(self.poll_loop(callback))\n\n    return self.domain\n</code></pre>"},{"location":"dev/helpers/misc/","title":"Misc Helpers","text":"<p>These are miscellaneous helpers, used throughout BBOT and its modules for simple tasks such as parsing domains, ports, urls, etc.</p>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.add_get_params","title":"add_get_params","text":"<pre><code>add_get_params(url, params)\n</code></pre> <p>Add or update query parameters to the given URL.</p> <p>This function takes an existing URL and a dictionary of query parameters, updates or adds these parameters to the URL, and returns a new URL.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The original URL.</p> </li> <li> <code>params</code>               (<code>Dict[str, Any]</code>)           \u2013  
          <p>A dictionary containing the query parameters to be added or updated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ParseResult</code>          \u2013            <p>A named 6-tuple containing the components of the modified URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'bar': 2})\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=1&amp;bar=2', fragment='')\n</code></pre> <pre><code>&gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'foo': 2})\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=2', fragment='')\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def add_get_params(url, params):\n    \"\"\"\n    Add or update query parameters to the given URL.\n\n    This function takes an existing URL and a dictionary of query parameters,\n    updates or adds these parameters to the URL, and returns a new URL.\n\n    Args:\n        url (Union[str, ParseResult]): The original URL.\n        params (Dict[str, Any]): A dictionary containing the query parameters to be added or updated.\n\n    Returns:\n        ParseResult: A named 6-tuple containing the components of the modified URL.\n\n    Examples:\n        &gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'bar': 2})\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=1&amp;bar=2', fragment='')\n\n        &gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'foo': 2})\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=2', fragment='')\n    \"\"\"\n    parsed = parse_url(url)\n    old_params = dict(parse_qs(parsed.query))\n    old_params.update(params)\n    return parsed._replace(query=urlencode(old_params, doseq=True))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.as_completed","title":"as_completed  <code>async</code>","text":"<pre><code>as_completed(coros)\n</code></pre> <p>Async generator that yields completed Tasks as they are completed.</p> <p>Parameters:</p> <ul> <li> <code>coros</code>               (<code>iterable</code>)           \u2013            <p>An iterable of coroutine objects or asyncio Tasks.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>asyncio.Task: A Task object that has completed its execution.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def main():\n...     async for task in as_completed([coro1(), coro2(), coro3()]):\n...         result = task.result()\n...         print(f'Task completed with result: {result}')\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(main())\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def as_completed(coros):\n    \"\"\"\n    Async generator that yields completed Tasks as they are completed.\n\n    Args:\n        coros (iterable): An iterable of coroutine objects or asyncio Tasks.\n\n    Yields:\n        asyncio.Task: A Task object that has completed its execution.\n\n    Examples:\n        &gt;&gt;&gt; async def main():\n        ...     async for task in as_completed([coro1(), coro2(), coro3()]):\n        ...         result = task.result()\n        ...         
print(f'Task completed with result: {result}')\n\n        &gt;&gt;&gt; asyncio.run(main())\n    \"\"\"\n    tasks = {coro if isinstance(coro, asyncio.Task) else asyncio.create_task(coro): coro for coro in coros}\n    while tasks:\n        done, _ = await asyncio.wait(tasks.keys(), return_when=asyncio.FIRST_COMPLETED)\n        for task in done:\n            tasks.pop(task)\n            yield task\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.backup_file","title":"backup_file","text":"<pre><code>backup_file(filename, max_backups=10)\n</code></pre> <p>Renames a file by appending an iteration number as a backup. Recursively renames files up to a specified maximum number of backups.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str or Path</code>)           \u2013            <p>The file to backup.</p> </li> <li> <code>max_backups</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The maximum number of backups to keep. Defaults to 10.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>pathlib.Path: The new backup filepath.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; backup_file(\"/tmp/test.txt\")\nPosixPath(\"/tmp/test.0.txt\")\n&gt;&gt;&gt; backup_file(\"/tmp/test.0.txt\")\nPosixPath(\"/tmp/test.1.txt\")\n&gt;&gt;&gt; backup_file(\"/tmp/test.1.txt\")\nPosixPath(\"/tmp/test.2.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def backup_file(filename, max_backups=10):\n    \"\"\"\n    Renames a file by appending an iteration number as a backup. Recursively renames\n    files up to a specified maximum number of backups.\n\n    Args:\n        filename (str or pathlib.Path): The file to backup.\n        max_backups (int, optional): The maximum number of backups to keep. Defaults to 10.\n\n    Returns:\n        pathlib.Path: The new backup filepath.\n\n    Examples:\n        &gt;&gt;&gt; backup_file(\"/tmp/test.txt\")\n        PosixPath(\"/tmp/test.0.txt\")\n        &gt;&gt;&gt; backup_file(\"/tmp/test.0.txt\")\n        PosixPath(\"/tmp/test.1.txt\")\n        &gt;&gt;&gt; backup_file(\"/tmp/test.1.txt\")\n        PosixPath(\"/tmp/test.2.txt\")\n    \"\"\"\n    filename = Path(filename).resolve()\n    suffixes = [s.strip(\".\") for s in filename.suffixes]\n    iteration = 1\n    with suppress(Exception):\n        iteration = min(max_backups - 1, max(0, int(suffixes[0]))) + 1\n        suffixes = suffixes[1:]\n    stem = filename.stem.split(\".\")[0]\n    destination = filename.parent / f\"{stem}.{iteration}.{'.'.join(suffixes)}\"\n    if destination.exists() and iteration &lt; max_backups:\n        backup_file(destination)\n    if filename.exists():\n        filename.rename(destination)\n    return destination\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.best_http_status","title":"best_http_status","text":"<pre><code>best_http_status(code1, code2)\n</code></pre> <p>Determine the better HTTP status code between two given codes.</p> <p>The 'better' status code is considered based on typical usage and priority in HTTP communication. Lower codes are generally better than higher codes. Within the same class (e.g., 2xx), a lower code is better. 
Between different classes, the order of preference is 2xx &gt; 3xx &gt; 1xx &gt; 4xx &gt; 5xx.</p> <p>Parameters:</p> <ul> <li> <code>code1</code>               (<code>int</code>)           \u2013            <p>The first HTTP status code.</p> </li> <li> <code>code2</code>               (<code>int</code>)           \u2013            <p>The second HTTP status code.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The better HTTP status code between the two provided codes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; best_http_status(200, 404)\n200\n&gt;&gt;&gt; best_http_status(500, 400)\n400\n&gt;&gt;&gt; best_http_status(301, 302)\n301\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def best_http_status(code1, code2):\n    \"\"\"\n    Determine the better HTTP status code between two given codes.\n\n    The 'better' status code is considered based on typical usage and priority in HTTP communication.\n    Lower codes are generally better than higher codes. Within the same class (e.g., 2xx), a lower code is better.\n    Between different classes, the order of preference is 2xx &gt; 3xx &gt; 1xx &gt; 4xx &gt; 5xx.\n\n    Args:\n        code1 (int): The first HTTP status code.\n        code2 (int): The second HTTP status code.\n\n    Returns:\n        int: The better HTTP status code between the two provided codes.\n\n    Examples:\n        &gt;&gt;&gt; best_http_status(200, 404)\n        200\n        &gt;&gt;&gt; best_http_status(500, 400)\n        400\n        &gt;&gt;&gt; best_http_status(301, 302)\n        301\n    \"\"\"\n\n    # Classify the codes into their respective categories (1xx, 2xx, 3xx, 4xx, 5xx)\n    def classify_code(code):\n        return int(code) // 100\n\n    class1 = classify_code(code1)\n    class2 = classify_code(code2)\n\n    # Priority order for classes\n    priority_order = {2: 1, 3: 2, 1: 3, 4: 4, 5: 5}\n\n    # Compare based on class priority\n    p1 = priority_order.get(class1, 10)\n    p2 = priority_order.get(class2, 10)\n    if p1 != p2:\n        return code1 if p1 &lt; p2 else code2\n\n    # If in the same class, the lower code is better\n    return min(code1, code2)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.bytes_to_human","title":"bytes_to_human","text":"<pre><code>bytes_to_human(_bytes)\n</code></pre> <p>Convert a bytes size to a human-readable string.</p> <p>This function converts a numeric bytes value into a human-readable string format, complete with the appropriate unit symbol (B, KB, MB, GB, etc.).</p> <p>Parameters:</p> <ul> <li> <code>_bytes</code>               (<code>int</code>)           \u2013            <p>The number of bytes to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the number of bytes in a more readable format, rounded to two  decimal places.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; bytes_to_human(1234129384)\n'1.15GB'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def bytes_to_human(_bytes):\n    \"\"\"Convert a bytes size to a human-readable string.\n\n    This function converts a numeric bytes value into a human-readable string format, complete\n    with the appropriate unit symbol (B, KB, MB, GB, etc.).\n\n    Args:\n        _bytes (int): The number of bytes to convert.\n\n    Returns:\n        str: A string representing the number of bytes in a more readable format, rounded to two\n        
     decimal places.\n\n    Examples:\n        &gt;&gt;&gt; bytes_to_human(1234129384)\n        '1.15GB'\n    \"\"\"\n    sizes = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]\n    units = {}\n    for count, size in enumerate(sizes):\n        units[size] = pow(1024, count)\n    for size in sizes:\n        if abs(_bytes) &lt; 1024.0:\n            if size == sizes[0]:\n                _bytes = str(int(_bytes))\n            else:\n                _bytes = f\"{_bytes:.2f}\"\n            return f\"{_bytes}{size}\"\n        _bytes /= 1024\n    raise ValueError(f'Unable to convert \"{_bytes}\" to human filesize')\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.can_sudo_without_password","title":"can_sudo_without_password","text":"<pre><code>can_sudo_without_password()\n</code></pre> <p>Check if the current user has passwordless sudo access.</p> <p>This function checks whether the current user can use sudo without entering a password. It runs a command with sudo and checks the return code to determine this.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the current user can use sudo without a password, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; can_sudo_without_password()\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def can_sudo_without_password():\n    \"\"\"Check if the current user has passwordless sudo access.\n\n    This function checks whether the current user can use sudo without entering a password.\n    It runs a command with sudo and checks the return code to determine this.\n\n    Returns:\n        bool: True if the current user can use sudo without a password, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; can_sudo_without_password()\n        True\n    \"\"\"\n    if os.geteuid() != 0:\n        env = dict(os.environ)\n        env[\"SUDO_ASKPASS\"] = \"/bin/false\"\n        try:\n            sp.run([\"sudo\", \"-K\"], stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True, env=env)\n            sp.run([\"sudo\", \"-An\", \"/bin/true\"], stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True, env=env)\n        except sp.CalledProcessError:\n            return False\n    return True\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cancel_tasks","title":"cancel_tasks  <code>async</code>","text":"<pre><code>cancel_tasks(tasks, ignore_errors=True)\n</code></pre> <p>Asynchronously cancels a list of asyncio tasks.</p> <p>Parameters:</p> <ul> <li> <code>tasks</code>               (<code>list[Task]</code>)           \u2013            <p>A list of asyncio Task objects to cancel.</p> </li> <li> <code>ignore_errors</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to ignore errors other than asyncio.CancelledError. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def main():\n...     task1 = asyncio.create_task(async_function1())\n...     task2 = asyncio.create_task(async_function2())\n...     
await cancel_tasks([task1, task2])\n...\n&gt;&gt;&gt; asyncio.run(main())\n</code></pre> Note <p>This function will not cancel the current task that it is called from.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def cancel_tasks(tasks, ignore_errors=True):\n    \"\"\"\n    Asynchronously cancels a list of asyncio tasks.\n\n    Args:\n        tasks (list[Task]): A list of asyncio Task objects to cancel.\n        ignore_errors (bool, optional): Whether to ignore errors other than asyncio.CancelledError. Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; async def main():\n        ...     task1 = asyncio.create_task(async_function1())\n        ...     task2 = asyncio.create_task(async_function2())\n        ...     await cancel_tasks([task1, task2])\n        ...\n        &gt;&gt;&gt; asyncio.run(main())\n\n    Note:\n        This function will not cancel the current task that it is called from.\n    \"\"\"\n    current_task = asyncio.current_task()\n    tasks = [t for t in tasks if t != current_task]\n    for task in tasks:\n        # log.debug(f\"Cancelling task: {task}\")\n        task.cancel()\n    if ignore_errors:\n        for task in tasks:\n            try:\n                await task\n            except BaseException as e:\n                if not isinstance(e, asyncio.CancelledError):\n                    import traceback\n\n                    log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cancel_tasks_sync","title":"cancel_tasks_sync","text":"<pre><code>cancel_tasks_sync(tasks)\n</code></pre> <p>Synchronously cancels a list of asyncio tasks.</p> <p>Parameters:</p> <ul> <li> <code>tasks</code>               (<code>list[Task]</code>)           \u2013            <p>A list of asyncio Task objects to cancel.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; loop = asyncio.get_event_loop()\n&gt;&gt;&gt; task1 = loop.create_task(some_async_function1())\n&gt;&gt;&gt; task2 = loop.create_task(some_async_function2())\n&gt;&gt;&gt; cancel_tasks_sync([task1, task2])\n</code></pre> Note <p>This function will not cancel the current task from which it is called.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cancel_tasks_sync(tasks):\n    \"\"\"\n    Synchronously cancels a list of asyncio tasks.\n\n    Args:\n        tasks (list[Task]): A list of asyncio Task objects to cancel.\n\n    Examples:\n        &gt;&gt;&gt; loop = asyncio.get_event_loop()\n        &gt;&gt;&gt; task1 = loop.create_task(some_async_function1())\n        &gt;&gt;&gt; task2 = loop.create_task(some_async_function2())\n        &gt;&gt;&gt; cancel_tasks_sync([task1, task2])\n\n    Note:\n        This function will not cancel the current task from which it is called.\n    \"\"\"\n    current_task = asyncio.current_task()\n    for task in tasks:\n        if task != current_task:\n            # log.debug(f\"Cancelling task: {task}\")\n            task.cancel()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.chain_lists","title":"chain_lists","text":"<pre><code>chain_lists(l, try_files=False, msg=None, remove_blank=True, validate=False, validate_chars='&lt;&gt;:\"/\\\\|?*)')\n</code></pre> <p>Chains together list elements, allowing for entries separated by commas.</p> <p>This function takes a list <code>l</code> and flattens it by splitting its entries on commas. 
It also allows you to optionally open entries as files and add their contents to the list.</p> <p>The order of entries is preserved, and deduplication is performed automatically.</p> <p>Parameters:</p> <ul> <li> <code>l</code>               (<code>list</code>)           \u2013            <p>The list of strings to chain together.</p> </li> <li> <code>try_files</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to try to open entries as files. Defaults to False.</p> </li> <li> <code>msg</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>An optional message to log when reading from a file. Defaults to None.</p> </li> <li> <code>remove_blank</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to remove blank entries from the list. Defaults to True.</p> </li> <li> <code>validate</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform validation for undesirable characters. Defaults to False.</p> </li> <li> <code>validate_chars</code>               (<code>str</code>, default:                   <code>'&lt;&gt;:\"/\\\\|?*)'</code> )           \u2013            <p>When performing validation, what additional set of characters to block (blocks non-printable ascii automatically). Defaults to '&lt;&gt;:\"/|?*)'</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>The list of chained elements.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string contains invalid characters, when enabled (off by default).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; chain_lists([\"a\", \"b,c,d\"])\n['a', 'b', 'c', 'd']\n</code></pre> <pre><code>&gt;&gt;&gt; chain_lists([\"a,file.txt\", \"c,d\"], try_files=True)\n['a', 'f_line1', 'f_line2', 'f_line3', 'c', 'd']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def chain_lists(\n    l,\n    try_files=False,\n    msg=None,\n    remove_blank=True,\n    validate=False,\n    validate_chars='&lt;&gt;:\"/\\\\|?*)',\n):\n    \"\"\"Chains together list elements, allowing for entries separated by commas.\n\n    This function takes a list `l` and flattens it by splitting its entries on commas.\n    It also allows you to optionally open entries as files and add their contents to the list.\n\n    The order of entries is preserved, and deduplication is performed automatically.\n\n    Args:\n        l (list): The list of strings to chain together.\n        try_files (bool, optional): Whether to try to open entries as files. Defaults to False.\n        msg (str, optional): An optional message to log when reading from a file. Defaults to None.\n        remove_blank (bool, optional): Whether to remove blank entries from the list. Defaults to True.\n        validate (bool, optional): Whether to perform validation for undesirable characters. Defaults to False.\n        validate_chars (str, optional): When performing validation, what additional set of characters to block (blocks non-printable ascii automatically). 
Defaults to '&lt;&gt;:\"/\\\\|?*)'\n\n    Returns:\n        list: The list of chained elements.\n\n    Raises:\n        ValueError: If the input string contains invalid characters, when enabled (off by default).\n\n    Examples:\n        &gt;&gt;&gt; chain_lists([\"a\", \"b,c,d\"])\n        ['a', 'b', 'c', 'd']\n\n        &gt;&gt;&gt; chain_lists([\"a,file.txt\", \"c,d\"], try_files=True)\n        ['a', 'f_line1', 'f_line2', 'f_line3', 'c', 'd']\n    \"\"\"\n    if isinstance(l, str):\n        l = [l]\n    final_list = dict()\n    for entry in l:\n        for s in split_regex.split(entry):\n            f = s.strip()\n            if validate:\n                if any((c in validate_chars) or (ord(c) &lt; 32 and c != \" \") for c in f):\n                    raise ValueError(f\"Invalid character in string: {f}\")\n            f_path = Path(f).resolve()\n            if try_files and f_path.is_file():\n                if msg is not None:\n                    new_msg = str(msg).format(filename=f_path)\n                    log.info(new_msg)\n                for line in str_or_file(f):\n                    final_list[line] = None\n            else:\n                final_list[f] = None\n\n    ret = list(final_list)\n    if remove_blank:\n        ret = [r for r in ret if r]\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.charset","title":"charset","text":"<pre><code>charset(p)\n</code></pre> <p>Determine the character set of the given string based on the types of characters it contains.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str</code>)           \u2013            <p>The string whose character set is to be determined.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>A bitmask representing the types of characters present in the string. 
- CHAR_LOWER = 1: Lowercase alphabets - CHAR_UPPER = 2: Uppercase alphabets - CHAR_DIGIT = 4: Digits - CHAR_SYMBOL = 8: Symbols/Special characters</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; charset('abc')\n1\n</code></pre> <pre><code>&gt;&gt;&gt; charset('abcABC')\n3\n</code></pre> <pre><code>&gt;&gt;&gt; charset('abc123')\n5\n</code></pre> <pre><code>&gt;&gt;&gt; charset('!abc123')\n13\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def charset(p):\n    \"\"\"\n    Determine the character set of the given string based on the types of characters it contains.\n\n    Args:\n        p (str): The string whose character set is to be determined.\n\n    Returns:\n        int: A bitmask representing the types of characters present in the string.\n            - CHAR_LOWER = 1: Lowercase alphabets\n            - CHAR_UPPER = 2: Uppercase alphabets\n            - CHAR_DIGIT = 4: Digits\n            - CHAR_SYMBOL = 8: Symbols/Special characters\n\n    Examples:\n        &gt;&gt;&gt; charset('abc')\n        1\n\n        &gt;&gt;&gt; charset('abcABC')\n        3\n\n        &gt;&gt;&gt; charset('abc123')\n        5\n\n        &gt;&gt;&gt; charset('!abc123')\n        13\n    \"\"\"\n    ret = 0\n    for c in p:\n        if c.islower():\n            ret |= CHAR_LOWER\n        elif c.isupper():\n            ret |= CHAR_UPPER\n        elif c.isnumeric():\n            ret |= CHAR_DIGIT\n        else:\n            ret |= CHAR_SYMBOL\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_dict","title":"clean_dict","text":"<pre><code>clean_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None)\n</code></pre> <p>Recursively clean unwanted keys from a dictionary. Useful for removing secrets from a config.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>dict</code>)           \u2013            <p>The input dictionary.</p> </li> <li> <code>*key_names</code>           \u2013            <p>Names of keys to remove.</p> </li> <li> <code>fuzzy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform fuzzy matching on keys.</p> </li> <li> <code>exclude_keys</code>               (<code>(list, None)</code>, default:                   <code>None</code> )           \u2013            <p>List of keys to be excluded from removal.</p> </li> <li> <code>_prev_key</code>               (<code>(str, None)</code>, default:                   <code>None</code> )           \u2013            <p>For internal recursive use; the previous key in the hierarchy.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary cleaned of the keys specified in key_names.</p> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None):\n    \"\"\"\n    Recursively clean unwanted keys from a dictionary.\n    Useful for removing secrets from a config.\n\n    Args:\n        d (dict): The input dictionary.\n        *key_names: Names of keys to remove.\n        fuzzy (bool): Whether to perform fuzzy matching on keys.\n        exclude_keys (list, None): List of keys to be excluded from removal.\n        _prev_key (str, None): For internal recursive use; the previous key in the hierarchy.\n\n    Returns:\n        dict: A dictionary cleaned of the keys specified in key_names.\n\n    \"\"\"\n    if exclude_keys is None:\n        
exclude_keys = []\n    if isinstance(exclude_keys, str):\n        exclude_keys = [exclude_keys]\n    d = copy.deepcopy(d)\n    if isinstance(d, dict):\n        for key, val in list(d.items()):\n            if key in key_names or (fuzzy and any(k in key for k in key_names)):\n                if _prev_key not in exclude_keys:\n                    d.pop(key)\n                    continue\n            d[key] = clean_dict(val, *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys)\n    return d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_dns_record","title":"clean_dns_record","text":"<pre><code>clean_dns_record(record)\n</code></pre> <p>Cleans and formats a given DNS record for further processing.</p> <p>This static method converts the DNS record to text format if it's not already a string. It also removes any trailing dots and converts the record to lowercase.</p> <p>Parameters:</p> <ul> <li> <code>record</code>               (<code>str or Rdata</code>)           \u2013            <p>The DNS record to clean.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The cleaned and formatted DNS record.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; clean_dns_record('www.evilcorp.com.')\n'www.evilcorp.com'\n</code></pre> <pre><code>&gt;&gt;&gt; from dns.rrset import from_text\n&gt;&gt;&gt; record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0]\n&gt;&gt;&gt; clean_dns_record(record)\n'1.2.3.4'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_dns_record(record):\n    \"\"\"\n    Cleans and formats a given DNS record for further processing.\n\n    This static method converts the DNS record to text format if it's not already a string.\n    It also removes any trailing dots and converts the record to lowercase.\n\n    Args:\n        record (str or dns.rdata.Rdata): The DNS record to clean.\n\n    Returns:\n        str: The cleaned and formatted DNS record.\n\n    Examples:\n        &gt;&gt;&gt; clean_dns_record('www.evilcorp.com.')\n        'www.evilcorp.com'\n\n        &gt;&gt;&gt; from dns.rrset import from_text\n        &gt;&gt;&gt; record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0]\n        &gt;&gt;&gt; clean_dns_record(record)\n        '1.2.3.4'\n    \"\"\"\n    if not isinstance(record, str):\n        record = str(record.to_text())\n    return str(record).rstrip(\".\").lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_old","title":"clean_old","text":"<pre><code>clean_old(d, keep=10, filter=lambda x: True, key=latest_mtime, reverse=True, raise_error=False)\n</code></pre> <p>Clean up old files and directories within a given directory based on various filtering and sorting options.</p> <p>This function removes the oldest files and directories in the provided directory 'd' that exceed a specified threshold ('keep'). The items to be deleted can be filtered using a lambda function 'filter', and they are sorted by a key function, defaulting to latest modification time.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to clean up.</p> </li> <li> <code>keep</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The number of items to keep. 
Ones beyond this count will be removed.</p> </li> <li> <code>filter</code>               (<code>Callable</code>, default:                   <code>lambda x: True</code> )           \u2013            <p>A lambda function for filtering which files or directories to consider.                Defaults to a lambda function that returns True for all.</p> </li> <li> <code>key</code>               (<code>Callable</code>, default:                   <code>latest_mtime</code> )           \u2013            <p>A function to sort the files and directories. Defaults to latest modification time.</p> </li> <li> <code>reverse</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to reverse the order of sorted items before removing. Defaults to True.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to raise an error if directory deletion fails. Defaults to False.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; clean_old(\"~/.bbot/scans\", filter=lambda x: x.is_dir() and scan_name_regex.match(x.name))\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_old(d, keep=10, filter=lambda x: True, key=latest_mtime, reverse=True, raise_error=False):\n    \"\"\"Clean up old files and directories within a given directory based on various filtering and sorting options.\n\n    This function removes the oldest files and directories in the provided directory 'd' that exceed a specified\n    threshold ('keep'). The items to be deleted can be filtered using a lambda function 'filter', and they are\n    sorted by a key function, defaulting to latest modification time.\n\n    Args:\n        d (str or Path): The directory path to clean up.\n        keep (int): The number of items to keep. Ones beyond this count will be removed.\n        filter (Callable): A lambda function for filtering which files or directories to consider.\n                           Defaults to a lambda function that returns True for all.\n        key (Callable): A function to sort the files and directories. Defaults to latest modification time.\n        reverse (bool): Whether to reverse the order of sorted items before removing. Defaults to True.\n        raise_error (bool): Whether to raise an error if directory deletion fails. Defaults to False.\n\n    Examples:\n        &gt;&gt;&gt; clean_old(\"~/.bbot/scans\", filter=lambda x: x.is_dir() and scan_name_regex.match(x.name))\n    \"\"\"\n    d = Path(d)\n    if not d.is_dir():\n        return\n    paths = [x for x in d.iterdir() if filter(x)]\n    paths.sort(key=key, reverse=reverse)\n    for path in paths[keep:]:\n        try:\n            log.debug(f\"Removing {path}\")\n            rm_rf(path)\n        except Exception as e:\n            msg = f\"Failed to delete directory: {path}, {e}\"\n            if raise_error:\n                raise errors.DirectoryDeletionError()\n            log.warning(msg)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.closest_match","title":"closest_match","text":"<pre><code>closest_match(s, choices, n=1, cutoff=0.0)\n</code></pre> <p>Finds the closest matching strings from a list of choices based on a given string.</p> <p>This function uses the difflib library to find the closest matches to a given string <code>s</code> from a list of <code>choices</code>. 
It can return either the single best match or a list of the top <code>n</code> best matches.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string for which to find the closest match.</p> </li> <li> <code>choices</code>               (<code>list</code>)           \u2013            <p>A list of strings to compare against.</p> </li> <li> <code>n</code>               (<code>int</code>, default:                   <code>1</code> )           \u2013            <p>The number of best matches to return. Defaults to 1.</p> </li> <li> <code>cutoff</code>               (<code>float</code>, default:                   <code>0.0</code> )           \u2013            <p>A float value that defines the similarity threshold. Strings with similarity below this value are not considered. Defaults to 0.0.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>str or list: Either the closest matching string or a list of the <code>n</code> closest matching strings.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\"])\n'asd'\n&gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\", \"asdff\"], n=3)\n['asdff', 'asd', 'fds']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def closest_match(s, choices, n=1, cutoff=0.0):\n    \"\"\"Finds the closest matching strings from a list of choices based on a given string.\n\n    This function uses the difflib library to find the closest matches to a given string `s` from a list of `choices`.\n    It can return either the single best match or a list of the top `n` best matches.\n\n    Args:\n        s (str): The string for which to find the closest match.\n        choices (list): A list of strings to compare against.\n        n (int, optional): The number of best matches to return. Defaults to 1.\n        cutoff (float, optional): A float value that defines the similarity threshold. Strings with similarity below this value are not considered. 
Defaults to 0.0.\n\n    Returns:\n        str or list: Either the closest matching string or a list of the `n` closest matching strings.\n\n    Examples:\n        &gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\"])\n        'asd'\n        &gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\", \"asdff\"], n=3)\n        ['asdff', 'asd', 'fds']\n    \"\"\"\n    import difflib\n\n    matches = difflib.get_close_matches(s, choices, n=n, cutoff=cutoff)\n    if not choices or not matches:\n        return\n    if n == 1:\n        return matches[0]\n    return matches\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cloudcheck","title":"cloudcheck","text":"<pre><code>cloudcheck(ip)\n</code></pre> <p>Check whether an IP address belongs to a cloud provider and returns the provider name, type, and subnet.</p> <p>Parameters:</p> <ul> <li> <code>ip</code>               (<code>str</code>)           \u2013            <p>The IP address to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing provider name (str), provider type (str), and subnet (IPv4Network).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; cloudcheck(\"168.62.20.37\")\n('Azure', 'cloud', IPv4Network('168.62.0.0/19'))\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cloudcheck(ip):\n    \"\"\"\n    Check whether an IP address belongs to a cloud provider and returns the provider name, type, and subnet.\n\n    Args:\n        ip (str): The IP address to check.\n\n    Returns:\n        tuple: A tuple containing provider name (str), provider type (str), and subnet (IPv4Network).\n\n    Examples:\n        &gt;&gt;&gt; cloudcheck(\"168.62.20.37\")\n        ('Azure', 'cloud', IPv4Network('168.62.0.0/19'))\n    \"\"\"\n    import cloudcheck as _cloudcheck\n\n    return _cloudcheck.check(ip)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cpu_architecture","title":"cpu_architecture","text":"<pre><code>cpu_architecture()\n</code></pre> <p>Return the CPU architecture of the current system.</p> <p>This function fetches and returns the architecture type of the CPU where the code is being executed. 
It maps common identifiers like \"x86_64\" to more general types like \"amd64\".</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the CPU architecture, such as \"amd64\", \"armv7\", or \"arm64\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; cpu_architecture()\n'amd64'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cpu_architecture():\n    \"\"\"Return the CPU architecture of the current system.\n\n    This function fetches and returns the architecture type of the CPU where the code is being executed.\n    It maps common identifiers like \"x86_64\" to more general types like \"amd64\".\n\n    Returns:\n        str: A string representing the CPU architecture, such as \"amd64\", \"armv7\", or \"arm64\".\n\n    Examples:\n        &gt;&gt;&gt; cpu_architecture()\n        'amd64'\n    \"\"\"\n    import platform\n\n    uname = platform.uname()\n    arch = uname.machine.lower()\n    if arch.startswith(\"aarch\"):\n        return \"arm64\"\n    elif arch == \"x86_64\":\n        return \"amd64\"\n    return arch\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.delete_file","title":"delete_file","text":"<pre><code>delete_file(path)\n</code></pre> <p>Deletes a file at the given path.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to be deleted.</p> </li> </ul> Note <p>This function suppresses all exceptions to ensure that the program continues running even if the file could not be deleted.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; delete_file(\"/tmp/test/file1.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def delete_file(path):\n    \"\"\"Deletes a file at the given path.\n\n    Args:\n        path (str or Path): The path to the file to be deleted.\n\n    Note:\n        This function suppresses all exceptions to ensure that the program continues running even if the file could not be deleted.\n\n    Examples:\n        &gt;&gt;&gt; delete_file(\"/tmp/test/file1.txt\")\n    \"\"\"\n    with suppress(Exception):\n        Path(path).unlink(missing_ok=True)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.domain_parents","title":"domain_parents","text":"<pre><code>domain_parents(d, include_self=False)\n</code></pre> <p>Generate a list of parent domains for a given domain string.</p> <p>This function takes an input string <code>d</code> and generates a list of parent domains in decreasing order of specificity. If <code>include_self</code> is set to True, the list will also include the input domain if it is not a top-level domain.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string representing a domain or subdomain.</p> </li> <li> <code>include_self</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include the input domain itself. 
Defaults to False.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Parent domains of the input string in decreasing order of specificity.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(domain_parents(\"test.www.evilcorp.co.uk\"))\n[\"www.evilcorp.co.uk\", \"evilcorp.co.uk\"]\n</code></pre> Notes <ul> <li>Port, if present in input, is preserved in the output.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def domain_parents(d, include_self=False):\n    \"\"\"\n    Generate a list of parent domains for a given domain string.\n\n    This function takes an input string `d` and generates a list of parent domains in decreasing order of specificity.\n    If `include_self` is set to True, the list will also include the input domain if it is not a top-level domain.\n\n    Args:\n        d (str): The input string representing a domain or subdomain.\n        include_self (bool, optional): Whether to include the input domain itself. Defaults to False.\n\n    Yields:\n        str: Parent domains of the input string in decreasing order of specificity.\n\n    Examples:\n        &gt;&gt;&gt; list(domain_parents(\"test.www.evilcorp.co.uk\"))\n        [\"www.evilcorp.co.uk\", \"evilcorp.co.uk\"]\n\n    Notes:\n        - Port, if present in input, is preserved in the output.\n    \"\"\"\n\n    parent = str(d)\n    if include_self and not is_domain(parent):\n        yield parent\n    while 1:\n        parent = parent_domain(parent)\n        if is_subdomain(parent):\n            yield parent\n            continue\n        elif is_domain(parent):\n            yield parent\n        break\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.domain_stem","title":"domain_stem","text":"<pre><code>domain_stem(domain)\n</code></pre> <p>Returns an abbreviated representation of the hostname by removing the TLD (Top-Level Domain).</p> <p>Parameters:</p> <ul> <li> <code>domain</code>               (<code>str</code>)           \u2013            <p>The full domain name to be abbreviated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>An abbreviated domain string without the TLD.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; domain_stem(\"www.evilcorp.com\")\n\"www.evilcorp\"\n</code></pre> Notes <ul> <li>Utilizes the <code>tldextract</code> function for domain parsing.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def domain_stem(domain):\n    \"\"\"\n    Returns an abbreviated representation of the hostname by removing the TLD (Top-Level Domain).\n\n    Args:\n        domain (str): The full domain name to be abbreviated.\n\n    Returns:\n        str: An abbreviated domain string without the TLD.\n\n    Examples:\n        &gt;&gt;&gt; domain_stem(\"www.evilcorp.com\")\n        \"www.evilcorp\"\n\n    Notes:\n        - Utilizes the `tldextract` function for domain parsing.\n    \"\"\"\n    parsed = tldextract(str(domain))\n    return f\".\".join(parsed.subdomain.split(\".\") + parsed.domain.split(\".\")).strip(\".\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.execute_sync_or_async","title":"execute_sync_or_async  <code>async</code>","text":"<pre><code>execute_sync_or_async(callback, *args, **kwargs)\n</code></pre> <p>Execute a function or coroutine, handling either synchronous or asynchronous invocation.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>Union[Callable, Coroutine]</code>)    
       \u2013            <p>The function or coroutine to execute.</p> </li> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the callback.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the callback.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Any</code>          \u2013            <p>The return value from the executed function or coroutine.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def foo_async(x):\n...     return x + 1\n&gt;&gt;&gt; def foo_sync(x):\n...     return x + 1\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_async, 1))\n2\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_sync, 1))\n2\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def execute_sync_or_async(callback, *args, **kwargs):\n    \"\"\"\n    Execute a function or coroutine, handling either synchronous or asynchronous invocation.\n\n    Args:\n        callback (Union[Callable, Coroutine]): The function or coroutine to execute.\n        *args: Variable-length argument list to pass to the callback.\n        **kwargs: Arbitrary keyword arguments to pass to the callback.\n\n    Returns:\n        Any: The return value from the executed function or coroutine.\n\n    Examples:\n        &gt;&gt;&gt; async def foo_async(x):\n        ...     return x + 1\n        &gt;&gt;&gt; def foo_sync(x):\n        ...     return x + 1\n\n        &gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_async, 1))\n        2\n\n        &gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_sync, 1))\n        2\n    \"\"\"\n    if is_async_function(callback):\n        return await callback(*args, **kwargs)\n    else:\n        return callback(*args, **kwargs)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_emails","title":"extract_emails","text":"<pre><code>extract_emails(s)\n</code></pre> <p>Extract email addresses from a body of text</p> <p>This function takes in a string and yields all email addresses found in it. The emails are converted to lower case before yielding. It utilizes regular expressions for email pattern matching.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string from which to extract email addresses.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Yields email addresses found in the input string, in lower case.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(extract_emails(\"Contact us at info@evilcorp.com and support@evilcorp.com\"))\n['info@evilcorp.com', 'support@evilcorp.com']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_emails(s):\n    \"\"\"\n    Extract email addresses from a body of text\n\n    This function takes in a string and yields all email addresses found in it.\n    The emails are converted to lower case before yielding. 
It utilizes\n    regular expressions for email pattern matching.\n\n    Args:\n        s (str): The input string from which to extract email addresses.\n\n    Yields:\n        str: Yields email addresses found in the input string, in lower case.\n\n    Examples:\n        &gt;&gt;&gt; list(extract_emails(\"Contact us at info@evilcorp.com and support@evilcorp.com\"))\n        ['info@evilcorp.com', 'support@evilcorp.com']\n    \"\"\"\n    for email in bbot_regexes.email_regex.findall(smart_decode(s)):\n        yield email.lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_host","title":"extract_host","text":"<pre><code>extract_host(s)\n</code></pre> <p>Attempts to find and extract the host portion of a string.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string from which to extract the host.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing three strings:    (hostname (None if not found), string_before_hostname, string_after_hostname).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_host(\"evilcorp.com:80\")\n(\"evilcorp.com\", \"\", \":80\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"http://evilcorp.com:80/asdf.php?a=b\")\n(\"evilcorp.com\", \"http://\", \":80/asdf.php?a=b\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"bob@evilcorp.com\")\n(\"evilcorp.com\", \"bob@\", \"\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"[dead::beef]:22\")\n(\"dead::beef\", \"[\", \"]:22\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"ftp://username:password@my-ftp.com/my-file.csv\")\n(\n    \"my-ftp.com\",\n    \"ftp://username:password@\",\n    \"/my-file.csv\",\n)\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_host(s):\n    \"\"\"\n    Attempts to find and extract the host portion of a string.\n\n    Args:\n        s (str): The string from which to extract the host.\n\n    Returns:\n        tuple: A tuple containing three strings:\n               (hostname (None if not found), string_before_hostname, string_after_hostname).\n\n    Examples:\n        &gt;&gt;&gt; extract_host(\"evilcorp.com:80\")\n        (\"evilcorp.com\", \"\", \":80\")\n\n        &gt;&gt;&gt; extract_host(\"http://evilcorp.com:80/asdf.php?a=b\")\n        (\"evilcorp.com\", \"http://\", \":80/asdf.php?a=b\")\n\n        &gt;&gt;&gt; extract_host(\"bob@evilcorp.com\")\n        (\"evilcorp.com\", \"bob@\", \"\")\n\n        &gt;&gt;&gt; extract_host(\"[dead::beef]:22\")\n        (\"dead::beef\", \"[\", \"]:22\")\n\n        &gt;&gt;&gt; extract_host(\"ftp://username:password@my-ftp.com/my-file.csv\")\n        (\n            \"my-ftp.com\",\n            \"ftp://username:password@\",\n            \"/my-file.csv\",\n        )\n    \"\"\"\n    s = smart_decode(s)\n    match = bbot_regexes.extract_host_regex.search(s)\n\n    if match:\n        hostname = match.group(1)\n        before = s[: match.start(1)]\n        after = s[match.end(1) :]\n        host, port = split_host_port(hostname)\n        netloc = make_netloc(host, port)\n        if netloc != hostname:\n            # invalid host / port\n            return (None, s, \"\")\n        if host is not None:\n            if port is not None:\n                after = f\":{port}{after}\"\n            if is_ip(host, version=6) and hostname.startswith(\"[\"):\n                before = f\"{before}[\"\n                after = f\"]{after}\"\n 
           hostname = str(host)\n        return (hostname, before, after)\n\n    return (None, s, \"\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_params_json","title":"extract_params_json","text":"<pre><code>extract_params_json(json_data, compare_mode='getparam')\n</code></pre> <p>Extracts key-value pairs from a JSON object and returns them as a set of tuples. Used by the <code>paramminer_headers</code> module.</p> <p>Parameters:</p> <ul> <li> <code>json_data</code>               (<code>str</code>)           \u2013            <p>JSON-formatted string containing key-value pairs.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of tuples containing the keys and their corresponding values present in the JSON object.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_params_json('{\"a\": 1, \"b\": {\"c\": 2}}')\n{('a', 1), ('b', {'c': 2}), ('c', 2)}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_params_json(json_data, compare_mode=\"getparam\"):\n    \"\"\"\n    Extracts key-value pairs from a JSON object and returns them as a set of tuples. Used by the `paramminer_headers` module.\n\n    Args:\n        json_data (str): JSON-formatted string containing key-value pairs.\n\n    Returns:\n        set: A set of tuples containing the keys and their corresponding values present in the JSON object.\n\n    Raises:\n        Returns an empty set if JSONDecodeError occurs.\n\n    Examples:\n        &gt;&gt;&gt; extract_params_json('{\"a\": 1, \"b\": {\"c\": 2}}')\n        {('a', 1), ('b', {'c': 2}), ('c', 2)}\n    \"\"\"\n    try:\n        data = json.loads(json_data)\n    except json.JSONDecodeError:\n        return set()\n\n    key_value_pairs = set()\n    stack = [(data, \"\")]\n\n    while stack:\n        current_data, path = stack.pop()\n        if isinstance(current_data, dict):\n            for key, value in current_data.items():\n                full_key = f\"{path}.{key}\" if path else key\n                if isinstance(value, dict):\n                    stack.append((value, full_key))\n                elif isinstance(value, list):\n                    stack.append((value, full_key))\n                else:\n                    if validate_parameter(full_key, compare_mode):\n                        key_value_pairs.add((full_key, value))\n        elif isinstance(current_data, list):\n            for item in current_data:\n                if isinstance(item, (dict, list)):\n                    stack.append((item, path))\n    return key_value_pairs\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_params_xml","title":"extract_params_xml","text":"<pre><code>extract_params_xml(xml_data, compare_mode='getparam')\n</code></pre> <p>Extracts tags and their text values from an XML object and returns them as a set of tuples.</p> <p>Parameters:</p> <ul> <li> <code>xml_data</code>               (<code>str</code>)           \u2013            <p>XML-formatted string containing elements.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of tuples containing the tags and their corresponding text values present in the XML object.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_params_xml('&lt;root&gt;&lt;child1&gt;&lt;child2&gt;value&lt;/child2&gt;&lt;/child1&gt;&lt;/root&gt;')\n{('root', None), ('child1', None), ('child2', 'value')}\n</code></pre> Source code in 
<code>bbot/core/helpers/misc.py</code> <pre><code>def extract_params_xml(xml_data, compare_mode=\"getparam\"):\n    \"\"\"\n    Extracts tags and their text values from an XML object and returns them as a set of tuples.\n\n    Args:\n        xml_data (str): XML-formatted string containing elements.\n\n    Returns:\n        set: A set of tuples containing the tags and their corresponding text values present in the XML object.\n\n    Raises:\n        Returns an empty set if ParseError occurs.\n\n    Examples:\n        &gt;&gt;&gt; extract_params_xml('&lt;root&gt;&lt;child1&gt;&lt;child2&gt;value&lt;/child2&gt;&lt;/child1&gt;&lt;/root&gt;')\n        {('root', None), ('child1', None), ('child2', 'value')}\n    \"\"\"\n    import xml.etree.ElementTree as ET\n\n    try:\n        root = ET.fromstring(xml_data)\n    except ET.ParseError:\n        return set()\n\n    tag_value_pairs = set()\n    stack = [root]\n\n    while stack:\n        current_element = stack.pop()\n        if validate_parameter(current_element.tag, compare_mode):\n            tag_value_pairs.add((current_element.tag, current_element.text))\n        for child in current_element:\n            stack.append(child)\n    return tag_value_pairs\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_words","title":"extract_words","text":"<pre><code>extract_words(data, acronyms=True, wordninja=True, model=None, max_length=100, word_regexes=None)\n</code></pre> <p>Intelligently extracts words from given data.</p> <p>This function uses regular expressions and optionally wordninja to extract words from a given text string. Thanks to wordninja it can handle concatenated words intelligently.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The data from which words are to be extracted.</p> </li> <li> <code>acronyms</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include acronyms. Defaults to True.</p> </li> <li> <code>wordninja</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to use the wordninja library to split concatenated words. Defaults to True.</p> </li> <li> <code>model</code>               (<code>object</code>, default:                   <code>None</code> )           \u2013            <p>A custom wordninja model for special types of data such as DNS names.</p> </li> <li> <code>max_length</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>Maximum length for a word to be included. Defaults to 100.</p> </li> <li> <code>word_regexes</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>A list of compiled regular expression objects for word extraction. 
Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of extracted words.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_words('blacklanternsecurity')\n{'black', 'lantern', 'security', 'bls', 'blacklanternsecurity'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_words(data, acronyms=True, wordninja=True, model=None, max_length=100, word_regexes=None):\n    \"\"\"Intelligently extracts words from given data.\n\n    This function uses regular expressions and optionally wordninja to extract words\n    from a given text string. Thanks to wordninja it can handle concatenated words intelligently.\n\n    Args:\n        data (str): The data from which words are to be extracted.\n        acronyms (bool, optional): Whether to include acronyms. Defaults to True.\n        wordninja (bool, optional): Whether to use the wordninja library to split concatenated words. Defaults to True.\n        model (object, optional): A custom wordninja model for special types of data such as DNS names.\n        max_length (int, optional): Maximum length for a word to be included. Defaults to 100.\n        word_regexes (list, optional): A list of compiled regular expression objects for word extraction. Defaults to None.\n\n    Returns:\n        set: A set of extracted words.\n\n    Examples:\n        &gt;&gt;&gt; extract_words('blacklanternsecurity')\n        {'black', 'lantern', 'security', 'bls', 'blacklanternsecurity'}\n    \"\"\"\n    import wordninja as _wordninja\n\n    if word_regexes is None:\n        word_regexes = bbot_regexes.word_regexes\n    words = set()\n    data = smart_decode(data)\n    for r in word_regexes:\n        for word in set(r.findall(data)):\n            # blacklanternsecurity\n            if len(word) &lt;= max_length:\n                words.add(word)\n\n    # blacklanternsecurity --&gt; ['black', 'lantern', 'security']\n    # max_slice_length = 3\n    for word in list(words):\n        if wordninja:\n            if model is None:\n                model = _wordninja\n            subwords = model.split(word)\n            for subword in subwords:\n                words.add(subword)\n        # this section generates compound words\n        # it is interesting but currently disabled the quality of its output doesn't quite justify its quantity\n        # blacklanternsecurity --&gt; ['black', 'lantern', 'security', 'blacklantern', 'lanternsecurity']\n        # for s, e in combinations(range(len(subwords) + 1), 2):\n        #    if e - s &lt;= max_slice_length:\n        #        subword_slice = \"\".join(subwords[s:e])\n        #        words.add(subword_slice)\n        # blacklanternsecurity --&gt; bls\n        if acronyms:\n            if len(subwords) &gt; 1:\n                words.add(\"\".join([c[0] for c in subwords if len(c) &gt; 0]))\n\n    return words\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.filesize","title":"filesize","text":"<pre><code>filesize(f)\n</code></pre> <p>Get the file size of a given file.</p> <p>This function takes a file path as an argument and returns its size in bytes. 
If the path does not point to a file, the function returns 0.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str or Path</code>)           \u2013            <p>The file path for which to get the size.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The size of the file in bytes, or 0 if the path does not point to a file.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filesize(\"/path/to/file.txt\")\n1024\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def filesize(f):\n    \"\"\"Get the file size of a given file.\n\n    This function takes a file path as an argument and returns its size in bytes. If the path\n    does not point to a file, the function returns 0.\n\n    Args:\n        f (str or Path): The file path for which to get the size.\n\n    Returns:\n        int: The size of the file in bytes, or 0 if the path does not point to a file.\n\n    Examples:\n        &gt;&gt;&gt; filesize(\"/path/to/file.txt\")\n        1024\n    \"\"\"\n    f = Path(f)\n    if f.is_file():\n        return f.stat().st_size\n    return 0\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.filter_dict","title":"filter_dict","text":"<pre><code>filter_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None)\n</code></pre> <p>Recursively filter a dictionary based on key names.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>dict</code>)           \u2013            <p>The input dictionary.</p> </li> <li> <code>*key_names</code>           \u2013            <p>Names of keys to filter for.</p> </li> <li> <code>fuzzy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform fuzzy matching on keys.</p> </li> <li> <code>exclude_keys</code>               (<code>(list, None)</code>, default:                   <code>None</code> )           \u2013            <p>List of keys to be excluded from the final dict.</p> </li> <li> <code>_prev_key</code>               (<code>(str, None)</code>, default:                   <code>None</code> )           \u2013            <p>For internal recursive use; the previous key in the hierarchy.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary containing only the keys specified in key_names.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": \"asdf\"}, \"key2\")\n{\"key2\": \"asdf\"}\n&gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": {\"key3\": \"asdf\"}}, \"key1\", \"key3\", exclude_keys=\"key2\")\n{'key1': 'test'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def filter_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None):\n    \"\"\"\n    Recursively filter a dictionary based on key names.\n\n    Args:\n        d (dict): The input dictionary.\n        *key_names: Names of keys to filter for.\n        fuzzy (bool): Whether to perform fuzzy matching on keys.\n        exclude_keys (list, None): List of keys to be excluded from the final dict.\n        _prev_key (str, None): For internal recursive use; the previous key in the hierarchy.\n\n    Returns:\n        dict: A dictionary containing only the keys specified in key_names.\n\n    Examples:\n        &gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": \"asdf\"}, \"key2\")\n        {\"key2\": \"asdf\"}\n        &gt;&gt;&gt; filter_dict({\"key1\": 
\"test\", \"key2\": {\"key3\": \"asdf\"}}, \"key1\", \"key3\", exclude_keys=\"key2\")\n        {'key1': 'test'}\n    \"\"\"\n    if exclude_keys is None:\n        exclude_keys = []\n    if isinstance(exclude_keys, str):\n        exclude_keys = [exclude_keys]\n    ret = {}\n    if isinstance(d, dict):\n        for key in d:\n            if key in key_names or (fuzzy and any(k in key for k in key_names)):\n                if not any(k in exclude_keys for k in [key, _prev_key]):\n                    ret[key] = copy.deepcopy(d[key])\n            elif isinstance(d[key], list) or isinstance(d[key], dict):\n                child = filter_dict(d[key], *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys)\n                if child:\n                    ret[key] = child\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.gen_numbers","title":"gen_numbers","text":"<pre><code>gen_numbers(n, padding=2)\n</code></pre> <p>Generates numbers with variable padding and returns them as a set of strings.</p> <p>Parameters:</p> <ul> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The upper limit of numbers to generate, exclusive.</p> </li> <li> <code>padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>The maximum number of digits to pad the numbers with. Defaults to 2.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of string representations of numbers with varying degrees of padding.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; gen_numbers(5)\n{'0', '00', '01', '02', '03', '04', '1', '2', '3', '4'}\n</code></pre> <pre><code>&gt;&gt;&gt; gen_numbers(3, padding=3)\n{'0', '00', '000', '001', '002', '01', '02', '1', '2'}\n</code></pre> <pre><code>&gt;&gt;&gt; gen_numbers(5, padding=1)\n{'0', '1', '2', '3', '4'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def gen_numbers(n, padding=2):\n    \"\"\"Generates numbers with variable padding and returns them as a set of strings.\n\n    Args:\n        n (int): The upper limit of numbers to generate, exclusive.\n        padding (int, optional): The maximum number of digits to pad the numbers with. 
Defaults to 2.\n\n    Returns:\n        set: A set of string representations of numbers with varying degrees of padding.\n\n    Examples:\n        &gt;&gt;&gt; gen_numbers(5)\n        {'0', '00', '01', '02', '03', '04', '1', '2', '3', '4'}\n\n        &gt;&gt;&gt; gen_numbers(3, padding=3)\n        {'0', '00', '000', '001', '002', '01', '02', '1', '2'}\n\n        &gt;&gt;&gt; gen_numbers(5, padding=1)\n        {'0', '1', '2', '3', '4'}\n    \"\"\"\n    results = set()\n    for i in range(n):\n        for p in range(1, padding + 1):\n            results.add(str(i).zfill(p))\n    return results\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_closest_match","title":"get_closest_match","text":"<pre><code>get_closest_match(s, choices, msg=None)\n</code></pre> <p>Finds the closest match from a list of choices for a given string.</p> <p>This function is particularly useful for CLI applications where you want to validate flags or modules.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string for which to find the closest match.</p> </li> <li> <code>choices</code>               (<code>list</code>)           \u2013            <p>A list of strings to compare against.</p> </li> <li> <code>msg</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Additional message to prepend in the warning message. Defaults to None.</p> </li> <li> <code>loglevel</code>               (<code>str</code>)           \u2013            <p>The log level to use for the warning message. Defaults to \"HUGEWARNING\".</p> </li> <li> <code>exitcode</code>               (<code>int</code>)           \u2013            <p>The exit code to use when exiting the program. Defaults to 2.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_closest_match(\"some_module\", [\"some_mod\", \"some_other_mod\"], msg=\"module\")\n# Output: Could not find module \"some_module\". Did you mean \"some_mod\"?\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_closest_match(s, choices, msg=None):\n    \"\"\"Finds the closest match from a list of choices for a given string.\n\n    This function is particularly useful for CLI applications where you want to validate flags or modules.\n\n    Args:\n        s (str): The string for which to find the closest match.\n        choices (list): A list of strings to compare against.\n        msg (str, optional): Additional message to prepend in the warning message. Defaults to None.\n        loglevel (str, optional): The log level to use for the warning message. Defaults to \"HUGEWARNING\".\n        exitcode (int, optional): The exit code to use when exiting the program. Defaults to 2.\n\n    Examples:\n        &gt;&gt;&gt; get_closest_match(\"some_module\", [\"some_mod\", \"some_other_mod\"], msg=\"module\")\n        # Output: Could not find module \"some_module\". Did you mean \"some_mod\"?\n    \"\"\"\n    if msg is None:\n        msg = \"\"\n    else:\n        msg += \" \"\n    closest = closest_match(s, choices)\n    return f'Could not find {msg}\"{s}\". 
Did you mean \"{closest}\"?'\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_exception_chain","title":"get_exception_chain","text":"<pre><code>get_exception_chain(e)\n</code></pre> <p>Retrieves the full chain of exceptions leading to the given exception.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception for which to get the chain.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>list[BaseException]: List of exceptions in the chain, from the given exception back to the root cause.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except ValueError as e:\n...     exc_chain = get_exception_chain(e)\n...     for exc in exc_chain:\n...         print(exc)\nThis is a value error\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_exception_chain(e):\n    \"\"\"\n    Retrieves the full chain of exceptions leading to the given exception.\n\n    Args:\n        e (BaseException): The exception for which to get the chain.\n\n    Returns:\n        list[BaseException]: List of exceptions in the chain, from the given exception back to the root cause.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except ValueError as e:\n        ...     exc_chain = get_exception_chain(e)\n        ...     for exc in exc_chain:\n        ...         print(exc)\n        This is a value error\n    \"\"\"\n    exception_chain = []\n    current_exception = e\n    while current_exception is not None:\n        exception_chain.append(current_exception)\n        current_exception = getattr(current_exception, \"__context__\", None)\n    return exception_chain\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_file_extension","title":"get_file_extension","text":"<pre><code>get_file_extension(s)\n</code></pre> <p>Extracts the file extension from a given string representing a URL or file path.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string from which to extract the file extension.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The file extension, or an empty string if no extension is found.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_file_extension(\"https://evilcorp.com/api/test.php\")\n\"php\"\n&gt;&gt;&gt; get_file_extension(\"/etc/test.conf\")\n\"conf\"\n&gt;&gt;&gt; get_file_extension(\"/etc/passwd\")\n\"\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_file_extension(s):\n    \"\"\"\n    Extracts the file extension from a given string representing a URL or file path.\n\n    Args:\n        s (str): The string from which to extract the file extension.\n\n    Returns:\n        str: The file extension, or an empty string if no extension is found.\n\n    Examples:\n        &gt;&gt;&gt; get_file_extension(\"https://evilcorp.com/api/test.php\")\n        \"php\"\n        &gt;&gt;&gt; get_file_extension(\"/etc/test.conf\")\n        \"conf\"\n        &gt;&gt;&gt; get_file_extension(\"/etc/passwd\")\n        \"\"\n    \"\"\"\n    s = str(s).lower().strip()\n    rightmost_section = s.rsplit(\"/\", 1)[-1]\n    if \".\" in rightmost_section:\n        extension = rightmost_section.rsplit(\".\", 1)[-1]\n        return extension\n    return 
\"\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_get_params","title":"get_get_params","text":"<pre><code>get_get_params(url)\n</code></pre> <p>Extract the query parameters from the given URL as a dictionary.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL from which to extract query parameters.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Dict[str, List[str]]: A dictionary containing the query parameters and their values.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;bar=2')\n{'foo': ['1'], 'bar': ['2']}\n</code></pre> <pre><code>&gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;foo=2')\n{'foo': ['1', '2']}\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def get_get_params(url):\n    \"\"\"\n    Extract the query parameters from the given URL as a dictionary.\n\n    Args:\n        url (Union[str, ParseResult]): The URL from which to extract query parameters.\n\n    Returns:\n        Dict[str, List[str]]: A dictionary containing the query parameters and their values.\n\n    Examples:\n        &gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;bar=2')\n        {'foo': ['1'], 'bar': ['2']}\n\n        &gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;foo=2')\n        {'foo': ['1', '2']}\n    \"\"\"\n    parsed = parse_url(url)\n    return dict(parse_qs(parsed.query))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_keys_in_dot_syntax","title":"get_keys_in_dot_syntax","text":"<pre><code>get_keys_in_dot_syntax(config)\n</code></pre> <p>Retrieve all keys in an OmegaConf configuration in dot notation.</p> <p>This function converts an OmegaConf configuration into a list of keys represented in dot notation.</p> <p>Parameters:</p> <ul> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>The OmegaConf configuration object.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>List[str]: A list of keys in dot notation.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; config = OmegaConf.create({\n...     \"web\": {\n...         \"test\": True\n...     },\n...     \"db\": {\n...         \"host\": \"localhost\",\n...         \"port\": 5432\n...     }\n... })\n&gt;&gt;&gt; get_keys_in_dot_syntax(config)\n['web.test', 'db.host', 'db.port']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_keys_in_dot_syntax(config):\n    \"\"\"Retrieve all keys in an OmegaConf configuration in dot notation.\n\n    This function converts an OmegaConf configuration into a list of keys\n    represented in dot notation.\n\n    Args:\n        config (DictConfig): The OmegaConf configuration object.\n\n    Returns:\n        List[str]: A list of keys in dot notation.\n\n    Examples:\n        &gt;&gt;&gt; config = OmegaConf.create({\n        ...     \"web\": {\n        ...         \"test\": True\n        ...     },\n        ...     \"db\": {\n        ...         \"host\": \"localhost\",\n        ...         \"port\": 5432\n        ...     }\n        ... 
})\n        &gt;&gt;&gt; get_keys_in_dot_syntax(config)\n        ['web.test', 'db.host', 'db.port']\n    \"\"\"\n    from omegaconf import OmegaConf\n\n    container = OmegaConf.to_container(config, resolve=True)\n    keys = []\n\n    def recursive_keys(d, parent_key=\"\"):\n        for k, v in d.items():\n            full_key = f\"{parent_key}.{k}\" if parent_key else k\n            if isinstance(v, dict):\n                recursive_keys(v, full_key)\n            else:\n                keys.append(full_key)\n\n    recursive_keys(container)\n    return keys\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_size","title":"get_size","text":"<pre><code>get_size(obj, max_depth=5, seen=None)\n</code></pre> <p>Roughly estimate the memory footprint of a Python object using recursion.</p> <p>Parameters:</p> <ul> <li> <code>obj</code>               (<code>any</code>)           \u2013            <p>The object whose size is to be determined.</p> </li> <li> <code>max_depth</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>Maximum depth to which nested objects will be inspected. Defaults to 5.</p> </li> <li> <code>seen</code>               (<code>set</code>, default:                   <code>None</code> )           \u2013            <p>Objects that have already been accounted for, to avoid loops.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>Approximate memory footprint of the object in bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_size(my_list)\n4200\n</code></pre> <pre><code>&gt;&gt;&gt; get_size(my_dict, max_depth=3)\n8400\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_size(obj, max_depth=5, seen=None):\n    \"\"\"\n    Roughly estimate the memory footprint of a Python object using recursion.\n\n    Parameters:\n        obj (any): The object whose size is to be determined.\n        max_depth (int, optional): Maximum depth to which nested objects will be inspected. 
Defaults to 5.\n        seen (set, optional): Objects that have already been accounted for, to avoid loops.\n\n    Returns:\n        int: Approximate memory footprint of the object in bytes.\n\n    Examples:\n        &gt;&gt;&gt; get_size(my_list)\n        4200\n\n        &gt;&gt;&gt; get_size(my_dict, max_depth=3)\n        8400\n    \"\"\"\n    from collections.abc import Mapping\n\n    # If seen is not provided, initialize an empty set\n    if seen is None:\n        seen = set()\n    # Get the id of the object\n    obj_id = id(obj)\n    # Decrease the maximum depth for the next recursion\n    new_max_depth = max_depth - 1\n    # If the object has already been seen or we've reached the maximum recursion depth, return 0\n    if obj_id in seen or new_max_depth &lt;= 0:\n        return 0\n    # Get the size of the object\n    size = sys.getsizeof(obj)\n    # Add the object's id to the set of seen objects\n    seen.add(obj_id)\n    # If the object has a __dict__ attribute, we want to measure its size\n    if hasattr(obj, \"__dict__\"):\n        # Iterate over the Method Resolution Order (MRO) of the class of the object\n        for cls in obj.__class__.__mro__:\n            # If the class's __dict__ contains a __dict__ key\n            if \"__dict__\" in cls.__dict__:\n                for k, v in obj.__dict__.items():\n                    size += get_size(k, new_max_depth, seen)\n                    size += get_size(v, new_max_depth, seen)\n                break\n    # If the object is a mapping (like a dictionary), we want to measure the size of its items\n    if isinstance(obj, Mapping):\n        with suppress(StopIteration):\n            k, v = next(iter(obj.items()))\n            size += (get_size(k, new_max_depth, seen) + get_size(v, new_max_depth, seen)) * len(obj)\n    # If the object is a container (like a list or tuple) but not a string or bytes-like object\n    elif isinstance(obj, (list, tuple, set)):\n        with suppress(StopIteration):\n            size += get_size(next(iter(obj)), new_max_depth, seen) * len(obj)\n    # If the object has __slots__, we want to measure the size of the attributes in __slots__\n    if hasattr(obj, \"__slots__\"):\n        size += sum(get_size(getattr(obj, s), new_max_depth, seen) for s in obj.__slots__ if hasattr(obj, s))\n    return size\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_traceback_details","title":"get_traceback_details","text":"<pre><code>get_traceback_details(e)\n</code></pre> <p>Retrieves detailed information from the traceback of an exception.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception for which to get traceback details.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing filename (str), line number (int), and function name (str) where the exception was raised.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except ValueError as e:\n...     filename, lineno, funcname = get_traceback_details(e)\n...     
print(f\"File: {filename}, Line: {lineno}, Function: {funcname}\")\nFile: &lt;stdin&gt;, Line: 2, Function: &lt;module&gt;\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_traceback_details(e):\n    \"\"\"\n    Retrieves detailed information from the traceback of an exception.\n\n    Args:\n        e (BaseException): The exception for which to get traceback details.\n\n    Returns:\n        tuple: A tuple containing filename (str), line number (int), and function name (str) where the exception was raised.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except ValueError as e:\n        ...     filename, lineno, funcname = get_traceback_details(e)\n        ...     print(f\"File: {filename}, Line: {lineno}, Function: {funcname}\")\n        File: &lt;stdin&gt;, Line: 2, Function: &lt;module&gt;\n    \"\"\"\n    import traceback\n\n    tb = traceback.extract_tb(e.__traceback__)\n    last_frame = tb[-1]  # Get the last frame in the traceback (the one where the exception was raised)\n    filename = last_frame.filename\n    lineno = last_frame.lineno\n    funcname = last_frame.name\n    return filename, lineno, funcname\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.grouper","title":"grouper","text":"<pre><code>grouper(iterable, n)\n</code></pre> <p>Grouper groups an iterable into chunks of a given size.</p> <p>Parameters:</p> <ul> <li> <code>iterable</code>               (<code>iterable</code>)           \u2013            <p>The iterable to be chunked.</p> </li> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The size of each chunk.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>iterator</code>          \u2013            <p>An iterator that produces lists of elements from the original iterable, each of length <code>n</code> or less.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(grouper('ABCDEFG', 3))\n[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def grouper(iterable, n):\n    \"\"\"\n    Grouper groups an iterable into chunks of a given size.\n\n    Args:\n        iterable (iterable): The iterable to be chunked.\n        n (int): The size of each chunk.\n\n    Returns:\n        iterator: An iterator that produces lists of elements from the original iterable, each of length `n` or less.\n\n    Examples:\n        &gt;&gt;&gt; list(grouper('ABCDEFG', 3))\n        [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]\n    \"\"\"\n    from itertools import islice\n\n    iterable = iter(iterable)\n    return iter(lambda: list(islice(iterable, n)), [])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.hash_url","title":"hash_url","text":"<pre><code>hash_url(url)\n</code></pre> <p>Hashes a URL for the purpose of cleaning or collapsing similar URLs.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL to be hashed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value of the cleaned URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com')\n-7448777882396416944\n</code></pre> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/1')\n-8101275613229735915\n</code></pre> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/2')\n-8101275613229735915\n</code></pre> 
Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def hash_url(url):\n    \"\"\"\n    Hashes a URL for the purpose of cleaning or collapsing similar URLs.\n\n    Args:\n        url (str): The URL to be hashed.\n\n    Returns:\n        int: The hash value of the cleaned URL.\n\n    Examples:\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com')\n        -7448777882396416944\n\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/1')\n        -8101275613229735915\n\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/2')\n        -8101275613229735915\n    \"\"\"\n    parsed = parse_url(url)\n    parsed = parsed._replace(fragment=\"\", query=\"\")\n    to_hash = [parsed.netloc]\n    for segment in parsed.path.split(\"/\"):\n        hash_segment = []\n        hash_segment.append(charset(segment))\n        hash_segment.append(param_type(segment))\n        dot_split = segment.split(\".\")\n        if len(dot_split) &gt; 1:\n            hash_segment.append(dot_split[-1])\n        else:\n            hash_segment.append(\"\")\n        to_hash.append(tuple(hash_segment))\n    return hash(tuple(to_hash))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.human_timedelta","title":"human_timedelta","text":"<pre><code>human_timedelta(d)\n</code></pre> <p>Convert a TimeDelta object into a human-readable string.</p> <p>This function takes a datetime.timedelta object and converts it into a string format that is easier to read and understand.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>timedelta</code>)           \u2013            <p>The TimeDelta object to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representation of the TimeDelta object in human-readable form.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from datetime import datetime\n&gt;&gt;&gt;\n&gt;&gt;&gt; start_time = datetime.now()\n&gt;&gt;&gt; end_time = datetime.now()\n&gt;&gt;&gt; elapsed_time = end_time - start_time\n&gt;&gt;&gt; human_timedelta(elapsed_time)\n'2 hours, 30 minutes, 15 seconds'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def human_timedelta(d):\n    \"\"\"Convert a TimeDelta object into a human-readable string.\n\n    This function takes a datetime.timedelta object and converts it into a string format that\n    is easier to read and understand.\n\n    Args:\n        d (datetime.timedelta): The TimeDelta object to convert.\n\n    Returns:\n        str: A string representation of the TimeDelta object in human-readable form.\n\n    Examples:\n        &gt;&gt;&gt; from datetime import datetime\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; start_time = datetime.now()\n        &gt;&gt;&gt; end_time = datetime.now()\n        &gt;&gt;&gt; elapsed_time = end_time - start_time\n        &gt;&gt;&gt; human_timedelta(elapsed_time)\n        '2 hours, 30 minutes, 15 seconds'\n    \"\"\"\n    hours, remainder = divmod(d.seconds, 3600)\n    minutes, seconds = divmod(remainder, 60)\n    result = []\n    if hours:\n        result.append(f\"{hours:,} hour\" + (\"s\" if hours &gt; 1 else \"\"))\n    if minutes:\n        result.append(f\"{minutes:,} minute\" + (\"s\" if minutes &gt; 1 else \"\"))\n    if seconds:\n        result.append(f\"{seconds:,} second\" + (\"s\" if seconds &gt; 1 else \"\"))\n    ret = \", \".join(result)\n    if not ret:\n        ret = \"0 seconds\"\n    return 
ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.human_to_bytes","title":"human_to_bytes","text":"<pre><code>human_to_bytes(filesize)\n</code></pre> <p>Convert a human-readable file size string to its bytes equivalent.</p> <p>This function takes a human-readable file size string, such as \"2.5GB\", and converts it to its equivalent number of bytes.</p> <p>Parameters:</p> <ul> <li> <code>filesize</code>               (<code>str or int</code>)           \u2013            <p>The human-readable file size string or integer bytes value to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The number of bytes equivalent to the input human-readable file size.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string cannot be converted to bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; human_to_bytes(\"23.23gb\")\n24943022571\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def human_to_bytes(filesize):\n    \"\"\"Convert a human-readable file size string to its bytes equivalent.\n\n    This function takes a human-readable file size string, such as \"2.5GB\", and converts it\n    to its equivalent number of bytes.\n\n    Args:\n        filesize (str or int): The human-readable file size string or integer bytes value to convert.\n\n    Returns:\n        int: The number of bytes equivalent to the input human-readable file size.\n\n    Raises:\n        ValueError: If the input string cannot be converted to bytes.\n\n    Examples:\n        &gt;&gt;&gt; human_to_bytes(\"23.23gb\")\n        24943022571\n    \"\"\"\n    if isinstance(filesize, int):\n        return filesize\n    sizes = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]\n    units = {}\n    for count, size in enumerate(sizes):\n        size_increment = pow(1024, count)\n        units[size] = size_increment\n        if len(size) == 2:\n            units[size[0]] = size_increment\n    match = filesize_regex.match(filesize)\n    try:\n        if match:\n            num, size = match.groups()\n            size = size.upper()\n            size_increment = units[size]\n            return int(float(num) * size_increment)\n    except KeyError:\n        pass\n    raise ValueError(f'Unable to convert filesize \"{filesize}\" to bytes')\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.in_exception_chain","title":"in_exception_chain","text":"<pre><code>in_exception_chain(e, exc_types)\n</code></pre> <p>Given an Exception and a list of Exception types, returns whether any of the specified types are contained anywhere in the Exception chain.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception to check</p> </li> <li> <code>exc_types</code>               (<code>list[Exception]</code>)           \u2013            <p>Exception types to consider intentional cancellations. Default is KeyboardInterrupt</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>Whether the error is the result of an intentional cancellation</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except Exception as e:\n...     if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n...         
raise\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def in_exception_chain(e, exc_types):\n    \"\"\"\n    Given an Exception and a list of Exception types, returns whether any of the specified types are contained anywhere in the Exception chain.\n\n    Args:\n        e (BaseException): The exception to check\n        exc_types (list[Exception]): Exception types to consider intentional cancellations. Default is KeyboardInterrupt\n\n    Returns:\n        bool: Whether the error is the result of an intentional cancellaion\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except Exception as e:\n        ...     if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n        ...         raise\n    \"\"\"\n    return any([isinstance(_, exc_types) for _ in get_exception_chain(e)])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.integer_to_ordinal","title":"integer_to_ordinal","text":"<pre><code>integer_to_ordinal(n)\n</code></pre> <p>Convert an integer to its ordinal representation.</p> <p>Parameters:</p> <ul> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The integer to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The ordinal representation of the integer.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; integer_to_ordinal(1)\n'1st'\n&gt;&gt;&gt; integer_to_ordinal(2)\n'2nd'\n&gt;&gt;&gt; integer_to_ordinal(3)\n'3rd'\n&gt;&gt;&gt; integer_to_ordinal(11)\n'11th'\n&gt;&gt;&gt; integer_to_ordinal(21)\n'21st'\n&gt;&gt;&gt; integer_to_ordinal(101)\n'101st'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def integer_to_ordinal(n):\n    \"\"\"\n    Convert an integer to its ordinal representation.\n\n    Args:\n        n (int): The integer to convert.\n\n    Returns:\n        str: The ordinal representation of the integer.\n\n    Examples:\n        &gt;&gt;&gt; integer_to_ordinal(1)\n        '1st'\n        &gt;&gt;&gt; integer_to_ordinal(2)\n        '2nd'\n        &gt;&gt;&gt; integer_to_ordinal(3)\n        '3rd'\n        &gt;&gt;&gt; integer_to_ordinal(11)\n        '11th'\n        &gt;&gt;&gt; integer_to_ordinal(21)\n        '21st'\n        &gt;&gt;&gt; integer_to_ordinal(101)\n        '101st'\n    \"\"\"\n    # Check the last digit\n    last_digit = n % 10\n    # Check the last two digits for special cases (11th, 12th, 13th)\n    last_two_digits = n % 100\n\n    if 10 &lt;= last_two_digits &lt;= 20:\n        suffix = \"th\"\n    else:\n        if last_digit == 1:\n            suffix = \"st\"\n        elif last_digit == 2:\n            suffix = \"nd\"\n        elif last_digit == 3:\n            suffix = \"rd\"\n        else:\n            suffix = \"th\"\n\n    return f\"{n}{suffix}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.ip_network_parents","title":"ip_network_parents","text":"<pre><code>ip_network_parents(i, include_self=False)\n</code></pre> <p>Generates all parent IP networks for a given IP address or network, optionally including the network itself.</p> <p>Parameters:</p> <ul> <li> <code>i</code>               (<code>str or IPv4Network / IPv6Network</code>)           \u2013            <p>The IP address or network to find parents for.</p> </li> <li> <code>include_self</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether 
to include the network itself in the result. Default is False.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>ipaddress.IPv4Network or ipaddress.IPv6Network: Parent IP networks in descending order of prefix length.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(ip_network_parents(\"192.168.1.1\"))\n[ipaddress.IPv4Network('192.168.1.0/31'), ipaddress.IPv4Network('192.168.1.0/30'), ... , ipaddress.IPv4Network('0.0.0.0/0')]\n</code></pre> Notes <ul> <li>Utilizes Python's built-in <code>ipaddress</code> module for network operations.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def ip_network_parents(i, include_self=False):\n    \"\"\"\n    Generates all parent IP networks for a given IP address or network, optionally including the network itself.\n\n    Args:\n        i (str or ipaddress.IPv4Network/ipaddress.IPv6Network): The IP address or network to find parents for.\n        include_self (bool, optional): Whether to include the network itself in the result. Default is False.\n\n    Yields:\n        ipaddress.IPv4Network or ipaddress.IPv6Network: Parent IP networks in descending order of prefix length.\n\n    Examples:\n        &gt;&gt;&gt; list(ip_network_parents(\"192.168.1.1\"))\n        [ipaddress.IPv4Network('192.168.1.0/31'), ipaddress.IPv4Network('192.168.1.0/30'), ... , ipaddress.IPv4Network('0.0.0.0/0')]\n\n    Notes:\n        - Utilizes Python's built-in `ipaddress` module for network operations.\n    \"\"\"\n    net = ipaddress.ip_network(i, strict=False)\n    for i in range(net.prefixlen - (0 if include_self else 1), -1, -1):\n        yield ipaddress.ip_network(f\"{net.network_address}/{i}\", strict=False)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_async_function","title":"is_async_function","text":"<pre><code>is_async_function(f)\n</code></pre> <p>Check if a given function is an asynchronous function.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>function</code>)           \u2013            <p>The function to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the function is asynchronous, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def foo():\n...     pass\n&gt;&gt;&gt; is_async_function(foo)\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_async_function(f):\n    \"\"\"\n    Check if a given function is an asynchronous function.\n\n    Args:\n        f (function): The function to check.\n\n    Returns:\n        bool: True if the function is asynchronous, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; async def foo():\n        ...     
pass\n        &gt;&gt;&gt; is_async_function(foo)\n        True\n    \"\"\"\n    import inspect\n\n    return inspect.iscoroutinefunction(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_dns_name","title":"is_dns_name","text":"<pre><code>is_dns_name(d, include_local=True)\n</code></pre> <p>Determines if the given string is a valid DNS name.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The string to be checked.</p> </li> <li> <code>include_local</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Consider local hostnames to be valid (hostnames without periods)</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the string is a valid DNS name, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_dns_name('www.example.com')\nTrue\n&gt;&gt;&gt; is_dns_name('localhost')\nTrue\n&gt;&gt;&gt; is_dns_name('localhost', include_local=False)\nFalse\n&gt;&gt;&gt; is_dns_name('192.168.1.1')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_dns_name(d, include_local=True):\n    \"\"\"\n    Determines if the given string is a valid DNS name.\n\n    Args:\n        d (str): The string to be checked.\n        include_local (bool): Consider local hostnames to be valid (hostnames without periods)\n\n    Returns:\n        bool: True if the string is a valid DNS name, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_dns_name('www.example.com')\n        True\n        &gt;&gt;&gt; is_dns_name('localhost')\n        True\n        &gt;&gt;&gt; is_dns_name('localhost', include_local=False)\n        False\n        &gt;&gt;&gt; is_dns_name('192.168.1.1')\n        False\n    \"\"\"\n    if is_ip(d):\n        return False\n    d = smart_decode(d)\n    if include_local:\n        if bbot_regexes.hostname_regex.match(d):\n            return True\n    if bbot_regexes.dns_name_validation_regex.match(d):\n        return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_domain","title":"is_domain","text":"<pre><code>is_domain(d)\n</code></pre> <p>Check if the given input represents a domain without subdomains.</p> <p>This function takes an input string <code>d</code> and returns True if it represents a domain without any subdomains. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input is a domain without subdomains, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_domain(\"evilcorp.co.uk\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_domain(\"www.evilcorp.co.uk\")\nFalse\n</code></pre> Notes <ul> <li>Port, if present in input, is ignored.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_domain(d):\n    \"\"\"\n    Check if the given input represents a domain without subdomains.\n\n    This function takes an input string `d` and returns True if it represents a domain without any subdomains.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string containing the domain.\n\n    Returns:\n        bool: True if the input is a domain without subdomains, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_domain(\"evilcorp.co.uk\")\n        True\n\n        &gt;&gt;&gt; is_domain(\"www.evilcorp.co.uk\")\n        False\n\n    Notes:\n        - Port, if present in input, is ignored.\n    \"\"\"\n    d, _ = split_host_port(d)\n    if is_ip(d):\n        return False\n    extracted = tldextract(d)\n    if extracted.registered_domain:\n        if not extracted.subdomain:\n            return True\n    else:\n        return d.count(\".\") == 1\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_file","title":"is_file","text":"<pre><code>is_file(f)\n</code></pre> <p>Check if a path points to a file.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str</code>)           \u2013            <p>Path to the file.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the path is a file, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_file(\"/etc/passwd\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_file(\"/nonexistent\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_file(f):\n    \"\"\"\n    Check if a path points to a file.\n\n    Parameters:\n        f (str): Path to the file.\n\n    Returns:\n        bool: True if the path is a file, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_file(\"/etc/passwd\")\n        True\n\n        &gt;&gt;&gt; is_file(\"/nonexistent\")\n        False\n    \"\"\"\n    with suppress(Exception):\n        return Path(f).is_file()\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ip","title":"is_ip","text":"<pre><code>is_ip(d, version=None, include_network=False)\n</code></pre> <p>Checks if the given string or object represents a valid IP address.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or IPvXAddress</code>)           \u2013            <p>The IP address to check.</p> </li> <li> <code>include_network</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include network types (IPv4Network or IPv6Network). Defaults to False.</p> </li> <li> <code>version</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The IP version to validate (4 or 6). 
Default is None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the string or object is a valid IP address, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ip('192.168.1.1')\nTrue\n&gt;&gt;&gt; is_ip('bad::c0de', version=6)\nTrue\n&gt;&gt;&gt; is_ip('bad::c0de', version=4)\nFalse\n&gt;&gt;&gt; is_ip('evilcorp.com')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ip(d, version=None, include_network=False):\n    \"\"\"\n    Checks if the given string or object represents a valid IP address.\n\n    Args:\n        d (str or ipaddress.IPvXAddress): The IP address to check.\n        include_network (bool, optional): Whether to include network types (IPv4Network or IPv6Network). Defaults to False.\n        version (int, optional): The IP version to validate (4 or 6). Default is None.\n\n    Returns:\n        bool: True if the string or object is a valid IP address, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ip('192.168.1.1')\n        True\n        &gt;&gt;&gt; is_ip('bad::c0de', version=6)\n        True\n        &gt;&gt;&gt; is_ip('bad::c0de', version=4)\n        False\n        &gt;&gt;&gt; is_ip('evilcorp.com')\n        False\n    \"\"\"\n    ip = None\n    try:\n        ip = ipaddress.ip_address(d)\n    except Exception:\n        if include_network:\n            try:\n                ip = ipaddress.ip_network(d, strict=False)\n            except Exception:\n                pass\n    if ip is not None and (version is None or ip.version == version):\n        return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ip_type","title":"is_ip_type","text":"<pre><code>is_ip_type(i, network=None)\n</code></pre> <p>Checks if the given object is an instance of an IPv4 or IPv6 type from the ipaddress module.</p> <p>Parameters:</p> <ul> <li> <code>i</code>               (<code>_BaseV4 or _BaseV6</code>)           \u2013            <p>The IP object to check.</p> </li> <li> <code>network</code>               (<code>bool</code>, default:                   <code>None</code> )           \u2013            <p>Whether to restrict the check to network types (IPv4Network or IPv6Network). Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the object is an instance of ipaddress._BaseV4 or ipaddress._BaseV6, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ip_type(ipaddress.IPv6Address('dead::beef'))\nTrue\n&gt;&gt;&gt; is_ip_type(ipaddress.IPv4Network('192.168.1.0/24'))\nTrue\n&gt;&gt;&gt; is_ip_type(\"192.168.1.0/24\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ip_type(i, network=None):\n    \"\"\"\n    Checks if the given object is an instance of an IPv4 or IPv6 type from the ipaddress module.\n\n    Args:\n        i (ipaddress._BaseV4 or ipaddress._BaseV6): The IP object to check.\n        network (bool, optional): Whether to restrict the check to network types (IPv4Network or IPv6Network). 
Defaults to False.\n\n    Returns:\n        bool: True if the object is an instance of ipaddress._BaseV4 or ipaddress._BaseV6, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ip_type(ipaddress.IPv6Address('dead::beef'))\n        True\n        &gt;&gt;&gt; is_ip_type(ipaddress.IPv4Network('192.168.1.0/24'))\n        True\n        &gt;&gt;&gt; is_ip_type(\"192.168.1.0/24\")\n        False\n    \"\"\"\n    if network is not None:\n        is_network = ipaddress._BaseNetwork in i.__class__.__mro__\n        if network:\n            return is_network\n        else:\n            return not is_network\n    return ipaddress._IPAddressBase in i.__class__.__mro__\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_port","title":"is_port","text":"<pre><code>is_port(p)\n</code></pre> <p>Checks if the given string represents a valid port number.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str or int</code>)           \u2013            <p>The port number to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the port number is valid, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_port('80')\nTrue\n&gt;&gt;&gt; is_port('70000')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_port(p):\n    \"\"\"\n    Checks if the given string represents a valid port number.\n\n    Args:\n        p (str or int): The port number to check.\n\n    Returns:\n        bool: True if the port number is valid, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_port('80')\n        True\n        &gt;&gt;&gt; is_port('70000')\n        False\n    \"\"\"\n\n    p = str(p)\n    return p and p.isdigit() and 0 &lt;= int(p) &lt;= 65535\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ptr","title":"is_ptr","text":"<pre><code>is_ptr(d)\n</code></pre> <p>Check if the given input represents a PTR record domain.</p> <p>This function takes an input string <code>d</code> and returns True if it matches the PTR record format. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a PTR record domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input matches PTR record format, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ptr(\"wsc-11-22-33-44.evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_ptr(\"www2.evilcorp.com\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ptr(d):\n    \"\"\"\n    Check if the given input represents a PTR record domain.\n\n    This function takes an input string `d` and returns True if it matches the PTR record format.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string potentially representing a PTR record domain.\n\n    Returns:\n        bool: True if the input matches PTR record format, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ptr(\"wsc-11-22-33-44.evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_ptr(\"www2.evilcorp.com\")\n        False\n    \"\"\"\n    return bool(bbot_regexes.ptr_regex.search(str(d)))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_subdomain","title":"is_subdomain","text":"<pre><code>is_subdomain(d)\n</code></pre> <p>Check if the given input represents a subdomain.</p> <p>This function takes an input string <code>d</code> and returns True if it represents a subdomain. Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the domain or subdomain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input is a subdomain, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_subdomain(\"www.evilcorp.co.uk\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_subdomain(\"evilcorp.co.uk\")\nFalse\n</code></pre> Notes <ul> <li>Port, if present in input, is ignored.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_subdomain(d):\n    \"\"\"\n    Check if the given input represents a subdomain.\n\n    This function takes an input string `d` and returns True if it represents a subdomain.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string containing the domain or subdomain.\n\n    Returns:\n        bool: True if the input is a subdomain, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_subdomain(\"www.evilcorp.co.uk\")\n        True\n\n        &gt;&gt;&gt; is_subdomain(\"evilcorp.co.uk\")\n        False\n\n    Notes:\n        - Port, if present in input, is ignored.\n    \"\"\"\n    d, _ = split_host_port(d)\n    if is_ip(d):\n        return False\n    extracted = tldextract(d)\n    if extracted.registered_domain:\n        if extracted.subdomain:\n            return True\n    else:\n        return d.count(\".\") &gt; 1\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_uri","title":"is_uri","text":"<pre><code>is_uri(u, return_scheme=False)\n</code></pre> <p>Check if the given input represents a URI and optionally return its scheme.</p> <p>This function takes an input string <code>u</code> and returns True if it matches a URI format. 
When <code>return_scheme</code> is True, it returns the URI scheme instead of a boolean.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a URI.</p> </li> <li> <code>return_scheme</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to return the URI scheme. Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[bool, str]: True if the input matches a URI format; the URI scheme if <code>return_scheme</code> is True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_uri(\"http://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"evilcorp.com\")\nFalse\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\", return_scheme=True)\n\"ftp\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_uri(u, return_scheme=False):\n    \"\"\"\n    Check if the given input represents a URI and optionally return its scheme.\n\n    This function takes an input string `u` and returns True if it matches a URI format.\n    When `return_scheme` is True, it returns the URI scheme instead of a boolean.\n\n    Args:\n        u (str): The input string potentially representing a URI.\n        return_scheme (bool, optional): Whether to return the URI scheme. Defaults to False.\n\n    Returns:\n        Union[bool, str]: True if the input matches a URI format; the URI scheme if `return_scheme` is True.\n\n    Examples:\n        &gt;&gt;&gt; is_uri(\"http://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_uri(\"evilcorp.com\")\n        False\n\n        &gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\", return_scheme=True)\n        \"ftp\"\n    \"\"\"\n    match = uri_regex.match(u)\n    if return_scheme:\n        if match:\n            return match.groups()[0].lower()\n        return \"\"\n    return bool(match)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_url","title":"is_url","text":"<pre><code>is_url(u)\n</code></pre> <p>Check if the given input represents a valid URL.</p> <p>This function takes an input string <code>u</code> and returns True if it matches any of the predefined URL formats. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input matches a valid URL format, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_url(\"https://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_url(\"not-a-url\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_url(u):\n    \"\"\"\n    Check if the given input represents a valid URL.\n\n    This function takes an input string `u` and returns True if it matches any of the predefined URL formats.\n    Otherwise, it returns False.\n\n    Args:\n        u (str): The input string potentially representing a URL.\n\n    Returns:\n        bool: True if the input matches a valid URL format, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_url(\"https://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_url(\"not-a-url\")\n        False\n    \"\"\"\n    u = str(u)\n    for r in bbot_regexes.event_type_regexes[\"URL\"]:\n        if r.match(u):\n            return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.kill_children","title":"kill_children","text":"<pre><code>kill_children(parent_pid=None, sig=None)\n</code></pre> <p>Forgive me father for I have sinned</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def kill_children(parent_pid=None, sig=None):\n    \"\"\"\n    Forgive me father for I have sinned\n    \"\"\"\n    import psutil\n    import signal\n\n    if sig is None:\n        sig = signal.SIGTERM\n\n    try:\n        parent = psutil.Process(parent_pid)\n    except psutil.NoSuchProcess:\n        log.debug(f\"No such PID: {parent_pid}\")\n        return\n    log.debug(f\"Killing children of process ID {parent.pid}\")\n    children = parent.children(recursive=True)\n    for child in children:\n        log.debug(f\"Killing child with PID {child.pid}\")\n        if child.name != \"python\":\n            try:\n                child.send_signal(sig)\n            except psutil.NoSuchProcess:\n                log.debug(f\"No such PID: {child.pid}\")\n            except psutil.AccessDenied:\n                log.debug(f\"Error killing PID: {child.pid} - access denied\")\n    log.debug(f\"Finished killing children of process ID {parent.pid}\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.latest_mtime","title":"latest_mtime","text":"<pre><code>latest_mtime(d)\n</code></pre> <p>Get the latest modified time of any file or sub-directory in a given directory.</p> <p>This function takes a directory path as an argument and returns the latest modified time of any contained file or directory, recursively. 
It's useful for sorting directories by modified time for cleanup or other purposes.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to search for the latest modified time.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>float</code>          \u2013            <p>The latest modified time in Unix timestamp format.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; latest_mtime(\"~/.bbot/scans/mushy_susan\")\n1659016928.2848816\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def latest_mtime(d):\n    \"\"\"Get the latest modified time of any file or sub-directory in a given directory.\n\n    This function takes a directory path as an argument and returns the latest modified time\n    of any contained file or directory, recursively. It's useful for sorting directories by\n    modified time for cleanup or other purposes.\n\n    Args:\n        d (str or Path): The directory path to search for the latest modified time.\n\n    Returns:\n        float: The latest modified time in Unix timestamp format.\n\n    Examples:\n        &gt;&gt;&gt; latest_mtime(\"~/.bbot/scans/mushy_susan\")\n        1659016928.2848816\n    \"\"\"\n    d = Path(d).resolve()\n    mtimes = [d.lstat().st_mtime]\n    if d.is_dir():\n        to_list = d.glob(\"**/*\")\n    else:\n        to_list = [d]\n    for e in to_list:\n        mtimes.append(e.lstat().st_mtime)\n    return max(mtimes)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.list_files","title":"list_files","text":"<pre><code>list_files(directory, filter=lambda x: True)\n</code></pre> <p>Lists files in a given directory that meet a specified filter condition.</p> <p>Parameters:</p> <ul> <li> <code>directory</code>               (<code>str</code>)           \u2013            <p>The directory where to list files.</p> </li> <li> <code>filter</code>               (<code>callable</code>, default:                   <code>lambda x: True</code> )           \u2013            <p>A function to filter the files. Defaults to a lambda function that returns True for all files.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>Path</code>          \u2013            <p>A Path object for each file that meets the filter condition.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(list_files(\"/tmp/test\"))\n[Path('/tmp/test/file1.py'), Path('/tmp/test/file2.txt')]\n</code></pre> <pre><code>&gt;&gt;&gt; list(list_files(\"/tmp/test\", filter=lambda f: f.suffix == \".py\"))\n[Path('/tmp/test/file1.py')]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def list_files(directory, filter=lambda x: True):\n    \"\"\"Lists files in a given directory that meet a specified filter condition.\n\n    Args:\n        directory (str): The directory where to list files.\n        filter (callable, optional): A function to filter the files. 
Defaults to a lambda function that returns True for all files.\n\n    Yields:\n        Path: A Path object for each file that meets the filter condition.\n\n    Examples:\n        &gt;&gt;&gt; list(list_files(\"/tmp/test\"))\n        [Path('/tmp/test/file1.py'), Path('/tmp/test/file2.txt')]\n\n        &gt;&gt;&gt; list(list_files(\"/tmp/test\"), filter=lambda f: f.suffix == \".py\")\n        [Path('/tmp/test/file1.py')]\n    \"\"\"\n    directory = Path(directory).resolve()\n    if directory.is_dir():\n        for file in directory.iterdir():\n            if file.is_file() and filter(file):\n                yield file\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_date","title":"make_date","text":"<pre><code>make_date(d=None, microseconds=False)\n</code></pre> <p>Generates a string representation of the current date and time, with optional microsecond precision.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>datetime</code>, default:                   <code>None</code> )           \u2013            <p>A datetime object to convert. Defaults to the current date and time.</p> </li> <li> <code>microseconds</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include microseconds. Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representation of the date and time, formatted as YYYYMMDD_HHMM_SS or YYYYMMDD_HHMM_SSFFFFFF if microseconds are included.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_date()\n\"20220707_1325_50\"\n&gt;&gt;&gt; make_date(microseconds=True)\n\"20220707_1330_35167617\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_date(d=None, microseconds=False):\n    \"\"\"\n    Generates a string representation of the current date and time, with optional microsecond precision.\n\n    Args:\n        d (datetime, optional): A datetime object to convert. Defaults to the current date and time.\n        microseconds (bool, optional): Whether to include microseconds. Defaults to False.\n\n    Returns:\n        str: A string representation of the date and time, formatted as YYYYMMDD_HHMM_SS or YYYYMMDD_HHMM_SSFFFFFF if microseconds are included.\n\n    Examples:\n        &gt;&gt;&gt; make_date()\n        \"20220707_1325_50\"\n        &gt;&gt;&gt; make_date(microseconds=True)\n        \"20220707_1330_35167617\"\n    \"\"\"\n    from datetime import datetime\n\n    f = \"%Y%m%d_%H%M_%S\"\n    if microseconds:\n        f += \"%f\"\n    if d is None:\n        d = datetime.now()\n    return d.strftime(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_ip_type","title":"make_ip_type","text":"<pre><code>make_ip_type(s)\n</code></pre> <p>Convert a string to its corresponding IP address or network type.</p> <p>This function attempts to convert the input string <code>s</code> into either an IPv4 or IPv6 address object, or an IPv4 or IPv6 network object. 
If none of these conversions are possible, the original string is returned.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string to be converted.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[IPv4Address, IPv6Address, IPv4Network, IPv6Network, str]: The converted object or original string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_ip_type(\"dead::beef\")\nIPv6Address('dead::beef')\n</code></pre> <pre><code>&gt;&gt;&gt; make_ip_type(\"192.168.1.0/24\")\nIPv4Network('192.168.1.0/24')\n</code></pre> <pre><code>&gt;&gt;&gt; make_ip_type(\"evilcorp.com\")\n'evilcorp.com'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_ip_type(s):\n    \"\"\"\n    Convert a string to its corresponding IP address or network type.\n\n    This function attempts to convert the input string `s` into either an IPv4 or IPv6 address object,\n    or an IPv4 or IPv6 network object. If none of these conversions are possible, the original string is returned.\n\n    Args:\n        s (str): The input string to be converted.\n\n    Returns:\n        Union[IPv4Address, IPv6Address, IPv4Network, IPv6Network, str]: The converted object or original string.\n\n    Examples:\n        &gt;&gt;&gt; make_ip_type(\"dead::beef\")\n        IPv6Address('dead::beef')\n\n        &gt;&gt;&gt; make_ip_type(\"192.168.1.0/24\")\n        IPv4Network('192.168.1.0/24')\n\n        &gt;&gt;&gt; make_ip_type(\"evilcorp.com\")\n        'evilcorp.com'\n    \"\"\"\n    if not s:\n        raise ValueError(f'Invalid hostname: \"{s}\"')\n    # IP address\n    with suppress(Exception):\n        return ipaddress.ip_address(s)\n    # IP network\n    with suppress(Exception):\n        return ipaddress.ip_network(s, strict=False)\n    return s\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_netloc","title":"make_netloc","text":"<pre><code>make_netloc(host, port=None)\n</code></pre> <p>Constructs a network location string from a given host and port.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str</code>)           \u2013            <p>The hostname or IP address.</p> </li> <li> <code>port</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The port number. If None, the port is omitted.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A network location string in the form 'host' or 'host:port'.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_netloc(\"192.168.1.1\", None)\n\"192.168.1.1\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"192.168.1.1\", 443)\n\"192.168.1.1:443\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"evilcorp.com\", 80)\n\"evilcorp.com:80\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"dead::beef\", None)\n\"[dead::beef]\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"dead::beef\", 443)\n\"[dead::beef]:443\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_netloc(host, port=None):\n    \"\"\"Constructs a network location string from a given host and port.\n\n    Args:\n        host (str): The hostname or IP address.\n        port (int, optional): The port number. 
If None, the port is omitted.\n\n    Returns:\n        str: A network location string in the form 'host' or 'host:port'.\n\n    Examples:\n        &gt;&gt;&gt; make_netloc(\"192.168.1.1\", None)\n        \"192.168.1.1\"\n\n        &gt;&gt;&gt; make_netloc(\"192.168.1.1\", 443)\n        \"192.168.1.1:443\"\n\n        &gt;&gt;&gt; make_netloc(\"evilcorp.com\", 80)\n        \"evilcorp.com:80\"\n\n        &gt;&gt;&gt; make_netloc(\"dead::beef\", None)\n        \"[dead::beef]\"\n\n        &gt;&gt;&gt; make_netloc(\"dead::beef\", 443)\n        \"[dead::beef]:443\"\n    \"\"\"\n    if is_ip(host, version=6):\n        host = f\"[{host}]\"\n    if port is None:\n        return str(host)\n    return f\"{host}:{port}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_table","title":"make_table","text":"<pre><code>make_table(rows, header, **kwargs)\n</code></pre> <p>Generate a formatted table from the given rows and headers.</p> <p>This function uses the <code>tabulate</code> package to generate a table with formatting options. It can accept various input formats and table styles, which can be customized using optional arguments.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to <code>tabulate.tabulate</code>.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to customize table formatting. - tablefmt (str, optional): Table format. Default is 'grid'. - disable_numparse (bool, optional): Disable automatic number parsing. Default is True. - maxcolwidths (int, optional): Maximum column width. Default is 40.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the formatted table.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; print(make_table([[\"row1\", \"row1\"], [\"row2\", \"row2\"]], [\"header1\", \"header2\"]))\n+-----------+-----------+\n| header1   | header2   |\n+===========+===========+\n| row1      | row1      |\n+-----------+-----------+\n| row2      | row2      |\n+-----------+-----------+\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_table(rows, header, **kwargs):\n    \"\"\"Generate a formatted table from the given rows and headers.\n\n    This function uses the `tabulate` package to generate a table with formatting options.\n    It can accept various input formats and table styles, which can be customized using optional arguments.\n\n    Args:\n        *args: Positional arguments to be passed to `tabulate.tabulate`.\n        **kwargs: Keyword arguments to customize table formatting.\n            - tablefmt (str, optional): Table format. Default is 'grid'.\n            - disable_numparse (bool, optional): Disable automatic number parsing. Default is True.\n            - maxcolwidths (int, optional): Maximum column width. 
Default is 40.\n\n    Returns:\n        str: A string representing the formatted table.\n\n    Examples:\n        &gt;&gt;&gt; print(make_table([[\"row1\", \"row1\"], [\"row2\", \"row2\"]], [\"header1\", \"header2\"]))\n        +-----------+-----------+\n        | header1   | header2   |\n        +===========+===========+\n        | row1      | row1      |\n        +-----------+-----------+\n        | row2      | row2      |\n        +-----------+-----------+\n    \"\"\"\n    from tabulate import tabulate\n\n    # fix IndexError: list index out of range\n    if not rows:\n        rows = [[]]\n    tablefmt = os.environ.get(\"BBOT_TABLE_FORMAT\", None)\n    defaults = {\"tablefmt\": \"grid\", \"disable_numparse\": True, \"maxcolwidths\": None}\n    if tablefmt is None:\n        defaults.update({\"maxcolwidths\": 40})\n    else:\n        defaults.update({\"tablefmt\": tablefmt})\n    for k, v in defaults.items():\n        if k not in kwargs:\n            kwargs[k] = v\n    # don't wrap columns in markdown\n    if tablefmt in (\"github\", \"markdown\"):\n        kwargs.pop(\"maxcolwidths\")\n        # escape problematic markdown characters in rows\n\n        def markdown_escape(s):\n            return str(s).replace(\"|\", \"&amp;#124;\")\n\n        rows = [[markdown_escape(f) for f in row] for row in rows]\n        header = [markdown_escape(h) for h in header]\n    return tabulate(rows, header, **kwargs)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.memory_status","title":"memory_status","text":"<pre><code>memory_status()\n</code></pre> <p>Return statistics on system memory consumption.</p> <p>The function returns a <code>psutil</code> named tuple that contains statistics on system virtual memory usage, such as total memory, used memory, available memory, and more.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>psutil._pslinux.svmem: A named tuple representing various statistics about system virtual memory usage.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; mem = memory_status()\n&gt;&gt;&gt; mem.available\n13195399168\n</code></pre> <pre><code>&gt;&gt;&gt; mem = memory_status()\n&gt;&gt;&gt; mem.percent\n79.0\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def memory_status():\n    \"\"\"Return statistics on system memory consumption.\n\n    The function returns a `psutil` named tuple that contains statistics on\n    system virtual memory usage, such as total memory, used memory, available\n    memory, and more.\n\n    Returns:\n        psutil._pslinux.svmem: A named tuple representing various statistics\n            about system virtual memory usage.\n\n    Examples:\n        &gt;&gt;&gt; mem = memory_status()\n        &gt;&gt;&gt; mem.available\n        13195399168\n\n        &gt;&gt;&gt; mem = memory_status()\n        &gt;&gt;&gt; mem.percent\n        79.0\n    \"\"\"\n    import psutil\n\n    return psutil.virtual_memory()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.mkdir","title":"mkdir","text":"<pre><code>mkdir(path, check_writable=True, raise_error=True)\n</code></pre> <p>Creates a directory and optionally checks if it's writable.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The directory to create.</p> </li> <li> <code>check_writable</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to check if the directory is writable. 
Default is True.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to raise an error if the directory creation fails. Default is True.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the directory is successfully created (and writable, if check_writable=True); otherwise False.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>DirectoryCreationError</code>             \u2013            <p>Raised if the directory cannot be created and <code>raise_error=True</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; mkdir(\"/tmp/new_dir\")\nTrue\n&gt;&gt;&gt; mkdir(\"/restricted_dir\", check_writable=False, raise_error=False)\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def mkdir(path, check_writable=True, raise_error=True):\n    \"\"\"\n    Creates a directory and optionally checks if it's writable.\n\n    Args:\n        path (str or Path): The directory to create.\n        check_writable (bool, optional): Whether to check if the directory is writable. Default is True.\n        raise_error (bool, optional): Whether to raise an error if the directory creation fails. Default is True.\n\n    Returns:\n        bool: True if the directory is successfully created (and writable, if check_writable=True); otherwise False.\n\n    Raises:\n        DirectoryCreationError: Raised if the directory cannot be created and `raise_error=True`.\n\n    Examples:\n        &gt;&gt;&gt; mkdir(\"/tmp/new_dir\")\n        True\n        &gt;&gt;&gt; mkdir(\"/restricted_dir\", check_writable=False, raise_error=False)\n        False\n    \"\"\"\n    path = Path(path).resolve()\n    touchfile = path / f\".{rand_string()}\"\n    try:\n        path.mkdir(exist_ok=True, parents=True)\n        if check_writable:\n            touchfile.touch()\n        return True\n    except Exception as e:\n        if raise_error:\n            raise errors.DirectoryCreationError(f\"Failed to create directory at {path}: {e}\")\n    finally:\n        with suppress(Exception):\n            touchfile.unlink()\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.os_platform","title":"os_platform","text":"<pre><code>os_platform()\n</code></pre> <p>Return the OS platform of the current system.</p> <p>This function fetches and returns the OS type where the code is being executed. 
It converts the platform identifier to lowercase.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the OS platform, such as \"linux\", \"darwin\", or \"windows\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; os_platform()\n'linux'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def os_platform():\n    \"\"\"Return the OS platform of the current system.\n\n    This function fetches and returns the OS type where the code is being executed.\n    It converts the platform identifier to lowercase.\n\n    Returns:\n        str: A string representing the OS platform, such as \"linux\", \"darwin\", or \"windows\".\n\n    Examples:\n        &gt;&gt;&gt; os_platform()\n        'linux'\n    \"\"\"\n    import platform\n\n    return platform.system().lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.os_platform_friendly","title":"os_platform_friendly","text":"<pre><code>os_platform_friendly()\n</code></pre> <p>Return a human-friendly OS platform string, suitable for golang release binaries.</p> <p>This function fetches the OS platform and modifies it to a more human-readable format if necessary. Specifically, it changes \"darwin\" to \"macOS\".</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the human-friendly OS platform, such as \"macOS\", \"linux\", or \"windows\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; os_platform_friendly()\n'macOS'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def os_platform_friendly():\n    \"\"\"Return a human-friendly OS platform string, suitable for golang release binaries.\n\n    This function fetches the OS platform and modifies it to a more human-readable format if necessary.\n    Specifically, it changes \"darwin\" to \"macOS\".\n\n    Returns:\n        str: A string representing the human-friendly OS platform, such as \"macOS\", \"linux\", or \"windows\".\n\n    Examples:\n        &gt;&gt;&gt; os_platform_friendly()\n        'macOS'\n    \"\"\"\n    p = os_platform()\n    if p == \"darwin\":\n        return \"macOS\"\n    return p\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.param_type","title":"param_type","text":"<pre><code>param_type(p)\n</code></pre> <p>Evaluates the type of the given parameter.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str</code>)           \u2013            <p>The parameter whose type is to be evaluated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>An integer representing the type of parameter. 
- 1: Integer - 2: UUID - 3: Other</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; param_type('123')\n1\n</code></pre> <pre><code>&gt;&gt;&gt; param_type('550e8400-e29b-41d4-a716-446655440000')\n2\n</code></pre> <pre><code>&gt;&gt;&gt; param_type('abc')\n3\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def param_type(p):\n    \"\"\"\n    Evaluates the type of the given parameter.\n\n    Args:\n        p (str): The parameter whose type is to be evaluated.\n\n    Returns:\n        int: An integer representing the type of parameter.\n            - 1: Integer\n            - 2: UUID\n            - 3: Other\n\n    Examples:\n        &gt;&gt;&gt; param_type('123')\n        1\n\n        &gt;&gt;&gt; param_type('550e8400-e29b-41d4-a716-446655440000')\n        2\n\n        &gt;&gt;&gt; param_type('abc')\n        3\n    \"\"\"\n    try:\n        int(p)\n        return 1\n    except Exception:\n        with suppress(Exception):\n            uuid.UUID(p)\n            return 2\n    return 3\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parent_domain","title":"parent_domain","text":"<pre><code>parent_domain(d)\n</code></pre> <p>Retrieve the parent domain of a given subdomain string.</p> <p>This function takes an input string <code>d</code> representing a subdomain and returns its parent domain. If the input does not represent a subdomain, it returns the input as is.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string representing a subdomain or domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The parent domain of the subdomain, or the original input if it is not a subdomain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk\")\n\"internal.evilcorp.co.uk\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk:8080\")\n\"internal.evilcorp.co.uk:8080\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"www.evilcorp.co.uk\")\n\"evilcorp.co.uk\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"evilcorp.co.uk\")\n\"evilcorp.co.uk\"\n</code></pre> Notes <ul> <li>Port, if present in input, is preserved in the output.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parent_domain(d):\n    \"\"\"\n    Retrieve the parent domain of a given subdomain string.\n\n    This function takes an input string `d` representing a subdomain and returns its parent domain.\n    If the input does not represent a subdomain, it returns the input as is.\n\n    Args:\n        d (str): The input string representing a subdomain or domain.\n\n    Returns:\n        str: The parent domain of the subdomain, or the original input if it is not a subdomain.\n\n    Examples:\n        &gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk\")\n        \"internal.evilcorp.co.uk\"\n\n        &gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk:8080\")\n        \"internal.evilcorp.co.uk:8080\"\n\n        &gt;&gt;&gt; parent_domain(\"www.evilcorp.co.uk\")\n        \"evilcorp.co.uk\"\n\n        &gt;&gt;&gt; parent_domain(\"evilcorp.co.uk\")\n        \"evilcorp.co.uk\"\n\n    Notes:\n        - Port, if present in input, is preserved in the output.\n    \"\"\"\n    host, port = split_host_port(d)\n    if is_subdomain(d):\n        return make_netloc(\".\".join(str(host).split(\".\")[1:]), port)\n    return 
d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parent_url","title":"parent_url","text":"<pre><code>parent_url(u)\n</code></pre> <p>Retrieve the parent URL of a given URL.</p> <p>This function takes an input string <code>u</code> representing a URL and returns its parent URL. If the input URL does not have a parent (i.e., it's already the top-level), it returns None.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[str, None]: The parent URL of the input URL, or None if it has no parent.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parent_url(\"https://evilcorp.com/sub/path/\")\n\"https://evilcorp.com/sub/\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_url(\"https://evilcorp.com/\")\nNone\n</code></pre> Notes <ul> <li>Only the path component of the URL is modified.</li> <li>All other components like scheme, netloc, query, and fragment are preserved.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parent_url(u):\n    \"\"\"\n    Retrieve the parent URL of a given URL.\n\n    This function takes an input string `u` representing a URL and returns its parent URL.\n    If the input URL does not have a parent (i.e., it's already the top-level), it returns None.\n\n    Args:\n        u (str): The input string representing a URL.\n\n    Returns:\n        Union[str, None]: The parent URL of the input URL, or None if it has no parent.\n\n    Examples:\n        &gt;&gt;&gt; parent_url(\"https://evilcorp.com/sub/path/\")\n        \"https://evilcorp.com/sub/\"\n\n        &gt;&gt;&gt; parent_url(\"https://evilcorp.com/\")\n        None\n\n    Notes:\n        - Only the path component of the URL is modified.\n        - All other components like scheme, netloc, query, and fragment are preserved.\n    \"\"\"\n    parsed = urlparse(u)\n    path = Path(parsed.path)\n    if path.parent == path:\n        return None\n    else:\n        return urlunparse(parsed._replace(path=str(path.parent), query=\"\"))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parse_port_string","title":"parse_port_string","text":"<pre><code>parse_port_string(port_string)\n</code></pre> <p>Parses a string containing ports and port ranges into a list of individual ports.</p> <p>Parameters:</p> <ul> <li> <code>port_string</code>               (<code>str</code>)           \u2013            <p>The string containing individual ports and port ranges separated by commas.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of individual ports parsed from the input string.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string contains invalid ports or port ranges.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parse_port_string(\"22,80,1000-1002\")\n[22, 80, 1000, 1001, 1002]\n</code></pre> <pre><code>&gt;&gt;&gt; parse_port_string(\"1-2,3-5\")\n[1, 2, 3, 4, 5]\n</code></pre> <pre><code>&gt;&gt;&gt; parse_port_string(\"invalid\")\nValueError: Invalid port or port range: invalid\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parse_port_string(port_string):\n    \"\"\"\n    Parses a string containing ports and port ranges into a list of individual ports.\n\n    Args:\n        port_string (str): The string containing individual 
ports and port ranges separated by commas.\n\n    Returns:\n        list: A list of individual ports parsed from the input string.\n\n    Raises:\n        ValueError: If the input string contains invalid ports or port ranges.\n\n    Examples:\n        &gt;&gt;&gt; parse_port_string(\"22,80,1000-1002\")\n        [22, 80, 1000, 1001, 1002]\n\n        &gt;&gt;&gt; parse_port_string(\"1-2,3-5\")\n        [1, 2, 3, 4, 5]\n\n        &gt;&gt;&gt; parse_port_string(\"invalid\")\n        ValueError: Invalid port or port range: invalid\n    \"\"\"\n    elements = str(port_string).split(\",\")\n    ports = []\n\n    for element in elements:\n        if element.isdigit():\n            port = int(element)\n            if 1 &lt;= port &lt;= 65535:\n                ports.append(port)\n            else:\n                raise ValueError(f\"Invalid port: {element}\")\n        elif \"-\" in element:\n            range_parts = element.split(\"-\")\n            if len(range_parts) != 2 or not all(part.isdigit() for part in range_parts):\n                raise ValueError(f\"Invalid port or port range: {element}\")\n            start, end = map(int, range_parts)\n            if not (1 &lt;= start &lt; end &lt;= 65535):\n                raise ValueError(f\"Invalid port range: {element}\")\n            ports.extend(range(start, end + 1))\n        else:\n            raise ValueError(f\"Invalid port or port range: {element}\")\n\n    return ports\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parse_url","title":"parse_url","text":"<pre><code>parse_url(url)\n</code></pre> <p>Parse the given URL string or ParseResult object and return a ParseResult.</p> <p>This function checks if the input is already a ParseResult object. If it is, it returns the object as-is. Otherwise, it parses the given URL string using <code>urlparse</code>.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL string or ParseResult object to be parsed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ParseResult</code>          \u2013            <p>A named 6-tuple that contains the components of a URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parse_url('https://www.evilcorp.com')\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='', fragment='')\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def parse_url(url):\n    \"\"\"\n    Parse the given URL string or ParseResult object and return a ParseResult.\n\n    This function checks if the input is already a ParseResult object. If it is,\n    it returns the object as-is. 
Otherwise, it parses the given URL string using\n    `urlparse`.\n\n    Args:\n        url (Union[str, ParseResult]): The URL string or ParseResult object to be parsed.\n\n    Returns:\n        ParseResult: A named 6-tuple that contains the components of a URL.\n\n    Examples:\n        &gt;&gt;&gt; parse_url('https://www.evilcorp.com')\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='', fragment='')\n    \"\"\"\n    if isinstance(url, ParseResult):\n        return url\n    return urlparse(url)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rand_string","title":"rand_string","text":"<pre><code>rand_string(length=10, digits=True)\n</code></pre> <p>Generates a random string of specified length.</p> <p>Parameters:</p> <ul> <li> <code>length</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The length of the random string. Defaults to 10.</p> </li> <li> <code>digits</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include digits in the string. Defaults to True.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A random string of the specified length.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rand_string()\n'c4hp4i9jzx'\n&gt;&gt;&gt; rand_string(20)\n'ap4rsdtg5iw7ey7y3oa5'\n&gt;&gt;&gt; rand_string(30, digits=False)\n'xdmyxtglqfzqktngkesyulwbfrihva'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rand_string(length=10, digits=True):\n    \"\"\"\n    Generates a random string of specified length.\n\n    Args:\n        length (int, optional): The length of the random string. Defaults to 10.\n        digits (bool, optional): Whether to include digits in the string. Defaults to True.\n\n    Returns:\n        str: A random string of the specified length.\n\n    Examples:\n        &gt;&gt;&gt; rand_string()\n        'c4hp4i9jzx'\n        &gt;&gt;&gt; rand_string(20)\n        'ap4rsdtg5iw7ey7y3oa5'\n        &gt;&gt;&gt; rand_string(30, digits=False)\n        'xdmyxtglqfzqktngkesyulwbfrihva'\n    \"\"\"\n    pool = rand_pool\n    if digits:\n        pool = rand_pool_digits\n    return \"\".join([random.choice(pool) for _ in range(int(length))])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.read_file","title":"read_file","text":"<pre><code>read_file(filename)\n</code></pre> <p>Reads a file line by line and yields each line without line breaks.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to read.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>A line from the file without the trailing line break.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; for line in read_file(\"/tmp/file.txt\"):\n...     print(line)\nfile_line1\nfile_line2\nfile_line3\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def read_file(filename):\n    \"\"\"Reads a file line by line and yields each line without line breaks.\n\n    Args:\n        filename (str or Path): The path to the file to read.\n\n    Yields:\n        str: A line from the file without the trailing line break.\n\n    Examples:\n        &gt;&gt;&gt; for line in read_file(\"/tmp/file.txt\"):\n        ...     
print(line)\n        file_line1\n        file_line2\n        file_line3\n    \"\"\"\n    with open(filename, errors=\"ignore\") as f:\n        for line in f:\n            yield line.rstrip(\"\\r\\n\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.recursive_decode","title":"recursive_decode","text":"<pre><code>recursive_decode(data, max_depth=5)\n</code></pre> <p>Recursively decodes doubly or triply-encoded strings to their original form.</p> <p>Supports both URL-encoding and backslash-escapes (including unicode)</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The data to decode.</p> </li> <li> <code>max_depth</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>Maximum recursion depth for decoding. Defaults to 5.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The decoded string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; recursive_decode(\"Hello%20world%21\")\n\"Hello world!\"\n&gt;&gt;&gt; recursive_decode(\"Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442\")\n\"Hello \u041f\u0440\u0438\u0432\u0435\u0442\"\n&gt;&gt;&gt; recursive_decode(\"%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021\")\n\" \u041f\u0440\u0438\u0432\u0435\u0442!\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def recursive_decode(data, max_depth=5):\n    \"\"\"\n    Recursively decodes doubly or triply-encoded strings to their original form.\n\n    Supports both URL-encoding and backslash-escapes (including unicode)\n\n    Args:\n        data (str): The data to decode.\n        max_depth (int, optional): Maximum recursion depth for decoding. 
Defaults to 5.\n\n    Returns:\n        str: The decoded string.\n\n    Examples:\n        &gt;&gt;&gt; recursive_decode(\"Hello%20world%21\")\n        \"Hello world!\"\n        &gt;&gt;&gt; recursive_decode(\"Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442\")\n        \"Hello \u041f\u0440\u0438\u0432\u0435\u0442\"\n        &gt;&gt;&gt; recursive_dcode(\"%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021\")\n        \" \u041f\u0440\u0438\u0432\u0435\u0442!\"\n    \"\"\"\n    import codecs\n\n    # Decode newline and tab escapes\n    data = backslash_regex.sub(\n        lambda match: {\"n\": \"\\n\", \"t\": \"\\t\", \"r\": \"\\r\", \"b\": \"\\b\", \"v\": \"\\v\"}.get(match.group(\"char\")), data\n    )\n    data = smart_decode(data)\n    if max_depth == 0:\n        return data\n    # Decode URL encoding\n    data = unquote(data, errors=\"ignore\")\n    # Decode Unicode escapes\n    with suppress(UnicodeEncodeError):\n        data = ensure_utf8_compliant(codecs.decode(data, \"unicode_escape\", errors=\"ignore\"))\n    # Check if there's still URL-encoded or Unicode-escaped content\n    if encoded_regex.search(data):\n        # If yes, continue decoding\n        return recursive_decode(data, max_depth=max_depth - 1)\n    return data\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rm_at_exit","title":"rm_at_exit","text":"<pre><code>rm_at_exit(path)\n</code></pre> <p>Registers a file to be automatically deleted when the program exits.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to be deleted upon program exit.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rm_at_exit(\"/tmp/test/file1.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rm_at_exit(path):\n    \"\"\"Registers a file to be automatically deleted when the program exits.\n\n    Args:\n        path (str or Path): The path to the file to be deleted upon program exit.\n\n    Examples:\n        &gt;&gt;&gt; rm_at_exit(\"/tmp/test/file1.txt\")\n    \"\"\"\n    import atexit\n\n    atexit.register(delete_file, path)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rm_rf","title":"rm_rf","text":"<pre><code>rm_rf(f)\n</code></pre> <p>Recursively delete a directory</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to delete.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rm_rf(\"/tmp/httpx98323849\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rm_rf(f):\n    \"\"\"Recursively delete a directory\n\n    Args:\n        f (str or Path): The directory path to delete.\n\n    Examples:\n        &gt;&gt;&gt; rm_rf(\"/tmp/httpx98323849\")\n    \"\"\"\n    import shutil\n\n    shutil.rmtree(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.safe_format","title":"safe_format","text":"<pre><code>safe_format(s, **kwargs)\n</code></pre> <p>Format string while ignoring unused keys (prevents KeyError)</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def safe_format(s, **kwargs):\n    \"\"\"\n    Format string while ignoring unused keys (prevents KeyError)\n    \"\"\"\n    return 
s.format_map(SafeDict(kwargs))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_dict_by_key","title":"search_dict_by_key","text":"<pre><code>search_dict_by_key(key, d)\n</code></pre> <p>Search a nested dictionary or list of dictionaries by a key and yield all matching values.</p> <p>Parameters:</p> <ul> <li> <code>key</code>               (<code>str</code>)           \u2013            <p>The key to search for.</p> </li> <li> <code>d</code>               (<code>Union[dict, list]</code>)           \u2013            <p>The dictionary or list of dictionaries to search.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>Any</code>          \u2013            <p>Yields all values that match the provided key.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; d = {'a': 1, 'b': {'c': 2, 'a': 3}, 'd': [{'a': 4}, {'e': 5}]}\n&gt;&gt;&gt; list(search_dict_by_key('a', d))\n[1, 3, 4]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_dict_by_key(key, d):\n    \"\"\"Search a nested dictionary or list of dictionaries by a key and yield all matching values.\n\n    Args:\n        key (str): The key to search for.\n        d (Union[dict, list]): The dictionary or list of dictionaries to search.\n\n    Yields:\n        Any: Yields all values that match the provided key.\n\n    Examples:\n        &gt;&gt;&gt; d = {'a': 1, 'b': {'c': 2, 'a': 3}, 'd': [{'a': 4}, {'e': 5}]}\n        &gt;&gt;&gt; list(search_dict_by_key('a', d))\n        [1, 3, 4]\n    \"\"\"\n    if isinstance(d, dict):\n        if key in d:\n            yield d[key]\n        for k, v in d.items():\n            yield from search_dict_by_key(key, v)\n    elif isinstance(d, list):\n        for v in d:\n            yield from search_dict_by_key(key, v)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_dict_values","title":"search_dict_values","text":"<pre><code>search_dict_values(d, *regexes)\n</code></pre> <p>Recursively search a dictionary's values based on provided regex patterns.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>Union[dict, list, str]</code>)           \u2013            <p>The dictionary, list, or string to search.</p> </li> <li> <code>*regexes</code>           \u2013            <p>Arbitrary number of compiled regex patterns.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Generator</code>          \u2013            <p>Yields matching values based on the provided regex patterns.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; dict_to_search = {\n...     \"key1\": {\n...         \"key2\": [\n...             {\n...                 \"key3\": \"A URL: https://www.evilcorp.com\"\n...             }\n...         ]\n...     }\n... }\n&gt;&gt;&gt; url_regexes = re.compile(r'https?://[^\\s&lt;&gt;\"]+|www\\.[^\\s&lt;&gt;\"]+')\n&gt;&gt;&gt; list(search_dict_values(dict_to_search, url_regexes))\n[\"https://www.evilcorp.com\"]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_dict_values(d, *regexes):\n    \"\"\"Recursively search a dictionary's values based on provided regex patterns.\n\n    Args:\n        d (Union[dict, list, str]): The dictionary, list, or string to search.\n        *regexes: Arbitrary number of compiled regex patterns.\n\n    Returns:\n        Generator: Yields matching values based on the provided regex patterns.\n\n    Examples:\n        &gt;&gt;&gt; dict_to_search = {\n        ...     \"key1\": {\n        ...         \"key2\": [\n        ...            
 {\n        ...                 \"key3\": \"A URL: https://www.evilcorp.com\"\n        ...             }\n        ...         ]\n        ...     }\n        ... }\n        &gt;&gt;&gt; url_regexes = re.compile(r'https?://[^\\\\s&lt;&gt;\"]+|www\\\\.[^\\\\s&lt;&gt;\"]+')\n        &gt;&gt;&gt; list(search_dict_values(dict_to_search, url_regexes))\n        [\"https://www.evilcorp.com\"]\n    \"\"\"\n\n    results = set()\n    if isinstance(d, str):\n        for r in regexes:\n            for match in r.finditer(d):\n                result = match.group()\n                h = hash(result)\n                if h not in results:\n                    results.add(h)\n                    yield result\n    elif isinstance(d, dict):\n        for _, v in d.items():\n            yield from search_dict_values(v, *regexes)\n    elif isinstance(d, list):\n        for v in d:\n            yield from search_dict_values(v, *regexes)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_format_dict","title":"search_format_dict","text":"<pre><code>search_format_dict(d, **kwargs)\n</code></pre> <p>Recursively format string values in a dictionary or list using the provided keyword arguments.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>Union[dict, list, str]</code>)           \u2013            <p>The dictionary, list, or string to format.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments used for string formatting.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[dict, list, str]: The formatted dictionary, list, or string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; search_format_dict({\"test\": \"#{name} is awesome\"}, name=\"keanu\")\n{\"test\": \"keanu is awesome\"}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_format_dict(d, **kwargs):\n    \"\"\"Recursively format string values in a dictionary or list using the provided keyword arguments.\n\n    Args:\n        d (Union[dict, list, str]): The dictionary, list, or string to format.\n        **kwargs: Arbitrary keyword arguments used for string formatting.\n\n    Returns:\n        Union[dict, list, str]: The formatted dictionary, list, or string.\n\n    Examples:\n        &gt;&gt;&gt; search_format_dict({\"test\": \"#{name} is awesome\"}, name=\"keanu\")\n        {\"test\": \"keanu is awesome\"}\n    \"\"\"\n    if isinstance(d, dict):\n        return {k: search_format_dict(v, **kwargs) for k, v in d.items()}\n    elif isinstance(d, list):\n        return [search_format_dict(v, **kwargs) for v in d]\n    elif isinstance(d, str):\n        for find, replace in kwargs.items():\n            find = \"#{\" + str(find) + \"}\"\n            d = d.replace(find, replace)\n    return d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.sha1","title":"sha1","text":"<pre><code>sha1(data)\n</code></pre> <p>Computes the SHA-1 hash of the given data.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or dict</code>)           \u2013            <p>The data to hash. 
If a dictionary, it is first converted to a JSON string with sorted keys.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>hashlib.Hash: SHA-1 hash object of the input data.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; sha1(\"asdf\").hexdigest()\n'3da541559918a808c2402bba5012f6c60b27661c'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def sha1(data):\n    \"\"\"\n    Computes the SHA-1 hash of the given data.\n\n    Args:\n        data (str or dict): The data to hash. If a dictionary, it is first converted to a JSON string with sorted keys.\n\n    Returns:\n        hashlib.Hash: SHA-1 hash object of the input data.\n\n    Examples:\n        &gt;&gt;&gt; sha1(\"asdf\").hexdigest()\n        '3da541559918a808c2402bba5012f6c60b27661c'\n    \"\"\"\n    from hashlib import sha1 as hashlib_sha1\n\n    if isinstance(data, dict):\n        data = json.dumps(data, sort_keys=True)\n    return hashlib_sha1(smart_encode(data))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_decode","title":"smart_decode","text":"<pre><code>smart_decode(data)\n</code></pre> <p>Decodes the input data to a UTF-8 string, silently ignoring errors.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or bytes</code>)           \u2013            <p>The data to decode.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The decoded string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; smart_decode(b\"asdf\")\n\"asdf\"\n&gt;&gt;&gt; smart_decode(\"asdf\")\n\"asdf\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_decode(data):\n    \"\"\"\n    Decodes the input data to a UTF-8 string, silently ignoring errors.\n\n    Args:\n        data (str or bytes): The data to decode.\n\n    Returns:\n        str: The decoded string.\n\n    Examples:\n        &gt;&gt;&gt; smart_decode(b\"asdf\")\n        \"asdf\"\n        &gt;&gt;&gt; smart_decode(\"asdf\")\n        \"asdf\"\n    \"\"\"\n    if isinstance(data, bytes):\n        return data.decode(\"utf-8\", errors=\"ignore\")\n    else:\n        return str(data)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_decode_punycode","title":"smart_decode_punycode","text":"<pre><code>smart_decode_punycode(text: str) -&gt; str\n</code></pre> <p>xn--eckwd4c7c.xn--zckzah --&gt; \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_decode_punycode(text: str) -&gt; str:\n    \"\"\"\n    xn--eckwd4c7c.xn--zckzah --&gt; \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8\n    \"\"\"\n    import idna\n\n    host, before, after = extract_host(text)\n    if host is None:\n        return text\n\n    try:\n        host = idna.decode(host)\n    except UnicodeError:\n        pass  # If decoding fails, leave the host as it is\n\n    return f\"{before}{host}{after}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_encode","title":"smart_encode","text":"<pre><code>smart_encode(data)\n</code></pre> <p>Encodes the input data to bytes using UTF-8 encoding, silently ignoring errors.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or bytes</code>)           \u2013            <p>The data to encode.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bytes</code>          \u2013            <p>The encoded bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; 
smart_encode(\"asdf\")\nb\"asdf\"\n&gt;&gt;&gt; smart_encode(b\"asdf\")\nb\"asdf\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_encode(data):\n    \"\"\"\n    Encodes the input data to bytes using UTF-8 encoding, silently ignoring errors.\n\n    Args:\n        data (str or bytes): The data to encode.\n\n    Returns:\n        bytes: The encoded bytes.\n\n    Examples:\n        &gt;&gt;&gt; smart_encode(\"asdf\")\n        b\"asdf\"\n        &gt;&gt;&gt; smart_encode(b\"asdf\")\n        b\"asdf\"\n    \"\"\"\n    if isinstance(data, bytes):\n        return data\n    return str(data).encode(\"utf-8\", errors=\"ignore\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_encode_punycode","title":"smart_encode_punycode","text":"<pre><code>smart_encode_punycode(text: str) -&gt; str\n</code></pre> <p>\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8 --&gt; xn--eckwd4c7c.xn--zckzah</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_encode_punycode(text: str) -&gt; str:\n    \"\"\"\n    \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8 --&gt; xn--eckwd4c7c.xn--zckzah\n    \"\"\"\n    import idna\n\n    host, before, after = extract_host(text)\n    if host is None:\n        return text\n\n    try:\n        host = idna.encode(host).decode(errors=\"ignore\")\n    except UnicodeError:\n        pass  # If encoding fails, leave the host as it is\n\n    return f\"{before}{host}{after}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_domain","title":"split_domain","text":"<pre><code>split_domain(hostname)\n</code></pre> <p>Splits the hostname into its subdomain and registered domain components.</p> <p>Parameters:</p> <ul> <li> <code>hostname</code>               (<code>str</code>)           \u2013            <p>The full hostname to be split.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing the subdomain and registered domain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_domain(\"www.internal.evilcorp.co.uk\")\n(\"www.internal\", \"evilcorp.co.uk\")\n</code></pre> Notes <ul> <li>Utilizes the <code>tldextract</code> function to first break down the hostname.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_domain(hostname):\n    \"\"\"\n    Splits the hostname into its subdomain and registered domain components.\n\n    Args:\n        hostname (str): The full hostname to be split.\n\n    Returns:\n        tuple: A tuple containing the subdomain and registered domain.\n\n    Examples:\n        &gt;&gt;&gt; split_domain(\"www.internal.evilcorp.co.uk\")\n        (\"www.internal\", \"evilcorp.co.uk\")\n\n    Notes:\n        - Utilizes the `tldextract` function to first break down the hostname.\n    \"\"\"\n    if is_ip(hostname):\n        return (\"\", hostname)\n    parsed = tldextract(hostname)\n    subdomain = parsed.subdomain\n    domain = parsed.registered_domain\n    if not domain:\n        split = hostname.split(\".\")\n        subdomain = \".\".join(split[:-2])\n        domain = \".\".join(split[-2:])\n    return (subdomain, domain)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_host_port","title":"split_host_port","text":"<pre><code>split_host_port(d)\n</code></pre> <p>Parse a string containing a host and port into a tuple.</p> <p>This function takes an input string <code>d</code> and returns a tuple containing the host and port. 
The host is converted to its appropriate IP address type if possible. The port is inferred based on the scheme if not provided.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the host and possibly the port.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Tuple[Union[IPv4Address, IPv6Address, str], Optional[int]]: Tuple containing the host and port.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_host_port(\"evilcorp.com:443\")\n(\"evilcorp.com\", 443)\n</code></pre> <pre><code>&gt;&gt;&gt; split_host_port(\"192.168.1.1:443\")\n(IPv4Address('192.168.1.1'), 443)\n</code></pre> <pre><code>&gt;&gt;&gt; split_host_port(\"[dead::beef]:443\")\n(IPv6Address('dead::beef'), 443)\n</code></pre> Notes <ul> <li>If port is not provided, it is inferred based on the scheme:<ul> <li>For \"https\" and \"wss\", port 443 is used.</li> <li>For \"http\" and \"ws\", port 80 is used.</li> </ul> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_host_port(d):\n    \"\"\"\n    Parse a string containing a host and port into a tuple.\n\n    This function takes an input string `d` and returns a tuple containing the host and port.\n    The host is converted to its appropriate IP address type if possible. The port is inferred\n    based on the scheme if not provided.\n\n    Args:\n        d (str): The input string containing the host and possibly the port.\n\n    Returns:\n        Tuple[Union[IPv4Address, IPv6Address, str], Optional[int]]: Tuple containing the host and port.\n\n    Examples:\n        &gt;&gt;&gt; split_host_port(\"evilcorp.com:443\")\n        (\"evilcorp.com\", 443)\n\n        &gt;&gt;&gt; split_host_port(\"192.168.1.1:443\")\n        (IPv4Address('192.168.1.1'), 443)\n\n        &gt;&gt;&gt; split_host_port(\"[dead::beef]:443\")\n        (IPv6Address('dead::beef'), 443)\n\n    Notes:\n        - If port is not provided, it is inferred based on the scheme:\n            - For \"https\" and \"wss\", port 443 is used.\n            - For \"http\" and \"ws\", port 80 is used.\n    \"\"\"\n    d = str(d)\n    host = None\n    port = None\n    scheme = None\n    if is_ip(d):\n        return make_ip_type(d), port\n\n    match = bbot_regexes.split_host_port_regex.match(d)\n    if match is None:\n        raise ValueError(f'split_port() failed to parse \"{d}\"')\n    scheme = match.group(\"scheme\")\n    netloc = match.group(\"netloc\")\n    if netloc is None:\n        raise ValueError(f'split_port() failed to parse \"{d}\"')\n\n    match = bbot_regexes.extract_open_port_regex.match(netloc)\n    if match is None:\n        raise ValueError(f'split_port() failed to parse netloc \"{netloc}\" (original value: {d})')\n\n    host = match.group(2)\n    if host is None:\n        host = match.group(1)\n    if host is None:\n        raise ValueError(f'split_port() failed to locate host in netloc \"{netloc}\" (original value: {d})')\n\n    port = match.group(3)\n    if port is None and scheme is not None:\n        scheme = scheme.lower()\n        if scheme in (\"https\", \"wss\"):\n            port = 443\n        elif scheme in (\"http\", \"ws\"):\n            port = 80\n    elif port is not None:\n        with suppress(ValueError):\n            port = int(port)\n\n    return make_ip_type(host), port\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_list","title":"split_list","text":"<pre><code>split_list(alist, 
wanted_parts=2)\n</code></pre> <p>Splits a list into a specified number of approximately equal parts.</p> <p>Parameters:</p> <ul> <li> <code>alist</code>               (<code>list</code>)           \u2013            <p>The list to be split.</p> </li> <li> <code>wanted_parts</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>The number of parts to split the list into.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of lists, each containing a portion of the original list.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_list([1, 2, 3, 4, 5])\n[[1, 2], [3, 4, 5]]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_list(alist, wanted_parts=2):\n    \"\"\"\n    Splits a list into a specified number of approximately equal parts.\n\n    Args:\n        alist (list): The list to be split.\n        wanted_parts (int): The number of parts to split the list into.\n\n    Returns:\n        list: A list of lists, each containing a portion of the original list.\n\n    Examples:\n        &gt;&gt;&gt; split_list([1, 2, 3, 4, 5])\n        [[1, 2], [3, 4, 5]]\n    \"\"\"\n    length = len(alist)\n    return [alist[i * length // wanted_parts : (i + 1) * length // wanted_parts] for i in range(wanted_parts)]\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.str_or_file","title":"str_or_file","text":"<pre><code>str_or_file(s)\n</code></pre> <p>Reads a string or file and yields its content line-by-line.</p> <p>This function tries to open the given string <code>s</code> as a file and yields its lines. If it fails to open <code>s</code> as a file, it treats <code>s</code> as a regular string and yields it as is.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string or file path to read.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Either lines from the file or the original string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(str_or_file(\"file.txt\"))\n['file_line1', 'file_line2', 'file_line3']\n&gt;&gt;&gt; list(str_or_file(\"not_a_file\"))\n['not_a_file']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def str_or_file(s):\n    \"\"\"Reads a string or file and yields its content line-by-line.\n\n    This function tries to open the given string `s` as a file and yields its lines.\n    If it fails to open `s` as a file, it treats `s` as a regular string and yields it as is.\n\n    Args:\n        s (str): The string or file path to read.\n\n    Yields:\n        str: Either lines from the file or the original string.\n\n    Examples:\n        &gt;&gt;&gt; list(str_or_file(\"file.txt\"))\n        ['file_line1', 'file_line2', 'file_line3']\n        &gt;&gt;&gt; list(str_or_file(\"not_a_file\"))\n        ['not_a_file']\n    \"\"\"\n    try:\n        with open(s, errors=\"ignore\") as f:\n            for line in f:\n                yield line.rstrip(\"\\r\\n\")\n    except OSError:\n        yield s\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.subdomain_depth","title":"subdomain_depth","text":"<pre><code>subdomain_depth(d)\n</code></pre> <p>Calculate the depth of subdomains within a given domain name.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The domain name to analyze.</p> </li> </ul> 
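<p>The docstring ships no Examples section, so here is a minimal illustrative sketch (the values follow from the description below and from <code>split_domain</code>, which this function uses internally):</p> <pre><code>&gt;&gt;&gt; subdomain_depth(\"5.4.3.2.1.evilcorp.com\")\n5\n&gt;&gt;&gt; subdomain_depth(\"evilcorp.com\")\n0\n</code></pre>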
<p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The depth of the subdomain. For example, a hostname \"5.4.3.2.1.evilcorp.com\"</p> </li> <li>           \u2013            <p>has a subdomain depth of 5.</p> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def subdomain_depth(d):\n    \"\"\"\n    Calculate the depth of subdomains within a given domain name.\n\n    Args:\n        d (str): The domain name to analyze.\n\n    Returns:\n        int: The depth of the subdomain. For example, a hostname \"5.4.3.2.1.evilcorp.com\"\n        has a subdomain depth of 5.\n    \"\"\"\n    subdomain, domain = split_domain(d)\n    if not subdomain:\n        return 0\n    return subdomain.count(\".\") + 1\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.swap_status","title":"swap_status","text":"<pre><code>swap_status()\n</code></pre> <p>Return statistics on swap memory consumption.</p> <p>The function returns a <code>psutil</code> named tuple that contains statistics on system swap memory usage, such as total swap, used swap, free swap, and more.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>psutil._common.sswap: A named tuple representing various statistics about system swap memory usage.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; swap = swap_status()\n&gt;&gt;&gt; swap.total\n4294967296\n</code></pre> <pre><code>&gt;&gt;&gt; swap = swap_status()\n&gt;&gt;&gt; swap.used\n2097152\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def swap_status():\n    \"\"\"Return statistics on swap memory consumption.\n\n    The function returns a `psutil` named tuple that contains statistics on\n    system swap memory usage, such as total swap, used swap, free swap, and more.\n\n    Returns:\n        psutil._common.sswap: A named tuple representing various statistics\n            about system swap memory usage.\n\n    Examples:\n        &gt;&gt;&gt; swap = swap_status()\n        &gt;&gt;&gt; swap.total\n        4294967296\n\n        &gt;&gt;&gt; swap = swap_status()\n        &gt;&gt;&gt; swap.used\n        2097152\n    \"\"\"\n    import psutil\n\n    return psutil.swap_memory()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.tagify","title":"tagify","text":"<pre><code>tagify(s, delimiter=None, maxlen=None)\n</code></pre> <p>Sanitize a string into a tag-friendly format.</p> <p>Converts a given string to lowercase and replaces all characters not matching [a-z0-9] with hyphens. Optionally truncates the result to 'maxlen' characters.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string to sanitize.</p> </li> <li> <code>maxlen</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum length for the tag. Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A sanitized, tag-friendly string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; tagify(\"HTTP Web Title\")\n'http-web-title'\n&gt;&gt;&gt; tagify(\"HTTP Web Title\", maxlen=8)\n'http-web'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def tagify(s, delimiter=None, maxlen=None):\n    \"\"\"Sanitize a string into a tag-friendly format.\n\n    Converts a given string to lowercase and replaces all characters not matching\n    [a-z0-9] with hyphens. 
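<p>The <code>delimiter</code> argument from the signature is not covered in the Args list; judging from the source below, it simply replaces the default hyphen (a sketch, not an example from the docstring):</p> <pre><code>&gt;&gt;&gt; tagify(\"HTTP Web Title\", delimiter=\"_\")\n'http_web_title'\n</code></pre>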
Optionally truncates the result to 'maxlen' characters.\n\n    Args:\n        s (str): The input string to sanitize.\n        maxlen (int, optional): The maximum length for the tag. Defaults to None.\n\n    Returns:\n        str: A sanitized, tag-friendly string.\n\n    Examples:\n        &gt;&gt;&gt; tagify(\"HTTP Web Title\")\n        'http-web-title'\n        &gt;&gt;&gt; tagify(\"HTTP Web Title\", maxlen=8)\n        'http-web'\n    \"\"\"\n    if delimiter is None:\n        delimiter = \"-\"\n    ret = str(s).lower()\n    return tag_filter_regex.sub(delimiter, ret)[:maxlen].strip(delimiter)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.tldextract","title":"tldextract","text":"<pre><code>tldextract(data)\n</code></pre> <p>Extracts the subdomain, domain, and suffix from a URL string.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The URL string to be processed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ExtractResult</code>          \u2013            <p>A named tuple containing the subdomain, domain, and suffix.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; tldextract(\"www.evilcorp.co.uk\")\nExtractResult(subdomain='www', domain='evilcorp', suffix='co.uk')\n</code></pre> Notes <ul> <li>Utilizes <code>smart_decode</code> to preprocess the data.</li> <li>Makes use of the <code>tldextract</code> library for extraction.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def tldextract(data):\n    \"\"\"\n    Extracts the subdomain, domain, and suffix from a URL string.\n\n    Args:\n        data (str): The URL string to be processed.\n\n    Returns:\n        ExtractResult: A named tuple containing the subdomain, domain, and suffix.\n\n    Examples:\n        &gt;&gt;&gt; tldextract(\"www.evilcorp.co.uk\")\n        ExtractResult(subdomain='www', domain='evilcorp', suffix='co.uk')\n\n    Notes:\n        - Utilizes `smart_decode` to preprocess the data.\n        - Makes use of the `tldextract` library for extraction.\n    \"\"\"\n    import tldextract as _tldextract\n\n    return _tldextract.extract(smart_decode(data))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.top_tcp_ports","title":"top_tcp_ports","text":"<pre><code>top_tcp_ports(n, as_string=False)\n</code></pre> <p>Returns the top n TCP ports as evaluated by nmap</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def top_tcp_ports(n, as_string=False):\n    \"\"\"\n    Returns the top *n* TCP ports as evaluated by nmap\n    \"\"\"\n    top_ports_file = Path(__file__).parent.parent.parent / \"wordlists\" / \"top_open_ports_nmap.txt\"\n\n    global top_ports_cache\n    if top_ports_cache is None:\n        # Read the open ports from the file\n        with open(top_ports_file, \"r\") as f:\n            top_ports_cache = [int(line.strip()) for line in f]\n\n        # If n is greater than the length of the ports list, add remaining ports from range(1, 65536)\n        unique_ports = set(top_ports_cache)\n        top_ports_cache.extend([port for port in range(1, 65536) if port not in unique_ports])\n\n    top_ports = top_ports_cache[:n]\n    if as_string:\n        return \",\".join([str(s) for s in top_ports])\n    return top_ports\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.truncate_filename","title":"truncate_filename","text":"<pre><code>truncate_filename(file_path, max_length=255)\n</code></pre> <p>Truncate the filename while preserving the 
file extension to ensure the total path length does not exceed the maximum length.</p> <p>Parameters:</p> <ul> <li> <code>file_path</code>               (<code>str</code>)           \u2013            <p>The original file path.</p> </li> <li> <code>max_length</code>               (<code>int</code>, default:                   <code>255</code> )           \u2013            <p>The maximum allowed length for the total path. Default is 255.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>pathlib.Path: A new Path object with the truncated filename.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the directory path is too long to accommodate any filename within the limit.</p> </li> </ul> Example <p>truncate_filename('/path/to/example_long_filename.txt', 20) PosixPath('/path/to/example.txt')</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def truncate_filename(file_path, max_length=255):\n    \"\"\"\n    Truncate the filename while preserving the file extension to ensure the total path length does not exceed the maximum length.\n\n    Args:\n        file_path (str): The original file path.\n        max_length (int): The maximum allowed length for the total path. Default is 255.\n\n    Returns:\n        pathlib.Path: A new Path object with the truncated filename.\n\n    Raises:\n        ValueError: If the directory path is too long to accommodate any filename within the limit.\n\n    Example:\n        &gt;&gt;&gt; truncate_filename('/path/to/example_long_filename.txt', 20)\n        PosixPath('/path/to/example.txt')\n    \"\"\"\n    p = Path(file_path)\n    directory, stem, suffix = p.parent, p.stem, p.suffix\n\n    max_filename_length = max_length - len(str(directory)) - len(suffix) - 1  # 1 for the '/' separator\n\n    if max_filename_length &lt;= 0:\n        raise ValueError(\"The directory path is too long to accommodate any filename within the limit.\")\n\n    if len(stem) &gt; max_filename_length:\n        truncated_stem = stem[:max_filename_length]\n    else:\n        truncated_stem = stem\n\n    new_path = directory / (truncated_stem + suffix)\n    return new_path\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.url_depth","title":"url_depth","text":"<pre><code>url_depth(url)\n</code></pre> <p>Calculate the depth of the given URL based on its path components.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL whose depth is to be calculated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The depth of the URL, based on its path components.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo/bar/')\n2\n</code></pre> <pre><code>&gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo//bar/baz/')\n3\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def url_depth(url):\n    \"\"\"\n    Calculate the depth of the given URL based on its path components.\n\n    Args:\n        url (Union[str, ParseResult]): The URL whose depth is to be calculated.\n\n    Returns:\n        int: The depth of the URL, based on its path components.\n\n    Examples:\n        &gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo/bar/')\n        2\n\n        &gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo//bar/baz/')\n        3\n    \"\"\"\n    parsed = parse_url(url)\n    parsed = 
parsed._replace(path=double_slash_regex.sub(\"/\", parsed.path))\n    split_path = str(parsed.path).strip(\"/\").split(\"/\")\n    split_path = [e for e in split_path if e]\n    return len(split_path)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.url_parents","title":"url_parents","text":"<pre><code>url_parents(u)\n</code></pre> <p>Generate a list of parent URLs for a given URL string.</p> <p>This function takes an input string <code>u</code> representing a URL and generates a list of its parent URLs in decreasing order of specificity.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>List[str]: A list of parent URLs of the input URL in decreasing order of specificity.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; url_parents(\"http://www.evilcorp.co.uk/admin/tools/cmd.php\")\n[\"http://www.evilcorp.co.uk/admin/tools/\", \"http://www.evilcorp.co.uk/admin/\", \"http://www.evilcorp.co.uk/\"]\n</code></pre> Notes <ul> <li>The list is generated by continuously calling <code>parent_url</code> until it returns None.</li> <li>All components of the URL except for the path are preserved.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def url_parents(u):\n    \"\"\"\n    Generate a list of parent URLs for a given URL string.\n\n    This function takes an input string `u` representing a URL and generates a list of its parent URLs in decreasing order of specificity.\n\n    Args:\n        u (str): The input string representing a URL.\n\n    Returns:\n        List[str]: A list of parent URLs of the input URL in decreasing order of specificity.\n\n    Examples:\n        &gt;&gt;&gt; url_parents(\"http://www.evilcorp.co.uk/admin/tools/cmd.php\")\n        [\"http://www.evilcorp.co.uk/admin/tools/\", \"http://www.evilcorp.co.uk/admin/\", \"http://www.evilcorp.co.uk/\"]\n\n    Notes:\n        - The list is generated by continuously calling `parent_url` until it returns None.\n        - All components of the URL except for the path are preserved.\n    \"\"\"\n    parent_list = []\n    while 1:\n        parent = parent_url(u)\n        if parent == None:\n            return parent_list\n        elif parent not in parent_list:\n            parent_list.append(parent)\n            u = parent\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.verify_sudo_password","title":"verify_sudo_password","text":"<pre><code>verify_sudo_password(sudo_pass)\n</code></pre> <p>Verify if the given sudo password is correct.</p> <p>This function checks whether the sudo password provided is valid for the current user. 
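<p>Illustrative sketch only (both passwords here are hypothetical; the return values follow the documented behavior):</p> <pre><code>&gt;&gt;&gt; verify_sudo_password(\"correct-password\")\nTrue\n&gt;&gt;&gt; verify_sudo_password(\"wrong-password\")\nFalse\n</code></pre>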
It runs a command with sudo, feeding in the password via stdin, and checks the return code.</p> <p>Parameters:</p> <ul> <li> <code>sudo_pass</code>               (<code>str</code>)           \u2013            <p>The sudo password to verify.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the sudo password is correct, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; verify_sudo_password(\"mysecretpassword\")\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def verify_sudo_password(sudo_pass):\n    \"\"\"Verify if the given sudo password is correct.\n\n    This function checks whether the sudo password provided is valid for the current user.\n    It runs a command with sudo, feeding in the password via stdin, and checks the return code.\n\n    Args:\n        sudo_pass (str): The sudo password to verify.\n\n    Returns:\n        bool: True if the sudo password is correct, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; verify_sudo_password(\"mysecretpassword\")\n        True\n    \"\"\"\n    try:\n        sp.run(\n            [\"sudo\", \"-S\", \"-k\", \"true\"],\n            input=smart_encode(sudo_pass),\n            stderr=sp.DEVNULL,\n            stdout=sp.DEVNULL,\n            check=True,\n        )\n    except sp.CalledProcessError:\n        return False\n    return True\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.weighted_shuffle","title":"weighted_shuffle","text":"<pre><code>weighted_shuffle(items, weights)\n</code></pre> <p>Shuffles a list of items based on their corresponding weights.</p> <p>Parameters:</p> <ul> <li> <code>items</code>               (<code>list</code>)           \u2013            <p>The list of items to shuffle.</p> </li> <li> <code>weights</code>               (<code>list</code>)           \u2013            <p>The list of weights corresponding to each item.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A new list containing the shuffled items.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; items = ['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weights = [0.4, 0.5, 0.1]\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['banana', 'apple', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['banana', 'apple', 'cherry']\n</code></pre> Note <p>The sum of all weights does not have to be 1. 
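<p>For example (a sketch; because the shuffle is random, the resulting order will vary between runs):</p> <pre><code>&gt;&gt;&gt; weighted_shuffle(['apple', 'banana', 'cherry'], [4, 5, 1])  # same relative weighting as [0.4, 0.5, 0.1]\n['banana', 'apple', 'cherry']\n</code></pre>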
They will be normalized internally.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def weighted_shuffle(items, weights):\n    \"\"\"\n    Shuffles a list of items based on their corresponding weights.\n\n    Args:\n        items (list): The list of items to shuffle.\n        weights (list): The list of weights corresponding to each item.\n\n    Returns:\n        list: A new list containing the shuffled items.\n\n    Examples:\n        &gt;&gt;&gt; items = ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weights = [0.4, 0.5, 0.1]\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['banana', 'apple', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['banana', 'apple', 'cherry']\n\n    Note:\n        The sum of all weights does not have to be 1. They will be normalized internally.\n    \"\"\"\n    # Create a list of tuples where each tuple is (item, weight)\n    pool = list(zip(items, weights))\n\n    shuffled_items = []\n\n    # While there are still items to be chosen...\n    while pool:\n        # Normalize weights\n        total = sum(weight for item, weight in pool)\n        weights = [weight / total for item, weight in pool]\n\n        # Choose an index based on weight\n        chosen_index = random.choices(range(len(pool)), weights=weights, k=1)[0]\n\n        # Add the chosen item to the shuffled list\n        chosen_item, chosen_weight = pool.pop(chosen_index)\n        shuffled_items.append(chosen_item)\n\n    return shuffled_items\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.which","title":"which","text":"<pre><code>which(*executables)\n</code></pre> <p>Finds the full path of the first available executable from a list of executables.</p> <p>Parameters:</p> <ul> <li> <code>*executables</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            <p>One or more executable names to search for.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The full path of the first available executable, or None if none are found.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; which(\"python\", \"python3\")\n\"/usr/bin/python\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def which(*executables):\n    \"\"\"Finds the full path of the first available executable from a list of executables.\n\n    Args:\n        *executables (str): One or more executable names to search for.\n\n    Returns:\n        str: The full path of the first available executable, or None if none are found.\n\n    Examples:\n        &gt;&gt;&gt; which(\"python\", \"python3\")\n        \"/usr/bin/python\"\n    \"\"\"\n    import shutil\n\n    for e in executables:\n        location = shutil.which(e)\n        if location:\n            return location\n</code></pre>"},{"location":"dev/helpers/web/","title":"Web","text":"<p>These are helpers for making various web requests.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper","title":"WebHelper","text":"<p>               Bases: <code>EngineClient</code></p> Source code in 
<code>bbot/core/helpers/web/web.py</code> <pre><code>class WebHelper(EngineClient):\n\n    SERVER_CLASS = HTTPEngine\n    ERROR_CLASS = WebError\n\n    \"\"\"\n    Main utility class for managing HTTP operations in BBOT. It serves as a wrapper around the BBOTAsyncClient,\n    which itself is a subclass of httpx.AsyncClient. The class provides functionalities to make HTTP requests,\n    download files, and handle cached wordlists.\n\n    Attributes:\n        parent_helper (object): The parent helper object containing scan configurations.\n        http_debug (bool): Flag to indicate whether HTTP debugging is enabled.\n        ssl_verify (bool): Flag to indicate whether SSL verification is enabled.\n        web_client (BBOTAsyncClient): An instance of BBOTAsyncClient for making HTTP requests.\n        client_only_options (tuple): A tuple of options only applicable to the web client.\n\n    Examples:\n        Basic web request:\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        Download file:\n        &gt;&gt;&gt; filename = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\")\n\n        Download wordlist (cached for 30 days by default):\n        &gt;&gt;&gt; filename = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n    \"\"\"\n\n    def __init__(self, parent_helper):\n        self.parent_helper = parent_helper\n        self.preset = self.parent_helper.preset\n        self.config = self.preset.config\n        self.web_config = self.config.get(\"web\", {})\n        self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n        self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n        self.web_clients = {}\n        self.target = self.preset.target\n        self.ssl_verify = self.config.get(\"ssl_verify\", False)\n        engine_debug = self.config.get(\"engine\", {}).get(\"debug\", False)\n        super().__init__(\n            server_kwargs={\"config\": self.config, \"target\": self.parent_helper.preset.target.minimal},\n            debug=engine_debug,\n        )\n\n    def AsyncClient(self, *args, **kwargs):\n        # cache by retries to prevent unwanted accumulation of clients\n        # (they are not garbage-collected)\n        retries = kwargs.get(\"retries\", 1)\n        try:\n            return self.web_clients[retries]\n        except KeyError:\n            from .client import BBOTAsyncClient\n\n            client = BBOTAsyncClient.from_config(self.config, self.target, *args, persist_cookies=False, **kwargs)\n            self.web_clients[client.retries] = client\n            return client\n\n    async def request(self, *args, **kwargs):\n        \"\"\"\n        Asynchronous function for making HTTP requests, intended to be the most basic web request function\n        used widely across BBOT and within this helper class. Handles various exceptions and timeouts\n        that might occur during the request.\n\n        This function automatically respects the scan's global timeout, proxy, headers, etc.\n        Headers you specify will be merged with the scan's. Your arguments take ultimate precedence,\n        meaning you can override the scan's values if you want.\n\n        Args:\n            url (str): The URL to send the request to.\n            method (str, optional): The HTTP method to use for the request. 
Defaults to 'GET'.\n            headers (dict, optional): Dictionary of HTTP headers to send with the request.\n            params (dict, optional): Dictionary, list of tuples, or bytes to send in the query string.\n            cookies (dict, optional): Dictionary or CookieJar object containing cookies.\n            json (Any, optional): A JSON serializable Python object to send in the body.\n            data (dict, optional): Dictionary, list of tuples, or bytes to send in the body.\n            files (dict, optional): Dictionary of 'name': file-like-objects for multipart encoding upload.\n            auth (tuple, optional): Auth tuple to enable Basic/Digest/Custom HTTP auth.\n            timeout (float, optional): The maximum time to wait for the request to complete.\n            proxies (dict, optional): Dictionary mapping protocol schemes to proxy URLs.\n            allow_redirects (bool, optional): Enables or disables redirection. Defaults to None.\n            stream (bool, optional): Enables or disables response streaming.\n            raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.\n            client (httpx.AsyncClient, optional): A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.\n            cache_for (int, optional): Time in seconds to cache the request. Not used currently. Defaults to None.\n\n        Raises:\n            httpx.TimeoutException: If the request times out.\n            httpx.ConnectError: If the connection fails.\n            httpx.RequestError: For other request-related errors.\n\n        Returns:\n            httpx.Response or None: The HTTP response object returned by the httpx library.\n\n        Examples:\n            &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n            &gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n\n        Note:\n            If the web request fails, it will return None unless `raise_error` is `True`.\n        \"\"\"\n        raise_error = kwargs.get(\"raise_error\", False)\n        result = await self.run_and_return(\"request\", *args, **kwargs)\n        if isinstance(result, dict) and \"_request_error\" in result:\n            if raise_error:\n                error_msg = result[\"_request_error\"]\n                response = result[\"_response\"]\n                error = self.ERROR_CLASS(error_msg)\n                error.response = response\n                raise error\n        return result\n\n    async def request_batch(self, urls, *args, **kwargs):\n        \"\"\"\n        Given a list of URLs, request them in parallel and yield responses as they come in.\n\n        Args:\n            urls (list[str]): List of URLs to visit\n            *args: Positional arguments to pass through to httpx\n            **kwargs: Keyword arguments to pass through to httpx\n\n        Examples:\n            &gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n            &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n            &gt;&gt;&gt;         self.hugesuccess(response)\n        \"\"\"\n        agen = self.run_and_yield(\"request_batch\", urls, *args, **kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    
async def request_custom_batch(self, urls_and_kwargs):\n        \"\"\"\n        Make web requests in parallel with custom options for each request. Yield responses as they come in.\n\n        Similar to `request_batch` except it allows individual arguments for each URL.\n\n        Args:\n            urls_and_kwargs (list[tuple]): List of tuples in the format: (url, kwargs, custom_tracker)\n                where custom_tracker is an optional value for your own internal use. You may use it to\n                help correlate requests, etc.\n\n        Examples:\n            &gt;&gt;&gt; urls_and_kwargs = [\n            &gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n            &gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n            &gt;&gt;&gt; ]\n            &gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n            &gt;&gt;&gt;     urls_and_kwargs\n            &gt;&gt;&gt; ):\n            &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n            &gt;&gt;&gt;         self.hugesuccess(response)\n        \"\"\"\n        agen = self.run_and_yield(\"request_custom_batch\", urls_and_kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    async def download(self, url, **kwargs):\n        \"\"\"\n        Asynchronous function for downloading files from a given URL. Supports caching with an optional\n        time period in hours via the \"cache_hrs\" keyword argument. In case of successful download,\n        returns the full path of the saved filename. If the download fails, returns None.\n\n        Args:\n            url (str): The URL of the file to download.\n            filename (str, optional): The filename to save the downloaded file as.\n                If not provided, will generate based on URL.\n            max_size (str or int): Maximum filesize as a string (\"5MB\") or integer in bytes.\n            cache_hrs (float, optional): The number of hours to cache the downloaded file.\n                A negative value disables caching. Defaults to -1.\n            method (str, optional): The HTTP method to use for the request, defaults to 'GET'.\n            raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. 
Defaults to False.\n            **kwargs: Additional keyword arguments to pass to the httpx request.\n\n        Returns:\n            Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.\n\n        Examples:\n            &gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n        \"\"\"\n        success = False\n        raise_error = kwargs.get(\"raise_error\", False)\n        filename = kwargs.pop(\"filename\", self.parent_helper.cache_filename(url))\n        filename = truncate_filename(Path(filename).resolve())\n        kwargs[\"filename\"] = filename\n        max_size = kwargs.pop(\"max_size\", None)\n        if max_size is not None:\n            max_size = self.parent_helper.human_to_bytes(max_size)\n            kwargs[\"max_size\"] = max_size\n        cache_hrs = float(kwargs.pop(\"cache_hrs\", -1))\n        if cache_hrs &gt; 0 and self.parent_helper.is_cached(url):\n            log.debug(f\"{url} is cached at {self.parent_helper.cache_filename(url)}\")\n            success = True\n        else:\n            result = await self.run_and_return(\"download\", url, **kwargs)\n            if isinstance(result, dict) and \"_download_error\" in result:\n                if raise_error:\n                    error_msg = result[\"_download_error\"]\n                    response = result[\"_response\"]\n                    error = self.ERROR_CLASS(error_msg)\n                    error.response = response\n                    raise error\n            elif result:\n                success = True\n\n        if success:\n            return filename\n\n    async def wordlist(self, path, lines=None, **kwargs):\n        \"\"\"\n        Asynchronous function for retrieving wordlists, either from a local path or a URL.\n        Allows for optional line-based truncation and caching. 
Returns the full path of the wordlist\n        file or a truncated version of it.\n\n        Args:\n            path (str): The local or remote path of the wordlist.\n            lines (int, optional): Number of lines to read from the wordlist.\n                If specified, will return a truncated wordlist with this many lines.\n            cache_hrs (float, optional): Number of hours to cache the downloaded wordlist.\n                Defaults to 720 hours (30 days) for remote wordlists.\n            **kwargs: Additional keyword arguments to pass to the 'download' function for remote wordlists.\n\n        Returns:\n            Path: The full path of the wordlist (or its truncated version) as a Path object.\n\n        Raises:\n            WordlistError: If the path is invalid or the wordlist could not be retrieved or found.\n\n        Examples:\n            Fetching full wordlist\n            &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n\n            Fetching and truncating to the first 100 lines\n            &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n        \"\"\"\n        if not path:\n            raise WordlistError(f\"Invalid wordlist: {path}\")\n        if not \"cache_hrs\" in kwargs:\n            kwargs[\"cache_hrs\"] = 720\n        if self.parent_helper.is_url(path):\n            filename = await self.download(str(path), **kwargs)\n            if filename is None:\n                raise WordlistError(f\"Unable to retrieve wordlist from {path}\")\n        else:\n            filename = Path(path).resolve()\n            if not filename.is_file():\n                raise WordlistError(f\"Unable to find wordlist at {path}\")\n\n        if lines is None:\n            return filename\n        else:\n            lines = int(lines)\n            with open(filename) as f:\n                read_lines = f.readlines()\n            cache_key = f\"{filename}:{lines}\"\n            truncated_filename = self.parent_helper.cache_filename(cache_key)\n            with open(truncated_filename, \"w\") as f:\n                for line in read_lines[:lines]:\n                    f.write(line)\n            return truncated_filename\n\n    async def curl(self, *args, **kwargs):\n        \"\"\"\n        An asynchronous function that runs a cURL command with specified arguments and options.\n\n        This function constructs and executes a cURL command based on the provided parameters.\n        It offers support for various cURL options such as headers, post data, and cookies.\n\n        Args:\n            *args: Variable length argument list for positional arguments. Unused in this function.\n            url (str): The URL for the cURL request. Mandatory.\n            raw_path (bool, optional): If True, activates '--path-as-is' in cURL. Defaults to False.\n            headers (dict, optional): A dictionary of HTTP headers to include in the request.\n            ignore_bbot_global_settings (bool, optional): If True, ignores the global settings of BBOT. 
Defaults to False.\n            post_data (dict, optional): A dictionary containing data to be sent in the request body.\n            method (str, optional): The HTTP method to use for the request (e.g., 'GET', 'POST').\n            cookies (dict, optional): A dictionary of cookies to include in the request.\n            path_override (str, optional): Overrides the request-target to use in the HTTP request line.\n            head_mode (bool, optional): If True, includes '-I' to fetch headers only. Defaults to None.\n            raw_body (str, optional): Raw string to be sent in the body of the request.\n            **kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n        Returns:\n            str: The output of the cURL command.\n\n        Raises:\n            CurlError: If 'url' is not supplied.\n\n        Examples:\n            &gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n            &gt;&gt;&gt; print(output)\n        \"\"\"\n        url = kwargs.get(\"url\", \"\")\n\n        if not url:\n            raise CurlError(\"No URL supplied to CURL helper\")\n\n        curl_command = [\"curl\", url, \"-s\"]\n\n        raw_path = kwargs.get(\"raw_path\", False)\n        if raw_path:\n            curl_command.append(\"--path-as-is\")\n\n        # respect global ssl verify settings\n        if self.ssl_verify is not True:\n            curl_command.append(\"-k\")\n\n        headers = kwargs.get(\"headers\", {})\n\n        ignore_bbot_global_settings = kwargs.get(\"ignore_bbot_global_settings\", False)\n\n        if ignore_bbot_global_settings:\n            log.debug(\"ignore_bbot_global_settings enabled. Global settings will not be applied\")\n        else:\n            http_timeout = self.parent_helper.web_config.get(\"http_timeout\", 20)\n            user_agent = self.parent_helper.web_config.get(\"user_agent\", \"BBOT\")\n\n            if \"User-Agent\" not in headers:\n                headers[\"User-Agent\"] = user_agent\n\n            # only add custom headers if the URL is in-scope\n            if self.parent_helper.preset.in_scope(url):\n                for hk, hv in self.web_config.get(\"http_headers\", {}).items():\n                    headers[hk] = hv\n\n            # add the timeout\n            if not \"timeout\" in kwargs:\n                timeout = http_timeout\n\n            curl_command.append(\"-m\")\n            curl_command.append(str(timeout))\n\n        for k, v in headers.items():\n            if isinstance(v, list):\n                for x in v:\n                    curl_command.append(\"-H\")\n                    curl_command.append(f\"{k}: {x}\")\n\n            else:\n                curl_command.append(\"-H\")\n                curl_command.append(f\"{k}: {v}\")\n\n        post_data = kwargs.get(\"post_data\", {})\n        if len(post_data.items()) &gt; 0:\n            curl_command.append(\"-d\")\n            post_data_str = \"\"\n            for k, v in post_data.items():\n                post_data_str += f\"&amp;{k}={v}\"\n            curl_command.append(post_data_str.lstrip(\"&amp;\"))\n\n        method = kwargs.get(\"method\", \"\")\n        if method:\n            curl_command.append(\"-X\")\n            curl_command.append(method)\n\n        cookies = kwargs.get(\"cookies\", \"\")\n        if cookies:\n            curl_command.append(\"-b\")\n            cookies_str = \"\"\n            for k, v in cookies.items():\n                cookies_str += f\"{k}={v}; \"\n            
curl_command.append(f'{cookies_str.rstrip(\" \")}')\n\n        path_override = kwargs.get(\"path_override\", None)\n        if path_override:\n            curl_command.append(\"--request-target\")\n            curl_command.append(f\"{path_override}\")\n\n        head_mode = kwargs.get(\"head_mode\", None)\n        if head_mode:\n            curl_command.append(\"-I\")\n\n        raw_body = kwargs.get(\"raw_body\", None)\n        if raw_body:\n            curl_command.append(\"-d\")\n            curl_command.append(raw_body)\n\n        output = (await self.parent_helper.run(curl_command)).stdout\n        return output\n\n    def beautifulsoup(\n        self,\n        markup,\n        features=\"html.parser\",\n        builder=None,\n        parse_only=None,\n        from_encoding=None,\n        exclude_encodings=None,\n        element_classes=None,\n        **kwargs,\n    ):\n        \"\"\"\n        Naviate, Search, Modify, Parse, or PrettyPrint HTML Content.\n        More information at https://beautiful-soup-4.readthedocs.io/en/latest/\n\n        Args:\n            markup: A string or a file-like object representing markup to be parsed.\n            features: Desirable features of the parser to be used.\n                This may be the name of a specific parser (\"lxml\",\n                \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be\n                the type of markup to be used (\"html\", \"html5\", \"xml\").\n                Defaults to 'html.parser'.\n            builder: A TreeBuilder subclass to instantiate (or instance to use)\n                instead of looking one up based on `features`.\n            parse_only: A SoupStrainer. Only parts of the document\n                matching the SoupStrainer will be considered.\n            from_encoding: A string indicating the encoding of the\n                document to be parsed.\n            exclude_encodings = A list of strings indicating\n                encodings known to be wrong.\n            element_classes = A dictionary mapping BeautifulSoup\n                classes like Tag and NavigableString, to other classes you'd\n                like to be instantiated instead as the parse tree is\n                built.\n            **kwargs = For backwards compatibility purposes.\n\n        Returns:\n            soup: An instance of the BeautifulSoup class\n\n        Todo:\n            - Write tests for this function\n\n        Examples:\n            &gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\n            Perform an html parse of the 'markup' argument and return a soup instance\n\n            &gt;&gt;&gt; email_type = soup.find(type=\"email\")\n            Searches the soup instance for all occurances of the passed in argument\n        \"\"\"\n        try:\n            soup = BeautifulSoup(\n                markup, features, builder, parse_only, from_encoding, exclude_encodings, element_classes, **kwargs\n            )\n            return soup\n        except Exception as e:\n            log.debug(f\"Error parsing beautifulsoup: {e}\")\n            return False\n\n    def response_to_json(self, response):\n        \"\"\"\n        Convert web response to JSON object, similar to the output of `httpx -irr -json`\n        \"\"\"\n\n        if response is None:\n            return\n\n        import mmh3\n        from datetime import datetime\n        from hashlib import md5, sha256\n        from bbot.core.helpers.misc import tagify, urlparse, split_host_port, smart_decode\n\n        
request = response.request\n        url = str(request.url)\n        parsed_url = urlparse(url)\n        netloc = parsed_url.netloc\n        scheme = parsed_url.scheme.lower()\n        host, port = split_host_port(f\"{scheme}://{netloc}\")\n\n        raw_headers = \"\\r\\n\".join([f\"{k}: {v}\" for k, v in response.headers.items()])\n        raw_headers_encoded = raw_headers.encode()\n\n        headers = {}\n        for k, v in response.headers.items():\n            k = tagify(k, delimiter=\"_\")\n            headers[k] = v\n\n        j = {\n            \"timestamp\": datetime.now().isoformat(),\n            \"hash\": {\n                \"body_md5\": md5(response.content).hexdigest(),\n                \"body_mmh3\": mmh3.hash(response.content),\n                \"body_sha256\": sha256(response.content).hexdigest(),\n                # \"body_simhash\": \"TODO\",\n                \"header_md5\": md5(raw_headers_encoded).hexdigest(),\n                \"header_mmh3\": mmh3.hash(raw_headers_encoded),\n                \"header_sha256\": sha256(raw_headers_encoded).hexdigest(),\n                # \"header_simhash\": \"TODO\",\n            },\n            \"header\": headers,\n            \"body\": smart_decode(response.content),\n            \"content_type\": headers.get(\"content_type\", \"\").split(\";\")[0].strip(),\n            \"url\": url,\n            \"host\": str(host),\n            \"port\": port,\n            \"scheme\": scheme,\n            \"method\": response.request.method,\n            \"path\": parsed_url.path,\n            \"raw_header\": raw_headers,\n            \"status_code\": response.status_code,\n        }\n\n        return j\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.ERROR_CLASS","title":"ERROR_CLASS  <code>class-attribute</code> <code>instance-attribute</code>","text":"<pre><code>ERROR_CLASS = WebError\n</code></pre> <p>Main utility class for managing HTTP operations in BBOT. It serves as a wrapper around the BBOTAsyncClient, which itself is a subclass of httpx.AsyncClient. 
The class provides functionalities to make HTTP requests, download files, and handle cached wordlists.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>               (<code>object</code>)           \u2013            <p>The parent helper object containing scan configurations.</p> </li> <li> <code>http_debug</code>               (<code>bool</code>)           \u2013            <p>Flag to indicate whether HTTP debugging is enabled.</p> </li> <li> <code>ssl_verify</code>               (<code>bool</code>)           \u2013            <p>Flag to indicate whether SSL verification is enabled.</p> </li> <li> <code>web_client</code>               (<code>BBOTAsyncClient</code>)           \u2013            <p>An instance of BBOTAsyncClient for making HTTP requests.</p> </li> <li> <code>client_only_options</code>               (<code>tuple</code>)           \u2013            <p>A tuple of options only applicable to the web client.</p> </li> </ul> <p>Examples:</p> <p>Basic web request:</p> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre> <p>Download file:</p> <pre><code>&gt;&gt;&gt; filename = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\")\n</code></pre> <p>Download wordlist (cached for 30 days by default):</p> <pre><code>&gt;&gt;&gt; filename = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.beautifulsoup","title":"beautifulsoup","text":"<pre><code>beautifulsoup(markup, features='html.parser', builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, element_classes=None, **kwargs)\n</code></pre> <p>Navigate, Search, Modify, Parse, or PrettyPrint HTML Content. More information at https://beautiful-soup-4.readthedocs.io/en/latest/</p> <p>Parameters:</p> <ul> <li> <code>markup</code>           \u2013            <p>A string or a file-like object representing markup to be parsed.</p> </li> <li> <code>features</code>           \u2013            <p>Desirable features of the parser to be used. This may be the name of a specific parser (\"lxml\", \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be the type of markup to be used (\"html\", \"html5\", \"xml\"). Defaults to 'html.parser'.</p> </li> <li> <code>builder</code>           \u2013            <p>A TreeBuilder subclass to instantiate (or instance to use) instead of looking one up based on <code>features</code>.</p> </li> <li> <code>parse_only</code>           \u2013            <p>A SoupStrainer. 
Only parts of the document matching the SoupStrainer will be considered.</p> </li> <li> <code>from_encoding</code>           \u2013            <p>A string indicating the encoding of the document to be parsed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>soup</code>          \u2013            <p>An instance of the BeautifulSoup class</p> </li> </ul> Todo <ul> <li>Write tests for this function</li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\nPerform an html parse of the 'markup' argument and return a soup instance\n</code></pre> <pre><code>&gt;&gt;&gt; email_type = soup.find(type=\"email\")\nSearches the soup instance for all occurrences of the passed in argument\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>def beautifulsoup(\n    self,\n    markup,\n    features=\"html.parser\",\n    builder=None,\n    parse_only=None,\n    from_encoding=None,\n    exclude_encodings=None,\n    element_classes=None,\n    **kwargs,\n):\n    \"\"\"\n    Navigate, Search, Modify, Parse, or PrettyPrint HTML Content.\n    More information at https://beautiful-soup-4.readthedocs.io/en/latest/\n\n    Args:\n        markup: A string or a file-like object representing markup to be parsed.\n        features: Desirable features of the parser to be used.\n            This may be the name of a specific parser (\"lxml\",\n            \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be\n            the type of markup to be used (\"html\", \"html5\", \"xml\").\n            Defaults to 'html.parser'.\n        builder: A TreeBuilder subclass to instantiate (or instance to use)\n            instead of looking one up based on `features`.\n        parse_only: A SoupStrainer. Only parts of the document\n            matching the SoupStrainer will be considered.\n        from_encoding: A string indicating the encoding of the\n            document to be parsed.\n        exclude_encodings = A list of strings indicating\n            encodings known to be wrong.\n        element_classes = A dictionary mapping BeautifulSoup\n            classes like Tag and NavigableString, to other classes you'd\n            like to be instantiated instead as the parse tree is\n            built.\n        **kwargs = For backwards compatibility purposes.\n\n    Returns:\n        soup: An instance of the BeautifulSoup class\n\n    Todo:\n        - Write tests for this function\n\n    Examples:\n        &gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\n        Perform an html parse of the 'markup' argument and return a soup instance\n\n        &gt;&gt;&gt; email_type = soup.find(type=\"email\")\n        Searches the soup instance for all occurrences of the passed in argument\n    \"\"\"\n    try:\n        soup = BeautifulSoup(\n            markup, features, builder, parse_only, from_encoding, exclude_encodings, element_classes, **kwargs\n        )\n        return soup\n    except Exception as e:\n        log.debug(f\"Error parsing beautifulsoup: {e}\")\n        return False\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.curl","title":"curl  <code>async</code>","text":"<pre><code>curl(*args, **kwargs)\n</code></pre> <p>An asynchronous function that runs a cURL command with specified arguments and options.</p> <p>This function constructs and executes a cURL command based on the provided parameters. 
It offers support for various cURL options such as headers, post data, and cookies.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable length argument list for positional arguments. Unused in this function.</p> </li> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL for the cURL request. Mandatory.</p> </li> <li> <code>raw_path</code>               (<code>bool</code>)           \u2013            <p>If True, activates '--path-as-is' in cURL. Defaults to False.</p> </li> <li> <code>headers</code>               (<code>dict</code>)           \u2013            <p>A dictionary of HTTP headers to include in the request.</p> </li> <li> <code>ignore_bbot_global_settings</code>               (<code>bool</code>)           \u2013            <p>If True, ignores the global settings of BBOT. Defaults to False.</p> </li> <li> <code>post_data</code>               (<code>dict</code>)           \u2013            <p>A dictionary containing data to be sent in the request body.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request (e.g., 'GET', 'POST').</p> </li> <li> <code>cookies</code>               (<code>dict</code>)           \u2013            <p>A dictionary of cookies to include in the request.</p> </li> <li> <code>path_override</code>               (<code>str</code>)           \u2013            <p>Overrides the request-target to use in the HTTP request line.</p> </li> <li> <code>head_mode</code>               (<code>bool</code>)           \u2013            <p>If True, includes '-I' to fetch headers only. Defaults to None.</p> </li> <li> <code>raw_body</code>               (<code>str</code>)           \u2013            <p>Raw string to be sent in the body of the request.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments that will be forwarded to the HTTP request function.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The output of the cURL command.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>CurlError</code>             \u2013            <p>If 'url' is not supplied.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n&gt;&gt;&gt; print(output)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def curl(self, *args, **kwargs):\n    \"\"\"\n    An asynchronous function that runs a cURL command with specified arguments and options.\n\n    This function constructs and executes a cURL command based on the provided parameters.\n    It offers support for various cURL options such as headers, post data, and cookies.\n\n    Args:\n        *args: Variable length argument list for positional arguments. Unused in this function.\n        url (str): The URL for the cURL request. Mandatory.\n        raw_path (bool, optional): If True, activates '--path-as-is' in cURL. Defaults to False.\n        headers (dict, optional): A dictionary of HTTP headers to include in the request.\n        ignore_bbot_global_settings (bool, optional): If True, ignores the global settings of BBOT. 
Defaults to False.\n        post_data (dict, optional): A dictionary containing data to be sent in the request body.\n        method (str, optional): The HTTP method to use for the request (e.g., 'GET', 'POST').\n        cookies (dict, optional): A dictionary of cookies to include in the request.\n        path_override (str, optional): Overrides the request-target to use in the HTTP request line.\n        head_mode (bool, optional): If True, includes '-I' to fetch headers only. Defaults to None.\n        raw_body (str, optional): Raw string to be sent in the body of the request.\n        **kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n    Returns:\n        str: The output of the cURL command.\n\n    Raises:\n        CurlError: If 'url' is not supplied.\n\n    Examples:\n        &gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n        &gt;&gt;&gt; print(output)\n    \"\"\"\n    url = kwargs.get(\"url\", \"\")\n\n    if not url:\n        raise CurlError(\"No URL supplied to CURL helper\")\n\n    curl_command = [\"curl\", url, \"-s\"]\n\n    raw_path = kwargs.get(\"raw_path\", False)\n    if raw_path:\n        curl_command.append(\"--path-as-is\")\n\n    # respect global ssl verify settings\n    if self.ssl_verify is not True:\n        curl_command.append(\"-k\")\n\n    headers = kwargs.get(\"headers\", {})\n\n    ignore_bbot_global_settings = kwargs.get(\"ignore_bbot_global_settings\", False)\n\n    if ignore_bbot_global_settings:\n        log.debug(\"ignore_bbot_global_settings enabled. Global settings will not be applied\")\n    else:\n        http_timeout = self.parent_helper.web_config.get(\"http_timeout\", 20)\n        user_agent = self.parent_helper.web_config.get(\"user_agent\", \"BBOT\")\n\n        if \"User-Agent\" not in headers:\n            headers[\"User-Agent\"] = user_agent\n\n        # only add custom headers if the URL is in-scope\n        if self.parent_helper.preset.in_scope(url):\n            for hk, hv in self.web_config.get(\"http_headers\", {}).items():\n                headers[hk] = hv\n\n        # add the timeout\n        if not \"timeout\" in kwargs:\n            timeout = http_timeout\n\n        curl_command.append(\"-m\")\n        curl_command.append(str(timeout))\n\n    for k, v in headers.items():\n        if isinstance(v, list):\n            for x in v:\n                curl_command.append(\"-H\")\n                curl_command.append(f\"{k}: {x}\")\n\n        else:\n            curl_command.append(\"-H\")\n            curl_command.append(f\"{k}: {v}\")\n\n    post_data = kwargs.get(\"post_data\", {})\n    if len(post_data.items()) &gt; 0:\n        curl_command.append(\"-d\")\n        post_data_str = \"\"\n        for k, v in post_data.items():\n            post_data_str += f\"&amp;{k}={v}\"\n        curl_command.append(post_data_str.lstrip(\"&amp;\"))\n\n    method = kwargs.get(\"method\", \"\")\n    if method:\n        curl_command.append(\"-X\")\n        curl_command.append(method)\n\n    cookies = kwargs.get(\"cookies\", \"\")\n    if cookies:\n        curl_command.append(\"-b\")\n        cookies_str = \"\"\n        for k, v in cookies.items():\n            cookies_str += f\"{k}={v}; \"\n        curl_command.append(f'{cookies_str.rstrip(\" \")}')\n\n    path_override = kwargs.get(\"path_override\", None)\n    if path_override:\n        curl_command.append(\"--request-target\")\n        curl_command.append(f\"{path_override}\")\n\n    head_mode = 
kwargs.get(\"head_mode\", None)\n    if head_mode:\n        curl_command.append(\"-I\")\n\n    raw_body = kwargs.get(\"raw_body\", None)\n    if raw_body:\n        curl_command.append(\"-d\")\n        curl_command.append(raw_body)\n\n    output = (await self.parent_helper.run(curl_command)).stdout\n    return output\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.download","title":"download  <code>async</code>","text":"<pre><code>download(url, **kwargs)\n</code></pre> <p>Asynchronous function for downloading files from a given URL. Supports caching with an optional time period in hours via the \"cache_hrs\" keyword argument. In case of successful download, returns the full path of the saved filename. If the download fails, returns None.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL of the file to download.</p> </li> <li> <code>filename</code>               (<code>str</code>)           \u2013            <p>The filename to save the downloaded file as. If not provided, will generate based on URL.</p> </li> <li> <code>max_size</code>               (<code>str or int</code>)           \u2013            <p>Maximum filesize as a string (\"5MB\") or integer in bytes.</p> </li> <li> <code>cache_hrs</code>               (<code>float</code>)           \u2013            <p>The number of hours to cache the downloaded file. A negative value disables caching. Defaults to -1.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request, defaults to 'GET'.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to pass to the httpx request.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def download(self, url, **kwargs):\n    \"\"\"\n    Asynchronous function for downloading files from a given URL. Supports caching with an optional\n    time period in hours via the \"cache_hrs\" keyword argument. In case of successful download,\n    returns the full path of the saved filename. If the download fails, returns None.\n\n    Args:\n        url (str): The URL of the file to download.\n        filename (str, optional): The filename to save the downloaded file as.\n            If not provided, will generate based on URL.\n        max_size (str or int): Maximum filesize as a string (\"5MB\") or integer in bytes.\n        cache_hrs (float, optional): The number of hours to cache the downloaded file.\n            A negative value disables caching. Defaults to -1.\n        method (str, optional): The HTTP method to use for the request, defaults to 'GET'.\n        raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. 
Defaults to False.\n        **kwargs: Additional keyword arguments to pass to the httpx request.\n\n    Returns:\n        Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.\n\n    Examples:\n        &gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n    \"\"\"\n    success = False\n    raise_error = kwargs.get(\"raise_error\", False)\n    filename = kwargs.pop(\"filename\", self.parent_helper.cache_filename(url))\n    filename = truncate_filename(Path(filename).resolve())\n    kwargs[\"filename\"] = filename\n    max_size = kwargs.pop(\"max_size\", None)\n    if max_size is not None:\n        max_size = self.parent_helper.human_to_bytes(max_size)\n        kwargs[\"max_size\"] = max_size\n    cache_hrs = float(kwargs.pop(\"cache_hrs\", -1))\n    if cache_hrs &gt; 0 and self.parent_helper.is_cached(url):\n        log.debug(f\"{url} is cached at {self.parent_helper.cache_filename(url)}\")\n        success = True\n    else:\n        result = await self.run_and_return(\"download\", url, **kwargs)\n        if isinstance(result, dict) and \"_download_error\" in result:\n            if raise_error:\n                error_msg = result[\"_download_error\"]\n                response = result[\"_response\"]\n                error = self.ERROR_CLASS(error_msg)\n                error.response = response\n                raise error\n        elif result:\n            success = True\n\n    if success:\n        return filename\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request","title":"request  <code>async</code>","text":"<pre><code>request(*args, **kwargs)\n</code></pre> <p>Asynchronous function for making HTTP requests, intended to be the most basic web request function used widely across BBOT and within this helper class. Handles various exceptions and timeouts that might occur during the request.</p> <p>This function automatically respects the scan's global timeout, proxy, headers, etc. Headers you specify will be merged with the scan's. Your arguments take ultimate precedence, meaning you can override the scan's values if you want.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL to send the request to.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request. 
Defaults to 'GET'.</p> </li> <li> <code>headers</code>               (<code>dict</code>)           \u2013            <p>Dictionary of HTTP headers to send with the request.</p> </li> <li> <code>params</code>               (<code>dict</code>)           \u2013            <p>Dictionary, list of tuples, or bytes to send in the query string.</p> </li> <li> <code>cookies</code>               (<code>dict</code>)           \u2013            <p>Dictionary or CookieJar object containing cookies.</p> </li> <li> <code>json</code>               (<code>Any</code>)           \u2013            <p>A JSON serializable Python object to send in the body.</p> </li> <li> <code>data</code>               (<code>dict</code>)           \u2013            <p>Dictionary, list of tuples, or bytes to send in the body.</p> </li> <li> <code>files</code>               (<code>dict</code>)           \u2013            <p>Dictionary of 'name': file-like-objects for multipart encoding upload.</p> </li> <li> <code>auth</code>               (<code>tuple</code>)           \u2013            <p>Auth tuple to enable Basic/Digest/Custom HTTP auth.</p> </li> <li> <code>timeout</code>               (<code>float</code>)           \u2013            <p>The maximum time to wait for the request to complete.</p> </li> <li> <code>proxies</code>               (<code>dict</code>)           \u2013            <p>Dictionary mapping protocol schemes to proxy URLs.</p> </li> <li> <code>allow_redirects</code>               (<code>bool</code>)           \u2013            <p>Enables or disables redirection. Defaults to None.</p> </li> <li> <code>stream</code>               (<code>bool</code>)           \u2013            <p>Enables or disables response streaming.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.</p> </li> <li> <code>client</code>               (<code>AsyncClient</code>)           \u2013            <p>A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.</p> </li> <li> <code>cache_for</code>               (<code>int</code>)           \u2013            <p>Time in seconds to cache the request. Not used currently. Defaults to None.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>TimeoutException</code>             \u2013            <p>If the request times out.</p> </li> <li> <code>ConnectError</code>             \u2013            <p>If the connection fails.</p> </li> <li> <code>RequestError</code>             \u2013            <p>For other request-related errors.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>httpx.Response or None: The HTTP response object returned by the httpx library.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n</code></pre> Note <p>If the web request fails, it will return None unless <code>raise_error</code> is <code>True</code>.</p> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request(self, *args, **kwargs):\n    \"\"\"\n    Asynchronous function for making HTTP requests, intended to be the most basic web request function\n    used widely across BBOT and within this helper class. 
Handles various exceptions and timeouts\n    that might occur during the request.\n\n    This function automatically respects the scan's global timeout, proxy, headers, etc.\n    Headers you specify will be merged with the scan's. Your arguments take ultimate precedence,\n    meaning you can override the scan's values if you want.\n\n    Args:\n        url (str): The URL to send the request to.\n        method (str, optional): The HTTP method to use for the request. Defaults to 'GET'.\n        headers (dict, optional): Dictionary of HTTP headers to send with the request.\n        params (dict, optional): Dictionary, list of tuples, or bytes to send in the query string.\n        cookies (dict, optional): Dictionary or CookieJar object containing cookies.\n        json (Any, optional): A JSON serializable Python object to send in the body.\n        data (dict, optional): Dictionary, list of tuples, or bytes to send in the body.\n        files (dict, optional): Dictionary of 'name': file-like-objects for multipart encoding upload.\n        auth (tuple, optional): Auth tuple to enable Basic/Digest/Custom HTTP auth.\n        timeout (float, optional): The maximum time to wait for the request to complete.\n        proxies (dict, optional): Dictionary mapping protocol schemes to proxy URLs.\n        allow_redirects (bool, optional): Enables or disables redirection. Defaults to None.\n        stream (bool, optional): Enables or disables response streaming.\n        raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.\n        client (httpx.AsyncClient, optional): A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.\n        cache_for (int, optional): Time in seconds to cache the request. Not used currently. 
Defaults to None.\n\n    Raises:\n        httpx.TimeoutException: If the request times out.\n        httpx.ConnectError: If the connection fails.\n        httpx.RequestError: For other request-related errors.\n\n    Returns:\n        httpx.Response or None: The HTTP response object returned by the httpx library.\n\n    Examples:\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n\n    Note:\n        If the web request fails, it will return None unless `raise_error` is `True`.\n    \"\"\"\n    raise_error = kwargs.get(\"raise_error\", False)\n    result = await self.run_and_return(\"request\", *args, **kwargs)\n    if isinstance(result, dict) and \"_request_error\" in result:\n        if raise_error:\n            error_msg = result[\"_request_error\"]\n            response = result[\"_response\"]\n            error = self.ERROR_CLASS(error_msg)\n            error.response = response\n            raise error\n    return result\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request_batch","title":"request_batch  <code>async</code>","text":"<pre><code>request_batch(urls, *args, **kwargs)\n</code></pre> <p>Given a list of URLs, request them in parallel and yield responses as they come in.</p> <p>Parameters:</p> <ul> <li> <code>urls</code>               (<code>list[str]</code>)           \u2013            <p>List of URLs to visit</p> </li> <li> <code>*args</code>           \u2013            <p>Positional arguments to pass through to httpx</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to pass through to httpx</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n&gt;&gt;&gt;     if response is not None and response.status_code == 200:\n&gt;&gt;&gt;         self.hugesuccess(response)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request_batch(self, urls, *args, **kwargs):\n    \"\"\"\n    Given a list of URLs, request them in parallel and yield responses as they come in.\n\n    Args:\n        urls (list[str]): List of URLs to visit\n        *args: Positional arguments to pass through to httpx\n        **kwargs: Keyword arguments to pass through to httpx\n\n    Examples:\n        &gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n        &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n        &gt;&gt;&gt;         self.hugesuccess(response)\n    \"\"\"\n    agen = self.run_and_yield(\"request_batch\", urls, *args, **kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request_custom_batch","title":"request_custom_batch  <code>async</code>","text":"<pre><code>request_custom_batch(urls_and_kwargs)\n</code></pre> <p>Make web requests in parallel with custom options for each request. 
Yield responses as they come in.</p> <p>Similar to <code>request_batch</code> except it allows individual arguments for each URL.</p> <p>Parameters:</p> <ul> <li> <code>urls_and_kwargs</code>               (<code>list[tuple]</code>)           \u2013            <p>List of tuples in the format: (url, kwargs, custom_tracker) where custom_tracker is an optional value for your own internal use. You may use it to help correlate requests, etc.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; urls_and_kwargs = [\n&gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n&gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n&gt;&gt;&gt; ]\n&gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n&gt;&gt;&gt;     urls_and_kwargs\n&gt;&gt;&gt; ):\n&gt;&gt;&gt;     if response is not None and response.status_code == 200:\n&gt;&gt;&gt;         self.hugesuccess(response)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request_custom_batch(self, urls_and_kwargs):\n    \"\"\"\n    Make web requests in parallel with custom options for each request. Yield responses as they come in.\n\n    Similar to `request_batch` except it allows individual arguments for each URL.\n\n    Args:\n        urls_and_kwargs (list[tuple]): List of tuples in the format: (url, kwargs, custom_tracker)\n            where custom_tracker is an optional value for your own internal use. You may use it to\n            help correlate requests, etc.\n\n    Examples:\n        &gt;&gt;&gt; urls_and_kwargs = [\n        &gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n        &gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n        &gt;&gt;&gt; ]\n        &gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n        &gt;&gt;&gt;     urls_and_kwargs\n        &gt;&gt;&gt; ):\n        &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n        &gt;&gt;&gt;         self.hugesuccess(response)\n    \"\"\"\n    agen = self.run_and_yield(\"request_custom_batch\", urls_and_kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.response_to_json","title":"response_to_json","text":"<pre><code>response_to_json(response)\n</code></pre> <p>Convert web response to JSON object, similar to the output of <code>httpx -irr -json</code></p> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>def response_to_json(self, response):\n    \"\"\"\n    Convert web response to JSON object, similar to the output of `httpx -irr -json`\n    \"\"\"\n\n    if response is None:\n        return\n\n    import mmh3\n    from datetime import datetime\n    from hashlib import md5, sha256\n    from bbot.core.helpers.misc import tagify, urlparse, split_host_port, smart_decode\n\n    request = response.request\n    url = str(request.url)\n    parsed_url = urlparse(url)\n    netloc = parsed_url.netloc\n    scheme = parsed_url.scheme.lower()\n    host, port = split_host_port(f\"{scheme}://{netloc}\")\n\n    raw_headers = \"\\r\\n\".join([f\"{k}: {v}\" for k, v in response.headers.items()])\n    raw_headers_encoded = raw_headers.encode()\n\n    headers = {}\n    for k, v in 
response.headers.items():\n        k = tagify(k, delimiter=\"_\")\n        headers[k] = v\n\n    j = {\n        \"timestamp\": datetime.now().isoformat(),\n        \"hash\": {\n            \"body_md5\": md5(response.content).hexdigest(),\n            \"body_mmh3\": mmh3.hash(response.content),\n            \"body_sha256\": sha256(response.content).hexdigest(),\n            # \"body_simhash\": \"TODO\",\n            \"header_md5\": md5(raw_headers_encoded).hexdigest(),\n            \"header_mmh3\": mmh3.hash(raw_headers_encoded),\n            \"header_sha256\": sha256(raw_headers_encoded).hexdigest(),\n            # \"header_simhash\": \"TODO\",\n        },\n        \"header\": headers,\n        \"body\": smart_decode(response.content),\n        \"content_type\": headers.get(\"content_type\", \"\").split(\";\")[0].strip(),\n        \"url\": url,\n        \"host\": str(host),\n        \"port\": port,\n        \"scheme\": scheme,\n        \"method\": response.request.method,\n        \"path\": parsed_url.path,\n        \"raw_header\": raw_headers,\n        \"status_code\": response.status_code,\n    }\n\n    return j\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.wordlist","title":"wordlist  <code>async</code>","text":"<pre><code>wordlist(path, lines=None, **kwargs)\n</code></pre> <p>Asynchronous function for retrieving wordlists, either from a local path or a URL. Allows for optional line-based truncation and caching. Returns the full path of the wordlist file or a truncated version of it.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str</code>)           \u2013            <p>The local or remote path of the wordlist.</p> </li> <li> <code>lines</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>Number of lines to read from the wordlist. If specified, will return a truncated wordlist with this many lines.</p> </li> <li> <code>cache_hrs</code>               (<code>float</code>)           \u2013            <p>Number of hours to cache the downloaded wordlist. Defaults to 720 hours (30 days) for remote wordlists.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to pass to the 'download' function for remote wordlists.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Path</code>          \u2013            <p>The full path of the wordlist (or its truncated version) as a Path object.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>WordlistError</code>             \u2013            <p>If the path is invalid or the wordlist could not be retrieved or found.</p> </li> </ul> <p>Examples:</p> <p>Fetching full wordlist</p> <pre><code>&gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n</code></pre> <p>Fetching and truncating to the first 100 lines</p> <pre><code>&gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def wordlist(self, path, lines=None, **kwargs):\n    \"\"\"\n    Asynchronous function for retrieving wordlists, either from a local path or a URL.\n    Allows for optional line-based truncation and caching. 
Returns the full path of the wordlist\n    file or a truncated version of it.\n\n    Args:\n        path (str): The local or remote path of the wordlist.\n        lines (int, optional): Number of lines to read from the wordlist.\n            If specified, will return a truncated wordlist with this many lines.\n        cache_hrs (float, optional): Number of hours to cache the downloaded wordlist.\n            Defaults to 720 hours (30 days) for remote wordlists.\n        **kwargs: Additional keyword arguments to pass to the 'download' function for remote wordlists.\n\n    Returns:\n        Path: The full path of the wordlist (or its truncated version) as a Path object.\n\n    Raises:\n        WordlistError: If the path is invalid or the wordlist could not be retrieved or found.\n\n    Examples:\n        Fetching full wordlist\n        &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n\n        Fetching and truncating to the first 100 lines\n        &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n    \"\"\"\n    if not path:\n        raise WordlistError(f\"Invalid wordlist: {path}\")\n    if not \"cache_hrs\" in kwargs:\n        kwargs[\"cache_hrs\"] = 720\n    if self.parent_helper.is_url(path):\n        filename = await self.download(str(path), **kwargs)\n        if filename is None:\n            raise WordlistError(f\"Unable to retrieve wordlist from {path}\")\n    else:\n        filename = Path(path).resolve()\n        if not filename.is_file():\n            raise WordlistError(f\"Unable to find wordlist at {path}\")\n\n    if lines is None:\n        return filename\n    else:\n        lines = int(lines)\n        with open(filename) as f:\n            read_lines = f.readlines()\n        cache_key = f\"{filename}:{lines}\"\n        truncated_filename = self.parent_helper.cache_filename(cache_key)\n        with open(truncated_filename, \"w\") as f:\n            for line in read_lines[:lines]:\n                f.write(line)\n        return truncated_filename\n</code></pre>"},{"location":"dev/helpers/wordcloud/","title":"Word Cloud","text":"<p>These are helpers related to BBOT's Word Cloud, a mechanism for storing target-specific keywords that are useful for custom wordlists, etc.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.word_cloud\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.DNSMutator","title":"DNSMutator","text":"<p>               Bases: <code>Mutator</code></p> <p>DNS-specific mutator used by the <code>dnsbrute_mutations</code> module to generate target-specific subdomain mutations.</p> <p>This class extends the Mutator base class to add DNS-specific logic for generating subdomain mutations based on input words. 
It utilizes custom word extraction patterns and a wordninja model trained on DNS-specific data.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n&gt;&gt;&gt; s.start_without_generator()\n&gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n[\n    \"word\",\n    \"word-test\",\n    \"word1\",\n    \"wordtest\",\n    \"www-word\",\n    \"wwwword\"\n]\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class DNSMutator(Mutator):\n    \"\"\"\n    DNS-specific mutator used by the `dnsbrute_mutations` module to generate target-specific subdomain mutations.\n\n    This class extends the Mutator base class to add DNS-specific logic for generating\n    subdomain mutations based on input words. It utilizes custom word extraction patterns\n    and a wordninja model trained on DNS-specific data.\n\n    Examples:\n        &gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n        &gt;&gt;&gt; s.start_without_generator()\n        &gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n        [\n            \"word\",\n            \"word-test\",\n            \"word1\",\n            \"wordtest\",\n            \"www-word\",\n            \"wwwword\"\n        ]\n    \"\"\"\n\n    extract_word_regexes = [\n        re.compile(r, re.I)\n        for r in [\n            r\"[a-z]+\",\n            r\"[a-z_-]+\",\n            r\"[a-z0-9]+\",\n            r\"[a-z0-9_-]+\",\n        ]\n    ]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        wordlist_dir = Path(__file__).parent.parent.parent / \"wordlists\"\n        wordninja_dns_wordlist = wordlist_dir / \"wordninja_dns.txt.gz\"\n        self.model = wordninja.LanguageModel(wordninja_dns_wordlist)\n\n    def mutations(self, words, max_mutations=None):\n        if isinstance(words, str):\n            words = [words]\n        new_words = set()\n        for word in words:\n            for e in extract_words(word, acronyms=False, model=self.model, word_regexes=self.extract_word_regexes):\n                new_words.add(e)\n        return super().mutations(new_words, max_mutations=max_mutations)\n\n    def add_word(self, word):\n        spans = set()\n        mutations = set()\n        for r in self.extract_word_regexes:\n            for match in r.finditer(word):\n                span = match.span()\n                if span not in spans:\n                    spans.add(span)\n        for start, end in spans:\n            match_str = word[start:end]\n            # skip digits\n            if match_str.isdigit():\n                continue\n            before = word[:start]\n            after = word[end:]\n            basic_mutation = (before, None, after)\n            mutations.add(basic_mutation)\n            match_str_split = self.model.split(match_str)\n            if len(match_str_split) &gt; 1:\n                for i, s in enumerate(match_str_split):\n                    if s.isdigit():\n                        continue\n                    split_before = \"\".join(match_str_split[:i])\n                    split_after = \"\".join(match_str_split[i + 1 :])\n                    wordninja_mutation = (before + split_before, None, split_after + after)\n                    mutations.add(wordninja_mutation)\n        for m in mutations:\n            self._add_mutation(m)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.Mutator","title":"Mutator","text":"<p>       
        Bases: <code>dict</code></p> <p>Base class for generating mutations from a list of words. It accumulates words and produces mutations from them.</p> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class Mutator(dict):\n    \"\"\"\n    Base class for generating mutations from a list of words.\n    It accumulates words and produces mutations from them.\n    \"\"\"\n\n    def mutations(self, words, max_mutations=None):\n        mutations = self.top_mutations(max_mutations)\n        ret = set()\n        if isinstance(words, str):\n            words = [words]\n        for word in words:\n            for m in self.mutate(word, mutations=mutations):\n                ret.add(\"\".join(m))\n        return ret\n\n    def mutate(self, word, max_mutations=None, mutations=None):\n        if mutations is None:\n            mutations = self.top_mutations(max_mutations)\n        for mutation, count in mutations.items():\n            ret = []\n            for s in mutation:\n                if s is not None:\n                    ret.append(s)\n                else:\n                    ret.append(word)\n            yield ret\n\n    def top_mutations(self, n=None):\n        if n is not None:\n            return dict(sorted(self.items(), key=lambda x: x[-1], reverse=True)[:n])\n        else:\n            return dict(self)\n\n    def _add_mutation(self, mutation):\n        if None not in mutation:\n            return\n        mutation = tuple([m for m in mutation if m != \"\"])\n        try:\n            self[mutation] += 1\n        except KeyError:\n            self[mutation] = 1\n\n    def add_word(self, word):\n        pass\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud","title":"WordCloud","text":"<p>               Bases: <code>dict</code></p> <p>WordCloud is a specialized dictionary-like class for storing and aggregating words extracted from various data sources such as DNS names and URLs. The class is intended to facilitate the generation of target-specific wordlists and mutations.</p> <p>The WordCloud class can be accessed and manipulated like a standard Python dictionary. 
It also offers additional methods for generating mutations based on the words it contains.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>           \u2013            <p>The parent helper object that provides necessary utilities.</p> </li> <li> <code>devops_mutations</code>           \u2013            <p>A set containing common devops-related mutations, loaded from a file.</p> </li> <li> <code>dns_mutator</code>           \u2013            <p>An instance of the DNSMutator class for generating DNS-based mutations.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n&gt;&gt;&gt; s.start_without_generator()\n&gt;&gt;&gt; print(s.helpers.word_cloud)\n{\n    \"evilcorp\": 2,\n    \"ec\": 2,\n    \"www1\": 1,\n    \"evil\": 2,\n    \"www\": 2,\n    \"w1\": 1,\n    \"corp\": 2,\n    \"1\": 1,\n    \"wt\": 1,\n    \"test\": 1,\n    \"www-test\": 1\n}\n</code></pre> <pre><code>&gt;&gt;&gt; s.helpers.word_cloud.mutations([\"word\"], cloud=True, numbers=0, devops=False, letters=False)\n[\n    [\n        \"1\",\n        \"word\"\n    ],\n    [\n        \"corp\",\n        \"word\"\n    ],\n    [\n        \"ec\",\n        \"word\"\n    ],\n    [\n        \"evil\",\n        \"word\"\n    ],\n    ...\n]\n</code></pre> <pre><code>&gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n[\n    \"word\",\n    \"word-test\",\n    \"word1\",\n    \"wordtest\",\n    \"www-word\",\n    \"wwwword\"\n]\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class WordCloud(dict):\n    \"\"\"\n    WordCloud is a specialized dictionary-like class for storing and aggregating\n    words extracted from various data sources such as DNS names and URLs. The class\n    is intended to facilitate the generation of target-specific wordlists and mutations.\n\n    The WordCloud class can be accessed and manipulated like a standard Python dictionary.\n    It also offers additional methods for generating mutations based on the words it contains.\n\n    Attributes:\n        parent_helper: The parent helper object that provides necessary utilities.\n        devops_mutations: A set containing common devops-related mutations, loaded from a file.\n        dns_mutator: An instance of the DNSMutator class for generating DNS-based mutations.\n\n    Examples:\n        &gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n        &gt;&gt;&gt; s.start_without_generator()\n        &gt;&gt;&gt; print(s.helpers.word_cloud)\n        {\n            \"evilcorp\": 2,\n            \"ec\": 2,\n            \"www1\": 1,\n            \"evil\": 2,\n            \"www\": 2,\n            \"w1\": 1,\n            \"corp\": 2,\n            \"1\": 1,\n            \"wt\": 1,\n            \"test\": 1,\n            \"www-test\": 1\n        }\n\n        &gt;&gt;&gt; s.helpers.word_cloud.mutations([\"word\"], cloud=True, numbers=0, devops=False, letters=False)\n        [\n            [\n                \"1\",\n                \"word\"\n            ],\n            [\n                \"corp\",\n                \"word\"\n            ],\n            [\n                \"ec\",\n                \"word\"\n            ],\n            [\n                \"evil\",\n                \"word\"\n            ],\n            ...\n        ]\n\n        &gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n        [\n            \"word\",\n            \"word-test\",\n            \"word1\",\n            \"wordtest\",\n            
\"www-word\",\n            \"wwwword\"\n        ]\n    \"\"\"\n\n    def __init__(self, parent_helper, *args, **kwargs):\n        self.parent_helper = parent_helper\n\n        devops_filename = self.parent_helper.wordlist_dir / \"devops_mutations.txt\"\n        self.devops_mutations = set(self.parent_helper.read_file(devops_filename))\n\n        self.dns_mutator = DNSMutator()\n\n        super().__init__(*args, **kwargs)\n\n    def mutations(\n        self, words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True\n    ):\n        \"\"\"\n        Generate various mutations for the given list of words based on different criteria.\n\n        Yields tuples of strings which can be joined on the desired delimiter, e.g. \"-\" or \"_\".\n\n        Args:\n            words (Union[str, Iterable[str]]): A single word or list of words to mutate.\n            devops (bool): Whether to include devops-related mutations.\n            cloud (bool): Whether to include mutations from the word cloud.\n            letters (bool): Whether to include letter-based mutations.\n            numbers (int): The maximum numeric mutations to include.\n            number_padding (int): Padding for numeric mutations.\n            substitute_numbers (bool): Whether to substitute numbers in mutations.\n\n        Yields:\n            tuple: A tuple containing each of the mutation segments.\n        \"\"\"\n        if isinstance(words, str):\n            words = (words,)\n        results = set()\n        for word in words:\n            h = hash(word)\n            if not h in results:\n                results.add(h)\n                yield (word,)\n        if numbers &gt; 0:\n            if substitute_numbers:\n                for word in words:\n                    for number_mutation in self.get_number_mutations(word, n=numbers, padding=number_padding):\n                        h = hash(number_mutation)\n                        if not h in results:\n                            results.add(h)\n                            yield (number_mutation,)\n        for word in words:\n            for modifier in self.modifiers(\n                devops=devops, cloud=cloud, letters=letters, numbers=numbers, number_padding=number_padding\n            ):\n                a = (word, modifier)\n                b = (modifier, word)\n                for _ in (a, b):\n                    h = hash(_)\n                    if h not in results:\n                        results.add(h)\n                        yield _\n\n    def modifiers(self, devops=True, cloud=True, letters=True, numbers=5, number_padding=2):\n        modifiers = set()\n        if devops:\n            modifiers.update(self.devops_mutations)\n        if cloud:\n            modifiers.update(set(self))\n        if letters:\n            modifiers.update(set(string.ascii_lowercase))\n        if numbers &gt; 0:\n            modifiers.update(self.parent_helper.gen_numbers(numbers, number_padding))\n        return modifiers\n\n    def absorb_event(self, event):\n        \"\"\"\n        Absorbs an event from a BBOT scan into the word cloud.\n\n        This method updates the word cloud by extracting words from the given event. 
It aims to avoid including PTR\n        (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.\n\n        Args:\n            event (Event): The event object containing the words to be absorbed into the word cloud.\n        \"\"\"\n        for word in event.words:\n            self.add_word(word)\n        if event.scope_distance == 0 and event.type.startswith(\"DNS_NAME\"):\n            subdomain = tldextract(event.data).subdomain\n            if subdomain and not self.parent_helper.is_ptr(subdomain):\n                for s in subdomain.split(\".\"):\n                    self.dns_mutator.add_word(s)\n\n    def absorb_word(self, word, wordninja=True):\n        \"\"\"\n        Absorbs a word into the word cloud after splitting it using a word extraction algorithm.\n\n        This method splits the input word into smaller meaningful words using word extraction, and then adds each\n        of them to the word cloud. The splitting is done using a predefined algorithm in the parent helper.\n\n        Args:\n            word (str): The word to be split and absorbed into the word cloud.\n            wordninja (bool, optional): If True, word extraction is enabled. Defaults to True.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n            &gt;&gt;&gt; print(self.helpers.word_cloud)\n            {\n                \"blacklantern\": 1,\n                \"black\": 1,\n                \"bl\": 1,\n                \"lantern\": 1\n            }\n        \"\"\"\n        for w in self.parent_helper.extract_words(word, wordninja=wordninja):\n            self.add_word(w)\n\n    def add_word(self, word, lowercase=True):\n        \"\"\"\n        Adds a word to the word cloud.\n\n        This method updates the word cloud by adding a given word. If the word already exists in the cloud,\n        its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.\n\n        Args:\n            word (str): The word to be added to the word cloud.\n            lowercase (bool, optional): If True, the word will be converted to lowercase before adding. Defaults to True.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n            &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n            &gt;&gt;&gt; print(self.helpers.word_cloud)\n            {'example': 2}\n        \"\"\"\n        if lowercase:\n            word = word.lower()\n        try:\n            self[word] += 1\n        except KeyError:\n            self[word] = 1\n\n    def get_number_mutations(self, base, n=5, padding=2):\n        \"\"\"\n        Generates mutations of a base string by modifying the numerical parts or appending numbers.\n\n        This method detects existing numbers in the base string and tries incrementing and decrementing them within a\n        specified range. It also appends numbers at the end or after each word to generate more mutations.\n\n        Args:\n            base (str): The base string to generate mutations from.\n            n (int, optional): The range of numbers to use for incrementing/decrementing. Defaults to 5.\n            padding (int, optional): Zero-pad numbers up to this length. 
Defaults to 2.\n\n        Returns:\n            set: A set of mutated strings based on the base input.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n            {\n                \"www0-test\",\n                \"www1-test\",\n                \"www2-test\",\n                \"www2-test0\",\n                \"www2-test00\",\n                \"www2-test01\",\n                \"www2-test1\",\n                \"www3-test\",\n                \"www4-test\"\n            }\n        \"\"\"\n        results = set()\n\n        # detects numbers and increments/decrements them\n        # e.g. for \"base2_p013\", we would try:\n        # - \"base0_p013\" through \"base12_p013\"\n        # - \"base2_p003\" through \"base2_p023\"\n        # limited to three iterations for sanity's sake\n        for match in list(self.parent_helper.regexes.num_regex.finditer(base))[-3:]:\n            span = match.span()\n            before = base[: span[0]]\n            after = base[span[-1] :]\n            number = base[span[0] : span[-1]]\n            numlen = len(number)\n            maxnum = min(int(\"9\" * numlen), int(number) + n)\n            minnum = max(0, int(number) - n)\n            for i in range(minnum, maxnum + 1):\n                filled_num = str(i).zfill(numlen)\n                results.add(f\"{before}{filled_num}{after}\")\n                if not number.startswith(\"0\"):\n                    results.add(f\"{before}{i}{after}\")\n\n        # appends numbers after each word\n        # e.g., for \"base_www\", we would try:\n        # - \"base1_www\", \"base2_www\", etc.\n        # - \"base_www1\", \"base_www2\", etc.\n        # limited to three iterations for sanity's sake\n        number_suffixes = self.parent_helper.gen_numbers(n, padding)\n        for match in list(self.parent_helper.regexes.word_regex.finditer(base))[-3:]:\n            span = match.span()\n            for suffix in number_suffixes:\n                before = base[: span[-1]]\n                after = base[span[-1] :]\n                # skip if there's already a number\n                if len(after) &gt; 1 and not after[0].isdigit():\n                    results.add(f\"{before}{suffix}{after}\")\n        # basic cases so we don't miss anything\n        for s in number_suffixes:\n            results.add(f\"{base}{s}\")\n            results.add(base)\n\n        return results\n\n    def truncate(self, limit):\n        \"\"\"\n        Truncates the word cloud dictionary to retain only the top `limit` entries based on their occurrence frequencies.\n\n        Args:\n            limit (int): The maximum number of entries to retain in the word cloud.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n            &gt;&gt;&gt; self.helpers.word_cloud\n            {'cherry': 8, 'apple': 5}\n        \"\"\"\n        new_self = dict(self.json(limit=limit))\n        self.clear()\n        self.update(new_self)\n\n    def json(self, limit=None):\n        \"\"\"\n        Returns the word cloud as a sorted OrderedDict, optionally truncated to the top `limit` entries.\n\n        Args:\n            limit (int, optional): The maximum number of entries to include in the returned OrderedDict. 
If None, all entries are included.\n\n        Returns:\n            OrderedDict: A dictionary sorted by word frequencies, potentially truncated to the top `limit` entries.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\n            OrderedDict([('cherry', 8), ('apple', 5)])\n        \"\"\"\n        cloud_sorted = sorted(self.items(), key=lambda x: x[-1], reverse=True)\n        if limit is not None:\n            cloud_sorted = cloud_sorted[:limit]\n        return OrderedDict(cloud_sorted)\n\n    @property\n    def default_filename(self):\n        return self.parent_helper.preset.scan.home / f\"wordcloud.tsv\"\n\n    def save(self, filename=None, limit=None):\n        \"\"\"\n        Saves the word cloud to a file. The cloud can optionally be truncated to the top `limit` entries.\n\n        Args:\n            filename (str, optional): The path to the file where the word cloud will be saved. If None, uses a default filename.\n            limit (int, optional): The maximum number of entries to save to the file. If None, all entries are saved.\n\n        Returns:\n            tuple: A tuple containing a boolean indicating success or failure, and the resolved filename.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n            (True, Path('word_cloud.txt'))\n        \"\"\"\n        if filename is None:\n            filename = self.default_filename\n        else:\n            filename = Path(filename).resolve()\n        try:\n            if not self.parent_helper.mkdir(filename.parent):\n                log.error(f\"Failure creating or error writing to {filename.parent} when saving word cloud\")\n                return\n            if len(self) &gt; 0:\n                log.debug(f\"Saving word cloud to {filename}\")\n                with open(str(filename), mode=\"w\", newline=\"\") as f:\n                    c = csv.writer(f, delimiter=\"\\t\")\n                    for word, count in self.json(limit).items():\n                        c.writerow([count, word])\n                log.debug(f\"Saved word cloud ({len(self):,} words) to {filename}\")\n                return True, filename\n            else:\n                log.debug(f\"No words to save\")\n        except Exception as e:\n            import traceback\n\n            log.warning(f\"Failed to save word cloud to {filename}: {e}\")\n            log.trace(traceback.format_exc())\n        return False, filename\n\n    def load(self, filename=None):\n        \"\"\"\n        Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line\n        or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.\n\n        Args:\n            filename (str, optional): The path to the file from which to load the word cloud. 
If None, uses a default filename.\n        \"\"\"\n        if filename is None:\n            wordcloud_path = self.default_filename\n        else:\n            wordcloud_path = Path(filename).resolve()\n        log.verbose(f\"Loading word cloud from {wordcloud_path}\")\n        try:\n            with open(str(wordcloud_path), newline=\"\") as f:\n                c = csv.reader(f, delimiter=\"\\t\")\n                for row in c:\n                    if len(row) == 1:\n                        self.add_word(row[0])\n                    elif len(row) == 2:\n                        with suppress(Exception):\n                            count, word = row\n                            count = int(count)\n                            self[word] = count\n            if len(self) &gt; 0:\n                log.success(f\"Loaded word cloud ({len(self):,} words) from {wordcloud_path}\")\n        except Exception as e:\n            import traceback\n\n            log_fn = log.debug\n            if filename is not None:\n                log_fn = log.warning\n            log_fn(f\"Failed to load word cloud from {wordcloud_path}: {e}\")\n            if filename is not None:\n                log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.absorb_event","title":"absorb_event","text":"<pre><code>absorb_event(event)\n</code></pre> <p>Absorbs an event from a BBOT scan into the word cloud.</p> <p>This method updates the word cloud by extracting words from the given event. It aims to avoid including PTR (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing the words to be absorbed into the word cloud.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def absorb_event(self, event):\n    \"\"\"\n    Absorbs an event from a BBOT scan into the word cloud.\n\n    This method updates the word cloud by extracting words from the given event. It aims to avoid including PTR\n    (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.\n\n    Args:\n        event (Event): The event object containing the words to be absorbed into the word cloud.\n    \"\"\"\n    for word in event.words:\n        self.add_word(word)\n    if event.scope_distance == 0 and event.type.startswith(\"DNS_NAME\"):\n        subdomain = tldextract(event.data).subdomain\n        if subdomain and not self.parent_helper.is_ptr(subdomain):\n            for s in subdomain.split(\".\"):\n                self.dns_mutator.add_word(s)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.absorb_word","title":"absorb_word","text":"<pre><code>absorb_word(word, wordninja=True)\n</code></pre> <p>Absorbs a word into the word cloud after splitting it using a word extraction algorithm.</p> <p>This method splits the input word into smaller meaningful words using word extraction, and then adds each of them to the word cloud. 
The splitting is done using a predefined algorithm in the parent helper.</p> <p>Parameters:</p> <ul> <li> <code>word</code>               (<code>str</code>)           \u2013            <p>The word to be split and absorbed into the word cloud.</p> </li> <li> <code>wordninja</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, word extraction is enabled. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n&gt;&gt;&gt; print(self.helpers.word_cloud)\n{\n    \"blacklantern\": 1,\n    \"black\": 1,\n    \"bl\": 1,\n    \"lantern\": 1\n}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def absorb_word(self, word, wordninja=True):\n    \"\"\"\n    Absorbs a word into the word cloud after splitting it using a word extraction algorithm.\n\n    This method splits the input word into smaller meaningful words using word extraction, and then adds each\n    of them to the word cloud. The splitting is done using a predefined algorithm in the parent helper.\n\n    Args:\n        word (str): The word to be split and absorbed into the word cloud.\n        wordninja (bool, optional): If True, word extraction is enabled. Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n        &gt;&gt;&gt; print(self.helpers.word_cloud)\n        {\n            \"blacklantern\": 1,\n            \"black\": 1,\n            \"bl\": 1,\n            \"lantern\": 1\n        }\n    \"\"\"\n    for w in self.parent_helper.extract_words(word, wordninja=wordninja):\n        self.add_word(w)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.add_word","title":"add_word","text":"<pre><code>add_word(word, lowercase=True)\n</code></pre> <p>Adds a word to the word cloud.</p> <p>This method updates the word cloud by adding a given word. If the word already exists in the cloud, its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.</p> <p>Parameters:</p> <ul> <li> <code>word</code>               (<code>str</code>)           \u2013            <p>The word to be added to the word cloud.</p> </li> <li> <code>lowercase</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, the word will be converted to lowercase before adding. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n&gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n&gt;&gt;&gt; print(self.helpers.word_cloud)\n{'example': 2}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def add_word(self, word, lowercase=True):\n    \"\"\"\n    Adds a word to the word cloud.\n\n    This method updates the word cloud by adding a given word. If the word already exists in the cloud,\n    its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.\n\n    Args:\n        word (str): The word to be added to the word cloud.\n        lowercase (bool, optional): If True, the word will be converted to lowercase before adding. 
Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n        &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n        &gt;&gt;&gt; print(self.helpers.word_cloud)\n        {'example': 2}\n    \"\"\"\n    if lowercase:\n        word = word.lower()\n    try:\n        self[word] += 1\n    except KeyError:\n        self[word] = 1\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.get_number_mutations","title":"get_number_mutations","text":"<pre><code>get_number_mutations(base, n=5, padding=2)\n</code></pre> <p>Generates mutations of a base string by modifying the numerical parts or appending numbers.</p> <p>This method detects existing numbers in the base string and tries incrementing and decrementing them within a specified range. It also appends numbers at the end or after each word to generate more mutations.</p> <p>Parameters:</p> <ul> <li> <code>base</code>               (<code>str</code>)           \u2013            <p>The base string to generate mutations from.</p> </li> <li> <code>n</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>The range of numbers to use for incrementing/decrementing. Defaults to 5.</p> </li> <li> <code>padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>Zero-pad numbers up to this length. Defaults to 2.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of mutated strings based on the base input.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n{\n    \"www0-test\",\n    \"www1-test\",\n    \"www2-test\",\n    \"www2-test0\",\n    \"www2-test00\",\n    \"www2-test01\",\n    \"www2-test1\",\n    \"www3-test\",\n    \"www4-test\"\n}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def get_number_mutations(self, base, n=5, padding=2):\n    \"\"\"\n    Generates mutations of a base string by modifying the numerical parts or appending numbers.\n\n    This method detects existing numbers in the base string and tries incrementing and decrementing them within a\n    specified range. It also appends numbers at the end or after each word to generate more mutations.\n\n    Args:\n        base (str): The base string to generate mutations from.\n        n (int, optional): The range of numbers to use for incrementing/decrementing. Defaults to 5.\n        padding (int, optional): Zero-pad numbers up to this length. Defaults to 2.\n\n    Returns:\n        set: A set of mutated strings based on the base input.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n        {\n            \"www0-test\",\n            \"www1-test\",\n            \"www2-test\",\n            \"www2-test0\",\n            \"www2-test00\",\n            \"www2-test01\",\n            \"www2-test1\",\n            \"www3-test\",\n            \"www4-test\"\n        }\n    \"\"\"\n    results = set()\n\n    # detects numbers and increments/decrements them\n    # e.g. 
for \"base2_p013\", we would try:\n    # - \"base0_p013\" through \"base12_p013\"\n    # - \"base2_p003\" through \"base2_p023\"\n    # limited to three iterations for sanity's sake\n    for match in list(self.parent_helper.regexes.num_regex.finditer(base))[-3:]:\n        span = match.span()\n        before = base[: span[0]]\n        after = base[span[-1] :]\n        number = base[span[0] : span[-1]]\n        numlen = len(number)\n        maxnum = min(int(\"9\" * numlen), int(number) + n)\n        minnum = max(0, int(number) - n)\n        for i in range(minnum, maxnum + 1):\n            filled_num = str(i).zfill(numlen)\n            results.add(f\"{before}{filled_num}{after}\")\n            if not number.startswith(\"0\"):\n                results.add(f\"{before}{i}{after}\")\n\n    # appends numbers after each word\n    # e.g., for \"base_www\", we would try:\n    # - \"base1_www\", \"base2_www\", etc.\n    # - \"base_www1\", \"base_www2\", etc.\n    # limited to three iterations for sanity's sake\n    number_suffixes = self.parent_helper.gen_numbers(n, padding)\n    for match in list(self.parent_helper.regexes.word_regex.finditer(base))[-3:]:\n        span = match.span()\n        for suffix in number_suffixes:\n            before = base[: span[-1]]\n            after = base[span[-1] :]\n            # skip if there's already a number\n            if len(after) &gt; 1 and not after[0].isdigit():\n                results.add(f\"{before}{suffix}{after}\")\n    # basic cases so we don't miss anything\n    for s in number_suffixes:\n        results.add(f\"{base}{s}\")\n        results.add(base)\n\n    return results\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.json","title":"json","text":"<pre><code>json(limit=None)\n</code></pre> <p>Returns the word cloud as a sorted OrderedDict, optionally truncated to the top <code>limit</code> entries.</p> <p>Parameters:</p> <ul> <li> <code>limit</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum number of entries to include in the returned OrderedDict. If None, all entries are included.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>OrderedDict</code>          \u2013            <p>A dictionary sorted by word frequencies, potentially truncated to the top <code>limit</code> entries.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\nOrderedDict([('cherry', 8), ('apple', 5)])\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def json(self, limit=None):\n    \"\"\"\n    Returns the word cloud as a sorted OrderedDict, optionally truncated to the top `limit` entries.\n\n    Args:\n        limit (int, optional): The maximum number of entries to include in the returned OrderedDict. 
If None, all entries are included.\n\n    Returns:\n        OrderedDict: A dictionary sorted by word frequencies, potentially truncated to the top `limit` entries.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\n        OrderedDict([('cherry', 8), ('apple', 5)])\n    \"\"\"\n    cloud_sorted = sorted(self.items(), key=lambda x: x[-1], reverse=True)\n    if limit is not None:\n        cloud_sorted = cloud_sorted[:limit]\n    return OrderedDict(cloud_sorted)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.load","title":"load","text":"<pre><code>load(filename=None)\n</code></pre> <p>Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>The path to the file from which to load the word cloud. If None, uses a default filename.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def load(self, filename=None):\n    \"\"\"\n    Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line\n    or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.\n\n    Args:\n        filename (str, optional): The path to the file from which to load the word cloud. If None, uses a default filename.\n    \"\"\"\n    if filename is None:\n        wordcloud_path = self.default_filename\n    else:\n        wordcloud_path = Path(filename).resolve()\n    log.verbose(f\"Loading word cloud from {wordcloud_path}\")\n    try:\n        with open(str(wordcloud_path), newline=\"\") as f:\n            c = csv.reader(f, delimiter=\"\\t\")\n            for row in c:\n                if len(row) == 1:\n                    self.add_word(row[0])\n                elif len(row) == 2:\n                    with suppress(Exception):\n                        count, word = row\n                        count = int(count)\n                        self[word] = count\n        if len(self) &gt; 0:\n            log.success(f\"Loaded word cloud ({len(self):,} words) from {wordcloud_path}\")\n    except Exception as e:\n        import traceback\n\n        log_fn = log.debug\n        if filename is not None:\n            log_fn = log.warning\n        log_fn(f\"Failed to load word cloud from {wordcloud_path}: {e}\")\n        if filename is not None:\n            log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.mutations","title":"mutations","text":"<pre><code>mutations(words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True)\n</code></pre> <p>Generate various mutations for the given list of words based on different criteria.</p> <p>Yields tuples of strings which can be joined on the desired delimiter, e.g. 
\"-\" or \"_\".</p> <p>Parameters:</p> <ul> <li> <code>words</code>               (<code>Union[str, Iterable[str]]</code>)           \u2013            <p>A single word or list of words to mutate.</p> </li> <li> <code>devops</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include devops-related mutations.</p> </li> <li> <code>cloud</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include mutations from the word cloud.</p> </li> <li> <code>letters</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include letter-based mutations.</p> </li> <li> <code>numbers</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>The maximum numeric mutations to include.</p> </li> <li> <code>number_padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>Padding for numeric mutations.</p> </li> <li> <code>substitute_numbers</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to substitute numbers in mutations.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing each of the mutation segments.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def mutations(\n    self, words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True\n):\n    \"\"\"\n    Generate various mutations for the given list of words based on different criteria.\n\n    Yields tuples of strings which can be joined on the desired delimiter, e.g. 
\"-\" or \"_\".\n\n    Args:\n        words (Union[str, Iterable[str]]): A single word or list of words to mutate.\n        devops (bool): Whether to include devops-related mutations.\n        cloud (bool): Whether to include mutations from the word cloud.\n        letters (bool): Whether to include letter-based mutations.\n        numbers (int): The maximum numeric mutations to include.\n        number_padding (int): Padding for numeric mutations.\n        substitute_numbers (bool): Whether to substitute numbers in mutations.\n\n    Yields:\n        tuple: A tuple containing each of the mutation segments.\n    \"\"\"\n    if isinstance(words, str):\n        words = (words,)\n    results = set()\n    for word in words:\n        h = hash(word)\n        if not h in results:\n            results.add(h)\n            yield (word,)\n    if numbers &gt; 0:\n        if substitute_numbers:\n            for word in words:\n                for number_mutation in self.get_number_mutations(word, n=numbers, padding=number_padding):\n                    h = hash(number_mutation)\n                    if not h in results:\n                        results.add(h)\n                        yield (number_mutation,)\n    for word in words:\n        for modifier in self.modifiers(\n            devops=devops, cloud=cloud, letters=letters, numbers=numbers, number_padding=number_padding\n        ):\n            a = (word, modifier)\n            b = (modifier, word)\n            for _ in (a, b):\n                h = hash(_)\n                if h not in results:\n                    results.add(h)\n                    yield _\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.save","title":"save","text":"<pre><code>save(filename=None, limit=None)\n</code></pre> <p>Saves the word cloud to a file. The cloud can optionally be truncated to the top <code>limit</code> entries.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>The path to the file where the word cloud will be saved. If None, uses a default filename.</p> </li> <li> <code>limit</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum number of entries to save to the file. If None, all entries are saved.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing a boolean indicating success or failure, and the resolved filename.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n(True, Path('word_cloud.txt'))\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def save(self, filename=None, limit=None):\n    \"\"\"\n    Saves the word cloud to a file. The cloud can optionally be truncated to the top `limit` entries.\n\n    Args:\n        filename (str, optional): The path to the file where the word cloud will be saved. If None, uses a default filename.\n        limit (int, optional): The maximum number of entries to save to the file. 
If None, all entries are saved.\n\n    Returns:\n        tuple: A tuple containing a boolean indicating success or failure, and the resolved filename.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n        (True, Path('word_cloud.txt'))\n    \"\"\"\n    if filename is None:\n        filename = self.default_filename\n    else:\n        filename = Path(filename).resolve()\n    try:\n        if not self.parent_helper.mkdir(filename.parent):\n            log.error(f\"Failure creating or error writing to {filename.parent} when saving word cloud\")\n            return\n        if len(self) &gt; 0:\n            log.debug(f\"Saving word cloud to {filename}\")\n            with open(str(filename), mode=\"w\", newline=\"\") as f:\n                c = csv.writer(f, delimiter=\"\\t\")\n                for word, count in self.json(limit).items():\n                    c.writerow([count, word])\n            log.debug(f\"Saved word cloud ({len(self):,} words) to {filename}\")\n            return True, filename\n        else:\n            log.debug(f\"No words to save\")\n    except Exception as e:\n        import traceback\n\n        log.warning(f\"Failed to save word cloud to {filename}: {e}\")\n        log.trace(traceback.format_exc())\n    return False, filename\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.truncate","title":"truncate","text":"<pre><code>truncate(limit)\n</code></pre> <p>Truncates the word cloud dictionary to retain only the top <code>limit</code> entries based on their occurrence frequencies.</p> <p>Parameters:</p> <ul> <li> <code>limit</code>               (<code>int</code>)           \u2013            <p>The maximum number of entries to retain in the word cloud.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n&gt;&gt;&gt; self.helpers.word_cloud\n{'cherry': 8, 'apple': 5}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def truncate(self, limit):\n    \"\"\"\n    Truncates the word cloud dictionary to retain only the top `limit` entries based on their occurrence frequencies.\n\n    Args:\n        limit (int): The maximum number of entries to retain in the word cloud.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n        &gt;&gt;&gt; self.helpers.word_cloud\n        {'cherry': 8, 'apple': 5}\n    \"\"\"\n    new_self = dict(self.json(limit=limit))\n    self.clear()\n    self.update(new_self)\n</code></pre>"},{"location":"modules/custom_yara_rules/","title":"Custom Yara Rules","text":""},{"location":"modules/custom_yara_rules/#overview","title":"Overview","text":"<p>Through the <code>excavate</code> internal module, BBOT supports searching through HTTP response data using custom YARA rules. </p> <p>This feature can be utilized with the command line option <code>--custom-yara-rules</code> or <code>-cy</code>, followed by a file containing the YARA rules.</p> <p>Example:</p> <pre><code>bbot -m httpx --custom-yara-rules=test.yara -t http://example.com/\n</code></pre> <p>Where <code>test.yara</code> is a file on the filesystem. 
The file can contain multiple YARA rules, separated by lines.</p> <p>YARA rules can be quite simple, the simplest example being a single string search:</p> <pre><code>rule find_string {\n    strings:\n        $str1 = \"AAAABBBB\"\n\n    condition:\n        $str1\n}\n</code></pre> <p>To look for multiple strings, and match if any of them hit:</p> <pre><code>rule find_string {\n    strings:\n        $str1 = \"AAAABBBB\"\n        $str2 = \"CCCCDDDD\"\n\n    condition:\n        any of them\n}\n</code></pre> <p>One of the most important capabilities is the use of regexes within the rule, as shown in the following example.</p> <pre><code>rule find_AAAABBBB_regex {\n    strings:\n        $regex = /A{1,4}B{1,4}/\n\n    condition:\n        $regex\n}\n</code></pre> <p>Note: YARA uses its own regex engine that is not a 1:1 match with Python regexes. This means many existing regexes will have to be modified before they will work with YARA. The good news is: YARA's regex engine is FAST, immensely faster than Python's!</p> <p>Further discussion of the art of writing complex YARA rules goes far beyond the scope of this documentation. A good place to start learning more is the official YARA documentation. </p> <p>The YARA engine provides plenty of room to make highly complex signatures possible, with various conditional operators available. Multiple signatures can be linked together to create sophisticated detection rules that can identify a wide range of specific content. This flexibility allows the crafting of efficient rules for detecting security vulnerabilities, leveraging logical operators, regular expressions, and other powerful features. Additionally, YARA's modular structure supports easy updates and maintenance of signature sets.</p>"},{"location":"modules/custom_yara_rules/#custom-options","title":"Custom options","text":"<p>BBOT supports the use of a few custom <code>meta</code> attributes within YARA rules, which will alter the behavior of the rule and the post-processing of the results.</p>"},{"location":"modules/custom_yara_rules/#description","title":"description","text":"<p>The description of the rule. Will end up in the description of any produced events if defined.</p> <p>Example with no description provided:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [find_string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com\"} excavate\n</code></pre> <p>Example with the description added:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [AAAABBBB] with description: [contains our test string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com\"}     excavate\n</code></pre> <p>That FINDING was produced with the following signature:</p> <pre><code>rule AAAABBBB {\n\n    meta:\n        description = \"contains our test string\"\n    strings:\n        $str1 = \"AAAABBBB\"\n    condition:\n        $str1\n}\n</code></pre>"},{"location":"modules/custom_yara_rules/#tags","title":"tags","text":"<p>Tags specified with this option will be passed on to any resulting emitted events. 
Tags are provided as a comma-separated string, as shown below:</p> <p>Let's expand on the previous example:</p> <pre><code>rule AAAABBBB {\n\n    meta:\n        description = \"contains our test string\"\n        tags = \"tag1,tag2,tag3\"\n    strings:\n        $str1 = \"AAAABBBB\"\n    condition:\n        $str1\n}\n</code></pre> <p>Now, the BBOT FINDING includes these custom tags, as shown in the following output:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [AAAABBBB] with description: [contains our test string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com/\"} excavate   (tag1, tag2, tag3)\n</code></pre>"},{"location":"modules/custom_yara_rules/#emit_match","title":"emit_match","text":"<p>When set to True, the contents returned from a successful extraction via a YARA regex will be included in the FINDING event which is emitted.</p> <p>Consider the following example YARA rule:</p> <pre><code>rule SubstackLink\n{\n    meta:\n        description = \"contains a Substack link\"\n        emit_match = true\n    strings:\n        $substack_link = /https?:\\/\\/[a-zA-Z0-9.-]+\\.substack\\.com/\n    condition:\n        $substack_link\n}\n</code></pre> <p>When run against the Black Lantern Security homepage with the following BBOT command:</p> <pre><code>bbot -m httpx --custom-yara-rules=substack.yara -t http://www.blacklanternsecurity.com/\n</code></pre> <p>We get the following result. Note that the finding now contains the actual link that was identified with the regex.</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [SubstackLink] with description: [contains a Substack link] Matched via identifier [substack_link] and extracted [https://blacklanternsecurity.substack.com]\", \"host\": \"www.blacklanternsecurity.com\", \"url\": \"https://www.blacklanternsecurity.com/\"}    excavate\n</code></pre>"},{"location":"modules/internal_modules/","title":"List of Modules","text":""},{"location":"modules/internal_modules/#what-are-internal-modules","title":"What are internal modules?","text":"<p>Internal modules are just like regular modules, except that they run all the time. They do not have to be explicitly enabled. They can, however, be explicitly disabled if needed.</p> <p>Turning them off is simple: each has a root-level config option which can be set to False to disable it:</p> <pre><code># Infer certain events from others, e.g. IPs from IP ranges, DNS_NAMEs from URLs, etc.\nspeculate: True\n# Passively search event data for URLs, hostnames, emails, etc.\nexcavate: True\n# Summarize activity at the end of a scan\naggregate: True\n# DNS resolution\ndnsresolve: True\n# Cloud provider tagging\ncloudcheck: True\n</code></pre> <p>These modules execute core functionality that is normally essential for a typical BBOT scan. Let's take a quick look at each one's functionality:</p>"},{"location":"modules/internal_modules/#aggregate","title":"aggregate","text":"<p>Summarize statistics at the end of a scan. 
Disable if you don't want to see this table.</p>"},{"location":"modules/internal_modules/#cloud","title":"cloud","text":"<p>The cloud module looks at events, tries to determine whether they are associated with a cloud provider, and tags them as such. It can also identify certain cloud resources.</p>"},{"location":"modules/internal_modules/#dns","title":"dns","text":"<p>The DNS internal module controls the basic DNS resolution that BBOT performs, and all of the supporting machinery like wildcard detection, etc.</p>"},{"location":"modules/internal_modules/#excavate","title":"excavate","text":"<p>The excavate internal module is designed to passively extract valuable information from HTTP response data. It primarily uses YARA regexes to extract information, with various events being produced from the post-processing of the YARA results.</p> <p>Here is a summary of the data it produces:</p>"},{"location":"modules/internal_modules/#urls","title":"URLs","text":"<p>By extracting URLs from all visited pages, excavate already provides half of a web spider. The other half is recursion, which is baked into BBOT from the ground up. Therefore, protections are in place by default in the form of <code>web_spider_distance</code> and <code>web_spider_depth</code> settings. These settings govern restrictions to URLs recursively harvested from HTTP responses, preventing endless runaway scans. However, in the right situation the controlled use of a web spider is extremely powerful.</p>"},{"location":"modules/internal_modules/#parameter-extraction","title":"Parameter Extraction","text":"<p>The parameter extraction functionality identifies and extracts key web parameters from HTTP responses, and produces <code>WEB_PARAMETER</code> events. This includes parameters found in GET and POST requests, HTML forms, and jQuery requests. Currently, these are only used by the <code>hunt</code> module, and by the <code>paramminer</code> modules, to a limited degree. However, future functionality will make extensive use of these events.</p>"},{"location":"modules/internal_modules/#email-extraction","title":"Email Extraction","text":"<p>Detects email addresses within HTTP_RESPONSE data. </p>"},{"location":"modules/internal_modules/#error-detection","title":"Error Detection","text":"<p>Scans for verbose error messages in HTTP responses and raw text data. By identifying specific error signatures from various programming languages and frameworks, this feature helps uncover misconfigurations, debugging information, and potential vulnerabilities. This insight is invaluable for identifying weak points or anomalies in web applications.</p>"},{"location":"modules/internal_modules/#content-security-policy-csp-extraction","title":"Content Security Policy (CSP) Extraction","text":"<p>The CSP extraction capability focuses on extracting domains from Content-Security-Policy headers. By analyzing these headers, BBOT can identify additional domains which can get fed back into the scan.</p>"},{"location":"modules/internal_modules/#serialization-detection","title":"Serialization Detection","text":"<p>Serialized objects are a common source of serious security vulnerabilities. Excavate aims to detect those used in Java, .NET, and PHP applications. </p>"},{"location":"modules/internal_modules/#functionality-detection","title":"Functionality Detection","text":"<p>Looks for specific web functionalities such as file upload fields and WSDL URLs. 
By identifying these elements, BBOT can pinpoint areas of the application that may require further scrutiny for security vulnerabilities.</p>"},{"location":"modules/internal_modules/#non-http-scheme-detection","title":"Non-HTTP Scheme Detection","text":"<p>The non-HTTP scheme detection capability extracts URLs with non-HTTP schemes, such as ftp, mailto, and javascript. By identifying these URLs, BBOT can uncover additional vectors for attack or information leakage.</p>"},{"location":"modules/internal_modules/#custom-yara-rules","title":"Custom Yara Rules","text":"<p>Excavate supports the use of custom YARA rules, which will be added to the other rules before the scan starts. For more info, view this.</p>"},{"location":"modules/internal_modules/#speculate","title":"speculate","text":"<p>Speculate is all about inferring one data type from another, particularly when certain tools like port scanners are not enabled. This is essential functionality for most BBOT scans, allowing for the discovery of web resources when starting with a DNS-only target list without a port scanner. It bridges gaps in the data, providing a more comprehensive view of the target by leveraging existing information.</p> <ul> <li>IP_RANGE: Converts an IP range into individual IP addresses and emits them as IP_ADDRESS events.</li> <li>DNS_NAME: Generates parent domains from DNS names.</li> <li>URL and URL_UNVERIFIED: Infers open TCP ports from URLs and speculates on sub-directory URLs.</li> <li>General URL Speculation: Emits URL_UNVERIFIED events for URLs not already in the event's history.</li> <li>IP_ADDRESS / DNS_NAME: Infers open TCP ports if active port scanning is not enabled.</li> <li>ORG_STUB: Derives organization stubs from TLDs, social stubs, or Azure tenant names and emits them as ORG_STUB events.</li> <li>USERNAME: Converts usernames to email addresses if they validate as such.</li> </ul>"},{"location":"modules/list_of_modules/","title":"List of Modules","text":"Module Type Needs API Key Description Flags Consumed Events Produced Events Author Created Date ajaxpro scan No Check for potentially vulnerable Ajaxpro instances active, safe, web-thorough HTTP_RESPONSE, URL FINDING, VULNERABILITY @liquidsec 2024-01-18 baddns scan No Check hosts for domain/subdomain takeovers active, baddns, cloud-enum, safe, subdomain-hijack, web-basic DNS_NAME, DNS_NAME_UNRESOLVED FINDING, VULNERABILITY @liquidsec 2024-01-18 baddns_direct scan No Check for unusual subdomain / service takeover edge cases that require direct detection active, baddns, cloud-enum, safe, subdomain-enum STORAGE_BUCKET, URL FINDING, VULNERABILITY @liquidsec 2024-01-29 baddns_zone scan No Check hosts for DNS zone transfers and NSEC walks active, baddns, cloud-enum, safe, subdomain-enum DNS_NAME FINDING, VULNERABILITY @liquidsec 2024-01-29 badsecrets scan No Library for detecting known or weak secrets across many web frameworks active, safe, web-basic HTTP_RESPONSE FINDING, TECHNOLOGY, VULNERABILITY @liquidsec 2022-11-19 bucket_amazon scan No Check for S3 buckets related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bucket_azure scan No Check for Azure storage blobs related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bucket_digitalocean scan No Check for DigitalOcean spaces related to target active, cloud-enum, safe, slow, web-thorough DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 
2022-11-08 bucket_firebase scan No Check for open Firebase databases related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2023-03-20 bucket_google scan No Check for Google object storage related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bypass403 scan No Check 403 pages for common bypasses active, aggressive, web-thorough URL FINDING @liquidsec 2022-07-05 dastardly scan No Lightweight web application security scanner active, aggressive, deadly, slow, web-thorough HTTP_RESPONSE FINDING, VULNERABILITY @domwhewell-sage 2023-12-11 dnsbrute scan No Brute-force subdomains with massdns + static wordlist active, aggressive, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-04-24 dnsbrute_mutations scan No Brute-force subdomains with massdns + target-specific mutations active, aggressive, slow, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-04-25 dnscommonsrv scan No Check for common SRV records active, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-05-15 dotnetnuke scan No Scan for critical DotNetNuke (DNN) vulnerabilities active, aggressive, web-thorough HTTP_RESPONSE TECHNOLOGY, VULNERABILITY @liquidsec 2023-11-21 ffuf scan No A fast web fuzzer written in Go active, aggressive, deadly URL URL_UNVERIFIED @liquidsec 2022-04-10 ffuf_shortnames scan No Use ffuf in combination IIS shortnames active, aggressive, iis-shortnames, web-thorough URL_HINT URL_UNVERIFIED @liquidsec 2022-07-05 filedownload scan No Download common filetypes such as PDF, DOCX, PPTX, etc. active, safe, web-basic HTTP_RESPONSE, URL_UNVERIFIED FILESYSTEM @TheTechromancer 2023-10-11 fingerprintx scan No Fingerprint exposed services like RDP, SSH, MySQL, etc. active, safe, service-enum, slow OPEN_TCP_PORT PROTOCOL @TheTechromancer 2023-01-30 generic_ssrf scan No Check for generic SSRFs active, aggressive, web-thorough URL VULNERABILITY @liquidsec 2022-07-30 git scan No Check for exposed .git repositories active, code-enum, safe, web-basic URL FINDING @TheTechromancer 2023-05-30 gitlab scan No Detect GitLab instances and query them for repositories active, code-enum, safe HTTP_RESPONSE, SOCIAL, TECHNOLOGY CODE_REPOSITORY, FINDING, SOCIAL, TECHNOLOGY @TheTechromancer 2024-03-11 gowitness scan No Take screenshots of webpages active, safe, web-screenshots SOCIAL, URL TECHNOLOGY, URL, URL_UNVERIFIED, WEBSCREENSHOT @TheTechromancer 2022-07-08 host_header scan No Try common HTTP Host header spoofing techniques active, aggressive, web-thorough HTTP_RESPONSE FINDING @liquidsec 2022-07-27 httpx scan No Visit webpages. 
Many other modules rely on httpx active, cloud-enum, safe, social-enum, subdomain-enum, web-basic OPEN_TCP_PORT, URL, URL_UNVERIFIED HTTP_RESPONSE, URL @TheTechromancer 2022-07-08 hunt scan No Watch for commonly-exploitable HTTP parameters active, safe, web-thorough WEB_PARAMETER FINDING @liquidsec 2022-07-20 iis_shortnames scan No Check for IIS shortname vulnerability active, iis-shortnames, safe, web-basic URL URL_HINT @liquidsec 2022-04-15 newsletters scan No Searches for Newsletter Submission Entry Fields on Websites active, safe HTTP_RESPONSE FINDING @stryker2k2 2024-02-02 ntlm scan No Watch for HTTP endpoints that support NTLM authentication active, safe, web-basic HTTP_RESPONSE, URL DNS_NAME, FINDING @liquidsec 2022-07-25 nuclei scan No Fast and customisable vulnerability scanner active, aggressive, deadly URL FINDING, TECHNOLOGY, VULNERABILITY @TheTechromancer 2022-03-12 oauth scan No Enumerate OAUTH and OpenID Connect services active, affiliates, cloud-enum, safe, subdomain-enum, web-basic DNS_NAME, URL_UNVERIFIED DNS_NAME @TheTechromancer 2023-07-12 paramminer_cookies scan No Smart brute-force to check for common HTTP cookie parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER FINDING, WEB_PARAMETER @liquidsec 2022-06-27 paramminer_getparams scan No Use smart brute-force to check for common HTTP GET parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER FINDING, WEB_PARAMETER @liquidsec 2022-06-28 paramminer_headers scan No Use smart brute-force to check for common HTTP header parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER WEB_PARAMETER @liquidsec 2022-04-15 portscan scan No Port scan with masscan. By default, scans top 100 ports. active, portscan, safe DNS_NAME, IP_ADDRESS, IP_RANGE OPEN_TCP_PORT @TheTechromancer 2024-05-15 robots scan No Look for and parse robots.txt active, safe, web-basic URL URL_UNVERIFIED @liquidsec 2023-02-01 secretsdb scan No Detect common secrets with secrets-patterns-db active, safe, web-basic HTTP_RESPONSE FINDING @TheTechromancer 2023-03-17 securitytxt scan No Check for security.txt content active, cloud-enum, safe, subdomain-enum, web-basic DNS_NAME EMAIL_ADDRESS, URL_UNVERIFIED @colin-stubbs 2024-05-26 smuggler scan No Check for HTTP smuggling active, aggressive, slow, web-thorough URL FINDING @liquidsec 2022-07-06 sslcert scan No Visit open ports and retrieve SSL certificates active, affiliates, email-enum, safe, subdomain-enum, web-basic OPEN_TCP_PORT DNS_NAME, EMAIL_ADDRESS @TheTechromancer 2022-03-30 telerik scan No Scan for critical Telerik vulnerabilities active, aggressive, web-thorough HTTP_RESPONSE, URL FINDING, VULNERABILITY @liquidsec 2022-04-10 url_manipulation scan No Attempt to identify URL parsing/routing based vulnerabilities active, aggressive, web-thorough URL FINDING @liquidsec 2022-09-27 vhost scan No Fuzz for virtual hosts active, aggressive, deadly, slow URL DNS_NAME, VHOST @liquidsec 2022-05-02 wafw00f scan No Web Application Firewall Fingerprinting Tool active, aggressive URL WAF @liquidsec 2023-02-15 wappalyzer scan No Extract technologies from web responses active, safe, web-basic HTTP_RESPONSE TECHNOLOGY @liquidsec 2022-04-15 wpscan scan No Wordpress security scanner. Highly recommended to use an API key for better results. 
active, aggressive HTTP_RESPONSE, TECHNOLOGY FINDING, TECHNOLOGY, URL_UNVERIFIED, VULNERABILITY @domwhewell-sage 2024-05-29 affiliates scan No Summarize affiliate domains at the end of a scan affiliates, passive, report, safe * @TheTechromancer 2022-07-25 anubisdb scan No Query jldc.me's database for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-10-04 apkpure scan No Download android applications from apkpure.com code-enum, passive, safe MOBILE_APP FILESYSTEM @domwhewell-sage 2024-10-11 asn scan No Query ripe and bgpview.io for ASNs passive, report, safe, subdomain-enum IP_ADDRESS ASN @TheTechromancer 2022-07-25 azure_realm scan No Retrieves the \"AuthURL\" from login.microsoftonline.com/getuserrealm affiliates, cloud-enum, passive, safe, subdomain-enum, web-basic DNS_NAME URL_UNVERIFIED @TheTechromancer 2023-07-12 azure_tenant scan No Query Azure for tenant sister domains affiliates, cloud-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-07-04 bevigil scan Yes Retrieve OSINT data from mobile applications using BeVigil passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @alt-glitch 2022-10-26 binaryedge scan Yes Query the BinaryEdge API passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-17 bucket_file_enum scan No Works in conjunction with the filedownload module to download files from open storage buckets. Currently supported cloud providers: AWS, DigitalOcean cloud-enum, passive, safe STORAGE_BUCKET URL_UNVERIFIED @TheTechromancer 2023-11-14 bufferoverrun scan Yes Query BufferOverrun's TLS API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-10-23 builtwith scan Yes Query Builtwith.com for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-23 c99 scan Yes Query the C99 API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-08 censys scan Yes Query the Censys API passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-04 certspotter scan No Query Certspotter's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-28 chaos scan Yes Query ProjectDiscovery's Chaos API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-14 code_repository scan No Look for code repository links in webpages code-enum, passive, safe URL_UNVERIFIED CODE_REPOSITORY @domwhewell-sage 2024-05-15 columbus scan No Query the Columbus Project API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-06-01 credshed scan Yes Send queries to your own credshed server to check for known credentials of your targets passive, safe DNS_NAME EMAIL_ADDRESS, HASHED_PASSWORD, PASSWORD, USERNAME @SpamFaux 2023-10-12 crt scan No Query crt.sh (certificate transparency) for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-05-13 dehashed scan Yes Execute queries against dehashed.com for exposed credentials email-enum, passive, safe DNS_NAME EMAIL_ADDRESS, HASHED_PASSWORD, PASSWORD, USERNAME @SpamFaux 2023-10-12 digitorus scan No Query certificatedetails.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-25 dnsbimi scan No Check DNS_NAME's for BIMI records to find image and certificate hosting URL's cloud-enum, passive, safe, subdomain-enum DNS_NAME RAW_DNS_RECORD, URL_UNVERIFIED @colin-stubbs 2024-11-15 dnscaa scan No Check for CAA 
records email-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME, EMAIL_ADDRESS, URL_UNVERIFIED @colin-stubbs 2024-05-26 dnsdumpster scan No Query dnsdumpster for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-03-12 docker_pull scan No Download images from a docker repository code-enum, passive, safe, slow CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-03-24 dockerhub scan No Search for docker repositories of discovered orgs/usernames code-enum, passive, safe ORG_STUB, SOCIAL CODE_REPOSITORY, SOCIAL, URL_UNVERIFIED @domwhewell-sage 2024-03-12 emailformat scan No Query email-format.com for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-07-11 extractous scan No Module to extract data from files passive, safe FILESYSTEM RAW_TEXT @domwhewell-sage 2024-06-03 fullhunt scan Yes Query the fullhunt.io API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 git_clone scan No Clone code github repositories code-enum, passive, safe, slow CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-03-08 github_codesearch scan Yes Query Github's API for code containing the target domain name code-enum, passive, safe, subdomain-enum DNS_NAME CODE_REPOSITORY, URL_UNVERIFIED @domwhewell-sage 2023-12-14 github_org scan No Query Github's API for organization and member repositories code-enum, passive, safe, subdomain-enum ORG_STUB, SOCIAL CODE_REPOSITORY @domwhewell-sage 2023-12-14 github_workflows scan No Download a github repositories workflow logs and workflow artifacts code-enum, passive, safe CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-04-29 google_playstore scan No Search for android applications on play.google.com code-enum, passive, safe CODE_REPOSITORY, ORG_STUB MOBILE_APP @domwhewell-sage 2024-10-08 hackertarget scan No Query the hackertarget.com API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-28 hunterio scan Yes Query hunter.io for emails email-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME, EMAIL_ADDRESS, URL_UNVERIFIED @TheTechromancer 2022-04-25 internetdb scan No Query Shodan's InternetDB for open ports, hostnames, technologies, and vulnerabilities passive, portscan, safe, subdomain-enum DNS_NAME, IP_ADDRESS DNS_NAME, FINDING, OPEN_TCP_PORT, TECHNOLOGY, VULNERABILITY @TheTechromancer 2023-12-22 ip2location scan Yes Query IP2location.io's API for geolocation information. 
passive, safe IP_ADDRESS GEOLOCATION @TheTechromancer 2023-09-12 ipneighbor scan No Look beside IPs in their surrounding subnet aggressive, passive, subdomain-enum IP_ADDRESS IP_ADDRESS @TheTechromancer 2022-06-08 ipstack scan Yes Query IPStack's GeoIP API passive, safe IP_ADDRESS GEOLOCATION @tycoonslive 2022-11-26 jadx scan No Decompile APKs and XAPKs using JADX passive, safe FILESYSTEM FILESYSTEM @domwhewell-sage 2024-11-04 leakix scan No Query leakix.net for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-11 myssl scan No Query myssl.com's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-10 otx scan No Query otx.alienvault.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 passivetotal scan Yes Query the PassiveTotal API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-08 pgp scan No Query common PGP servers for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-08-10 postman scan No Query Postman's API for related workspaces, collections, requests and download them code-enum, passive, safe, subdomain-enum ORG_STUB, SOCIAL CODE_REPOSITORY @domwhewell-sage 2024-09-07 postman_download scan No Download workspaces, collections, requests from Postman code-enum, passive, safe, subdomain-enum CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-09-07 rapiddns scan No Query rapiddns.io for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 securitytrails scan Yes Query the SecurityTrails API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-03 shodan_dns scan Yes Query Shodan for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-03 sitedossier scan No Query sitedossier.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-08-04 skymem scan No Query skymem.info for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-07-11 social scan No Look for social media links in webpages passive, safe, social-enum URL_UNVERIFIED SOCIAL @TheTechromancer 2023-03-28 subdomaincenter scan No Query subdomain.center's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-26 subdomainradar scan Yes Query the Subdomain API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-08 trickest scan Yes Query Trickest's API for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @amiremami 2024-07-27 trufflehog scan No TruffleHog is a tool for finding credentials code-enum, passive, safe CODE_REPOSITORY, FILESYSTEM FINDING, VULNERABILITY @domwhewell-sage 2024-03-12 urlscan scan No Query urlscan.io for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @TheTechromancer 2022-06-09 viewdns scan No Query viewdns.info's reverse whois for related domains affiliates, passive, safe DNS_NAME DNS_NAME @TheTechromancer 2022-07-04 virustotal scan Yes Query VirusTotal's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-25 wayback scan No Query archive.org's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @liquidsec 2022-04-01 zoomeye scan Yes Query ZoomEye's API for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-03 asset_inventory 
output No Merge hosts, open ports, technologies, findings, etc. into a single asset inventory CSV DNS_NAME, FINDING, HTTP_RESPONSE, IP_ADDRESS, OPEN_TCP_PORT, TECHNOLOGY, URL, VULNERABILITY, WAF IP_ADDRESS, OPEN_TCP_PORT @liquidsec 2022-09-30 csv output No Output to CSV * @TheTechromancer 2022-04-07 discord output No Message a Discord channel when certain events are encountered * @TheTechromancer 2023-08-14 emails output No Output any email addresses found belonging to the target domain email-enum EMAIL_ADDRESS @domwhewell-sage 2023-12-23 http output No Send every event to a custom URL via a web request * @TheTechromancer 2022-04-13 json output No Output to Newline-Delimited JSON (NDJSON) * @TheTechromancer 2022-04-07 neo4j output No Output to Neo4j * @TheTechromancer 2022-04-07 postgres output No Output scan data to a SQLite database * python output No Output via Python API * @TheTechromancer 2022-09-13 slack output No Message a Slack channel when certain events are encountered * @TheTechromancer 2023-08-14 splunk output No Send every event to a splunk instance through HTTP Event Collector * @w0Tx 2024-02-17 sqlite output No Output scan data to a SQLite database * stdout output No Output to text * subdomains output No Output only resolved, in-scope subdomains subdomain-enum DNS_NAME, DNS_NAME_UNRESOLVED @TheTechromancer 2023-07-31 teams output No Message a Teams channel when certain events are encountered * @TheTechromancer 2023-08-14 txt output No Output to text * web_report output No Create a markdown report with web assets FINDING, TECHNOLOGY, URL, VHOST, VULNERABILITY @liquidsec 2023-02-08 websocket output No Output to websockets * @TheTechromancer 2022-04-15 cloudcheck internal No Tag events by cloud provider, identify cloud resources like storage buckets * dnsresolve internal No * aggregate internal No Summarize statistics at the end of a scan passive, safe @TheTechromancer 2022-07-25 excavate internal No Passively extract juicy tidbits from scan data passive HTTP_RESPONSE, RAW_TEXT URL_UNVERIFIED, WEB_PARAMETER @liquidsec 2022-06-27 speculate internal No Derive certain event types from others by common sense passive AZURE_TENANT, DNS_NAME, DNS_NAME_UNRESOLVED, HTTP_RESPONSE, IP_ADDRESS, IP_RANGE, SOCIAL, STORAGE_BUCKET, URL, URL_UNVERIFIED, USERNAME DNS_NAME, FINDING, IP_ADDRESS, OPEN_TCP_PORT, ORG_STUB @liquidsec 2022-05-03 <p>For a list of module config options, see Module Options.</p>"},{"location":"modules/nuclei/","title":"Nuclei","text":""},{"location":"modules/nuclei/#overview","title":"Overview","text":"<p>BBOT integrates with Nuclei, an open-source web vulnerability scanner by Project Discovery. This is one of the ways BBOT makes it possible to go from a single target domain/IP all the way to confirmed vulnerabilities, in one scan. </p> <p></p> <ul> <li>The BBOT Nuclei module ingests [URL] events and emits events of type [VULNERABILITY] or [FINDING]</li> <li>Vulnerabilities will inherit their severity from the Nuclei templates</li> <li>Nuclei templates of severity INFO will be emitted as [FINDINGS]</li> </ul>"},{"location":"modules/nuclei/#default-behavior","title":"Default Behavior","text":"<ul> <li>By default, only \"directory URLs\" (URLs ending in a slash) will be scanned, but ALL templates will be used (BE CAREFUL!)</li> <li>Because it's so aggressive, Nuclei is considered a deadly module. 
This means you need to use the flag --allow-deadly to turn it on.</li> </ul>"},{"location":"modules/nuclei/#specifying-custom-templates","title":"Specifying custom templates","text":"<p>You can specify individual nuclei templates by setting the <code>modules.nuclei.templates</code> to their comma-separated filenames:</p> <pre><code>bbot -m nuclei -c modules.nuclei.templates=http/takeovers/airee-takeover.yaml,http/takeovers/cargo-takeover.yaml\n</code></pre> <p>...or via the config:</p> <pre><code>modules:\n  nuclei:\n    templates: http/takeovers/airee-takeover.yaml,http/takeovers/cargo-takeover.yaml\n</code></pre>"},{"location":"modules/nuclei/#configuration-and-options","title":"Configuration and Options","text":"<p>The Nuclei module has many configuration options:</p> Config Option Type Description Default modules.nuclei.batch_size int Number of targets to send to Nuclei per batch (default 200) 200 modules.nuclei.budget int Used in budget mode to set the number of requests which will be allotted to the nuclei scan 1 modules.nuclei.concurrency int maximum number of templates to be executed in parallel (default 25) 25 modules.nuclei.directory_only bool Filter out 'file' URL event (default True) True modules.nuclei.etags str tags to exclude from the scan modules.nuclei.mode str manual | technology | severe | budget. Technology: Only activate based on technology events that match nuclei tags (nuclei -as mode). Manual (DEFAULT): Fully manual settings. Severe: Only critical and high severity templates without intrusive. Budget: Limit Nuclei to a specified number of HTTP requests manual modules.nuclei.ratelimit int maximum number of requests to send per second (default 150) 150 modules.nuclei.retries int number of times to retry a failed request (default 0) 0 modules.nuclei.severity str Filter based on severity field available in the template. modules.nuclei.silent bool Don't display nuclei's banner or status messages False modules.nuclei.tags str execute a subset of templates that contain the provided tags modules.nuclei.templates str template or template directory paths to include in the scan modules.nuclei.version str nuclei version 3.3.5 <p>Most of these you probably will NOT want to change. In particular, we advise against changing the version of Nuclei, as it's possible the latest version won't work right with BBOT.</p> <p>We also do not recommend changing directory_only mode. This will cause Nuclei to process every URL. Because BBOT is recursive, this can get very out-of-hand very quickly, depending on which other modules are in use.</p>"},{"location":"modules/nuclei/#modes","title":"Modes","text":"<p>The modes with the Nuclei module are generally in place to help you limit the number of templates you are scanning with, to make your scans quicker. </p>"},{"location":"modules/nuclei/#manual","title":"Manual","text":"<p>This is the default setting, and will use all templates. However, if you're looking to do something particular, you might pair this with some of the pass-through options shown in the next setting.</p>"},{"location":"modules/nuclei/#severe","title":"Severe","text":"<p>severe mode uses only high/critical severity templates. It also excludes the intrusive tag. This is intended to be a shortcut for times when you need to rapidly identify high severity vulnerabilities but can't afford the full scan. 
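For example, a minimal sketch (reusing the target and flags from the Example Commands section below; adjust them for your own environment):</p> <pre><code># run only high/critical severity templates, excluding the intrusive tag\nbbot -f web-basic -m portscan nuclei --allow-deadly -c modules.nuclei.mode=severe -t app.evilcorp.com\n</code></pre> <p>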
Because most templates are INFO, LOW, or MEDIUM, your scan will finish much faster.</p>"},{"location":"modules/nuclei/#technology","title":"Technology","text":"<p>This is equivalent to the Nuclei '-as' scan option. It only uses templates that match detected technologies, using wappalyzer-based signatures. This can be a nice way to run a lightweight scan that still has a chance to find some good vulnerabilities.</p>"},{"location":"modules/nuclei/#budget","title":"Budget","text":"<p>Budget mode is unique to BBOT.</p> <p>For larger scans with thousands of targets, doing a FULL Nuclei scan (thousands of requests) against each one is not realistic. As an alternative to the other modes, you can take advantage of Nuclei's \"collapsible\" template feature. </p> <p>For only the cost of one (or more) \"extra\" request(s) per host, it can activate several hundred templates. These are templates which happen to look at a BaseUrl, and typically look for a specific string or other attribute. Nuclei is smart about reusing the request data when it can, and we can use this to our advantage. </p> <p>The budget parameter is the number of extra requests per host you are willing to send to \"feed\" Nuclei templates (defaults to 1). This is useful for those times when vulnerability scanning isn't the main focus, but you still want to look for easy wins.</p> <p>Of course, there is a rapidly diminishing return when you set the value to more than a handful. Eventually, each additional point of budget buys only a single extra template. However, in the 1-10 range there is a lot of value. This graphic should give you a rough visual idea of this concept.</p> <p></p>"},{"location":"modules/nuclei/#nuclei-pass-through-options","title":"Nuclei pass-through options","text":"<p>Most of the remaining options are passed straight through to Nuclei when it's executed. You can do things like set specific tags to include (or exclude with etags), exactly as you would with Nuclei directly. You can also limit the templates with severity.</p> <p>The ratelimit and concurrency settings default to the same values that Nuclei uses. These are relatively sane settings, but if you are in a sensitive environment it can certainly help to turn them down.</p> <p>templates allows you to set your own templates directory. This can be very useful if you have your own custom templates that you want to use with BBOT.</p>"},{"location":"modules/nuclei/#example-commands","title":"Example Commands","text":"<pre><code># Scan a SINGLE target with a basic port scan and web modules\nbbot -f web-basic -m portscan nuclei --allow-deadly -t app.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets\nbbot -f web-basic -m portscan nuclei --allow-deadly -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets while performing subdomain enumeration\nbbot -f subdomain-enum web-basic -m portscan nuclei --allow-deadly -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets on a BUDGET\nbbot -f subdomain-enum web-basic -m portscan nuclei --allow-deadly -c modules.nuclei.mode=budget -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre>"},{"location":"scanning/","title":"Scanning Overview","text":""},{"location":"scanning/#scan-names","title":"Scan Names","text":"<p>Every BBOT scan gets a random, mildly-entertaining name like <code>demonic_jimmy</code>. 
Output for that scan, including scan stats and any web screenshots, is saved to a folder by that name in <code>~/.bbot/scans</code>. The most recent 20 scans are kept, and older ones are removed.</p> <p>If you don't want a random name, you can change it with <code>-n</code>. You can also change the location of BBOT's output with <code>-o</code>:</p> <pre><code># save everything to the folder \"my_scan\" in the current directory\nbbot -t evilcorp.com -f subdomain-enum -m gowitness -n my_scan -o .\n</code></pre> <p>If you reuse a scan name, BBOT will automatically append to your previous output files.</p>"},{"location":"scanning/#targets-t","title":"Targets (<code>-t</code>)","text":"<p>Targets declare what's in-scope, and seed a scan with initial data. BBOT accepts an unlimited number of targets. They can be any of the following:</p> <ul> <li><code>DNS_NAME</code> (<code>evilcorp.com</code>)</li> <li><code>IP_ADDRESS</code> (<code>1.2.3.4</code>)</li> <li><code>IP_RANGE</code> (<code>1.2.3.0/24</code>)</li> <li><code>OPEN_TCP_PORT</code> (<code>192.168.0.1:80</code>)</li> <li><code>URL</code> (<code>https://www.evilcorp.com</code>)</li> </ul> <p>Note that BBOT only discriminates down to the host level. This means, for example, if you specify a URL <code>https://www.evilcorp.com</code> as the target, the scan will be seeded with that URL, but the scope of the scan will be the entire host, <code>www.evilcorp.com</code>. Other ports/URLs on that same host may also be scanned.</p> <p>You can specify targets directly on the command line, load them from files, or both! For example:</p> <pre><code>$ cat targets.txt\n4.3.2.1\n10.0.0.2:80\n1.2.3.0/24\nevilcorp.com\nevilcorp.co.uk\nhttps://www.evilcorp.co.uk\n\n# load targets from a file and from the command-line\n$ bbot -t targets.txt fsociety.com 5.6.7.0/24 -m nmap\n</code></pre> <p>On start, BBOT automatically converts Targets into Events.</p>"},{"location":"scanning/#modules-m","title":"Modules (<code>-m</code>)","text":"<p>To see a full list of modules and their descriptions, use <code>bbot -l</code> or see List of Modules.</p> <p>Modules are the part of BBOT that does the work -- port scanning, subdomain brute-forcing, API querying, etc. Modules consume Events (<code>IP_ADDRESS</code>, <code>DNS_NAME</code>, etc.) from each other, process the data in a useful way, then emit the results as new events. You can enable individual modules with <code>-m</code>.</p> <pre><code># Enable modules: nmap, sslcert, and httpx\nbbot -t www.evilcorp.com -m nmap sslcert httpx\n</code></pre>"},{"location":"scanning/#types-of-modules","title":"Types of Modules","text":"<p>Modules fall into three categories:</p> <ul> <li>Scan Modules:<ul> <li>These make up the majority of modules. Examples are <code>nmap</code>, <code>sslcert</code>, <code>httpx</code>, etc. Enable with <code>-m</code>.</li> </ul> </li> <li>Output Modules:<ul> <li>These output scan data to different formats/destinations. <code>human</code>, <code>json</code>, and <code>csv</code> are enabled by default. Enable others with <code>-om</code>. (See: Output)</li> </ul> </li> <li>Internal Modules:<ul> <li>These modules perform essential, common-sense tasks. They are always enabled, unless explicitly disabled via the config (e.g. <code>-c speculate=false</code>).<ul> <li><code>aggregate</code>: Summarizes results at the end of a scan</li> <li><code>excavate</code>: Extracts useful data such as subdomains from webpages, etc.</li> <li><code>speculate</code>: Intelligently infers new events, e.g. 
<code>OPEN_TCP_PORT</code> from <code>URL</code> or <code>IP_ADDRESS</code> from <code>IP_RANGE</code>.</li> </ul> </li> </ul> </li> </ul> <p>For details on the inner workings of modules, see Creating a Module.</p>"},{"location":"scanning/#flags-f","title":"Flags (<code>-f</code>)","text":"<p>Flags are how BBOT categorizes its modules. In a way, you can think of them as groups. Flags let you enable a bunch of similar modules at the same time without having to specify them each individually. For example, <code>-f subdomain-enum</code> would enable every module with the <code>subdomain-enum</code> flag.</p> <pre><code># list all subdomain-enum modules\nbbot -f subdomain-enum -l\n</code></pre>"},{"location":"scanning/#filtering-modules","title":"Filtering Modules","text":"<p>Modules can be easily enabled/disabled based on their flags:</p> <ul> <li><code>-f</code> Enable these flags (e.g. <code>-f subdomain-enum</code>)</li> <li><code>-rf</code> Require modules to have this flag (e.g. <code>-rf safe</code>)</li> <li><code>-ef</code> Exclude these flags (e.g. <code>-ef slow</code>)</li> <li><code>-em</code> Exclude these individual modules (e.g. <code>-em ipneighbor</code>)</li> <li><code>-lf</code> List all available flags</li> </ul> <p>Every module is either <code>safe</code> or <code>aggressive</code>, and either <code>active</code> or <code>passive</code>. These can be useful for filtering. For example, if you wanted to enable all the <code>safe</code> modules, but exclude active ones, you could do:</p> <pre><code># Enable safe modules but exclude active ones\nbbot -t evilcorp.com -f safe -ef active\n</code></pre> <p>This is equivalent to requiring the passive flag:</p> <pre><code># Enable safe modules but only if they're also passive\nbbot -t evilcorp.com -f safe -rf passive\n</code></pre> <p>A single module can have multiple flags. For example, the <code>securitytrails</code> module is <code>passive</code>, <code>safe</code>, <code>subdomain-enum</code>. 
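As a rough illustration (using the same example target as elsewhere in these docs), requiring its other flags alongside <code>-f subdomain-enum</code> would still leave it enabled:</p> <pre><code># subdomain-enum modules that are also passive and safe\nbbot -t evilcorp.com -f subdomain-enum -rf passive safe\n</code></pre> <p>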
Below is a full list of flags and their associated modules.</p>"},{"location":"scanning/#list-of-flags","title":"List of Flags","text":"Flag # Modules Description Modules safe 91 Non-intrusive, safe to run affiliates, aggregate, ajaxpro, anubisdb, apkpure, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnscaa, dnscommonsrv, dnsdumpster, docker_pull, dockerhub, emailformat, extractous, filedownload, fingerprintx, fullhunt, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, gowitness, hackertarget, httpx, hunt, hunterio, iis_shortnames, internetdb, ip2location, ipstack, jadx, leakix, myssl, newsletters, ntlm, oauth, otx, passivetotal, pgp, portscan, postman, postman_download, rapiddns, robots, secretsdb, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, viewdns, virustotal, wappalyzer, wayback, zoomeye passive 66 Never connects to target systems affiliates, aggregate, anubisdb, apkpure, asn, azure_realm, azure_tenant, bevigil, binaryedge, bucket_file_enum, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnscaa, dnsdumpster, docker_pull, dockerhub, emailformat, excavate, extractous, fullhunt, git_clone, github_codesearch, github_org, github_workflows, google_playstore, hackertarget, hunterio, internetdb, ip2location, ipneighbor, ipstack, jadx, leakix, myssl, otx, passivetotal, pgp, postman, postman_download, rapiddns, securitytrails, shodan_dns, sitedossier, skymem, social, speculate, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, viewdns, virustotal, wayback, zoomeye subdomain-enum 52 Enumerates subdomains anubisdb, asn, azure_realm, azure_tenant, baddns_direct, baddns_zone, bevigil, binaryedge, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, sslcert, subdomaincenter, subdomainradar, subdomains, trickest, urlscan, virustotal, wayback, zoomeye active 47 Makes active connections to target systems ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dnsbrute, dnsbrute_mutations, dnscommonsrv, dotnetnuke, ffuf, ffuf_shortnames, filedownload, fingerprintx, generic_ssrf, git, gitlab, gowitness, host_header, httpx, hunt, iis_shortnames, newsletters, ntlm, nuclei, oauth, paramminer_cookies, paramminer_getparams, paramminer_headers, portscan, robots, secretsdb, securitytxt, smuggler, sslcert, telerik, url_manipulation, vhost, wafw00f, wappalyzer, wpscan aggressive 20 Generates a large amount of network traffic bypass403, dastardly, dnsbrute, dnsbrute_mutations, dotnetnuke, ffuf, ffuf_shortnames, generic_ssrf, host_header, ipneighbor, nuclei, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, telerik, url_manipulation, vhost, wafw00f, 
wpscan web-basic 18 Basic, non-intrusive web scan functionality azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, sslcert, wappalyzer cloud-enum 15 Enumerates cloud resources azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, dnsbimi, httpx, oauth, securitytxt code-enum 14 Find public code repositories and search them for secrets etc. apkpure, code_repository, docker_pull, dockerhub, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, postman, postman_download, trufflehog web-thorough 12 More advanced web scanning functionality ajaxpro, bucket_digitalocean, bypass403, dastardly, dotnetnuke, ffuf_shortnames, generic_ssrf, host_header, hunt, smuggler, telerik, url_manipulation slow 11 May take a long time to complete bucket_digitalocean, dastardly, dnsbrute_mutations, docker_pull, fingerprintx, git_clone, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, vhost affiliates 9 Discovers affiliated hostnames/domains affiliates, azure_realm, azure_tenant, builtwith, oauth, sslcert, trickest, viewdns, zoomeye email-enum 8 Enumerates email addresses dehashed, dnscaa, emailformat, emails, hunterio, pgp, skymem, sslcert deadly 4 Highly aggressive dastardly, ffuf, nuclei, vhost baddns 3 Runs all modules from the DNS auditing tool BadDNS baddns, baddns_direct, baddns_zone web-paramminer 3 Discovers HTTP parameters through brute-force paramminer_cookies, paramminer_getparams, paramminer_headers iis-shortnames 2 Scans for IIS Shortname vulnerability ffuf_shortnames, iis_shortnames portscan 2 Discovers open ports internetdb, portscan report 2 Generates a report at the end of the scan affiliates, asn social-enum 2 Enumerates social media httpx, social service-enum 1 Identifies protocols running on open ports fingerprintx subdomain-hijack 1 Detects hijackable subdomains baddns web-screenshots 1 Takes screenshots of web pages gowitness"},{"location":"scanning/#dependencies","title":"Dependencies","text":"<p>BBOT modules have external dependencies ranging from OS packages (<code>openssl</code>) to binaries (<code>nmap</code>) to Python libraries (<code>wappalyzer</code>). When a module is enabled, installation of its dependencies happens at runtime with Ansible. BBOT provides several command-line flags to control how dependencies are installed.</p> <ul> <li><code>--no-deps</code> - Don't install module dependencies</li> <li><code>--force-deps</code> - Force install all module dependencies</li> <li><code>--retry-deps</code> - Try again to install failed module dependencies</li> <li><code>--ignore-failed-deps</code> - Run modules even if they have failed dependencies</li> <li><code>--install-all-deps</code> - Install dependencies for all modules (useful if you are provisioning a pentest system and want to install everything ahead of time)</li> </ul> <p>For details on how Ansible playbooks are attached to BBOT modules, see How to Write a Module.</p>"},{"location":"scanning/#scope","title":"Scope","text":"<p>For pentesters and bug bounty hunters, staying in scope is extremely important. BBOT takes this seriously, meaning that active modules (e.g. <code>nuclei</code>) will only touch in-scope resources.</p> <p>By default, scope is whatever you specify with <code>-t</code>. This includes child subdomains. 
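As a quick sketch of what that means in practice (per the <code>-w</code> help text, the whitelist defaults to the same value as <code>--targets</code> unless you override it), these two commands behave the same:</p> <pre><code>bbot -t evilcorp.com -f subdomain-enum\nbbot -t evilcorp.com -w evilcorp.com -f subdomain-enum\n</code></pre> <p>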
For example, if you specify <code>-t evilcorp.com</code>, all its subdomains (<code>www.evilcorp.com</code>, <code>mail.evilcorp.com</code>, etc.) also become in-scope.</p>"},{"location":"scanning/#scope-distance","title":"Scope Distance","text":"<p>Since BBOT is recursive, it would quickly resort to scanning the entire internet without some kind of restraining mechanism. To solve this problem, every event discovered by BBOT is assigned a Scope Distance. Scope distance represents how far out from the main scope that data was discovered.</p> <p>For example, if your target is <code>evilcorp.com</code>, <code>www.evilcorp.com</code> would have a scope distance of <code>0</code> (i.e. in-scope). If BBOT discovers that <code>www.evilcorp.com</code> resolves to <code>1.2.3.4</code>, <code>1.2.3.4</code> is one hop away, which means it would have a scope distance of <code>1</code>. If <code>1.2.3.4</code> has a PTR record that points to <code>ecorp.blob.core.windows.net</code>, <code>ecorp.blob.core.windows.net</code> is two hops away, so its scope distance is <code>2</code>.</p> <p>Scope distance continues to increase the further out you get. Most modules (e.g. <code>nuclei</code> and <code>nmap</code>) only consume in-scope events. Certain other passive modules such as <code>asn</code> accept out to distance <code>1</code>. By default, DNS resolution happens out to a distance of <code>2</code>. Upon its discovery, any event that's determined to be in-scope (e.g. <code>www.evilcorp.com</code>) immediately becomes distance <code>0</code>, and the cycle starts over.</p>"},{"location":"scanning/#displaying-out-of-scope-events","title":"Displaying Out-of-scope Events","text":"<p>By default, BBOT only displays in-scope events (with a few exceptions such as <code>STORAGE_BUCKET</code>s). If you want to see more, you must increase the config value of <code>scope.report_distance</code>:</p> <pre><code># display out-of-scope events up to one hop away from the main scope\nbbot -t evilcorp.com -f subdomain-enum -c scope.report_distance=1\n</code></pre>"},{"location":"scanning/#strict-scope","title":"Strict Scope","text":"<p>If you want to scan only that specific target hostname and none of its children, you can specify <code>--strict-scope</code>.</p> <p>Note that <code>--strict-scope</code> only applies to targets and whitelists, but not blacklists. This means that if you put <code>internal.evilcorp.com</code> in your blacklist, you can be sure none of its subdomains will be scanned, even when using <code>--strict-scope</code>.</p>"},{"location":"scanning/#whitelists-and-blacklists","title":"Whitelists and Blacklists","text":"<p>BBOT allows precise control over scope with whitelists and blacklists. These both use the same syntax as <code>--target</code>, meaning they accept the same event types, and you can specify an unlimited number of them, via a file, the CLI, or both.</p>"},{"location":"scanning/#whitelists","title":"Whitelists","text":"<p><code>--whitelist</code> enables you to override what's in scope. For example, if you want to run nuclei against <code>evilcorp.com</code>, but stay only inside their corporate IP range of <code>1.2.3.0/24</code>, you can accomplish this like so:</p> <pre><code># Seed scan with evilcorp.com, but restrict scope to 1.2.3.0/24\nbbot -t evilcorp.com --whitelist 1.2.3.0/24 -f subdomain-enum -m nmap nuclei --allow-deadly\n</code></pre>"},{"location":"scanning/#blacklists","title":"Blacklists","text":"<p><code>--blacklist</code> takes ultimate precedence. 
Anything in the blacklist is completely excluded from the scan, even if it's in the whitelist.</p> <pre><code># Scan evilcorp.com, but exclude internal.evilcorp.com and its children\nbbot -t evilcorp.com --blacklist internal.evilcorp.com -f subdomain-enum -m nmap nuclei --allow-deadly\n</code></pre>"},{"location":"scanning/#blacklist-by-regex","title":"Blacklist by Regex","text":"<p>Blacklists also accept regex patterns. These regexes are checked against the full URL, including the host and path.</p> <p>To specify a regex, prefix the pattern with <code>RE:</code>. For example, to exclude all events containing \"signout\", you could do:</p> <pre><code>bbot -t evilcorp.com --blacklist \"RE:signout\"\n</code></pre> <p>Note that this would blacklist both of the following events:</p> <ul> <li><code>[URL]       http://evilcorp.com/signout.aspx</code></li> <li><code>[DNS_NAME]  signout.evilcorp.com</code></li> </ul> <p>If you only want to blacklist the URL, you could narrow the regex like so:</p> <pre><code>bbot -t evilcorp.com --blacklist 'RE:signout\\.aspx$'\n</code></pre> <p>Similar to targets and whitelists, blacklists can be specified in your preset. The <code>spider</code> preset makes use of this to prevent the spider from following logout links:</p> spider.yml<pre><code>description: Recursive web spider\n\nmodules:\n  - httpx\n\nblacklist:\n  # Prevent spider from invalidating sessions by logging out\n  - \"RE:/.*(sign|log)[_-]?out\"\n\nconfig:\n  web:\n    # how many links to follow in a row\n    spider_distance: 2\n    # don't follow links whose directory depth is higher than 4\n    spider_depth: 4\n    # maximum number of links to follow per page\n    spider_links_per_page: 25\n</code></pre>"},{"location":"scanning/#dns-wildcards","title":"DNS Wildcards","text":"<p>BBOT has robust wildcard detection built-in. It can reliably detect wildcard domains, and will tag them accordingly:</p> <pre><code>[DNS_NAME]      github.io   TARGET  (a-record, a-wildcard-domain, aaaa-wildcard-domain, wildcard-domain)\n                                               ^^^^^^^^^^^^^^^^^  ^^^^^^^^^^^^^^^^^^^^  ^^^^^^^^^^^^^^^\n</code></pre> <p>Wildcard hosts are collapsed into a single host beginning with <code>_wildcard</code>:</p> <pre><code>[DNS_NAME]      _wildcard.github.io     TARGET  (a-record, a-wildcard, a-wildcard-domain, aaaa-record, aaaa-wildcard, aaaa-wildcard-domain, wildcard, wildcard-domain)\n                ^^^^^^^^^\n</code></pre> <p>If you don't want this, you can disable wildcard detection on a per-domain basis in the config:</p> ~/.bbot/config/bbot.yml<pre><code>dns:\n  wildcard_ignore:\n    - evilcorp.com\n    - evilcorp.co.uk\n</code></pre> <p>There are certain edge cases (such as with dynamic DNS rules) where BBOT's wildcard detection fails. 
In these cases, you can try increasing the number of wildcard checks in the config:</p> ~/.bbot/config/bbot.yml<pre><code># default == 10\ndns:\n  wildcard_tests: 20\n</code></pre> <p>If that doesn't work you can consider blacklisting the offending domain.</p>"},{"location":"scanning/advanced/","title":"Advanced","text":"<p>Below you can find some advanced uses of BBOT.</p>"},{"location":"scanning/advanced/#bbot-as-a-python-library","title":"BBOT as a Python library","text":""},{"location":"scanning/advanced/#synchronous","title":"Synchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nif __name__ == \"__main__\":\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    for event in scan.start():\n        print(event)\n</code></pre>"},{"location":"scanning/advanced/#asynchronous","title":"Asynchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nasync def main():\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    async for event in scan.async_start():\n        print(event.json())\n\nif __name__ == \"__main__\":\n    import asyncio\n    asyncio.run(main())\n</code></pre>"},{"location":"scanning/advanced/#command-line-help","title":"Command-Line Help","text":"<pre><code>usage: bbot [-h] [-t TARGET [TARGET ...]] [-w WHITELIST [WHITELIST ...]]\n               [-b BLACKLIST [BLACKLIST ...]] [--strict-scope]\n               [-p [PRESET ...]] [-c [CONFIG ...]] [-lp]\n               [-m MODULE [MODULE ...]] [-l] [-lmo] [-em MODULE [MODULE ...]]\n               [-f FLAG [FLAG ...]] [-lf] [-rf FLAG [FLAG ...]]\n               [-ef FLAG [FLAG ...]] [--allow-deadly] [-n SCAN_NAME] [-v] [-d]\n               [-s] [--force] [-y] [--fast-mode] [--dry-run]\n               [--current-preset] [--current-preset-full] [-o DIR]\n               [-om MODULE [MODULE ...]] [--json] [--brief]\n               [--event-types EVENT_TYPES [EVENT_TYPES ...]]\n               [--no-deps | --force-deps | --retry-deps | --ignore-failed-deps | --install-all-deps]\n               [--version] [--proxy HTTP_PROXY]\n               [-H CUSTOM_HEADERS [CUSTOM_HEADERS ...]]\n               [--custom-yara-rules CUSTOM_YARA_RULES]\n\nBighuge BLS OSINT Tool\n\noptions:\n  -h, --help            show this help message and exit\n\nTarget:\n  -t TARGET [TARGET ...], --targets TARGET [TARGET ...]\n                        Targets to seed the scan\n  -w WHITELIST [WHITELIST ...], --whitelist WHITELIST [WHITELIST ...]\n                        What's considered in-scope (by default it's the same as --targets)\n  -b BLACKLIST [BLACKLIST ...], --blacklist BLACKLIST [BLACKLIST ...]\n                        Don't touch these things\n  --strict-scope        Don't consider subdomains of target/whitelist to be in-scope\n\nPresets:\n  -p [PRESET ...], --preset [PRESET ...]\n                        Enable BBOT preset(s)\n  -c [CONFIG ...], --config [CONFIG ...]\n                        Custom config options in key=value format: e.g. 'modules.shodan.api_key=1234'\n  -lp, --list-presets   List available presets.\n\nModules:\n  -m MODULE [MODULE ...], --modules MODULE [MODULE ...]\n                        Modules to enable. 
Choices: affiliates,ajaxpro,anubisdb,apkpure,asn,azure_realm,azure_tenant,baddns,baddns_direct,baddns_zone,badsecrets,bevigil,binaryedge,bucket_amazon,bucket_azure,bucket_digitalocean,bucket_file_enum,bucket_firebase,bucket_google,bufferoverrun,builtwith,bypass403,c99,censys,certspotter,chaos,code_repository,columbus,credshed,crt,dastardly,dehashed,digitorus,dnsbimi,dnsbrute,dnsbrute_mutations,dnscaa,dnscommonsrv,dnsdumpster,docker_pull,dockerhub,dotnetnuke,emailformat,extractous,ffuf,ffuf_shortnames,filedownload,fingerprintx,fullhunt,generic_ssrf,git,git_clone,github_codesearch,github_org,github_workflows,gitlab,google_playstore,gowitness,hackertarget,host_header,httpx,hunt,hunterio,iis_shortnames,internetdb,ip2location,ipneighbor,ipstack,jadx,leakix,myssl,newsletters,ntlm,nuclei,oauth,otx,paramminer_cookies,paramminer_getparams,paramminer_headers,passivetotal,pgp,portscan,postman,postman_download,rapiddns,robots,secretsdb,securitytrails,securitytxt,shodan_dns,sitedossier,skymem,smuggler,social,sslcert,subdomaincenter,subdomainradar,telerik,trickest,trufflehog,url_manipulation,urlscan,vhost,viewdns,virustotal,wafw00f,wappalyzer,wayback,wpscan,zoomeye\n  -l, --list-modules    List available modules.\n  -lmo, --list-module-options\n                        Show all module config options\n  -em MODULE [MODULE ...], --exclude-modules MODULE [MODULE ...]\n                        Exclude these modules.\n  -f FLAG [FLAG ...], --flags FLAG [FLAG ...]\n                        Enable modules by flag. Choices: active,affiliates,aggressive,baddns,cloud-enum,code-enum,deadly,email-enum,iis-shortnames,passive,portscan,report,safe,service-enum,slow,social-enum,subdomain-enum,subdomain-hijack,web-basic,web-paramminer,web-screenshots,web-thorough\n  -lf, --list-flags     List available flags.\n  -rf FLAG [FLAG ...], --require-flags FLAG [FLAG ...]\n                        Only enable modules with these flags (e.g. -rf passive)\n  -ef FLAG [FLAG ...], --exclude-flags FLAG [FLAG ...]\n                        Disable modules with these flags. (e.g. -ef aggressive)\n  --allow-deadly        Enable the use of highly aggressive modules\n\nScan:\n  -n SCAN_NAME, --name SCAN_NAME\n                        Name of scan (default: random)\n  -v, --verbose         Be more verbose\n  -d, --debug           Enable debugging\n  -s, --silent          Be quiet\n  --force               Run scan even in the case of condition violations or failed module setups\n  -y, --yes             Skip scan confirmation prompt\n  --fast-mode           Scan only the provided targets as fast as possible, with no extra discovery\n  --dry-run             Abort before executing scan\n  --current-preset      Show the current preset in YAML format\n  --current-preset-full\n                        Show the current preset in its full form, including defaults\n\nOutput:\n  -o DIR, --output-dir DIR\n                        Directory to output scan results\n  -om MODULE [MODULE ...], --output-modules MODULE [MODULE ...]\n                        Output module(s). 
Choices: asset_inventory,csv,discord,emails,http,json,neo4j,postgres,python,slack,splunk,sqlite,stdout,subdomains,teams,txt,web_report,websocket\n  --json, -j            Output scan data in JSON format\n  --brief, -br          Output only the data itself\n  --event-types EVENT_TYPES [EVENT_TYPES ...]\n                        Choose which event types to display\n\nModule dependencies:\n  Control how modules install their dependencies\n\n  --no-deps             Don't install module dependencies\n  --force-deps          Force install all module dependencies\n  --retry-deps          Try again to install failed module dependencies\n  --ignore-failed-deps  Run modules even if they have failed dependencies\n  --install-all-deps    Install dependencies for all modules\n\nMisc:\n  --version             show BBOT version and exit\n  --proxy HTTP_PROXY    Use this proxy for all HTTP requests\n  -H CUSTOM_HEADERS [CUSTOM_HEADERS ...], --custom-headers CUSTOM_HEADERS [CUSTOM_HEADERS ...]\n                        List of custom headers as key value pairs (header=value).\n  --custom-yara-rules CUSTOM_YARA_RULES, -cy CUSTOM_YARA_RULES\n                        Add custom yara rules to excavate\n\nEXAMPLES\n\n    Subdomains:\n        bbot -t evilcorp.com -p subdomain-enum\n\n    Subdomains (passive only):\n        bbot -t evilcorp.com -p subdomain-enum -rf passive\n\n    Subdomains + port scan + web screenshots:\n        bbot -t evilcorp.com -p subdomain-enum -m portscan gowitness -n my_scan -o .\n\n    Subdomains + basic web scan:\n        bbot -t evilcorp.com -p subdomain-enum web-basic\n\n    Web spider:\n        bbot -t www.evilcorp.com -p spider -c web.spider_distance=2 web.spider_depth=2\n\n    Everything everywhere all at once:\n        bbot -t evilcorp.com -p kitchen-sink\n\n    List modules:\n        bbot -l\n\n    List presets:\n        bbot -lp\n\n    List flags:\n        bbot -lf\n</code></pre>"},{"location":"scanning/configuration/","title":"Configuration Overview","text":"<p>Normally, Presets are used to configure a scan. However, there may be cases where you want to change BBOT's global defaults so a certain option is always set, even if it's not specified in a preset.</p> <p>BBOT has a YAML config at <code>~/.config/bbot/bbot.yml</code>. This is the first config that BBOT loads, so it's a good place to put default settings like <code>web.http_proxy</code>, <code>dns.threads</code>, or <code>web.user_agent</code>. You can also put any module settings here, including API keys.</p> <p>For a list of all possible config options, see:</p> <ul> <li>Global Options</li> <li>Module Options</li> </ul> <p>For examples of common config changes, see Tips and Tricks.</p>"},{"location":"scanning/configuration/#configuration-files","title":"Configuration Files","text":"<p>BBOT loads its config from the following files, in this order (last one loaded == highest priority):</p> <ul> <li><code>~/.config/bbot/bbot.yml</code>  &lt;-- Global BBOT config</li> <li>presets (<code>-p</code>)             &lt;-- Presets are good for scan-specific settings</li> <li>command line (<code>-c</code>)        &lt;-- CLI overrides everything</li> </ul> <p><code>bbot.yml</code> will be automatically created for you when you first run BBOT.</p>"},{"location":"scanning/configuration/#yaml-config-vs-command-line","title":"YAML Config vs Command Line","text":"<p>You can specify config options either on the command line or in the config file. 
For example, if you want to proxy your BBOT scan through a local proxy like Burp Suite, you could either do:</p> <pre><code># send BBOT traffic through an HTTP proxy\nbbot -t evilcorp.com -c web.http_proxy=http://127.0.0.1:8080\n</code></pre> <p>Or, in <code>~/.config/bbot/bbot.yml</code>:</p> ~/.bbot/config/bbot.yml<pre><code>web:\n  http_proxy: http://127.0.0.1:8080\n</code></pre> <p>These two are equivalent.</p> <p>Config options specified via the command-line take precedence over all others. You can give BBOT a custom config file with <code>-c myconf.yml</code>, or individual arguments like this: <code>-c modules.shodan_dns.api_key=deadbeef</code>. To display the full and current BBOT config, including any command-line arguments, use <code>bbot --current-preset-full</code>.</p> <p>Note that placing the following in <code>bbot.yml</code>: ~/.bbot/config/bbot.yml<pre><code>modules:\n  shodan_dns:\n    api_key: deadbeef\n</code></pre> Is the same as: <pre><code>bbot -c modules.shodan_dns.api_key=deadbeef\n</code></pre></p>"},{"location":"scanning/configuration/#global-config-options","title":"Global Config Options","text":"<p>Below is a full list of the config options supported, along with their defaults.</p> defaults.yml<pre><code>### BASIC OPTIONS ###\n\n# BBOT working directory\nhome: ~/.bbot\n# How many scan results to keep before cleaning up the older ones\nkeep_scans: 20\n# Interval for displaying status messages\nstatus_frequency: 15\n# Include the raw data of files (i.e. PDFs, web screenshots) as base64 in the event\nfile_blobs: false\n# Include the raw data of directories (i.e. git repos) as tar.gz base64 in the event\nfolder_blobs: false\n\n### SCOPE ###\n\nscope:\n  # strict scope means only exact DNS names are considered in-scope\n  # subdomains are not included unless they are explicitly provided in the target list\n  strict: false\n  # Filter by scope distance which events are displayed in the output\n  # 0 == show only in-scope events (affiliates are always shown)\n  # 1 == show all events up to distance-1 (1 hop from target)\n  report_distance: 0\n  # How far out from the main scope to search\n  # Do not change this setting unless you know what you're doing\n  search_distance: 0\n\n### DNS ###\n\ndns:\n  # Completely disable DNS resolution (careful if you have IP whitelists/blacklists, consider using minimal=true instead)\n  disable: false\n  # Speed up scan by not creating any new DNS events, and only resolving A and AAAA records\n  minimal: false\n  # How many instances of the dns module to run concurrently\n  threads: 25\n  # How many concurrent DNS resolvers to use when brute-forcing\n  # (under the hood this is passed through directly to massdns -s)\n  brute_threads: 1000\n  # nameservers to use for DNS brute-forcing\n  # default is updated weekly and contains ~10K high-quality public servers\n  brute_nameservers: https://raw.githubusercontent.com/blacklanternsecurity/public-dns-servers/master/nameservers.txt\n  # How far away from the main target to explore via DNS resolution (independent of scope.search_distance)\n  # This is safe to change\n  search_distance: 1\n  # Limit how many DNS records can be followed in a row (stop malicious/runaway DNS records)\n  runaway_limit: 5\n  # DNS query timeout\n  timeout: 5\n  # How many times to retry DNS queries\n  retries: 1\n  # Completely disable BBOT's DNS wildcard detection\n  wildcard_disable: False\n  # Disable BBOT's DNS wildcard detection for select domains\n  wildcard_ignore: []\n  # How many sanity checks to make when verifying wildcard DNS\n  # 
Increase this value if BBOT's wildcard detection isn't working\n  wildcard_tests: 10\n  # Skip DNS requests for a certain domain and rdtype after encountering this many timeouts or SERVFAILs\n  # This helps prevent faulty DNS servers from hanging up the scan\n  abort_threshold: 50\n  # Don't show PTR records containing IP addresses\n  filter_ptrs: true\n  # Enable/disable debug messages for DNS queries\n  debug: false\n  # For performance reasons, always skip these DNS queries\n  # Microsoft's DNS infrastructure is misconfigured so that certain queries to mail.protection.outlook.com always time out\n  omit_queries:\n    - SRV:mail.protection.outlook.com\n    - CNAME:mail.protection.outlook.com\n    - TXT:mail.protection.outlook.com\n\n### WEB ###\n\nweb:\n  # HTTP proxy\n  http_proxy: \n  # Web user-agent\n  user_agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.97\n  # Set the maximum number of HTTP links that can be followed in a row (0 == no spidering allowed)\n  spider_distance: 0\n  # Set the maximum directory depth for the web spider\n  spider_depth: 1\n  # Set the maximum number of links that can be followed per page\n  spider_links_per_page: 25\n  # HTTP timeout (for Python requests; API calls, etc.)\n  http_timeout: 10\n  # HTTP timeout (for httpx)\n  httpx_timeout: 5\n  # Custom HTTP headers (e.g. cookies, etc.)\n  # in the format { \"Header-Key\": \"header_value\" }\n  # These are attached to all in-scope HTTP requests\n  # Note that some modules (e.g. github) may end up sending these to out-of-scope resources\n  http_headers: {}\n  # HTTP retries (for Python requests; API calls, etc.)\n  http_retries: 1\n  # HTTP retries (for httpx)\n  httpx_retries: 1\n  # Enable/disable debug messages for web requests/responses\n  debug: false\n  # Maximum number of HTTP redirects to follow\n  http_max_redirects: 5\n  # Whether to verify SSL certificates\n  ssl_verify: false\n\n### ENGINE ###\n\nengine:\n  debug: false\n\n# Tool dependencies\ndeps:\n  ffuf:\n    version: \"2.1.0\"\n\n### ADVANCED OPTIONS ###\n\n# Load BBOT modules from these custom paths\nmodule_dirs: []\n\n# Infer certain events from others, e.g. 
IPs from IP ranges, DNS_NAMEs from URLs, etc.\nspeculate: True\n# Passively search event data for URLs, hostnames, emails, etc.\nexcavate: True\n# Summarize activity at the end of a scan\naggregate: True\n# DNS resolution, wildcard detection, etc.\ndnsresolve: True\n# Cloud provider tagging\ncloudcheck: True\n\n# How to handle installation of module dependencies\n# Choices are:\n#  - abort_on_failure (default) - if a module dependency fails to install, abort the scan\n#  - retry_failed - try again to install failed dependencies\n#  - ignore_failed - run the scan regardless of what happens with dependency installation\n#  - disable - completely disable BBOT's dependency system (you are responsible for installing tools, pip packages, etc.)\ndeps_behavior: abort_on_failure\n\n# Strip querystring from URLs by default\nurl_querystring_remove: True\n# When query string is retained, by default collapse parameter values down to a single value per parameter\nurl_querystring_collapse: True\n\n# Completely ignore URLs with these extensions\nurl_extension_blacklist:\n  # images\n  - png\n  - jpg\n  - bmp\n  - ico\n  - jpeg\n  - gif\n  - svg\n  - webp\n  # web/fonts\n  - css\n  - woff\n  - woff2\n  - ttf\n  - eot\n  - sass\n  - scss\n  # audio\n  - mp3\n  - m4a\n  - wav\n  - flac\n  # video\n  - mp4\n  - mkv\n  - avi\n  - wmv\n  - mov\n  - flv\n  - webm\n# Distribute URLs with these extensions only to httpx (these are omitted from output)\nurl_extension_httpx_only:\n  - js\n# Don't output these types of events (they are still distributed to modules)\nomit_event_types:\n  - HTTP_RESPONSE\n  - RAW_TEXT\n  - URL_UNVERIFIED\n  - DNS_NAME_UNRESOLVED\n  - FILESYSTEM\n  - WEB_PARAMETER\n  - RAW_DNS_RECORD\n  # - IP_ADDRESS\n\n# Custom interactsh server settings\ninteractsh_server: null\ninteractsh_token: null\ninteractsh_disable: false\n</code></pre>"},{"location":"scanning/configuration/#module-config-options","title":"Module Config Options","text":"<p>Many modules accept their own configuration options. These options have the ability to change their behavior. For example, the <code>portscan</code> module accepts options for <code>ports</code>, <code>rate</code>, etc. Below is a list of all possible module config options.</p> Config Option Type Description Default modules.baddns.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns.enabled_submodules list A list of submodules to enable. Empty list (default) enables CNAME, TXT and MX Only [] modules.baddns.only_high_confidence bool Do not emit low-confidence or generic detections False modules.baddns_direct.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns_zone.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns_zone.only_high_confidence bool Do not emit low-confidence or generic detections False modules.badsecrets.custom_secrets NoneType Include custom secrets loaded from a local file None modules.bucket_amazon.permutations bool Whether to try permutations False modules.bucket_azure.permutations bool Whether to try permutations False modules.bucket_digitalocean.permutations bool Whether to try permutations False modules.bucket_firebase.permutations bool Whether to try permutations False modules.bucket_google.permutations bool Whether to try permutations False modules.dnsbrute.max_depth int How many subdomains deep to brute force, i.e. 
5.4.3.2.1.evilcorp.com 5 modules.dnsbrute.wordlist str Subdomain wordlist URL https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/subdomains-top1million-5000.txt modules.dnsbrute_mutations.max_mutations int Maximum number of target-specific mutations to try per subdomain 100 modules.dnscommonsrv.max_depth int The maximum subdomain depth to brute-force SRV records 2 modules.ffuf.extensions str Optionally include a list of extensions to extend the keyword with (comma separated) modules.ffuf.lines int take only the first N lines from the wordlist when finding directories 5000 modules.ffuf.max_depth int the maximum directory depth to attempt to solve 0 modules.ffuf.wordlist str Specify wordlist to use when finding directories https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/Web-Content/raft-small-directories.txt modules.ffuf_shortnames.extensions str Optionally include a list of extensions to extend the keyword with (comma separated) modules.ffuf_shortnames.find_common_prefixes bool Attempt to automatically detect common prefixes and make additional ffuf runs against them False modules.ffuf_shortnames.find_delimiters bool Attempt to detect common delimiters and make additional ffuf runs against them True modules.ffuf_shortnames.ignore_redirects bool Explicitly ignore redirects (301,302) True modules.ffuf_shortnames.lines int take only the first N lines from the wordlist when finding directories 1000000 modules.ffuf_shortnames.max_depth int the maximum directory depth to attempt to solve 1 modules.ffuf_shortnames.version str ffuf version 2.0.0 modules.ffuf_shortnames.wordlist str Specify wordlist to use when finding directories modules.ffuf_shortnames.wordlist_extensions str Specify wordlist to use when making extension lists modules.filedownload.base_64_encoded_file str Stream the bytes of a file and encode them in base 64 for event data. false modules.filedownload.extensions list File extensions to download ['bak', 'bash', 'bashrc', 'cfg', 'conf', 'crt', 'csv', 'db', 'dll', 'doc', 'docx', 'exe', 'ica', 'indd', 'ini', 'jar', 'key', 'log', 'markdown', 'md', 'msi', 'odg', 'odp', 'ods', 'odt', 'pdf', 'pem', 'pps', 'ppsx', 'ppt', 'pptx', 'ps1', 'pub', 'raw', 'rdp', 'sh', 'sql', 'sqlite', 'swp', 'sxw', 'tar.gz', 'tar', 'txt', 'vbs', 'war', 'wpd', 'xls', 'xlsx', 'xml', 'yaml', 'yml', 'zip'] modules.filedownload.max_filesize str Cancel download if filesize is greater than this size 10MB modules.fingerprintx.skip_common_web bool Skip common web ports such as 80, 443, 8080, 8443, etc. True modules.fingerprintx.version str fingerprintx version 1.1.4 modules.gitlab.api_key str Gitlab access token modules.gowitness.idle_timeout int Skip the current gowitness batch if it stalls for longer than this many seconds 1800 modules.gowitness.output_path str Where to save screenshots modules.gowitness.resolution_x int Screenshot resolution x 1440 modules.gowitness.resolution_y int Screenshot resolution y 900 modules.gowitness.social bool Whether to screenshot social media webpages False modules.gowitness.threads int How many gowitness threads to spawn (default is number of CPUs x 2) 0 modules.gowitness.timeout int Preflight check timeout 10 modules.gowitness.version str Gowitness version 2.4.2 modules.httpx.in_scope_only bool Only visit web reparents that are in scope. 
True modules.httpx.max_response_size int Max response size in bytes 5242880 modules.httpx.probe_all_ips bool Probe all the ips associated with same host False modules.httpx.store_responses bool Save raw HTTP responses to scan folder False modules.httpx.threads int Number of httpx threads to use 50 modules.httpx.version str httpx version 1.2.5 modules.iis_shortnames.detect_only bool Only detect the vulnerability and do not run the shortname scanner True modules.iis_shortnames.max_node_count int Limit how many nodes to attempt to resolve on any given recursion branch 50 modules.ntlm.try_all bool Try every NTLM endpoint False modules.nuclei.batch_size int Number of targets to send to Nuclei per batch (default 200) 200 modules.nuclei.budget int Used in budget mode to set the number of requests which will be allotted to the nuclei scan 1 modules.nuclei.concurrency int maximum number of templates to be executed in parallel (default 25) 25 modules.nuclei.directory_only bool Filter out 'file' URL event (default True) True modules.nuclei.etags str tags to exclude from the scan modules.nuclei.mode str manual | technology | severe | budget. Technology: Only activate based on technology events that match nuclei tags (nuclei -as mode). Manual (DEFAULT): Fully manual settings. Severe: Only critical and high severity templates without intrusive. Budget: Limit Nuclei to a specified number of HTTP requests manual modules.nuclei.ratelimit int maximum number of requests to send per second (default 150) 150 modules.nuclei.retries int number of times to retry a failed request (default 0) 0 modules.nuclei.severity str Filter based on severity field available in the template. modules.nuclei.silent bool Don't display nuclei's banner or status messages False modules.nuclei.tags str execute a subset of templates that contain the provided tags modules.nuclei.templates str template or template directory paths to include in the scan modules.nuclei.version str nuclei version 3.3.5 modules.oauth.try_all bool Check for OAUTH/IODC on every subdomain and URL. False modules.paramminer_cookies.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_cookies.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_cookies.wordlist str Define the wordlist to be used to derive cookies modules.paramminer_getparams.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_getparams.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_getparams.wordlist str Define the wordlist to be used to derive headers modules.paramminer_headers.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_headers.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_headers.wordlist str Define the wordlist to be used to derive headers modules.portscan.adapter str Manually specify a network interface, such as \"eth0\" or \"tun0\". If not specified, the first network interface found with a default gateway will be used. modules.portscan.adapter_ip str Send packets using this IP address. Not needed unless masscan's autodetection fails modules.portscan.adapter_mac str Send packets using this as the source MAC address. 
Not needed unless masscan's autodetection fails modules.portscan.ping_first bool Only portscan hosts that reply to pings False modules.portscan.ping_only bool Ping sweep only, no portscan False modules.portscan.ports str Ports to scan modules.portscan.rate int Rate in packets per second 300 modules.portscan.router_mac str Send packets to this MAC address as the destination. Not needed unless masscan's autodetection fails modules.portscan.top_ports int Top ports to scan (default 100) (to override, specify 'ports') 100 modules.portscan.wait int Seconds to wait for replies after scan is complete 5 modules.robots.include_allow bool Include 'Allow' Entries True modules.robots.include_disallow bool Include 'Disallow' Entries True modules.robots.include_sitemap bool Include 'sitemap' entries False modules.secretsdb.min_confidence int Only use signatures with this confidence score or higher 99 modules.secretsdb.signatures str File path or URL to YAML signatures https://raw.githubusercontent.com/blacklanternsecurity/secrets-patterns-db/master/db/rules-stable.yml modules.securitytxt.emails bool emit EMAIL_ADDRESS events True modules.securitytxt.urls bool emit URL_UNVERIFIED events True modules.sslcert.skip_non_ssl bool Don't try common non-SSL ports True modules.sslcert.timeout float Socket connect timeout in seconds 5.0 modules.telerik.exploit_RAU_crypto bool Attempt to confirm any RAU AXD detections are vulnerable False modules.url_manipulation.allow_redirects bool Allowing redirects will sometimes create false positives. Disallowing will sometimes create false negatives. Allowed by default. True modules.vhost.force_basehost str Use a custom base host (e.g. evilcorp.com) instead of the default behavior of using the current URL modules.vhost.lines int take only the first N lines from the wordlist when finding directories 5000 modules.vhost.wordlist str Wordlist containing subdomains https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/subdomains-top1million-5000.txt modules.wafw00f.generic_detect bool When no specific WAF detections are made, try to perform a generic detect True modules.wpscan.api_key str WPScan API Key modules.wpscan.connection_timeout int The connection timeout in seconds (default 2) 2 modules.wpscan.disable_tls_checks bool Disables the SSL/TLS certificate verification (Default True) True modules.wpscan.enumerate str Enumeration Process see wpscan help documentation (default: vp,vt,cb,dbe) vp,vt,cb,dbe modules.wpscan.force bool Do not check if the target is running WordPress or returns a 403 False modules.wpscan.request_timeout int The request timeout in seconds (default 5) 5 modules.wpscan.threads int How many wpscan threads to spawn (default is 5) 5 modules.anubisdb.limit int Limit the number of subdomains returned per query (increasing this may slow the scan due to garbage results from this API) 1000 modules.apkpure.output_folder str Folder to download apk's to modules.bevigil.api_key str BeVigil OSINT API Key modules.bevigil.urls bool Emit URLs in addition to DNS_NAMEs False modules.binaryedge.api_key str BinaryEdge API key modules.binaryedge.max_records int Limit results to help prevent exceeding API quota 1000 modules.bucket_file_enum.file_limit int Limit the number of files downloaded per bucket 50 modules.bufferoverrun.api_key str BufferOverrun API key modules.bufferoverrun.commercial bool Use commercial API False modules.builtwith.api_key str Builtwith API key modules.builtwith.redirects bool Also look up inbound and outbound redirects True 
modules.c99.api_key str c99.nl API key modules.censys.api_key str Censys.io API Key in the format of 'key:secret' modules.censys.max_pages int Maximum number of pages to fetch (100 results per page) 5 modules.chaos.api_key str Chaos API key modules.credshed.credshed_url str URL of credshed server modules.credshed.password str Credshed password modules.credshed.username str Credshed username modules.dehashed.api_key str DeHashed API Key modules.dehashed.username str Email Address associated with your API key modules.dnsbimi.emit_raw_dns_records bool Emit RAW_DNS_RECORD events False modules.dnsbimi.emit_urls bool Emit URL_UNVERIFIED events True modules.dnsbimi.selectors str CSV list of BIMI selectors to check default,email,mail,bimi modules.dnscaa.dns_names bool emit DNS_NAME events True modules.dnscaa.emails bool emit EMAIL_ADDRESS events True modules.dnscaa.in_scope_only bool Only check in-scope domains True modules.dnscaa.urls bool emit URL_UNVERIFIED events True modules.docker_pull.all_tags bool Download all tags from each registry (Default False) False modules.docker_pull.output_folder str Folder to download docker repositories to modules.extractous.extensions list File extensions to parse ['bak', 'bash', 'bashrc', 'conf', 'cfg', 'crt', 'csv', 'db', 'sqlite', 'doc', 'docx', 'ica', 'indd', 'ini', 'key', 'pub', 'log', 'markdown', 'md', 'odg', 'odp', 'ods', 'odt', 'pdf', 'pem', 'pps', 'ppsx', 'ppt', 'pptx', 'ps1', 'rdp', 'sh', 'sql', 'swp', 'sxw', 'txt', 'vbs', 'wpd', 'xls', 'xlsx', 'xml', 'yml', 'yaml'] modules.fullhunt.api_key str FullHunt API Key modules.git_clone.api_key str Github token modules.git_clone.output_folder str Folder to clone repositories to modules.github_codesearch.api_key str Github token modules.github_codesearch.limit int Limit code search to this many results 100 modules.github_org.api_key str Github token modules.github_org.include_member_repos bool Also enumerate organization members' repositories False modules.github_org.include_members bool Enumerate organization members True modules.github_workflows.api_key str Github token modules.github_workflows.num_logs int For each workflow fetch the last N successful runs logs (max 100) 1 modules.hunterio.api_key str Hunter.IO API key modules.internetdb.show_open_ports bool Display OPEN_TCP_PORT events in output, even if they didn't lead to an interesting discovery False modules.ip2location.api_key str IP2location.io API Key modules.ip2location.lang str Translation information(ISO639-1). The translation is only applicable for continent, country, region and city name. modules.ipneighbor.num_bits int Netmask size (in CIDR notation) to check. 
Default is 4 bits (16 hosts) 4 modules.ipstack.api_key str IPStack GeoIP API Key modules.jadx.threads int Maximum jadx threads for extracting apk's, default: 4 4 modules.leakix.api_key str LeakIX API Key modules.passivetotal.api_key str PassiveTotal API Key in the format of 'username:api_key' modules.pgp.search_urls list PGP key servers to search <code>['https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&amp;op=vindex&amp;search=&lt;query&gt;', 'http://the.earth.li:11371/pks/lookup?fingerprint=on&amp;op=vindex&amp;search=&lt;query&gt;', 'https://pgpkeys.eu/pks/lookup?search=&lt;query&gt;&amp;op=index', 'https://pgp.mit.edu/pks/lookup?search=&lt;query&gt;&amp;op=index']</code> modules.postman_download.api_key str Postman API Key modules.postman_download.output_folder str Folder to download postman workspaces to modules.securitytrails.api_key str SecurityTrails API key modules.shodan_dns.api_key str Shodan API key modules.subdomainradar.api_key str SubDomainRadar.io API key modules.subdomainradar.group str The enumeration group to use. Choose from fast, medium, deep fast modules.subdomainradar.timeout int Timeout in seconds 120 modules.trickest.api_key str Trickest API key modules.trufflehog.concurrency int Number of concurrent workers 8 modules.trufflehog.config str File path or URL to YAML trufflehog config modules.trufflehog.deleted_forks bool Scan for deleted github forks. WARNING: This is SLOW. For a smaller repository, this process can take 20 minutes. For a larger repository, it could take hours. False modules.trufflehog.only_verified bool Only report credentials that have been verified True modules.trufflehog.version str trufflehog version 3.83.7 modules.urlscan.urls bool Emit URLs in addition to DNS_NAMEs False modules.virustotal.api_key str VirusTotal API Key modules.wayback.garbage_threshold int Dedupe similar urls if they are in a group of this size or higher (lower values == less garbage data) 10 modules.wayback.urls bool emit URLs in addition to DNS_NAMEs False modules.zoomeye.api_key str ZoomEye API key modules.zoomeye.include_related bool Include domains which may be related to the target False modules.zoomeye.max_pages int How many pages of results to fetch 20 modules.asset_inventory.output_file str Set a custom output file modules.asset_inventory.recheck bool When use_previous=True, don't retain past details like open ports or findings. Instead, allow them to be rediscovered by the new scan False modules.asset_inventory.summary_netmask int Subnet mask to use when summarizing IP addresses at end of scan 16 modules.asset_inventory.use_previous bool <code>Emit previous asset inventory as new events (use in conjunction with -n &lt;old_scan_name&gt;)</code> False modules.csv.output_file str Output to CSV file modules.discord.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.discord.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.discord.webhook_url str Discord webhook URL modules.emails.output_file str Output to file modules.http.bearer str Authorization Bearer token modules.http.method str HTTP method POST modules.http.password str Password (basic auth) modules.http.siem_friendly bool Format JSON in a SIEM-friendly way for ingestion into Elastic, Splunk, etc. 
False modules.http.timeout int HTTP timeout 10 modules.http.url str Web URL modules.http.username str Username (basic auth) modules.json.output_file str Output to file modules.json.siem_friendly bool Output JSON in a SIEM-friendly format for ingestion into Elastic, Splunk, etc. False modules.neo4j.password str Neo4j password bbotislife modules.neo4j.uri str Neo4j server + port bolt://localhost:7687 modules.neo4j.username str Neo4j username neo4j modules.postgres.database str The database name to connect to bbot modules.postgres.host str The server running Postgres localhost modules.postgres.password str The password to connect to Postgres bbotislife modules.postgres.port int The port to connect to Postgres 5432 modules.postgres.username str The username to connect to Postgres postgres modules.slack.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.slack.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.slack.webhook_url str Slack webhook URL modules.splunk.hectoken str HEC Token modules.splunk.index str Index to send data to modules.splunk.source str Source path to be added to the metadata modules.splunk.timeout int HTTP timeout 10 modules.splunk.url str Web URL modules.sqlite.database str The path to the sqlite database file modules.stdout.accept_dupes bool Whether to show duplicate events, default True True modules.stdout.event_fields list Which event fields to display [] modules.stdout.event_types list Which events to display, default all event types [] modules.stdout.format str Which text format to display, choices: text,json text modules.stdout.in_scope_only bool Whether to only show in-scope events False modules.subdomains.include_unresolved bool Include unresolved subdomains in output False modules.subdomains.output_file str Output to file modules.teams.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.teams.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.teams.webhook_url str Teams webhook URL modules.txt.output_file str Output to file modules.web_report.css_theme_file str CSS theme URL for HTML output https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/5.1.0/github-markdown.min.css modules.web_report.output_file str Output to file modules.websocket.preserve_graph bool Preserve full chains of events in the graph (prevents orphans) True modules.websocket.token str Authorization Bearer token modules.websocket.url str Web URL modules.excavate.custom_yara_rules str Include custom Yara rules modules.excavate.retain_querystring bool Keep the querystring intact on emitted WEB_PARAMETERS False modules.excavate.yara_max_match_data int Sets the maximum amount of text that can be extracted from a YARA regex 2000 modules.speculate.essential_only bool Only enable essential speculate features (no extra discovery) False modules.speculate.max_hosts int Max number of IP_RANGE hosts to convert into IP_ADDRESS events 65536 modules.speculate.ports str The set of ports to speculate on 80,443"},{"location":"scanning/events/","title":"Events","text":"<p>An Event is a piece of data discovered by BBOT. Examples include <code>IP_ADDRESS</code>, <code>DNS_NAME</code>, <code>EMAIL_ADDRESS</code>, <code>URL</code>, etc. When you run a BBOT scan, events are constantly being exchanged between modules. 
They are also output to the console:</p> <pre><code>[DNS_NAME]      www.evilcorp.com    sslcert         (distance-0, in-scope, resolved, subdomain, a-record)\n ^^^^^^^^       ^^^^^^^^^^^^^^^^    ^^^^^^^          ^^^^^^^^^^\nevent type      event data          source module    tags\n</code></pre>"},{"location":"scanning/events/#event-attributes","title":"Event Attributes","text":"<p>Each BBOT event has the following attributes. Not all of these attributes are visible in the terminal output. However, they are always saved in <code>output.json</code> in the scan output folder. If you want to see them on the terminal, you can use <code>--json</code>.</p> <ul> <li><code>.type</code>: the event type (e.g. <code>DNS_NAME</code>, <code>IP_ADDRESS</code>, <code>OPEN_TCP_PORT</code>, etc.)</li> <li><code>.id</code>: an identifier representing the event type + a SHA1 hash of its data (note: multiple events can have the same <code>.id</code>)</li> <li><code>.uuid</code>: a universally unique identifier for the event (e.g. <code>DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13</code>)</li> <li><code>.scope_description</code>: describes the scope of the event (e.g. <code>in-scope</code>, <code>affiliate</code>, <code>distance-2</code>)</li> <li><code>.data</code>: the actual discovered data (for some events like <code>DNS_NAME</code> or <code>IP_ADDRESS</code>, this is a string. For other more complex events like <code>HTTP_RESPONSE</code>, it's a dictionary)</li> <li><code>.host</code>: the hostname or IP address (e.g. <code>evilcorp.com</code> or <code>1.2.3.4</code>)</li> <li><code>.port</code>: the port number (e.g. <code>80</code>, <code>443</code>)</li> <li><code>.netloc</code>: the network location, including both the hostname and port (e.g. <code>www.evilcorp.com:443</code>)</li> <li><code>.resolved_hosts</code>: a list of all resolved hosts for the event (<code>A</code>, <code>AAAA</code>, and <code>CNAME</code> records)</li> <li><code>.dns_children</code>: a dictionary of all DNS records for the event (typically only present on <code>DNS_NAME</code>)</li> <li><code>.web_spider_distance</code>: a count of how many URL links have been followed in a row to get to this event</li> <li><code>.scope_distance</code>: a count of how many hops it is from the main scope (0 == in-scope)</li> <li><code>.scan</code>: the ID of the scan that produced the event</li> <li><code>.timestamp</code>: the date/time when the event was discovered</li> <li><code>.parent</code>: the ID of the parent event that led to the discovery of this event</li> <li><code>.parent_uuid</code>: the universally unique identifier for the parent event</li> <li><code>.tags</code>: a list of tags describing the event (e.g. <code>mx-record</code>, <code>http-title</code>, etc.)</li> <li><code>.module</code>: the module that discovered the event</li> <li><code>.module_sequence</code>: the recent sequence of modules that were executed to discover the event (including omitted events)</li> <li><code>.discovery_context</code>: a description of the context in which the event was discovered</li> <li><code>.discovery_path</code>: a list of every discovery context leading to this event</li> <li><code>.parent_chain</code>: a list of every event UUID leading to the discovery of this event (corresponds exactly to <code>.discovery_path</code>)</li> </ul> <p>These attributes allow us to construct a visual graph of events (e.g. in Neo4j) and query/filter/grep them more easily. 
Here is what a typical event looks like in JSON format:</p> <pre><code>{\n  \"type\": \"DNS_NAME\",\n  \"id\": \"DNS_NAME:33bc005c2bdfea4d73e07db733bd11861cf6520e\",\n  \"uuid\": \"DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13\",\n  \"scope_description\": \"in-scope\",\n  \"data\": \"link.evilcorp.com\",\n  \"host\": \"link.evilcorp.com\",\n  \"resolved_hosts\": [\n    \"184.31.52.65\",\n    \"2600:1402:b800:d82::700\",\n    \"2600:1402:b800:d87::700\",\n    \"link.evilcorp.com.edgekey.net\"\n  ],\n  \"dns_children\": {\n    \"A\": [\n      \"184.31.52.65\"\n    ],\n    \"AAAA\": [\n      \"2600:1402:b800:d82::700\",\n      \"2600:1402:b800:d87::700\"\n    ],\n    \"CNAME\": [\n      \"link.evilcorp.com.edgekey.net\"\n    ]\n  },\n  \"web_spider_distance\": 0,\n  \"scope_distance\": 0,\n  \"scan\": \"SCAN:b6ef48bc036bc8d001595ae5061846a7e6beadb6\",\n  \"timestamp\": \"2024-10-18T15:40:13.716880+00:00\",\n  \"parent\": \"DNS_NAME:94c92b7eaed431b37ae2a757fec4e678cc3bd213\",\n  \"parent_uuid\": \"DNS_NAME:c737dffa-d4f0-4b6e-a72d-cc8c05bd892e\",\n  \"tags\": [\n    \"subdomain\",\n    \"a-record\",\n    \"cdn-akamai\",\n    \"in-scope\",\n    \"cname-record\",\n    \"aaaa-record\"\n  ],\n  \"module\": \"speculate\",\n  \"module_sequence\": \"speculate-&gt;speculate\",\n  \"discovery_context\": \"speculated parent DNS_NAME: link.evilcorp.com\",\n  \"discovery_path\": [\n    \"Scan insidious_frederick seeded with DNS_NAME: evilcorp.com\",\n    \"TXT record for evilcorp.com contains IP_ADDRESS: 149.72.247.52\",\n    \"PTR record for 149.72.247.52 contains DNS_NAME: o1.ptr2410.link.evilcorp.com\",\n    \"speculated parent DNS_NAME: ptr2410.link.evilcorp.com\",\n    \"speculated parent DNS_NAME: link.evilcorp.com\"\n  ],\n  \"parent_chain\": [\n    \"DNS_NAME:34c657a3-0bfa-457e-9e6e-0f22f04b8da5\",\n    \"IP_ADDRESS:efc0fb3b-1b42-44da-916e-83db2360e10e\",\n    \"DNS_NAME:c737dffa-d4f0-4b6e-a72d-cc8c05bd892e\",\n    \"DNS_NAME_UNRESOLVED:722a3473-30c6-40f1-90aa-908d47105d5a\",\n    \"DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13\"\n  ]\n}\n</code></pre> <p>For a more detailed description of BBOT events, see Developer Documentation - Event.</p> <p>Below is a full list of event types along with which modules produce/consume them.</p>"},{"location":"scanning/events/#list-of-event-types","title":"List of Event Types","text":"Event Type # Consuming Modules # Producing Modules Consuming Modules Producing Modules * 17 0 affiliates, cloudcheck, csv, discord, dnsresolve, http, json, neo4j, postgres, python, slack, splunk, sqlite, stdout, teams, txt, websocket ASN 0 1 asn AZURE_TENANT 1 0 speculate CODE_REPOSITORY 6 6 docker_pull, git_clone, github_workflows, google_playstore, postman_download, trufflehog code_repository, dockerhub, github_codesearch, github_org, gitlab, postman DNS_NAME 59 43 anubisdb, asset_inventory, azure_realm, azure_tenant, baddns, baddns_zone, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, emailformat, fullhunt, github_codesearch, hackertarget, hunterio, internetdb, leakix, myssl, oauth, otx, passivetotal, pgp, portscan, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, speculate, subdomaincenter, subdomainradar, subdomains, trickest, urlscan, viewdns, virustotal, wayback, zoomeye anubisdb, azure_tenant, bevigil, binaryedge, 
bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, hackertarget, hunterio, internetdb, leakix, myssl, ntlm, oauth, otx, passivetotal, rapiddns, securitytrails, shodan_dns, sitedossier, speculate, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, vhost, viewdns, virustotal, wayback, zoomeye DNS_NAME_UNRESOLVED 3 0 baddns, speculate, subdomains EMAIL_ADDRESS 1 9 emails credshed, dehashed, dnscaa, emailformat, hunterio, pgp, securitytxt, skymem, sslcert FILESYSTEM 3 7 extractous, jadx, trufflehog apkpure, docker_pull, filedownload, git_clone, github_workflows, jadx, postman_download FINDING 2 29 asset_inventory, web_report ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, git, gitlab, host_header, hunt, internetdb, newsletters, ntlm, nuclei, paramminer_cookies, paramminer_getparams, secretsdb, smuggler, speculate, telerik, trufflehog, url_manipulation, wpscan GEOLOCATION 0 2 ip2location, ipstack HASHED_PASSWORD 0 2 credshed, dehashed HTTP_RESPONSE 19 1 ajaxpro, asset_inventory, badsecrets, dastardly, dotnetnuke, excavate, filedownload, gitlab, host_header, newsletters, ntlm, paramminer_cookies, paramminer_getparams, paramminer_headers, secretsdb, speculate, telerik, wappalyzer, wpscan httpx IP_ADDRESS 8 3 asn, asset_inventory, internetdb, ip2location, ipneighbor, ipstack, portscan, speculate asset_inventory, ipneighbor, speculate IP_RANGE 2 0 portscan, speculate MOBILE_APP 1 1 apkpure google_playstore OPEN_TCP_PORT 4 4 asset_inventory, fingerprintx, httpx, sslcert asset_inventory, internetdb, portscan, speculate ORG_STUB 4 1 dockerhub, github_org, google_playstore, postman speculate PASSWORD 0 2 credshed, dehashed PROTOCOL 0 1 fingerprintx RAW_DNS_RECORD 0 1 dnsbimi RAW_TEXT 1 1 excavate extractous SOCIAL 6 3 dockerhub, github_org, gitlab, gowitness, postman, speculate dockerhub, gitlab, social STORAGE_BUCKET 8 5 baddns_direct, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, speculate bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google TECHNOLOGY 4 8 asset_inventory, gitlab, web_report, wpscan badsecrets, dotnetnuke, gitlab, gowitness, internetdb, nuclei, wappalyzer, wpscan URL 20 2 ajaxpro, asset_inventory, baddns_direct, bypass403, ffuf, generic_ssrf, git, gowitness, httpx, iis_shortnames, ntlm, nuclei, robots, smuggler, speculate, telerik, url_manipulation, vhost, wafw00f, web_report gowitness, httpx URL_HINT 1 1 ffuf_shortnames iis_shortnames URL_UNVERIFIED 6 17 code_repository, filedownload, httpx, oauth, social, speculate azure_realm, bevigil, bucket_file_enum, dnsbimi, dnscaa, dockerhub, excavate, ffuf, ffuf_shortnames, github_codesearch, gowitness, hunterio, robots, securitytxt, urlscan, wayback, wpscan USERNAME 1 2 speculate credshed, dehashed VHOST 1 1 web_report vhost VULNERABILITY 2 13 asset_inventory, web_report ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, dastardly, dotnetnuke, generic_ssrf, internetdb, nuclei, telerik, trufflehog, wpscan WAF 1 1 asset_inventory wafw00f WEBSCREENSHOT 0 1 gowitness WEB_PARAMETER 4 4 hunt, paramminer_cookies, paramminer_getparams, paramminer_headers excavate, paramminer_cookies, paramminer_getparams, paramminer_headers"},{"location":"scanning/events/#findings-vs-vulnerabilities","title":"Findings Vs. 
Vulnerabilities","text":"<p>BBOT has a sharp distinction between Findings and Vulnerabilities:</p> <p>VULNERABILITY</p> <ul> <li>There's a higher standard for what is allowed to be a vulnerability. They should be considered confirmed and actionable - no additional confirmation required</li> <li>They are always assigned a severity. The possible severities are: LOW, MEDIUM, HIGH, or CRITICAL</li> </ul> <p>FINDING</p> <ul> <li>Findings can range anywhere from \"slightly interesting behavior\" to \"likely, but unconfirmed vulnerability\"</li> <li>They are often false positives</li> </ul> <p>By making this separation, actionable vulnerabilities can be identified quickly in the midst of a large scan.</p>"},{"location":"scanning/output/","title":"Output","text":"<p>By default, BBOT saves its output in TXT, JSON, and CSV formats. The filenames are logged at the end of each scan.</p> <p>Every BBOT scan gets a unique and mildly entertaining name like <code>demonic_jimmy</code>. Output for that scan, including scan stats, any web screenshots, etc., is saved to a folder by that name in <code>~/.bbot/scans</code>. The most recent 20 scans are kept, and older ones are removed. You can change the location of BBOT's output with <code>--output</code>, and you can also pick a custom scan name with <code>--name</code>.</p> <p>If you reuse a scan name, it will append to its original output files and leverage the previous scan's output.</p>"},{"location":"scanning/output/#output-modules","title":"Output Modules","text":"<p>Multiple simultaneous output formats are possible because of output modules. Output modules are similar to normal modules except they are enabled with <code>-om</code>.</p>"},{"location":"scanning/output/#stdout","title":"STDOUT","text":"<p>The <code>stdout</code> output module is what you see when you execute BBOT in the terminal. By default it looks the same as the <code>txt</code> module, but it has options you can customize. 
You can filter by event type, choose the data format (<code>text</code>, <code>json</code>), and which fields you want to see:</p> Config Option Type Description Default modules.stdout.accept_dupes bool Whether to show duplicate events, default True True modules.stdout.event_fields list Which event fields to display [] modules.stdout.event_types list Which events to display, default all event types [] modules.stdout.format str Which text format to display, choices: text,json text modules.stdout.in_scope_only bool Whether to only show in-scope events False"},{"location":"scanning/output/#txt","title":"TXT","text":"<p><code>txt</code> output is tab-delimited, so it's easy to grep:</p> <pre><code># grep out only the DNS_NAMEs\ncat ~/.bbot/scans/extreme_johnny/output.txt | grep -F '[DNS_NAME]' | cut -f2\nevilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#csv","title":"CSV","text":"<p>The <code>csv</code> output module produces a CSV like this:</p> Event type Event data IP Address Source Module Scope Distance Event Tags DNS_NAME evilcorp.com 1.2.3.4 TARGET 0 a-record,cdn-github,distance-0,domain,in-scope,mx-record,ns-record,resolved,soa-record,target,txt-record DNS_NAME www.evilcorp.com 2.3.4.5 certspotter 0 a-record,aaaa-record,cdn-github,cname-record,distance-0,in-scope,resolved,subdomain URL http://www.evilcorp.com 2.3.4.5 httpx 0 a-record,aaaa-record,cdn-github,cname-record,distance-0,in-scope,resolved,subdomain DNS_NAME admin.evilcorp.com 5.6.7.8 otx 0 a-record,aaaa-record,cloud-azure,cname-record,distance-0,in-scope,resolved,subdomain"},{"location":"scanning/output/#json","title":"JSON","text":"<p>If you manually enable the <code>json</code> output module, it will go to stdout:</p> <pre><code>bbot -t evilcorp.com -om json | jq\n</code></pre> <p>You will then see events like this:</p> <pre><code>{\n  \"type\": \"IP_ADDRESS\",\n  \"id\": \"IP_ADDRESS:13cd09c2adf0860a582240229cd7ad1dccdb5eb1\",\n  \"data\": \"1.2.3.4\",\n  \"scope_distance\": 1,\n  \"scan\": \"SCAN:64c0e076516ae7aa6502fd99489693d0d5ec26cc\",\n  \"timestamp\": 1688518967.740472,\n  \"resolved_hosts\": [\"1.2.3.4\"],\n  \"parent\": \"DNS_NAME:2da045542abbf86723f22383d04eb453e573723c\",\n  \"tags\": [\"distance-1\", \"ipv4\", \"internal\"],\n  \"module\": \"A\",\n  \"module_sequence\": \"A\"\n}\n</code></pre> <p>You can filter on the JSON output with <code>jq</code>:</p> <pre><code># pull out only the .data attribute of every DNS_NAME\n$ jq -r 'select(.type==\"DNS_NAME\") | .data' ~/.bbot/scans/extreme_johnny/output.json\nevilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#discord-slack-teams","title":"Discord / Slack / Teams","text":"<p>BBOT supports output via webhooks to <code>discord</code>, <code>slack</code>, and <code>teams</code>. 
To use them, you must specify a webhook URL either in the config:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      webhook_url: https://discord.com/api/webhooks/1234/deadbeef\n</code></pre> <p>...or on the command line: <pre><code>bbot -t evilcorp.com -om discord -c modules.discord.webhook_url=https://discord.com/api/webhooks/1234/deadbeef\n</code></pre></p> <p>By default, only <code>VULNERABILITY</code> and <code>FINDING</code> events are sent, but this can be customized by setting <code>event_types</code> in the config like so:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      event_types:\n        - VULNERABILITY\n        - FINDING\n        - STORAGE_BUCKET\n</code></pre> <p>...or on the command line: <pre><code>bbot -t evilcorp.com -om discord -c modules.discord.event_types=[\"STORAGE_BUCKET\",\"FINDING\",\"VULNERABILITY\"]\n</code></pre></p> <p>You can also filter on the severity of <code>VULNERABILITY</code> events by setting <code>min_severity</code>:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      min_severity: HIGH\n</code></pre>"},{"location":"scanning/output/#http","title":"HTTP","text":"<p>The <code>http</code> output module sends events in JSON format to a desired HTTP endpoint.</p> <pre><code># POST scan results to localhost\nbbot -t evilcorp.com -om http -c modules.http.url=http://localhost:8000\n</code></pre> <p>You can customize the HTTP method if needed. Authentication is also supported:</p> http_preset.yml<pre><code>config:\n  modules:\n    http:\n      url: https://localhost:8000\n      method: PUT\n      # Authorization: Bearer\n      bearer: &lt;bearer_token&gt;\n      # OR\n      username: bob\n      password: P@ssw0rd\n</code></pre>"},{"location":"scanning/output/#elasticsearch","title":"Elasticsearch","text":"<p>When outputting to Elastic, use the <code>http</code> output module with the following settings (replace <code>&lt;your_index&gt;</code> with your desired index, e.g. 
<code>bbot</code>):</p> <pre><code># send scan results directly to elasticsearch\nbbot -t evilcorp.com -om http -c \\\n  modules.http.url=http://localhost:8000/&lt;your_index&gt;/_doc \\\n  modules.http.siem_friendly=true \\\n  modules.http.username=elastic \\\n  modules.http.password=changeme\n</code></pre> <p>Alternatively, via a preset:</p> elastic_preset.yml<pre><code>config:\n  modules:\n    http:\n      url: http://localhost:8000/&lt;your_index&gt;/_doc\n      siem_friendly: true\n      username: elastic\n      password: changeme\n</code></pre>"},{"location":"scanning/output/#splunk","title":"Splunk","text":"<p>The <code>splunk</code> output module sends events in JSON format to a desired splunk instance via HEC.</p> <p>You can customize this output with the following config options:</p> splunk_preset.yml<pre><code>config:\n  modules:\n    splunk:\n      # The full URL with the URI `/services/collector/event`\n      url: https://localhost:8088/services/collector/event\n      # Generated from splunk webui\n      hectoken: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n      # Defaults to `main` if not set\n      index: my-specific-index\n      # Defaults to `bbot` if not set\n      source: /my/source.json\n</code></pre>"},{"location":"scanning/output/#asset-inventory","title":"Asset Inventory","text":"<p>The <code>asset_inventory</code> module produces a CSV like this:</p> Host Provider IP(s) Status Open Ports evilcorp.com cdn-github 1.2.3.4 Active 80,443 www.evilcorp.com cdn-github 2.3.4.5 Active 22,80,443 admin.evilcorp.com cloud-azure 5.6.7.8 N/A"},{"location":"scanning/output/#sqlite","title":"SQLite","text":"<p>The <code>sqlite</code> output module produces a SQLite database containing all events, scans, and targets. By default, it will be saved in the scan directory as <code>output.sqlite</code>.</p> <pre><code># specifying a custom database path\nbbot -t evilcorp.com -om sqlite -c modules.sqlite.database=/tmp/bbot.sqlite\n</code></pre>"},{"location":"scanning/output/#postgres","title":"Postgres","text":"<p>The <code>postgres</code> output module allows you to ingest events, scans, and targets into a Postgres database. By default, it will connect to the server on <code>localhost</code> with a username of <code>postgres</code> and password of <code>bbotislife</code>. 
You can change this behavior in the config.</p> <pre><code># specifying an alternate database\nbbot -t evilcorp.com -om postgres -c modules.postgres.database=custom_bbot_db\n</code></pre> postgres_preset.yml<pre><code>config:\n  modules:\n    postgres:\n      host: psq.fsociety.local\n      database: custom_bbot_db\n      port: 5432\n      username: postgres\n      password: bbotislife\n</code></pre>"},{"location":"scanning/output/#subdomains","title":"Subdomains","text":"<p>The <code>subdomains</code> output module produces a simple text file containing only in-scope and resolved subdomains:</p> subdomains.txt<pre><code>evilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\nportal.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#neo4j","title":"Neo4j","text":"<p>Neo4j is the funnest (and prettiest) way to view and interact with BBOT data.</p> <ul> <li>You can get Neo4j up and running with a single docker command:</li> </ul> <pre><code># start Neo4j in the background with docker\ndocker run -d -p 7687:7687 -p 7474:7474 -v \"$(pwd)/neo4j/:/data/\" -e NEO4J_AUTH=neo4j/bbotislife neo4j\n</code></pre> <ul> <li>After that, run bbot with <code>-om neo4j</code></li> </ul> <pre><code>bbot -f subdomain-enum -t evilcorp.com -om neo4j\n</code></pre> <ul> <li>Log in at http://localhost:7474 with <code>neo4j</code> / <code>bbotislife</code></li> </ul>"},{"location":"scanning/output/#cypher-queries-and-tips","title":"Cypher Queries and Tips","text":"<p>Neo4j uses Cypher as its graph query language. Cypher uses common clauses to craft relational queries and present the desired data in multiple formats.</p> <p>Cypher queries can be broken down into three required pieces: selection, filter, and presentation. The selection piece identifies what data will be searched against - 90% of the time the \"MATCH\" clause will be enough, but there are also ways to read from CSV or JSON data files. In all of these examples the \"MATCH\" clause will be used. The filter piece narrows the results down to the required data, using the \"WHERE\" clause (most basic operators can be used). Finally, the presentation piece identifies how the data should be presented back to the querier. While Neo4j is a graph database, its results can also be viewed in a traditional table.</p> <p>A simple query to grab every URL event with \".com\" in the BBOT data field would look like this: <code>MATCH (u:URL) WHERE u.data contains \".com\" RETURN u</code></p> <p>In this query the following can be identified: - Within the MATCH statement \"u\" is a variable and can be any value needed by the user, while the \"URL\" label is a direct relationship to the BBOT event type. - The WHERE statement allows the query to filter on any of the BBOT event properties like data, tag, or even the label itself.  
- The RETURN statement is a general presentation of the whole URL event but this can be narrowed down to present any of the specific properties of the BBOT event (<code>RETURN u.data, u.tags</code>).</p> <p>The following are a few recommended queries to get started with:</p> <pre><code>// Get all \"in-scope\" DNS Nodes and return just data and tags properties\nMATCH (n:DNS_NAME)\nWHERE \"in-scope\" IN n.tags\nRETURN n.data, n.tags\n</code></pre> <pre><code>// Get the count of labels/BBOT events in the Neo4j Database\nMATCH (n)\nRETURN labels(n), count(n)\n</code></pre> <pre><code>// Get a graph of open ports associated with each domain\nMATCH z = ((n:DNS_NAME) --&gt; (p:OPEN_TCP_PORT))\nRETURN z\n</code></pre> <pre><code>// Get all domains and IP addresses with open TCP ports\nMATCH (n) --&gt; (p:OPEN_TCP_PORT)\nWHERE \"in-scope\" in n.tags and (n:DNS_NAME or n:IP_ADDRESS)\nWITH *, TAIL(SPLIT(p.data, ':')) AS port\nRETURN n.data, collect(distinct port)\n</code></pre> <pre><code>// Clear the database\nMATCH (n) DETACH DELETE n\n</code></pre> <p>This is not an exhaustive list of clauses, filters, or other means to use cypher and should be considered a starting point. To build more advanced queries consider reading Neo4j's Cypher documentation. </p> <p>Additional note: these sample queries are dependent on the existence of the data in the target neo4j database. </p>"},{"location":"scanning/presets/","title":"Presets","text":"<p>Once you start customizing BBOT, your commands can start to get really long. Presets let you put all your scan settings in a single file:</p> <pre><code>bbot -p ./my_preset.yml\n</code></pre> <p>A Preset is a YAML file that can include scan targets, modules, and config options like API keys.</p> <p>A typical preset looks like this:</p> subdomain-enum.yml<pre><code>description: Enumerate subdomains via APIs, brute-force\n\nflags:\n  - subdomain-enum\n\noutput_modules:\n  - subdomains\n</code></pre>"},{"location":"scanning/presets/#how-to-use-presets-p","title":"How to use Presets (<code>-p</code>)","text":"<p>BBOT has a ready-made collection of presets for common tasks like subdomain enumeration and web spidering. They live in <code>~/.bbot/presets</code>.</p> <p>To list them, you can do:</p> <pre><code># list available presets\nbbot -lp\n</code></pre> <p>Enable them with <code>-p</code>:</p> <pre><code># do a subdomain enumeration \nbbot -t evilcorp.com -p subdomain-enum\n\n# multiple presets - subdomain enumeration + web spider\nbbot -t evilcorp.com -p subdomain-enum spider\n\n# start with a preset but only enable modules that have the 'passive' flag\nbbot -t evilcorp.com -p subdomain-enum -rf passive\n\n# preset + manual config override\nbbot -t www.evilcorp.com -p spider -c web.spider_distance=10\n</code></pre> <p>You can build on the default presets, or create your own. 
Here's an example of a custom preset that builds on <code>subdomain-enum</code>:</p> my_subdomains.yml<pre><code>description: Do a subdomain enumeration + basic web scan + nuclei\n\ntarget:\n  - evilcorp.com\n\ninclude:\n  # include these default presets\n  - subdomain-enum\n  - web-basic\n\nmodules:\n  # enable nuclei in addition to the other modules\n  - nuclei\n\nconfig:\n  # global config options\n  web:\n    http_proxy: http://127.0.0.1:8080\n  # module config options\n  modules:\n    # api keys\n    securitytrails:\n      api_key: 21a270d5f59c9b05813a72bb41707266\n    virustotal:\n      # multiple API keys are allowed\n      api_key:\n        - 4f41243847da693a4f356c0486114bc6\n        - 5bc6ed268ab6488270e496d3183a1a27\n</code></pre> <p>To execute your custom preset, you do:</p> <pre><code>bbot -p ./my_subdomains.yml\n</code></pre>"},{"location":"scanning/presets/#preset-load-order","title":"Preset Load Order","text":"<p>When you enable multiple presets, the order matters. In the case of a conflict, the last preset will always win. This means, for example, if you have a custom preset called <code>my_spider</code> that sets <code>web.spider_distance</code> to 1:</p> my_spider.yml<pre><code>config:\n  web:\n    spider_distance: 1\n</code></pre> <p>...and you enable it alongside the default <code>spider</code> preset in this order:</p> <pre><code>bbot -t evilcorp.com -p ./my_spider.yml spider\n</code></pre> <p>...the value of <code>web.spider_distance</code> will be overridden by <code>spider</code>. To ensure this doesn't happen, you would want to switch the order of the presets:</p> <pre><code>bbot -t evilcorp.com -p spider ./my_spider.yml\n</code></pre>"},{"location":"scanning/presets/#validating-presets","title":"Validating Presets","text":"<p>To make sure BBOT is configured the way you expect, you can always use <code>--current-preset</code> to show the final version of the config that will be used when BBOT executes:</p> <pre><code># verify the preset is what you want\nbbot -p ./mypreset.yml --current-preset\n</code></pre>"},{"location":"scanning/presets/#advanced-usage","title":"Advanced Usage","text":"<p>BBOT Presets support advanced features like environment variable substitution and custom conditions.</p>"},{"location":"scanning/presets/#custom-modules","title":"Custom Modules","text":"<p>If you want to use a custom BBOT <code>.py</code> module, you can either move it into <code>bbot/modules</code> where BBOT is installed, or add its parent folder to <code>module_dirs</code> like so:</p> custom_modules.yml<pre><code># load extra BBOT modules from this location\nmodule_dirs:\n  - /home/user/custom_modules\n</code></pre>"},{"location":"scanning/presets/#environment-variables","title":"Environment Variables","text":"<p>You can insert environment variables into your preset like this: <code>${env:&lt;variable&gt;}</code>:</p> my_nuclei.yml<pre><code>description: Do a nuclei scan\n\ntarget:\n  - evilcorp.com\n\nmodules:\n  - nuclei\n\nconfig:\n  modules:\n    nuclei:\n      # allow the nuclei templates to be specified at runtime via an environment variable\n      tags: ${env:NUCLEI_TAGS}\n</code></pre> <pre><code>NUCLEI_TAGS=apache,nginx bbot -p ./my_nuclei.yml\n</code></pre>"},{"location":"scanning/presets/#conditions","title":"Conditions","text":"<p>Sometimes, you might need to add custom logic to a preset. BBOT supports this via <code>conditions</code>. 
The <code>conditions</code> attribute allows you to specify a list of custom conditions that will be evaluated before the scan starts. This is useful for performing last-minute sanity checks, or changing the behavior of the scan based on custom criteria.</p> my_preset.yml<pre><code>description: Abort if nuclei templates aren't specified\n\nmodules:\n  - nuclei\n\nconditions:\n  - |\n    {% if not config.modules.nuclei.templates %}\n      {{ abort(\"Don't forget to set your templates!\") }}\n    {% endif %}\n</code></pre> my_preset.yml<pre><code>description: Enable ffuf but only when the web spider isn't also enabled\n\nmodules:\n  - ffuf\n\nconditions:\n  - |\n    {% if config.web.spider_distance &gt; 0 and config.web.spider_depth &gt; 0 %}\n      {{ warn(\"Disabling ffuf because the web spider is enabled\") }}\n      {{ preset.exclude_module(\"ffuf\") }}\n    {% endif %}\n</code></pre> <p>Conditions use Jinja, which means they can contain Python code. They run inside a sandboxed environment which has access to the following variables:</p> <ul> <li><code>preset</code> - the current preset object</li> <li><code>config</code> - the current config (an alias for <code>preset.config</code>)</li> <li><code>warn(message)</code> - display a custom warning message to the user</li> <li><code>abort(message)</code> - abort the scan with an optional message</li> </ul> <p>If you aren't able to accomplish what you want with conditions, or if you need access to a new variable/function, please let us know on Github.</p>"},{"location":"scanning/presets_list/","title":"List of Presets","text":"<p>Below is a list of every default BBOT preset, including its YAML.</p>"},{"location":"scanning/presets_list/#baddns-thorough","title":"baddns-thorough","text":"<p>Run all baddns modules and submodules.</p> <code>baddns-thorough.yml</code> ~/.bbot/presets/baddns-thorough.yml<pre><code>description: Run all baddns modules and submodules.\n\n\nmodules:\n  - baddns\n  - baddns_zone\n  - baddns_direct\n\nconfig:\n  modules:\n    baddns:\n      enabled_submodules: [CNAME,references,MX,NS,TXT]\n</code></pre> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#cloud-enum","title":"cloud-enum","text":"<p>Enumerate cloud resources such as storage buckets, etc.</p> <code>cloud-enum.yml</code> ~/.bbot/presets/cloud-enum.yml<pre><code>description: Enumerate cloud resources such as storage buckets, etc.\n\ninclude:\n  - subdomain-enum\n\nflags:\n  - cloud-enum\n</code></pre> <p>Modules: 59</p>"},{"location":"scanning/presets_list/#code-enum","title":"code-enum","text":"<p>Enumerate Git repositories, Docker images, etc.</p> <code>code-enum.yml</code> ~/.bbot/presets/code-enum.yml<pre><code>description: Enumerate Git repositories, Docker images, etc.\n\nflags:\n  - code-enum\n</code></pre> <p>Modules: 16</p>"},{"location":"scanning/presets_list/#dirbust-heavy","title":"dirbust-heavy","text":"<p>Recursive web directory brute-force (aggressive)</p> <code>dirbust-heavy.yml</code> ~/.bbot/presets/web/dirbust-heavy.yml<pre><code>description: Recursive web directory brute-force (aggressive)\n\ninclude:\n  - spider\n\nflags:\n  - iis-shortnames\n\nmodules:\n  - ffuf\n  - wayback\n\nconfig:\n  modules:\n    iis_shortnames:\n      # we exploit the shortnames vulnerability to produce URL_HINTs which are consumed by ffuf_shortnames\n      detect_only: False\n    ffuf:\n      depth: 3\n      lines: 5000\n      extensions:\n        - php\n        - asp\n        - aspx\n        - ashx\n        - asmx\n        - jsp\n        - jspx\n        - 
cfm\n        - zip\n        - conf\n        - config\n        - xml\n        - json\n        - yml\n        - yaml\n    # emit URLs from wayback\n    wayback:\n      urls: True\n</code></pre> <p>Category: web</p> <p>Modules: 5</p>"},{"location":"scanning/presets_list/#dirbust-light","title":"dirbust-light","text":"<p>Basic web directory brute-force (surface-level directories only)</p> <code>dirbust-light.yml</code> ~/.bbot/presets/web/dirbust-light.yml<pre><code>description: Basic web directory brute-force (surface-level directories only)\n\ninclude:\n  - iis-shortnames\n\nmodules:\n  - ffuf\n\nconfig:\n  modules:\n    ffuf:\n      # wordlist size = 1000\n      lines: 1000\n</code></pre> <p>Category: web</p> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#dotnet-audit","title":"dotnet-audit","text":"<p>Comprehensive scan for all IIS/.NET specific modules and module settings</p> <code>dotnet-audit.yml</code> ~/.bbot/presets/web/dotnet-audit.yml<pre><code>description: Comprehensive scan for all IIS/.NET specific modules and module settings\n\n\ninclude:\n  - iis-shortnames\n\nmodules:\n  - httpx\n  - badsecrets\n  - ffuf_shortnames\n  - ffuf\n  - telerik\n  - ajaxpro\n  - dotnetnuke\n\nconfig:\n  modules:\n    ffuf:\n      extensions: asp,aspx,ashx,asmx,ascx\n    telerik:\n      exploit_RAU_crypto: True\n</code></pre> <p>Category: web</p> <p>Modules: 8</p>"},{"location":"scanning/presets_list/#email-enum","title":"email-enum","text":"<p>Enumerate email addresses from APIs, web crawling, etc.</p> <code>email-enum.yml</code> ~/.bbot/presets/email-enum.yml<pre><code>description: Enumerate email addresses from APIs, web crawling, etc.\n\nflags:\n  - email-enum\n\noutput_modules:\n  - emails\n</code></pre> <p>Modules: 7</p>"},{"location":"scanning/presets_list/#fast","title":"fast","text":"<p>Scan only the provided targets as fast as possible - no extra discovery</p> <code>fast.yml</code> ~/.bbot/presets/fast.yml<pre><code>description: Scan only the provided targets as fast as possible - no extra discovery\n\nexclude_modules:\n  - excavate\n\nconfig:\n  # only scan the exact targets specified\n  scope:\n    strict: true\n  # speed up dns resolution by doing A/AAAA only - not MX/NS/SRV/etc\n  dns:\n    minimal: true\n  # essential speculation only\n  modules:\n    speculate:\n      essential_only: true\n</code></pre> <p>Modules: 0</p>"},{"location":"scanning/presets_list/#iis-shortnames","title":"iis-shortnames","text":"<p>Recursively enumerate IIS shortnames</p> <code>iis-shortnames.yml</code> ~/.bbot/presets/web/iis-shortnames.yml<pre><code>description: Recursively enumerate IIS shortnames\n\nflags:\n  - iis-shortnames\n\nconfig:\n  modules:\n    iis_shortnames:\n      # exploit the vulnerability\n      detect_only: false\n</code></pre> <p>Category: web</p> <p>Modules: 3</p>"},{"location":"scanning/presets_list/#kitchen-sink","title":"kitchen-sink","text":"<p>Everything everywhere all at once</p> <code>kitchen-sink.yml</code> ~/.bbot/presets/kitchen-sink.yml<pre><code>description: Everything everywhere all at once\n\ninclude:\n  - subdomain-enum\n  - cloud-enum\n  - code-enum\n  - email-enum\n  - spider\n  - web-basic\n  - paramminer\n  - dirbust-light\n  - web-screenshots\n  - baddns-thorough\n\nconfig:\n  modules:\n    baddns:\n      enable_references: True\n</code></pre> <p>Modules: 86</p>"},{"location":"scanning/presets_list/#paramminer","title":"paramminer","text":"<p>Discover new web parameters via brute-force</p> <code>paramminer.yml</code> 
~/.bbot/presets/web/paramminer.yml<pre><code>description: Discover new web parameters via brute-force\n\nflags:\n  - web-paramminer\n\nmodules:\n  - httpx\n\nconfig:\n  web:\n    spider_distance: 1\n    spider_depth: 4\n</code></pre> <p>Category: web</p> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#spider","title":"spider","text":"<p>Recursive web spider</p> <code>spider.yml</code> ~/.bbot/presets/spider.yml<pre><code>description: Recursive web spider\n\nmodules:\n  - httpx\n\nblacklist:\n  # Prevent spider from invalidating sessions by logging out\n  - \"RE:/.*(sign|log)[_-]?out\"\n\nconfig:\n  web:\n    # how many links to follow in a row\n    spider_distance: 2\n    # don't follow links whose directory depth is higher than 4\n    spider_depth: 4\n    # maximum number of links to follow per page\n    spider_links_per_page: 25\n</code></pre> <p>Modules: 1</p>"},{"location":"scanning/presets_list/#subdomain-enum","title":"subdomain-enum","text":"<p>Enumerate subdomains via APIs, brute-force</p> <code>subdomain-enum.yml</code> ~/.bbot/presets/subdomain-enum.yml<pre><code>description: Enumerate subdomains via APIs, brute-force\n\nflags:\n  # enable every module with the subdomain-enum flag\n  - subdomain-enum\n\noutput_modules:\n  # output unique subdomains to TXT file\n  - subdomains\n\nconfig:\n  dns:\n    threads: 25\n    brute_threads: 1000\n  # put your API keys here\n  # modules:\n  #   github:\n  #     api_key: \"\"\n  #   chaos:\n  #     api_key: \"\"\n  #   securitytrails:\n  #     api_key: \"\"\n</code></pre> <p>Modules: 52</p>"},{"location":"scanning/presets_list/#web-basic","title":"web-basic","text":"<p>Quick web scan</p> <code>web-basic.yml</code> ~/.bbot/presets/web-basic.yml<pre><code>description: Quick web scan\n\ninclude:\n  - iis-shortnames\n\nflags:\n  - web-basic\n</code></pre> <p>Modules: 19</p>"},{"location":"scanning/presets_list/#web-screenshots","title":"web-screenshots","text":"<p>Take screenshots of webpages</p> <code>web-screenshots.yml</code> ~/.bbot/presets/web-screenshots.yml<pre><code>description: Take screenshots of webpages\n\nflags:\n  - web-screenshots\n\nconfig:\n  modules:\n    gowitness:\n      resolution_x: 1440\n      resolution_y: 900\n      # folder to output web screenshots (default is inside ~/.bbot/scans/scan_name)\n      output_path: \"\"\n      # whether to take screenshots of social media pages\n      social: True\n</code></pre> <p>Modules: 3</p>"},{"location":"scanning/presets_list/#web-thorough","title":"web-thorough","text":"<p>Aggressive web scan</p> <code>web-thorough.yml</code> ~/.bbot/presets/web-thorough.yml<pre><code>description: Aggressive web scan\n\ninclude:\n  # include the web-basic preset\n  - web-basic\n\nflags:\n  - web-thorough\n</code></pre> <p>Modules: 30</p>"},{"location":"scanning/presets_list/#table-of-default-presets","title":"Table of Default Presets","text":"<p>Here is the same data, but in a table:</p> Preset Category Description # Modules Modules baddns-thorough Run all baddns modules and submodules. 4 baddns, baddns_direct, baddns_zone, httpx cloud-enum Enumerate cloud resources such as storage buckets, etc. 
59 anubisdb, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, social, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, virustotal, wayback, zoomeye code-enum Enumerate Git repositories, Docker images, etc. 16 apkpure, code_repository, docker_pull, dockerhub, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, httpx, postman, postman_download, social, trufflehog dirbust-heavy web Recursive web directory brute-force (aggressive) 5 ffuf, ffuf_shortnames, httpx, iis_shortnames, wayback dirbust-light web Basic web directory brute-force (surface-level directories only) 4 ffuf, ffuf_shortnames, httpx, iis_shortnames dotnet-audit web Comprehensive scan for all IIS/.NET specific modules and module settings 8 ajaxpro, badsecrets, dotnetnuke, ffuf, ffuf_shortnames, httpx, iis_shortnames, telerik email-enum Enumerate email addresses from APIs, web crawling, etc. 7 dehashed, dnscaa, emailformat, hunterio, pgp, skymem, sslcert fast Scan only the provided targets as fast as possible - no extra discovery 0 iis-shortnames web Recursively enumerate IIS shortnames 3 ffuf_shortnames, httpx, iis_shortnames kitchen-sink Everything everywhere all at once 86 anubisdb, apkpure, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, crt, dehashed, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, docker_pull, dockerhub, emailformat, ffuf, ffuf_shortnames, filedownload, fullhunt, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, gowitness, hackertarget, httpx, hunterio, iis_shortnames, internetdb, ipneighbor, leakix, myssl, ntlm, oauth, otx, paramminer_cookies, paramminer_getparams, paramminer_headers, passivetotal, pgp, postman, postman_download, rapiddns, robots, secretsdb, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, virustotal, wappalyzer, wayback, zoomeye paramminer web Discover new web parameters via brute-force 4 httpx, paramminer_cookies, paramminer_getparams, paramminer_headers spider Recursive web spider 1 httpx subdomain-enum Enumerate subdomains via APIs, brute-force 52 anubisdb, asn, azure_realm, azure_tenant, baddns_direct, baddns_zone, bevigil, binaryedge, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, social, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, virustotal, wayback, zoomeye 
web-basic Quick web scan 19 azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, ffuf_shortnames, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, sslcert, wappalyzer web-screenshots Take screenshots of webpages 3 gowitness, httpx, social web-thorough Aggressive web scan 30 ajaxpro, azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dotnetnuke, ffuf_shortnames, filedownload, generic_ssrf, git, host_header, httpx, hunt, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, smuggler, sslcert, telerik, url_manipulation, wappalyzer"},{"location":"scanning/tips_and_tricks/","title":"Tips and Tricks","text":"<p>Below are some helpful tricks to help you in your adventures.</p>"},{"location":"scanning/tips_and_tricks/#change-verbosity-during-scan","title":"Change Verbosity During Scan","text":"<p>Press enter during a BBOT scan to change the log level. This will allow you to see debugging messages, etc.</p>"},{"location":"scanning/tips_and_tricks/#kill-individual-module-during-scan","title":"Kill Individual Module During Scan","text":"<p>Sometimes a certain module can get stuck or slow down the scan. If this happens and you want to kill it, just type \"<code>kill &lt;module&gt;</code>\" in the terminal and press enter. This will kill and disable the module for the rest of the scan.</p> <p>You can also kill multiple modules at a time by specifying them in a space or comma-separated list:</p> <pre><code>kill httpx sslcert\n</code></pre>"},{"location":"scanning/tips_and_tricks/#common-config-changes","title":"Common Config Changes","text":""},{"location":"scanning/tips_and_tricks/#speed-up-slow-modules","title":"Speed Up Slow Modules","text":"<p>BBOT modules can be parallelized so that more than one instance runs at a time. Many modules are already set to reasonable defaults:</p> <pre><code>class baddns(BaseModule):\n    module_threads = 8\n</code></pre> <p>To override this, you can set a module's <code>module_threads</code> in the config:</p> <pre><code># increase baddns threads to 20\nbbot -t evilcorp.com -m baddns -c modules.baddns.module_threads=20\n</code></pre>"},{"location":"scanning/tips_and_tricks/#boost-dns-brute-force-speed","title":"Boost DNS Brute-force Speed","text":"<p>If you have a fast internet connection or are running BBOT from a cloud VM, you can speed up subdomain enumeration by cranking the threads for <code>massdns</code>. The default is <code>1000</code>, which is about 1MB/s of DNS traffic:</p> <pre><code># massdns with 5000 resolvers, about 5MB/s\nbbot -t evilcorp.com -f subdomain-enum -c dns.brute_threads=5000\n</code></pre>"},{"location":"scanning/tips_and_tricks/#web-spider","title":"Web Spider","text":"<p>The web spider is great for finding juicy data like subdomains, email addresses, and javascript secrets buried in webpages. However, since it can lengthen the duration of a scan, it's disabled by default. To enable the web spider, you must increase the value of <code>web.spider_distance</code>.</p> <p>The web spider is controlled with three config values:</p> <ul> <li><code>web.spider_depth</code> (default: <code>1</code>): the maximum directory depth allowed. 
This is to prevent the spider from delving too deep into a website.</li> <li><code>web.spider_distance</code> (<code>0</code> == all spidering disabled, default: <code>0</code>): the maximum number of links that can be followed in a row. This is designed to limit the spider in cases where <code>web.spider_depth</code> fails (e.g. for an ecommerce website with thousands of base-level URLs).</li> <li><code>web.spider_links_per_page</code> (default: <code>25</code>): the maximum number of links per page that can be followed. This is designed to save you in cases where a single page has hundreds or thousands of links.</li> </ul> <p>Here is a typical example:</p> spider.yml<pre><code>config:\n  web:\n    spider_depth: 2\n    spider_distance: 2\n    spider_links_per_page: 25\n</code></pre> <pre><code># run the web spider against www.evilcorp.com\nbbot -t www.evilcorp.com -m httpx -c spider.yml\n</code></pre> <p>You can also pair the web spider with subdomain enumeration:</p> <pre><code># spider every subdomain of evilcorp.com\nbbot -t evilcorp.com -f subdomain-enum -c spider.yml\n</code></pre>"},{"location":"scanning/tips_and_tricks/#ingesting-bbot-data-into-siem-elastic-splunk","title":"Ingesting BBOT Data Into SIEM (Elastic, Splunk)","text":"<p>If your goal is to run a BBOT scan and later feed its data into a SIEM such as Elastic, be sure to enable this option when scanning:</p> <pre><code>bbot -t evilcorp.com -c modules.json.siem_friendly=true\n</code></pre> <p>This ensures the <code>.data</code> event attribute is always the same type (a dictionary), by nesting it like so: <pre><code>{\n  \"type\": \"DNS_NAME\",\n  \"data\": {\n    \"DNS_NAME\": \"blacklanternsecurity.com\"\n  }\n}\n</code></pre></p>"},{"location":"scanning/tips_and_tricks/#custom-http-proxy","title":"Custom HTTP Proxy","text":"<p>Web pentesters may appreciate BBOT's ability to quickly populate Burp Suite site maps for all subdomains in a target. If your scan includes gowitness, this will capture the traffic as if you manually visited each website in your browser -- including auxiliary web resources and javascript API calls. To accomplish this, set the <code>web.http_proxy</code> config option like so:</p> <pre><code># enumerate subdomains, take web screenshots, proxy through Burp\nbbot -t evilcorp.com -f subdomain-enum -m gowitness -c web.http_proxy=http://127.0.0.1:8080\n</code></pre>"},{"location":"scanning/tips_and_tricks/#display-http_response-events","title":"Display <code>HTTP_RESPONSE</code> Events","text":"<p>BBOT's <code>httpx</code> module emits <code>HTTP_RESPONSE</code> events, but by default they're hidden from output. These events contain the full raw HTTP body along with headers, etc. If you want to see them, you can modify <code>omit_event_types</code> in the config:</p> ~/.bbot/config/bbot.yml<pre><code>omit_event_types:\n  - URL_UNVERIFIED\n  # - HTTP_RESPONSE\n</code></pre>"},{"location":"scanning/tips_and_tricks/#display-out-of-scope-events","title":"Display Out-of-scope Events","text":"<p>By default, BBOT only shows in-scope events (with a few exceptions for things like storage buckets). 
If you want to see events that BBOT is emitting internally (such as for DNS resolution, etc.), you can increase <code>scope.report_distance</code> in the config or on the command line like so: <pre><code># display events up to scope distance 2 (default == 0)\nbbot -f subdomain-enum -t evilcorp.com -c scope.report_distance=2\n</code></pre></p>"},{"location":"scanning/tips_and_tricks/#speed-up-scans-by-disabling-dns-resolution","title":"Speed Up Scans By Disabling DNS Resolution","text":"<p>If you already have a list of discovered targets (e.g. URLs), you can speed up the scan by skipping BBOT's DNS resolution. You can do this by setting <code>dns.disable</code> to <code>true</code>:</p> <pre><code># completely disable DNS resolution\nbbot -m httpx gowitness wappalyzer -t urls.txt -c dns.disable=true\n</code></pre> <p>Note that the above setting completely disables DNS resolution, meaning even <code>A</code> and <code>AAAA</code> records are not resolved. This can cause problems if you're using an IP whitelist or blacklist. In this case, you'll want to use <code>dns.minimal</code> instead:</p> <pre><code># only resolve A and AAAA records\nbbot -m httpx gowitness wappalyzer -t urls.txt -c dns.minimal=true\n</code></pre>"},{"location":"scanning/tips_and_tricks/#faq","title":"FAQ","text":""},{"location":"scanning/tips_and_tricks/#what-is-url_unverified","title":"What is <code>URL_UNVERIFIED</code>?","text":"<p><code>URL_UNVERIFIED</code> events are URLs that haven't yet been visited by <code>httpx</code>. Once <code>httpx</code> visits them, it reraises them as <code>URL</code>s, tagged with their resulting status code.</p> <p>For example, when <code>excavate</code> gets an <code>HTTP_RESPONSE</code> event, it extracts links from the raw HTTP response as <code>URL_UNVERIFIED</code>s and then passes them back to <code>httpx</code> to be visited.</p> <p>By default, <code>URL_UNVERIFIED</code>s are hidden from output. If you want to see all of them including the out-of-scope ones, you can do it by changing <code>omit_event_types</code> and <code>scope.report_distance</code> in the config like so:</p> <pre><code># visit www.evilcorp.com and extract all the links\nbbot -t www.evilcorp.com -m httpx -c omit_event_types=[] scope.report_distance=2\n</code></pre>"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Getting Started","text":"<p>A BBOT scan in real-time - visualization with VivaGraphJS</p>"},{"location":"#installation","title":"Installation","text":"<p>Supported Platforms</p> <p>Only Linux is supported at this time. Windows and macOS are not supported. If you use one of these platforms, consider using Docker.</p> <p>BBOT offers multiple methods of installation, including pipx and Docker. If you plan to dev on BBOT, see Installation (Poetry).</p>"},{"location":"#python-pip-pipx","title":"Python (pip / pipx)","text":"Note <p><code>pipx</code> installs BBOT inside its own virtual environment.</p> <pre><code># stable version\npipx install bbot\n\n# bleeding edge (dev branch)\npipx install --pip-args '\\--pre' bbot\n\n# execute bbot command\nbbot --help\n</code></pre>"},{"location":"#docker","title":"Docker","text":"<p>Docker images are provided, along with helper script <code>bbot-docker.sh</code> to persist your scan data.</p> <pre><code># bleeding edge (dev)\ndocker run -it blacklanternsecurity/bbot --help\n\n# stable\ndocker run -it blacklanternsecurity/bbot:stable --help\n\n# helper script\ngit clone https://github.com/blacklanternsecurity/bbot &amp;&amp; cd bbot\n./bbot-docker.sh --help\n</code></pre>"},{"location":"#example-commands","title":"Example Commands","text":"<p>Below are some examples of common scans.</p> <p>Subdomains:</p> <pre><code># Perform a full subdomain enumeration on evilcorp.com\nbbot -t evilcorp.com -p subdomain-enum\n</code></pre> <p>Subdomains (passive only):</p> <pre><code># Perform a passive-only subdomain enumeration on evilcorp.com\nbbot -t evilcorp.com -p subdomain-enum -rf passive\n</code></pre> <p>Subdomains + port scan + web screenshots:</p> <pre><code># Port-scan every subdomain, screenshot every webpage, output to current directory\nbbot -t evilcorp.com -p subdomain-enum -m portscan gowitness -n my_scan -o .\n</code></pre> <p>Subdomains + basic web scan:</p> <pre><code># A basic web scan includes wappalyzer, robots.txt, and other non-intrusive web modules\nbbot -t evilcorp.com -p subdomain-enum web-basic\n</code></pre> <p>Web spider:</p> <pre><code># Crawl www.evilcorp.com up to a max depth of 2, automatically extracting emails, secrets, etc.\nbbot -t www.evilcorp.com -p spider -c web.spider_distance=2 web.spider_depth=2\n</code></pre> <p>Everything everywhere all at once:</p> <pre><code># Subdomains, emails, cloud buckets, port scan, basic web, web screenshots, nuclei\nbbot -t evilcorp.com -p kitchen-sink\n</code></pre>"},{"location":"#api-keys","title":"API Keys","text":"<p>BBOT works just fine without API keys. However, there are certain modules that need them to function. 
If you have API keys and want to make use of these modules, you can place them either in your preset:</p> my_preset.yml<pre><code>description: My custom subdomain enum preset\n\ninclude:\n  - subdomain-enum\n  - cloud-enum\n\nconfig:\n  modules:\n    shodan_dns:\n      api_key: deadbeef\n    virustotal:\n      api_key: cafebabe\n</code></pre> <p>...in BBOT's global YAML config (<code>~/.config/bbot/bbot.yml</code>):</p> <p>Note: this will ensure the API keys are used in all scans, regardless of preset.</p> ~/.config/bbot/bbot.yml<pre><code>modules:\n  shodan_dns:\n    api_key: deadbeef\n  virustotal:\n    api_key: cafebabe\n</code></pre> <p>...or directly on the command-line:</p> <pre><code># specify API key with -c\nbbot -t evilcorp.com -f subdomain-enum -c modules.shodan_dns.api_key=deadbeef modules.virustotal.api_key=cafebabe\n</code></pre> <p>For more information, see Configuration. For a full list of modules, including which ones require API keys, see List of Modules.</p> <p>Next Up: Scanning --&gt;</p>"},{"location":"comparison/","title":"Comparison to Other Tools","text":"<p>BBOT does a lot more than just subdomain enumeration. However, subdomain enumeration is arguably the most important part of OSINT, and since there's so many subdomain enumeration tools out there, they're the easiest class of tool to compare it to.</p> <p>Thanks to BBOT's recursive nature (and its <code>dnsbrute_mutations</code> module with its NLP-powered subdomain mutations), it typically finds about 20-25% more than other tools such as <code>Amass</code> or <code>theHarvester</code>. This holds true especially for larger targets like <code>delta.com</code> (1000+ subdomains):</p>"},{"location":"comparison/#subdomains-found","title":"Subdomains Found","text":""},{"location":"comparison/#runtimes-lower-is-better","title":"Runtimes (Lower is Better)","text":"<p>For a detailed analysis of this data, please see Subdomain Enumeration Tool Face-Off</p>"},{"location":"comparison/#ebaycom-larger-domain","title":"Ebay.com (larger domain)","text":"<p>Note that in this benchmark, Spiderfoot crashed after ~20 minutes due to excessive memory usage. Amass never finished and had to be cancelled after 24h. All other tools finished successfully.</p>"},{"location":"contribution/","title":"Contribution","text":"<p>We welcome contributions! If you have an idea for a new module, or are a Python developer who wants to get involved, please fork us or come talk to us on Discord.</p> <p>To get started devving, see the following links:</p> <ul> <li>Setting up a Dev Environment</li> <li>How to Write a BBOT Module</li> <li>Discord Bot Example</li> </ul>"},{"location":"how_it_works/","title":"How it Works","text":""},{"location":"how_it_works/#bbots-recursive-philosophy","title":"BBOT's Recursive Philosophy","text":"<p>It's well-known that when you're doing recon, it's best to do it recursively. However, there are very few recursive tools, and the main reason for this is because making a recursive tool is hard. In particular, it's very difficult to build a large-scale recursive system that interacts with the internet, and to keep it stable. When we first set out to make BBOT, we didn't know this, and it was definitely a lesson we learned the hard way. BBOT's stability is thanks to its extensive Unit Tests.</p> <p>BBOT inherits its recursive philosophy from Spiderfoot, which means it is also event-driven. 
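In practice, this means every module declares the event types it watches and produces, and implements an asynchronous handler that consumes incoming events and emits new ones. As a rough sketch (a hypothetical module for illustration only, not one that ships with BBOT), the skeleton looks something like this:</p> <pre><code>from bbot.modules.base import BaseModule\n\nclass my_example_module(BaseModule):\n    # event types this module consumes and produces\n    watched_events = [\"DNS_NAME\"]\n    produced_events = [\"FINDING\"]\n    # flags must include \"safe\" or \"aggressive\", and \"passive\" or \"active\"\n    flags = [\"passive\", \"safe\"]\n\n    async def handle_event(self, event):\n        # inspect the incoming event, then hand a new event back to the scan\n        await self.emit_event(\n            {\"host\": event.data, \"description\": \"example finding\"},\n            \"FINDING\",\n            parent=event,\n        )\n</code></pre> <p>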
Each of BBOT's 100+ modules consumes a certain type of Event, uses it to discover something new, and produces new events, which get distributed to all the other modules. This happens again and again -- thousands of times during a scan -- spidering outwards in a recursive web of discovery.</p> <p>Below is an interactive graph showing the relationships between modules and the event types they produce and consume.</p>"},{"location":"how_it_works/#how-bbot-modules-work-together","title":"How BBOT Modules Work Together","text":"<p>Each BBOT module does one specific task, such as querying an API for subdomains, or running a tool like <code>nuclei</code>, and is carefully designed to work together with other modules inside BBOT's recursive system.</p> <p>For example, the <code>portscan</code> module consumes <code>DNS_NAME</code> and produces <code>OPEN_TCP_PORT</code>. The <code>sslcert</code> module consumes <code>OPEN_TCP_PORT</code> and produces <code>DNS_NAME</code>. You can see how even these two modules, when enabled together, will feed each other recursively.</p> <p></p> <p>Because of this, enabling even one module has the potential to increase your results exponentially. This is exactly how BBOT is able to outperform other tools.</p> <p>To learn more about how events flow inside BBOT, see BBOT Internal Architecture.</p>"},{"location":"release_history/","title":"Release History","text":""},{"location":"release_history/#212-nov-1-2024","title":"2.1.2 - Nov 1, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1909</li> </ul>"},{"location":"release_history/#211-oct-31-2024","title":"2.1.1 - Oct 31, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1885</li> </ul>"},{"location":"release_history/#210-oct-18-2024","title":"2.1.0 - Oct 18, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1724</li> </ul>"},{"location":"release_history/#201-aug-29-2024","title":"2.0.1 - Aug 29, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1650</li> </ul>"},{"location":"release_history/#200-aug-9-2024","title":"2.0.0 - Aug 9, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1424</li> </ul>"},{"location":"release_history/#118-may-29-2024","title":"1.1.8 - May 29, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1382</li> </ul>"},{"location":"release_history/#117-may-15-2024","title":"1.1.7 - May 15, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1119</li> </ul>"},{"location":"release_history/#116-feb-21-2024","title":"1.1.6 - Feb 21, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/1002</li> </ul>"},{"location":"release_history/#115-jan-15-2024","title":"1.1.5 - Jan 15, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/996</li> </ul>"},{"location":"release_history/#114-jan-11-2024","title":"1.1.4 - Jan 11, 2024","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/837</li> </ul>"},{"location":"release_history/#113-nov-4-2023","title":"1.1.3 - Nov 4, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/823</li> </ul>"},{"location":"release_history/#112-nov-3-2023","title":"1.1.2 - Nov 3, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/777</li> </ul>"},{"location":"release_history/#111-oct-11-2023","title":"1.1.1 - Oct 11, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/668</li> 
</ul>"},{"location":"release_history/#110-aug-4-2023","title":"1.1.0 - Aug 4, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/598</li> </ul>"},{"location":"release_history/#105-mar-10-2023","title":"1.0.5 - Mar 10, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/352</li> </ul>"},{"location":"release_history/#105-mar-10-2023_1","title":"1.0.5 - Mar 10, 2023","text":"<ul> <li>https://github.com/blacklanternsecurity/bbot/pull/352</li> </ul>"},{"location":"troubleshooting/","title":"Troubleshooting","text":""},{"location":"troubleshooting/#installation-troubleshooting","title":"Installation troubleshooting","text":"<ul> <li><code>Fatal error from pip prevented installation.</code></li> <li><code>ERROR: No matching distribution found for bbot</code></li> <li><code>bash: /home/user/.local/bin/bbot: /home/user/.local/pipx/venvs/bbot/bin/python: bad interpreter</code></li> </ul> <p>If you get errors resembling any of the above, it's probably because your Python version is too old. To install a newer version (3.9+ is required), you will need to do something like this: <pre><code># install a newer version of python\nsudo apt install python3.9 python3.9-venv\n# install pipx\npython3.9 -m pip install --user pipx\n# add pipx to your path\npython3.9 -m pipx ensurepath\n# reboot\nreboot\n# install bbot\npython3.9 -m pipx install bbot\n# run bbot\nbbot --help\n</code></pre></p>"},{"location":"troubleshooting/#modulenotfounderror","title":"<code>ModuleNotFoundError</code>","text":"<p>If you run into a <code>ModuleNotFoundError</code>, try running your <code>bbot</code> command again with <code>--force-deps</code>. This will repair your modules' Python dependencies.</p>"},{"location":"troubleshooting/#regenerate-config","title":"Regenerate Config","text":"<p>As a troubleshooting step it is sometimes useful to clear out your older configs and let BBOT generate new ones. This will ensure that new defaults are property restored, etc. 
<pre><code># make a backup of the old configs\nmv ~/.config/bbot ~/.config/bbot.bak\n\n# generate new configs\nbbot\n</code></pre></p>"},{"location":"dev/","title":"BBOT Developer Reference","text":"<p>BBOT exposes a Python API that allows you to create, start, and stop scans.</p> <p>Documented in this section are commonly-used classes and functions within BBOT, along with usage examples.</p>"},{"location":"dev/#adding-bbot-to-your-python-project","title":"Adding BBOT to Your Python Project","text":"<p>If you are using Poetry, you can add BBOT to your python environment like this:</p> <pre><code># stable\npoetry add bbot\n\n# bleeding-edge (dev branch)\npoetry add bbot --allow-prereleases\n</code></pre>"},{"location":"dev/#running-a-bbot-scan-from-python","title":"Running a BBOT Scan from Python","text":""},{"location":"dev/#synchronous","title":"Synchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nif __name__ == \"__main__\":\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    for event in scan.start():\n        print(event)\n</code></pre>"},{"location":"dev/#asynchronous","title":"Asynchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nasync def main():\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    async for event in scan.async_start():\n        print(event.json())\n\nif __name__ == \"__main__\":\n    import asyncio\n    asyncio.run(main())\n</code></pre> <p>For a full listing of <code>Scanner</code> attributes and functions, see the <code>Scanner</code> Code Reference.</p>"},{"location":"dev/#multiple-targets","title":"Multiple Targets","text":"<p>You can specify any number of targets:</p> <pre><code># create a scan against multiple targets\nscan = Scanner(\n    \"evilcorp.com\",\n    \"evilcorp.org\",\n    \"evilcorp.ce\",\n    \"4.3.2.1\",\n    \"1.2.3.4/24\",\n    presets=[\"subdomain-enum\"]\n)\n\n# this is the same as:\ntargets = [\"evilcorp.com\", \"evilcorp.org\", \"evilcorp.ce\", \"4.3.2.1\", \"1.2.3.4/24\"]\nscan = Scanner(*targets, presets=[\"subdomain-enum\"])\n</code></pre> <p>For more details, including which types of targets are valid, see Targets</p>"},{"location":"dev/#other-custom-options","title":"Other Custom Options","text":"<p>In many cases, using a Preset like <code>subdomain-enum</code> is sufficient. However, the <code>Scanner</code> is flexible and accepts many other arguments that can override the default functionality. 
You can specify <code>flags</code>, <code>modules</code>, <code>output_modules</code>, a <code>whitelist</code> or <code>blacklist</code>, and custom <code>config</code> options:</p> <pre><code># create a scan against multiple targets\nscan = Scanner(\n    # targets\n    \"evilcorp.com\",\n    \"4.3.2.1\",\n    # enable these presets\n    presets=[\"subdomain-enum\"],\n    # whitelist these hosts\n    whitelist=[\"evilcorp.com\", \"evilcorp.org\"],\n    # blacklist these hosts\n    blacklist=[\"prod.evilcorp.com\"],\n    # also enable these individual modules\n    modules=[\"nuclei\", \"ipstack\"],\n    # exclude modules with these flags\n    exclude_flags=[\"slow\"],\n    # custom config options\n    config={\n        \"modules\": {\n            \"nuclei\": {\n                \"tags\": \"apache,nginx\"\n            }\n        }\n    }\n)\n</code></pre> <p>For a list of all the possible scan options, see the <code>Presets</code> Code Reference</p>"},{"location":"dev/architecture/","title":"BBOT Internal Architecture","text":"<p>Here is a basic overview of BBOT's internal architecture.</p>"},{"location":"dev/architecture/#queues","title":"Queues","text":"<p>Being both recursive and event-driven, BBOT makes heavy use of queues. These enable smooth communication between the modules, and ensure that large numbers of events can be produced without slowing down or clogging up the scan.</p> <p>Every module in BBOT has both an incoming and outgoing queue. Event types matching the module's <code>WATCHED_EVENTS</code> (e.g. <code>DNS_NAME</code>) are queued in its incoming queue, and processed by the module's <code>handle_event()</code> (or <code>handle_batch()</code> in the case of batched modules). If the module finds anything interesting, it creates an event and places it in its outgoing queue, to be processed by the scan and redistributed to other modules.</p>"},{"location":"dev/architecture/#event-flow","title":"Event Flow","text":"<p>Below is a graph showing the internal event flow in BBOT. White lines represent queues. Notice how some modules run in sequence, while others run in parallel. With the exception of a few specific modules, most BBOT modules are parallelized.</p> <p></p> <p>For a higher-level overview, see How it Works.</p>"},{"location":"dev/basemodule/","title":"BaseModule","text":""},{"location":"dev/basemodule/#bbot.modules.base.BaseModule","title":"BaseModule","text":"<p>The base class for all BBOT modules.</p> <p>Attributes:</p> <ul> <li> <code>watched_events</code>               (<code>List</code>)           \u2013            <p>Event types to watch.</p> </li> <li> <code>produced_events</code>               (<code>List</code>)           \u2013            <p>Event types to produce.</p> </li> <li> <code>meta</code>               (<code>Dict</code>)           \u2013            <p>Metadata about the module, such as whether authentication is required and a description.</p> </li> <li> <code>flags</code>               (<code>List</code>)           \u2013            <p>Flags indicating the type of module (must have at least \"safe\" or \"aggressive\" and \"passive\" or \"active\").</p> </li> <li> <code>deps_modules</code>               (<code>List</code>)           \u2013            <p>Other BBOT modules this module depends on. Empty list by default.</p> </li> <li> <code>deps_pip</code>               (<code>List</code>)           \u2013            <p>Python dependencies to install via pip. 
Empty list by default.</p> </li> <li> <code>deps_apt</code>               (<code>List</code>)           \u2013            <p>APT package dependencies to install. Empty list by default.</p> </li> <li> <code>deps_shell</code>               (<code>List</code>)           \u2013            <p>Other dependencies installed via shell commands. Uses ansible.builtin.shell. Empty list by default.</p> </li> <li> <code>deps_ansible</code>               (<code>List</code>)           \u2013            <p>Additional Ansible tasks for complex dependencies. Empty list by default.</p> </li> <li> <code>accept_dupes</code>               (<code>bool</code>)           \u2013            <p>Whether to accept incoming duplicate events. Default is False.</p> </li> <li> <code>suppress_dupes</code>               (<code>bool</code>)           \u2013            <p>Whether to suppress outgoing duplicate events. Default is True.</p> </li> <li> <code>per_host_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per host. Default is False.</p> </li> <li> <code>per_hostport_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per host:port. Default is False.</p> </li> <li> <code>per_domain_only</code>               (<code>bool</code>)           \u2013            <p>Limit the module to only scanning once per domain. Default is False.</p> </li> <li> <code>scope_distance_modifier</code>               (<code>(int, None)</code>)           \u2013            <p>Modifies scope distance acceptance for events. Default is 0. <pre><code>None == accept all events\n2 == accept events up to and including the scan's configured search distance plus two\n1 == accept events up to and including the scan's configured search distance plus one\n0 == (DEFAULT) accept events up to and including the scan's configured search distance\n</code></pre></p> </li> <li> <code>target_only</code>               (<code>bool</code>)           \u2013            <p>Accept only the initial target event(s). Default is False.</p> </li> <li> <code>in_scope_only</code>               (<code>bool</code>)           \u2013            <p>Accept only explicitly in-scope events. Default is False.</p> </li> <li> <code>options</code>               (<code>Dict</code>)           \u2013            <p>Customizable options for the module, e.g., {\"api_key\": \"\"}. Empty dict by default.</p> </li> <li> <code>options_desc</code>               (<code>Dict</code>)           \u2013            <p>Descriptions for options, e.g., {\"api_key\": \"API Key\"}. Empty dict by default.</p> </li> <li> <code>module_threads</code>               (<code>int</code>)           \u2013            <p>Maximum concurrent instances of handle_event() or handle_batch(). Default is 1.</p> </li> <li> <code>batch_size</code>               (<code>int</code>)           \u2013            <p>Size of batches processed by handle_batch(). Default is 1.</p> </li> <li> <code>batch_wait</code>               (<code>int</code>)           \u2013            <p>Seconds to wait before force-submitting a batch. Default is 10.</p> </li> <li> <code>api_failure_abort_threshold</code>               (<code>int</code>)           \u2013            <p>Threshold for setting error state after failed HTTP requests (only takes effect when <code>api_request()</code> is used. 
Default is 5.</p> </li> <li> <code>_preserve_graph</code>               (<code>bool</code>)           \u2013            <p>When set to True, accept events that may be duplicates but are necessary for construction of complete graph. Typically only enabled for output modules that need to maintain full chains of events, e.g. <code>neo4j</code> and <code>json</code>. Default is False.</p> </li> <li> <code>_stats_exclude</code>               (<code>bool</code>)           \u2013            <p>Whether to exclude this module from scan statistics. Default is False.</p> </li> <li> <code>_qsize</code>               (<code>int</code>)           \u2013            <p>Outgoing queue size (0 for infinite). Default is 0.</p> </li> <li> <code>_priority</code>               (<code>int</code>)           \u2013            <p>Priority level of events raised by this module, 1-5. Default is 3.</p> </li> <li> <code>_name</code>               (<code>str</code>)           \u2013            <p>Module name, overridden automatically. Default is 'base'.</p> </li> <li> <code>_type</code>               (<code>str</code>)           \u2013            <p>Module type, for differentiating between normal and output modules. Default is 'scan'.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>class BaseModule:\n    \"\"\"The base class for all BBOT modules.\n\n    Attributes:\n        watched_events (List): Event types to watch.\n\n        produced_events (List): Event types to produce.\n\n        meta (Dict): Metadata about the module, such as whether authentication is required and a description.\n\n        flags (List): Flags indicating the type of module (must have at least \"safe\" or \"aggressive\" and \"passive\" or \"active\").\n\n        deps_modules (List): Other BBOT modules this module depends on. Empty list by default.\n\n        deps_pip (List): Python dependencies to install via pip. Empty list by default.\n\n        deps_apt (List): APT package dependencies to install. Empty list by default.\n\n        deps_shell (List): Other dependencies installed via shell commands. Uses [ansible.builtin.shell](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/shell_module.html). Empty list by default.\n\n        deps_ansible (List): Additional Ansible tasks for complex dependencies. Empty list by default.\n\n        accept_dupes (bool): Whether to accept incoming duplicate events. Default is False.\n\n        suppress_dupes (bool): Whether to suppress outgoing duplicate events. Default is True.\n\n        per_host_only (bool): Limit the module to only scanning once per host. Default is False.\n\n        per_hostport_only (bool): Limit the module to only scanning once per host:port. Default is False.\n\n        per_domain_only (bool): Limit the module to only scanning once per domain. Default is False.\n\n        scope_distance_modifier (int, None): Modifies scope distance acceptance for events. Default is 0.\n            ```\n            None == accept all events\n            2 == accept events up to and including the scan's configured search distance plus two\n            1 == accept events up to and including the scan's configured search distance plus one\n            0 == (DEFAULT) accept events up to and including the scan's configured search distance\n            ```\n\n        target_only (bool): Accept only the initial target event(s). Default is False.\n\n        in_scope_only (bool): Accept only explicitly in-scope events. 
Default is False.\n\n        options (Dict): Customizable options for the module, e.g., {\"api_key\": \"\"}. Empty dict by default.\n\n        options_desc (Dict): Descriptions for options, e.g., {\"api_key\": \"API Key\"}. Empty dict by default.\n\n        module_threads (int): Maximum concurrent instances of handle_event() or handle_batch(). Default is 1.\n\n        batch_size (int): Size of batches processed by handle_batch(). Default is 1.\n\n        batch_wait (int): Seconds to wait before force-submitting a batch. Default is 10.\n\n        api_failure_abort_threshold (int): Threshold for setting error state after failed HTTP requests (only takes effect when `api_request()` is used. Default is 5.\n\n        _preserve_graph (bool): When set to True, accept events that may be duplicates but are necessary for construction of complete graph. Typically only enabled for output modules that need to maintain full chains of events, e.g. `neo4j` and `json`. Default is False.\n\n        _stats_exclude (bool): Whether to exclude this module from scan statistics. Default is False.\n\n        _qsize (int): Outgoing queue size (0 for infinite). Default is 0.\n\n        _priority (int): Priority level of events raised by this module, 1-5. Default is 3.\n\n        _name (str): Module name, overridden automatically. Default is 'base'.\n\n        _type (str): Module type, for differentiating between normal and output modules. Default is 'scan'.\n    \"\"\"\n\n    watched_events = []\n    produced_events = []\n    meta = {\"auth_required\": False, \"description\": \"Base module\"}\n    flags = []\n    options = {}\n    options_desc = {}\n\n    deps_modules = []\n    deps_pip = []\n    deps_apt = []\n    deps_shell = []\n    deps_ansible = []\n\n    accept_dupes = False\n    suppress_dupes = True\n    per_host_only = False\n    per_hostport_only = False\n    per_domain_only = False\n    scope_distance_modifier = 0\n    target_only = False\n    in_scope_only = False\n\n    _module_threads = 1\n    _batch_size = 1\n    batch_wait = 10\n\n    # API retries, etc.\n    _api_retries = 2\n    # disable the module after this many failed attempts in a row\n    _api_failure_abort_threshold = 3\n    # sleep for this many seconds after being rate limited\n    _429_sleep_interval = 30\n\n    default_discovery_context = \"{module} discovered {event.type}: {event.data}\"\n\n    _preserve_graph = False\n    _stats_exclude = False\n    _qsize = 1000\n    _priority = 3\n    _name = \"base\"\n    _type = \"scan\"\n    _intercept = False\n    _shuffle_incoming_queue = True\n\n    def __init__(self, scan):\n        \"\"\"Initializes a module instance.\n\n        Args:\n            scan: The BBOT scan object associated with this module instance.\n\n        Attributes:\n            scan: The scan object associated with this module.\n\n            errored (bool): Whether the module has errored out. 
Default is False.\n        \"\"\"\n        self.scan = scan\n        self.errored = False\n        self._log = None\n        self._incoming_event_queue = None\n        self._outgoing_event_queue = None\n        # track incoming events to prevent unwanted duplicates\n        self._incoming_dup_tracker = set()\n        # tracks which subprocesses are running under this module\n        self._proc_tracker = set()\n        # seconds since we've submitted a batch\n        self._last_submitted_batch = None\n        # additional callbacks to be executed alongside self.cleanup()\n        self.cleanup_callbacks = []\n        self._cleanedup = False\n        self._watched_events = None\n\n        self._task_counter = TaskCounter()\n\n        # string constant\n        self._custom_filter_criteria_msg = \"it did not meet custom filter criteria\"\n\n        self._api_keys = []\n\n        # track number of failures (for .api_request())\n        self._api_request_failures = 0\n\n        self._tasks = []\n        self._event_received = asyncio.Condition()\n        self._event_queued = asyncio.Condition()\n\n        # used for optional \"per host\" tracking\n        self._per_host_tracker = set()\n\n    async def setup(self):\n        \"\"\"\n        Performs one-time setup tasks for the module.\n\n        This method is responsible for preparing the module for its operation, which may include tasks\n        such as downloading necessary resources, validating configuration parameters, or other preliminary\n        checks.\n\n        Returns:\n            tuple:\n                - bool or None: A status indicating the outcome of the setup process. Returns `True` if\n                the setup was successful, `None` for a soft-fail where the module setup did not succeed\n                but the scan will continue with the module disabled, and `False` for a hard-fail where\n                the setup failure causes the scan to abort.\n                - str, optional: A reason for the setup failure, provided only when the setup does not\n                succeed (i.e., returns `None` or `False`).\n\n        Examples:\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n            &gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n            &gt;&gt;&gt;         return None, \"No API key specified\"\n\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     try:\n            &gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n            &gt;&gt;&gt;     except WordlistError as e:\n            &gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n            &gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n\n            &gt;&gt;&gt; async def setup(self):\n            &gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n            &gt;&gt;&gt;     # Success: Setup completed without issues\n            &gt;&gt;&gt;     return True\n        \"\"\"\n\n        return True\n\n    async def handle_event(self, event):\n        \"\"\"Asynchronously handles incoming events that the module is configured to watch.\n\n        This method is automatically invoked when an event that matches any in `watched_events` is encountered during a scan. 
Override this method to implement custom event-handling logic for your module.\n\n        Args:\n            event (Event): The event object containing details about the incoming event.\n\n        Note:\n            This method should be overridden if the `batch_size` attribute of the module is set to 1.\n\n        Returns:\n            None\n        \"\"\"\n        pass\n\n    async def handle_batch(self, *events):\n        \"\"\"Handles incoming events in batches for optimized processing.\n\n        This method is automatically called when multiple events that match any in `watched_events` are encountered and the `batch_size` attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.\n\n        Args:\n            *events (Event): A variable number of Event objects to be processed in a batch.\n\n        Note:\n            This method should be overridden if the `batch_size` attribute of the module is set to a value greater than 1.\n\n        Returns:\n            None\n        \"\"\"\n        pass\n\n    async def filter_event(self, event):\n        \"\"\"Asynchronously filters incoming events based on custom criteria.\n\n        Override this method for more granular control over which events are accepted by your module. This method is called automatically before `handle_event()` for each incoming event that matches any in `watched_events`.\n\n        Args:\n            event (Event): The incoming Event object to be filtered.\n\n        Returns:\n            tuple: A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns `(True, None)` to indicate acceptance without reason.\n\n        Note:\n            This method should be overridden if the module requires custom logic for event filtering.\n        \"\"\"\n        return True\n\n    async def finish(self):\n        \"\"\"Asynchronously performs final tasks as the scan nears completion.\n\n        This method can be overridden to execute any necessary finalization logic. For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.\n\n        Returns:\n            None\n\n        Warnings:\n            This method may be called multiple times since it can raise events, which may re-trigger the \"finish\" phase of the scan. Optional to override.\n        \"\"\"\n        return\n\n    async def report(self):\n        \"\"\"Asynchronously executes a final task after the scan is complete but before cleanup.\n\n        This method can be overridden to aggregate data and raise summary events at the end of the scan.\n\n        Returns:\n            None\n\n        Note:\n            This method is called only once per scan.\n        \"\"\"\n        return\n\n    async def cleanup(self):\n        \"\"\"Asynchronously performs final cleanup operations after the scan is complete.\n\n        This method can be overridden to implement custom cleanup logic. 
It is called only once per scan and may not raise events.\n\n        Returns:\n            None\n\n        Note:\n            This method is called only once per scan and may not raise events.\n        \"\"\"\n        return\n\n    async def require_api_key(self):\n        \"\"\"\n        Asynchronously checks if an API key is required and valid.\n\n        Args:\n            None\n\n        Returns:\n            bool or tuple: Returns True if API key is valid and ready.\n                          Returns a tuple (None, \"error message\") otherwise.\n\n        Notes:\n            - Fetches the API key from the configuration.\n            - Calls the 'ping()' method to test API accessibility.\n            - Sets the API key readiness status accordingly.\n        \"\"\"\n        self.api_key = self.config.get(\"api_key\", \"\")\n        if self.auth_secret:\n            try:\n                await self.ping()\n                self.hugesuccess(f\"API is ready\")\n                return True, \"\"\n            except Exception as e:\n                self.trace(traceback.format_exc())\n                return None, f\"Error with API ({str(e).strip()})\"\n        else:\n            return None, \"No API key set\"\n\n    @property\n    def api_key(self):\n        if self._api_keys:\n            return self._api_keys[0]\n\n    @api_key.setter\n    def api_key(self, api_keys):\n        if isinstance(api_keys, str):\n            api_keys = [api_keys]\n        self._api_keys = list(api_keys)\n\n    def cycle_api_key(self):\n        if len(self._api_keys) &gt; 1:\n            self.verbose(f\"Cycling API key\")\n            self._api_keys.insert(0, self._api_keys.pop())\n        else:\n            self.debug(f\"No extra API keys to cycle\")\n\n    @property\n    def api_retries(self):\n        return max(self._api_retries + 1, len(self._api_keys))\n\n    @property\n    def api_failure_abort_threshold(self):\n        return (self.api_retries * self._api_failure_abort_threshold) + 1\n\n    async def ping(self, url=None):\n        \"\"\"Asynchronously checks the health of the configured API.\n\n        This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.\n\n        The method uses the `ping_url` attribute if defined, or falls back to a provided URL. If neither is available, no request is made.\n\n        Args:\n            url (str, optional): A specific URL to use for the ping request. 
If not provided, the method will use the `ping_url` attribute.\n\n        Returns:\n            None\n\n        Raises:\n            ValueError: If the API response is not successful (status code != 200).\n\n        Example Usage:\n            To use this method, simply define the `ping_url` attribute in your module:\n\n            class MyModule(BaseModule):\n                ping_url = \"https://api.example.com/ping\"\n\n            Alternatively, you can override this method for more complex health checks:\n\n            async def ping(self):\n                r = await self.api_request(f\"{self.base_url}/complex-health-check\")\n                if r.status_code != 200 or r.json().get('status') != 'healthy':\n                    raise ValueError(f\"API unhealthy: {r.text}\")\n        \"\"\"\n        if url is None:\n            url = getattr(self, \"ping_url\", \"\")\n        if url:\n            r = await self.api_request(url)\n            if getattr(r, \"status_code\", 0) != 200:\n                response_text = getattr(r, \"text\", \"no response from server\")\n                raise ValueError(response_text)\n\n    @property\n    def batch_size(self):\n        batch_size = self.config.get(\"batch_size\", None)\n        # only allow overriding the batch size if its default value is greater than 1\n        # this prevents modules from being accidentally neutered by an incorrect batch_size setting\n        if batch_size is None or self._batch_size == 1:\n            batch_size = self._batch_size\n        return batch_size\n\n    @property\n    def module_threads(self):\n        module_threads = self.config.get(\"module_threads\", None)\n        if module_threads is None:\n            module_threads = self._module_threads\n        return module_threads\n\n    @property\n    def auth_secret(self):\n        \"\"\"Indicates if the module is properly configured for authentication.\n\n        This read-only property should be used to check whether all necessary attributes (e.g., API keys, tokens, etc.) are configured to perform authenticated requests in the module. 
Commonly used in setup or initialization steps.\n\n        Returns:\n            bool: True if the module is properly configured for authentication, otherwise False.\n        \"\"\"\n        return getattr(self, \"api_key\", \"\")\n\n    def get_watched_events(self):\n        \"\"\"Retrieve the set of events that the module is interested in observing.\n\n        Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.\n\n        Returns:\n            set: The set of event types that this module will handle.\n        \"\"\"\n        if self._watched_events is None:\n            self._watched_events = set(self.watched_events)\n        return self._watched_events\n\n    async def _handle_batch(self):\n        \"\"\"\n        Asynchronously handles a batch of events in the module.\n\n        Args:\n            None\n\n        Returns:\n            bool: True if events were submitted for processing, False otherwise.\n\n        Notes:\n            - The method is wrapped in a task counter to monitor asynchronous operations.\n            - Checks if there are any events in the incoming queue and module is not in an error state.\n            - Invokes '_events_waiting()' to fetch a batch of events.\n            - Calls the module's 'handle_batch()' method to process these events.\n            - If a \"FINISHED\" event is found, invokes 'finish()' method of the module.\n        \"\"\"\n        finish = False\n        async with self._task_counter.count(f\"{self.name}.handle_batch()\") as counter:\n            submitted = False\n            if self.batch_size &lt;= 1:\n                return\n            if self.num_incoming_events &gt; 0:\n                events, finish = await self._events_waiting()\n                if events and not self.errored:\n                    counter.n = len(events)\n                    self.verbose(f\"Handling batch of {len(events):,} events\")\n                    submitted = True\n                    async with self.scan._acatch(f\"{self.name}.handle_batch()\"):\n                        await self.handle_batch(*events)\n                    self.verbose(f\"Finished handling batch of {len(events):,} events\")\n        if finish:\n            context = f\"{self.name}.finish()\"\n            async with self.scan._acatch(context), self._task_counter.count(context):\n                await self.finish()\n        return submitted\n\n    def make_event(self, *args, **kwargs):\n        \"\"\"Create an event for the scan.\n\n        Raises a validation error if the event could not be created, unless raise_error is set to False.\n\n        Args:\n            *args: Positional arguments to be passed to the scan's make_event method.\n            **kwargs: Keyword arguments to be passed to the scan's make_event method.\n            raise_error (bool, optional): Whether to raise a validation error if the event could not be created. 
Defaults to False.\n\n        Examples:\n            &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n            &gt;&gt;&gt; await self.emit_event(new_event)\n\n        Returns:\n            Event or None: The created event, or None if a validation error occurred and raise_error was False.\n\n        Raises:\n            ValidationError: If the event could not be validated and raise_error is True.\n        \"\"\"\n        raise_error = kwargs.pop(\"raise_error\", False)\n        module = kwargs.pop(\"module\", None)\n        if module is None:\n            if (not args) or getattr(args[0], \"module\", None) is None:\n                kwargs[\"module\"] = self\n        try:\n            event = self.scan.make_event(*args, **kwargs)\n        except ValidationError as e:\n            if raise_error:\n                raise\n            self.warning(f\"{e}\")\n            return\n        return event\n\n    async def emit_event(self, *args, **kwargs):\n        \"\"\"Emit an event to the event queue and distribute it to interested modules.\n\n        This is how modules \"return\" data.\n\n        The method first creates an event object by calling `self.make_event()` with the provided arguments.\n        Then, the event is queued for outgoing distribution using `self.queue_outgoing_event()`.\n\n        Args:\n            *args: Positional arguments to be passed to `self.make_event()` for event creation.\n            **kwargs: Keyword arguments to be passed for event creation or configuration of the emit action.\n                ```markdown\n                - on_success_callback: Optional callback function to execute upon successful event emission.\n                - abort_if: Optional condition under which the event emission should be aborted.\n                - quick: Optional flag to indicate whether the event should be processed quickly.\n                ```\n\n        Examples:\n            &gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n\n            &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n            &gt;&gt;&gt; await self.emit_event(new_event)\n\n        Returns:\n            None\n\n        Raises:\n            ValidationError: If the event cannot be validated (handled in `self.make_event()`).\n        \"\"\"\n        event_kwargs = dict(kwargs)\n        emit_kwargs = {}\n        for o in (\"on_success_callback\", \"abort_if\", \"quick\"):\n            v = event_kwargs.pop(o, None)\n            if v is not None:\n                emit_kwargs[o] = v\n        event = self.make_event(*args, **event_kwargs)\n        if event:\n            await self.queue_outgoing_event(event, **emit_kwargs)\n        return event\n\n    async def _events_waiting(self, batch_size=None):\n        \"\"\"\n        Asynchronously fetches events from the incoming_event_queue, up to a specified batch size.\n\n        Args:\n            None\n\n        Returns:\n            tuple: A tuple containing two elements:\n                - events (list): A list of acceptable events from the queue.\n                - finish (bool): A flag indicating if a \"FINISHED\" event is encountered.\n\n        Notes:\n            - The method pulls events from incoming_event_queue using 'get_nowait()'.\n            - Events go through '_event_postcheck()' for validation.\n            - \"FINISHED\" events are handled differently and the finish flag is set to True.\n            - If the queue is empty or the batch size is reached, the loop 
breaks.\n        \"\"\"\n        if batch_size is None:\n            batch_size = self.batch_size\n        events = []\n        finish = False\n        while self.incoming_event_queue:\n            if batch_size != -1 and len(events) &gt; self.batch_size:\n                break\n            try:\n                event = self.incoming_event_queue.get_nowait()\n                self.debug(f\"Got {event} from {getattr(event, 'module', 'unknown_module')}\")\n                acceptable, reason = await self._event_postcheck(event)\n                if acceptable:\n                    if event.type == \"FINISHED\":\n                        finish = True\n                    else:\n                        events.append(event)\n                        self.scan.stats.event_consumed(event, self)\n                elif reason:\n                    self.debug(f\"Not accepting {event} because {reason}\")\n            except asyncio.queues.QueueEmpty:\n                break\n        return events, finish\n\n    @property\n    def num_incoming_events(self):\n        ret = 0\n        if self.incoming_event_queue is not False:\n            ret = self.incoming_event_queue.qsize()\n        return ret\n\n    def start(self):\n        self._tasks = [\n            asyncio.create_task(self._worker(), name=f\"{self.scan.name}.{self.name}._worker()\")\n            for _ in range(self.module_threads)\n        ]\n\n    async def _setup(self):\n        \"\"\"\n        Asynchronously sets up the module by invoking its 'setup()' method.\n\n        This method catches exceptions during setup, sets the module's error state if necessary, and determines the\n        status code based on the result of the setup process.\n\n        Args:\n            None\n\n        Returns:\n            tuple: A tuple containing the module's name, status (True for success, False for hard-fail, None for soft-fail),\n            and an optional status message.\n\n        Raises:\n            Exception: Captured exceptions from the 'setup()' method are logged, but not propagated.\n\n        Notes:\n            - The 'setup()' method can return either a simple boolean status or a tuple of status and message.\n            - A WordlistError exception triggers a soft-fail status.\n            - The debug log will contain setup status information for the module.\n        \"\"\"\n        status_codes = {False: \"hard-fail\", None: \"soft-fail\", True: \"success\"}\n\n        status = False\n        self.debug(f\"Setting up module {self.name}\")\n        try:\n            result = await self.setup()\n            if type(result) == tuple and len(result) == 2:\n                status, msg = result\n            else:\n                status = result\n                msg = status_codes[status]\n            self.debug(f\"Finished setting up module {self.name}\")\n        except Exception as e:\n            self.set_error_state(f\"Unexpected error during module setup: {e}\", critical=True)\n            msg = f\"{e}\"\n            self.trace()\n        return self, status, str(msg)\n\n    async def _worker(self):\n        \"\"\"\n        The core worker loop for the module, responsible for handling events from the incoming event queue.\n\n        This method is a coroutine and is run asynchronously. Multiple instances can run simultaneously based on\n        the 'module_threads' configuration. 
The worker dequeues events from 'incoming_event_queue', performs\n        necessary prechecks, and passes the event to the appropriate handler function.\n\n        Args:\n            None\n\n        Returns:\n            None\n\n        Raises:\n            asyncio.CancelledError: If the worker is cancelled during its operation.\n\n        Notes:\n            - The worker is sensitive to the 'stopping' flag of the scan. It will terminate if this flag is set.\n            - The worker handles backpressure by pausing when the outgoing event queue is full.\n            - Batch processing is supported and is activated when 'batch_size' &gt; 1.\n            - Each event is subject to a post-check via '_event_postcheck()' to decide whether it should be handled.\n            - Special 'FINISHED' events trigger the 'finish()' method of the module.\n        \"\"\"\n        async with self.scan._acatch(context=self._worker, unhandled_is_critical=True):\n            try:\n                while not self.scan.stopping and not self.errored:\n                    # hold the reigns if our outgoing queue is full\n                    if self._qsize &gt; 0 and self.outgoing_event_queue.qsize() &gt;= self._qsize:\n                        await asyncio.sleep(0.1)\n                        continue\n\n                    if self.batch_size &gt; 1:\n                        submitted = await self._handle_batch()\n                        if not submitted:\n                            async with self._event_received:\n                                await self._event_received.wait()\n\n                    else:\n                        try:\n                            if self.incoming_event_queue is not False:\n                                event = await self.incoming_event_queue.get()\n                            else:\n                                self.debug(f\"Event queue is in bad state\")\n                                break\n                        except asyncio.queues.QueueEmpty:\n                            continue\n                        self.debug(f\"Got {event} from {getattr(event, 'module', 'unknown_module')}\")\n                        async with self._task_counter.count(f\"event_postcheck({event})\"):\n                            acceptable, reason = await self._event_postcheck(event)\n                        if acceptable:\n                            if event.type == \"FINISHED\":\n                                context = f\"{self.name}.finish()\"\n                                async with self.scan._acatch(context), self._task_counter.count(context):\n                                    await self.finish()\n                            else:\n                                context = f\"{self.name}.handle_event({event})\"\n                                self.scan.stats.event_consumed(event, self)\n                                self.debug(f\"Handling {event}\")\n                                async with self.scan._acatch(context), self._task_counter.count(context):\n                                    await self.handle_event(event)\n                                self.debug(f\"Finished handling {event}\")\n                        else:\n                            self.debug(f\"Not accepting {event} because {reason}\")\n            except asyncio.CancelledError:\n                # this trace was used for debugging leaked CancelledErrors from inside httpx\n                # self.log.trace(\"Worker cancelled\")\n                raise\n            except BaseException as e:\n             
   if self.helpers.in_exception_chain(e, (KeyboardInterrupt,)):\n                    self.scan.stop()\n                else:\n                    self.error(f\"Critical failure in module {self.name}: {e}\")\n                    self.error(traceback.format_exc())\n        self.log.trace(f\"Worker stopped\")\n\n    @property\n    def max_scope_distance(self):\n        if self.in_scope_only or self.target_only:\n            return 0\n        if self.scope_distance_modifier is None:\n            return 999\n        return max(0, self.scan.scope_search_distance + self.scope_distance_modifier)\n\n    def _event_precheck(self, event):\n        \"\"\"\n        Pre-checks an event to determine if it should be accepted by the module for queuing.\n\n        This method is called when an event is about to be enqueued into the module's incoming event queue.\n        It applies various filters such as special signal event types, module error state, watched event types, and more\n        to decide whether or not the event should be enqueued.\n\n        Args:\n            event (Event): The event object to check.\n\n        Returns:\n            tuple: A tuple (bool, str) where the bool indicates if the event should be accepted, and the str gives the reason.\n\n        Examples:\n            &gt;&gt;&gt; result, reason = self._event_precheck(event)\n            &gt;&gt;&gt; if result:\n            ...     self.incoming_event_queue.put_nowait(event)\n            ... else:\n            ...     self.debug(f\"Not accepting {event} because {reason}\")\n\n        Notes:\n            - The method considers special signal event types like \"FINISHED\".\n            - Checks whether the module is in an error state.\n            - Checks if the event type matches the types this module is interested in (`watched_events`).\n            - Checks for events tagged as 'target' if the module has `target_only` flag set.\n            - Applies specific filtering based on event type and module name.\n        \"\"\"\n\n        # special signal event types\n        if event.type in (\"FINISHED\",):\n            return True, \"its type is FINISHED\"\n        if self.errored:\n            return False, f\"module is in error state\"\n        # exclude non-watched types\n        if not any(t in self.get_watched_events() for t in (\"*\", event.type)):\n            return False, \"its type is not in watched_events\"\n        if self.target_only:\n            if \"target\" not in event.tags:\n                return False, \"it did not meet target_only filter criteria\"\n\n        # exclude certain URLs (e.g. 
javascript):\n        # TODO: revisit this after httpx rework\n        if event.type.startswith(\"URL\") and self.name != \"httpx\" and \"httpx-only\" in event.tags:\n            return False, \"its extension was listed in url_extension_httpx_only\"\n\n        return True, \"precheck succeeded\"\n\n    async def _event_postcheck(self, event):\n        \"\"\"\n        A simple wrapper for dup tracking\n        \"\"\"\n        # special exception for \"FINISHED\" event\n        if event.type in (\"FINISHED\",):\n            return True, \"\"\n        acceptable, reason = await self._event_postcheck_inner(event)\n        if acceptable:\n            # check duplicates\n            is_incoming_duplicate, reason = self.is_incoming_duplicate(event, add=True)\n            if is_incoming_duplicate and not self.accept_dupes:\n                return False, f\"module has already seen it\" + (f\" ({reason})\" if reason else \"\")\n\n        return acceptable, reason\n\n    async def _event_postcheck_inner(self, event):\n        \"\"\"\n        Post-checks an event to determine if it should be accepted by the module for handling.\n\n        This method is called when an event is dequeued from the module's incoming event queue, right before it is actually processed.\n        It applies various filters such as scope, custom filtering logic, and per-host tracking to decide the event's fate.\n\n        Args:\n            event (Event): The event object to check.\n\n        Returns:\n            tuple: A tuple (bool, str) where the bool indicates if the event should be accepted, and the str gives the reason.\n\n        Notes:\n            - Override the `filter_event` method for custom filtering logic.\n            - This method also maintains host-based tracking when the `per_host_only` or similar flags are set.\n            - The method will also update event production stats for output modules.\n        \"\"\"\n        # force-output certain events to the graph\n        if self._is_graph_important(event):\n            return True, \"event is critical to the graph\"\n\n        # check scope distance\n        filter_result, reason = self._scope_distance_check(event)\n        if not filter_result:\n            return filter_result, reason\n\n        # custom filtering\n        async with self.scan._acatch(context=self.filter_event):\n            try:\n                filter_result = await self.filter_event(event)\n            except Exception as e:\n                msg = f\"Unhandled exception in {self.name}.filter_event({event}): {e}\"\n                self.error(msg)\n                return False, msg\n            msg = str(self._custom_filter_criteria_msg)\n            with suppress(ValueError, TypeError):\n                filter_result, reason = filter_result\n                msg += f\": {reason}\"\n            if not filter_result:\n                return False, msg\n\n        self.debug(f\"{event} passed post-check\")\n        return True, \"\"\n\n    def _scope_distance_check(self, event):\n        if self.in_scope_only:\n            if event.scope_distance &gt; 0:\n                return False, \"it did not meet in_scope_only filter criteria\"\n        if self.scope_distance_modifier is not None:\n            if event.scope_distance &lt; 0:\n                return False, f\"its scope_distance ({event.scope_distance}) is invalid.\"\n            elif event.scope_distance &gt; self.max_scope_distance:\n                return (\n                    False,\n                    f\"its scope_distance 
({event.scope_distance}) exceeds the maximum allowed by the scan ({self.scan.scope_search_distance}) + the module ({self.scope_distance_modifier}) == {self.max_scope_distance}\",\n                )\n        return True, \"\"\n\n    async def _cleanup(self):\n        if not self._cleanedup:\n            self._cleanedup = True\n            for callback in [self.cleanup] + self.cleanup_callbacks:\n                context = f\"{self.name}.cleanup()\"\n                if callable(callback):\n                    async with self.scan._acatch(context), self._task_counter.count(context):\n                        await self.helpers.execute_sync_or_async(callback)\n\n    async def queue_event(self, event):\n        \"\"\"\n        Asynchronously queues an incoming event to the module's event queue for further processing.\n\n        The function performs an initial check to see if the event is acceptable for queuing.\n        If the event passes the check, it is put into the `incoming_event_queue`.\n\n        Args:\n            event: The event object to be queued.\n\n        Returns:\n            None: The function doesn't return anything but modifies the state of the `incoming_event_queue`.\n\n        Examples:\n            &gt;&gt;&gt; await self.queue_event(some_event)\n\n        Raises:\n            AttributeError: If the module is not in an acceptable state to queue incoming events.\n        \"\"\"\n        async with self._task_counter.count(\"queue_event()\", _log=False):\n            if self.incoming_event_queue is False:\n                self.debug(f\"Not in an acceptable state to queue incoming event\")\n                return\n            acceptable, reason = self._event_precheck(event)\n            if not acceptable:\n                if reason and reason != \"its type is not in watched_events\":\n                    self.debug(f\"Not queueing {event} because {reason}\")\n                return\n            else:\n                self.debug(f\"Queueing {event} because {reason}\")\n            try:\n                self.incoming_event_queue.put_nowait(event)\n                async with self._event_received:\n                    self._event_received.notify()\n                if event.type != \"FINISHED\":\n                    self.scan._new_activity = True\n            except AttributeError:\n                self.debug(f\"Not in an acceptable state to queue incoming event\")\n\n    async def queue_outgoing_event(self, event, **kwargs):\n        \"\"\"\n        Queues an outgoing event to the module's outgoing event queue for further processing.\n\n        The function attempts to put the event into the `outgoing_event_queue` immediately.\n        If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.\n\n        Args:\n            event: The event object to be queued.\n            **kwargs: Additional keyword arguments to be associated with the event.\n\n        Returns:\n            None: The function doesn't return anything but modifies the state of the `outgoing_event_queue`.\n\n        Examples:\n            &gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n\n        Raises:\n            AttributeError: If the module is not in an acceptable state to queue outgoing events.\n        \"\"\"\n        try:\n            await self.outgoing_event_queue.put((event, kwargs))\n        except AttributeError:\n            self.debug(f\"Not in an acceptable state to queue outgoing 
event\")\n\n    def set_error_state(self, message=None, clear_outgoing_queue=False, critical=False):\n        \"\"\"\n        Puts the module into an errored state where it cannot accept new events. Optionally logs a warning message.\n\n        The function sets the module's `errored` attribute to True and logs a warning with the optional message.\n        It also clears the incoming event queue to prevent further processing and updates its status to False.\n\n        Args:\n            message (str, optional): Additional message to be logged along with the warning.\n\n        Returns:\n            None: The function doesn't return anything but updates the `errored` state and clears the incoming event queue.\n\n        Examples:\n            &gt;&gt;&gt; self.set_error_state()\n            &gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n\n        Notes:\n            - The function sets `self._incoming_event_queue` to False to prevent its further use.\n            - If the module was already in an errored state, the function will not reset the error state or the queue.\n        \"\"\"\n        if not self.errored:\n            log_msg = \"Setting error state\"\n            if message is not None:\n                log_msg += f\": {message}\"\n            if critical:\n                log_fn = self.error\n            else:\n                log_fn = self.warning\n            log_fn(log_msg)\n            self.errored = True\n            # clear incoming queue\n            if self.incoming_event_queue is not False:\n                self.debug(f\"Emptying event_queue\")\n                with suppress(asyncio.queues.QueueEmpty):\n                    while 1:\n                        self.incoming_event_queue.get_nowait()\n                # set queue to None to prevent its use\n                # if there are leftover objects in the queue, the scan will hang.\n                self._incoming_event_queue = False\n\n            if clear_outgoing_queue:\n                with suppress(asyncio.queues.QueueEmpty):\n                    while 1:\n                        self.outgoing_event_queue.get_nowait()\n\n    def is_incoming_duplicate(self, event, add=False):\n        if event.type in (\"FINISHED\",):\n            return False, \"\"\n        reason = \"\"\n        try:\n            event_hash = self._incoming_dedup_hash(event)\n        except Exception as e:\n            msg = f\"Unhandled exception in {self.name}._incoming_dedup_hash({event}): {e}\"\n            self.error(msg)\n            return True, msg\n        with suppress(TypeError, ValueError):\n            event_hash, reason = event_hash\n        is_dup = event_hash in self._incoming_dup_tracker\n        if add:\n            self._incoming_dup_tracker.add(event_hash)\n        return is_dup, reason\n\n    def _incoming_dedup_hash(self, event):\n        \"\"\"\n        Determines the criteria for what is considered to be a duplicate event if `accept_dupes` is False.\n        \"\"\"\n        if self.per_host_only:\n            return self.get_per_host_hash(event), \"per_host_only=True\"\n        if self.per_hostport_only:\n            return self.get_per_hostport_hash(event), \"per_hostport_only=True\"\n        elif self.per_domain_only:\n            return self.get_per_domain_hash(event), \"per_domain_only=True\"\n        return hash(event), \"\"\n\n    def _outgoing_dedup_hash(self, event):\n        \"\"\"\n        Determines the criteria for what is considered to be a duplicate event if `suppress_dupes` is True.\n\n   
     We take into account the `internal` attribute because we don't want an internal event (which isn't distributed to output modules)\n        to inadvertently suppress a non-internal event.\n        \"\"\"\n        return hash((event, self.name, event.internal, event.always_emit))\n\n    def get_per_host_hash(self, event):\n        \"\"\"\n        Computes a per-host hash value for a given event. This method may be optionally overridden in subclasses.\n\n        The function uses the event's `host` to create a string to be hashed.\n\n        Args:\n            event (Event): The event object containing host information.\n\n        Returns:\n            int: The hash value computed for the host.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n            &gt;&gt;&gt; self.get_per_host_hash(event)\n        \"\"\"\n        return hash(event.host)\n\n    def get_per_hostport_hash(self, event):\n        \"\"\"\n        Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.\n\n        The function uses the event's `host`, `port`, and `scheme` (for URLs) to create a string to be hashed.\n        The hash value is used for distinguishing events related to the same host.\n\n        Args:\n            event (Event): The event object containing host, port, or parsed URL information.\n\n        Returns:\n            int: The hash value computed for the host.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n            &gt;&gt;&gt; self.get_per_hostport_hash(event)\n        \"\"\"\n        parsed = getattr(event, \"parsed_url\", None)\n        if parsed is None:\n            to_hash = self.helpers.make_netloc(event.host, event.port)\n        else:\n            to_hash = f\"{parsed.scheme}://{parsed.netloc}/\"\n        return hash(to_hash)\n\n    def get_per_domain_hash(self, event):\n        \"\"\"\n        Computes a per-domain hash value for a given event. 
This method may be optionally overridden in subclasses.\n\n        Events with the same root domain will receive the same hash value.\n\n        Args:\n            event (Event): The event object containing host, port, or parsed URL information.\n\n        Returns:\n            int: The hash value computed for the domain.\n\n        Examples:\n            &gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n            &gt;&gt;&gt; self.get_per_domain_hash(event)\n        \"\"\"\n        _, domain = self.helpers.split_domain(event.host)\n        return hash(domain)\n\n    @property\n    def name(self):\n        return str(self._name)\n\n    @property\n    def helpers(self):\n        return self.scan.helpers\n\n    @property\n    def status(self):\n        \"\"\"\n        Provides the current status of the module as a dictionary.\n\n        The dictionary contains the following keys:\n            - 'events': A sub-dictionary with 'incoming' and 'outgoing' keys, representing the number of events in the respective queues.\n            - 'tasks': The current value of the task counter.\n            - 'errored': A boolean value indicating if the module is in an error state.\n            - 'running': A boolean value indicating if the module is currently processing data.\n\n        Returns:\n            dict: A dictionary containing the current status of the module.\n\n        Examples:\n            &gt;&gt;&gt; self.status\n            {'events': {'incoming': 5, 'outgoing': 2}, 'tasks': 3, 'errored': False, 'running': True}\n        \"\"\"\n        status = {\n            \"events\": {\"incoming\": self.num_incoming_events, \"outgoing\": self.outgoing_event_queue.qsize()},\n            \"tasks\": self._task_counter.value,\n            \"errored\": self.errored,\n        }\n        status[\"running\"] = self.running\n        return status\n\n    @property\n    def running(self):\n        \"\"\"Property indicating whether the module is currently processing data.\n\n        This property checks if the task counter (`self._task_counter.value`) is greater than zero,\n        indicating that there are ongoing tasks in the module.\n\n        Returns:\n            bool: True if the module is currently processing data, False otherwise.\n        \"\"\"\n        return self._task_counter.value &gt; 0\n\n    @property\n    def finished(self):\n        \"\"\"Property indicating whether the module has finished processing.\n\n        This property checks three conditions to determine if the module is finished:\n        1. The module is not currently running (`self.running` is False).\n        2. The number of incoming events in the queue is zero or less (`self.num_incoming_events &lt;= 0`).\n        3. 
The number of outgoing events in the queue is zero or less (`self.outgoing_event_queue.qsize() &lt;= 0`).\n\n        Returns:\n            bool: True if the module has finished processing, False otherwise.\n        \"\"\"\n        return not self.running and self.num_incoming_events &lt;= 0 and self.outgoing_event_queue.qsize() &lt;= 0\n\n    async def run_process(self, *args, **kwargs):\n        kwargs[\"_proc_tracker\"] = self._proc_tracker\n        return await self.helpers.run(*args, **kwargs)\n\n    async def run_process_live(self, *args, **kwargs):\n        kwargs[\"_proc_tracker\"] = self._proc_tracker\n        async for line in self.helpers.run_live(*args, **kwargs):\n            yield line\n\n    def prepare_api_request(self, url, kwargs):\n        \"\"\"\n        Prepare an API request by adding the necessary authentication - header, bearer token, etc.\n        \"\"\"\n        if self.api_key:\n            url = url.format(api_key=self.api_key)\n            if not \"headers\" in kwargs:\n                kwargs[\"headers\"] = {}\n            kwargs[\"headers\"][\"Authorization\"] = f\"Bearer {self.api_key}\"\n        return url, kwargs\n\n    async def api_request(self, *args, **kwargs):\n        \"\"\"\n        Makes an HTTP request while automatically:\n            - avoiding rate limits (sleep/retry)\n            - cycling API keys\n            - cancelling after too many failed attempts\n        \"\"\"\n        url = args[0] if args else kwargs.pop(\"url\", \"\")\n\n        # loop until we have a successful request\n        for _ in range(self.api_retries):\n            if not \"headers\" in kwargs:\n                kwargs[\"headers\"] = {}\n            new_url, kwargs = self.prepare_api_request(url, kwargs)\n            kwargs[\"url\"] = new_url\n\n            r = await self.helpers.request(**kwargs)\n            success = False if r is None else r.is_success\n\n            if success:\n                self._api_request_failures = 0\n            else:\n                status_code = getattr(r, \"status_code\", 0)\n                response_text = getattr(r, \"text\", \"\")\n                self.trace(f\"API response to {url} failed with status code {status_code}: {response_text}\")\n                self._api_request_failures += 1\n                if self._api_request_failures &gt;= self.api_failure_abort_threshold:\n                    self.set_error_state(\n                        f\"Setting error state due to {self._api_request_failures:,} failed HTTP requests\"\n                    )\n                else:\n                    # sleep for a bit if we're being rate limited\n                    if status_code == 429:\n                        self.verbose(\n                            f\"Sleeping for {self._429_sleep_interval:,} seconds due to rate limit (HTTP status: 429)\"\n                        )\n                        await asyncio.sleep(self._429_sleep_interval)\n                    elif self._api_keys:\n                        # if request failed, cycle API keys and try again\n                        self.cycle_api_key()\n                    continue\n            break\n\n        return r\n\n    async def api_page_iter(self, url, page_size=100, json=True, next_key=None, **requests_kwargs):\n        \"\"\"\n        An asynchronous generator function for iterating through paginated API data.\n\n        This function continuously makes requests to a specified API URL, incrementing the page number\n        or applying a custom pagination function, and yields the received 
data one page at a time.\n        It is well-suited for APIs that provide paginated results.\n\n        Args:\n            url (str): The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.\n            page_size (int, optional): The number of items per page. Defaults to 100.\n            json (bool, optional): If True, attempts to deserialize the response content to a JSON object. Defaults to True.\n            next_key (callable, optional): A function that takes the last page's data and returns the URL for the next page. Defaults to None.\n            **requests_kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n        Yields:\n            dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.\n\n        Note:\n            The loop will continue indefinitely unless manually stopped. Make sure to break out of the loop once the last page has been received.\n\n        Examples:\n            &gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n            &gt;&gt;&gt; try:\n            &gt;&gt;&gt;     async for page in agen:\n            &gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n            &gt;&gt;&gt;         self.hugesuccess(subdomains)\n            &gt;&gt;&gt;         if not subdomains:\n            &gt;&gt;&gt;             break\n            &gt;&gt;&gt; finally:\n            &gt;&gt;&gt;     agen.aclose()\n        \"\"\"\n        page = 1\n        offset = 0\n        result = None\n        while 1:\n            if result and callable(next_key):\n                try:\n                    new_url = next_key(result)\n                except Exception as e:\n                    self.debug(f\"Failed to extract next page of results from {url}: {e}\")\n                    self.debug(traceback.format_exc())\n            else:\n                new_url = self.helpers.safe_format(url, page=page, page_size=page_size, offset=offset)\n            result = await self.api_request(new_url, **requests_kwargs)\n            if result is None:\n                self.verbose(f\"api_page_iter() got no response for {url}\")\n                break\n            try:\n                if json:\n                    result = result.json()\n                yield result\n            except Exception:\n                self.warning(f'Error in api_page_iter() for url: \"{new_url}\"')\n                self.trace(traceback.format_exc())\n                break\n            finally:\n                offset += page_size\n                page += 1\n\n    @property\n    def preset(self):\n        return self.scan.preset\n\n    @property\n    def config(self):\n        \"\"\"Property that provides easy access to the module's configuration in the scan's config.\n\n        This property serves as a shortcut to retrieve the module-specific configuration from\n        `self.scan.config`. 
If no configuration is found for this module, an empty dictionary is returned.\n\n        Returns:\n            dict: The configuration dictionary specific to this module.\n        \"\"\"\n        config = self.scan.config.get(\"modules\", {}).get(self.name, {})\n        if config is None:\n            config = {}\n        return config\n\n    @property\n    def incoming_event_queue(self):\n        if self._incoming_event_queue is None:\n            if self._shuffle_incoming_queue:\n                self._incoming_event_queue = ShuffleQueue()\n            else:\n                self._incoming_event_queue = asyncio.Queue()\n        return self._incoming_event_queue\n\n    @property\n    def outgoing_event_queue(self):\n        if self._outgoing_event_queue is None:\n            self._outgoing_event_queue = ShuffleQueue(self._qsize)\n        return self._outgoing_event_queue\n\n    @property\n    def priority(self):\n        \"\"\"\n        Gets the priority level of the module as an integer.\n\n        The priority level is constrained to be between 1 and 5, inclusive.\n        A lower value indicates a higher priority.\n\n        Returns:\n            int: The priority level of the module, constrained between 1 and 5.\n\n        Examples:\n            &gt;&gt;&gt; self.priority\n            3\n        \"\"\"\n        return int(max(1, min(5, self._priority)))\n\n    @property\n    def auth_required(self):\n        return self.meta.get(\"auth_required\", False)\n\n    @property\n    def http_timeout(self):\n        \"\"\"\n        Convenience shortcut to `http_timeout` in the config\n        \"\"\"\n        return self.scan.web_config.get(\"http_timeout\", 10)\n\n    @property\n    def log(self):\n        if getattr(self, \"_log\", None) is None:\n            self._log = logging.getLogger(f\"bbot.modules.{self.name}\")\n        return self._log\n\n    @property\n    def memory_usage(self):\n        \"\"\"Property that calculates the current memory usage of the module in bytes.\n\n        This property uses the `get_size` function to estimate the memory consumption\n        of the module object. The depth of the object graph traversal is limited to 3 levels\n        to avoid performance issues. Commonly shared objects like `self.scan`, `self.helpers`,\n        are excluded from the calculation to prevent double-counting.\n\n        Returns:\n            int: The estimated memory usage of the module in bytes.\n        \"\"\"\n        seen = {self.scan, self.helpers, self.log}  # noqa\n        return get_size(self, max_depth=3, seen=seen)\n\n    def __str__(self):\n        return self.name\n\n    def log_table(self, *args, **kwargs):\n        \"\"\"Logs a table to the console and optionally writes it to a file.\n\n        This function generates a table using `self.helpers.make_table`, then logs each line\n        of the table as an info-level log. If a table_name is provided, it also writes the table to a file.\n\n        Args:\n            *args: Variable length argument list to be passed to `self.helpers.make_table`.\n            **kwargs: Arbitrary keyword arguments. 
If 'table_name' is specified, the table will be written to a file.\n\n        Returns:\n            str: The generated table as a string.\n\n        Examples:\n            &gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n        \"\"\"\n        table_name = kwargs.pop(\"table_name\", None)\n        max_log_entries = kwargs.pop(\"max_log_entries\", None)\n        table = self.helpers.make_table(*args, **kwargs)\n        lines_logged = 0\n        for line in table.splitlines():\n            if max_log_entries is not None and lines_logged &gt; max_log_entries:\n                break\n            self.info(line)\n            lines_logged += 1\n        if table_name is not None:\n            date = self.helpers.make_date()\n            filename = self.scan.home / f\"{self.helpers.tagify(table_name)}-table-{date}.txt\"\n            with open(filename, \"w\") as f:\n                f.write(table)\n            self.verbose(f\"Wrote {table_name} to {filename}\")\n        return table\n\n    def _is_graph_important(self, event):\n        return self.preserve_graph and getattr(event, \"_graph_important\", False) and not getattr(event, \"_omit\", False)\n\n    @property\n    def preserve_graph(self):\n        preserve_graph = self.config.get(\"preserve_graph\", None)\n        if preserve_graph is None:\n            preserve_graph = self._preserve_graph\n        return preserve_graph\n\n    def debug(self, *args, trace=False, **kwargs):\n        \"\"\"Logs debug messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.debug(\"This is a debug message\")\n            &gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n        \"\"\"\n        self.log.debug(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def verbose(self, *args, trace=False, **kwargs):\n        \"\"\"Logs messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n            &gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n        \"\"\"\n        self.log.verbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeverbose(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n            &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugeverbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def info(self, *args, trace=False, **kwargs):\n        \"\"\"Logs informational messages and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.info(\"This is an informational message\")\n            &gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n        \"\"\"\n        self.log.info(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeinfo(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n            &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugeinfo(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def success(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a success message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.success(\"Operation completed successfully\")\n            &gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n        \"\"\"\n        self.log.success(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugesuccess(self, *args, trace=False, **kwargs):\n        \"\"\"Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n            &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n        \"\"\"\n        self.log.hugesuccess(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def warning(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a warning message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.warning(\"This is a warning message\")\n            &gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n        \"\"\"\n        self.log.warning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugewarning(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n            &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n        \"\"\"\n        self.log.hugewarning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def error(self, *args, trace=True, **kwargs):\n        \"\"\"Logs an error message, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.error(\"This is an error message\")\n            &gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n        \"\"\"\n        self.log.error(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def trace(self, msg=None):\n        \"\"\"Logs the stack trace of the most recently caught exception.\n\n        This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. 
It is typically used for debugging purposes.\n\n        Anything logged using this method will always be written to the scan's `debug.log`, even if debugging is not enabled.\n\n        Examples:\n            &gt;&gt;&gt; try:\n            &gt;&gt;&gt;     1 / 0\n            &gt;&gt;&gt; except ZeroDivisionError:\n            &gt;&gt;&gt;     self.trace()\n        \"\"\"\n        if msg is None:\n            e_type, e_val, e_traceback = exc_info()\n            if e_type is not None:\n                self.log.trace(traceback.format_exc())\n        else:\n            self.log.trace(msg)\n\n    def critical(self, *args, trace=True, **kwargs):\n        \"\"\"Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.\n\n        Args:\n            *args: Variable-length argument list to pass to the logger.\n            trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n            **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n        Examples:\n            &gt;&gt;&gt; self.critical(\"This is a critical message\")\n            &gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n        \"\"\"\n        self.log.critical(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n        if trace:\n            self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.auth_secret","title":"auth_secret  <code>property</code>","text":"<pre><code>auth_secret\n</code></pre> <p>Indicates if the module is properly configured for authentication.</p> <p>This read-only property should be used to check whether all necessary attributes (e.g., API keys, tokens, etc.) are configured to perform authenticated requests in the module. Commonly used in setup or initialization steps.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module is properly configured for authentication, otherwise False.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.config","title":"config  <code>property</code>","text":"<pre><code>config\n</code></pre> <p>Property that provides easy access to the module's configuration in the scan's config.</p> <p>This property serves as a shortcut to retrieve the module-specific configuration from <code>self.scan.config</code>. If no configuration is found for this module, an empty dictionary is returned.</p> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>The configuration dictionary specific to this module.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.finished","title":"finished  <code>property</code>","text":"<pre><code>finished\n</code></pre> <p>Property indicating whether the module has finished processing.</p> <p>This property checks three conditions to determine if the module is finished: 1. The module is not currently running (<code>self.running</code> is False). 2. The number of incoming events in the queue is zero or less (<code>self.num_incoming_events &lt;= 0</code>). 3. 
The number of outgoing events in the queue is zero or less (<code>self.outgoing_event_queue.qsize() &lt;= 0</code>).</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module has finished processing, False otherwise.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.http_timeout","title":"http_timeout  <code>property</code>","text":"<pre><code>http_timeout\n</code></pre> <p>Convenience shortcut to <code>http_timeout</code> in the config</p>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.memory_usage","title":"memory_usage  <code>property</code>","text":"<pre><code>memory_usage\n</code></pre> <p>Property that calculates the current memory usage of the module in bytes.</p> <p>This property uses the <code>get_size</code> function to estimate the memory consumption of the module object. The depth of the object graph traversal is limited to 3 levels to avoid performance issues. Commonly shared objects like <code>self.scan</code>, <code>self.helpers</code>, are excluded from the calculation to prevent double-counting.</p> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The estimated memory usage of the module in bytes.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.priority","title":"priority  <code>property</code>","text":"<pre><code>priority\n</code></pre> <p>Gets the priority level of the module as an integer.</p> <p>The priority level is constrained to be between 1 and 5, inclusive. A lower value indicates a higher priority.</p> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The priority level of the module, constrained between 1 and 5.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.priority\n3\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.running","title":"running  <code>property</code>","text":"<pre><code>running\n</code></pre> <p>Property indicating whether the module is currently processing data.</p> <p>This property checks if the task counter (<code>self._task_counter.value</code>) is greater than zero, indicating that there are ongoing tasks in the module.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the module is currently processing data, False otherwise.</p> </li> </ul>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.status","title":"status  <code>property</code>","text":"<pre><code>status\n</code></pre> <p>Provides the current status of the module as a dictionary.</p> The dictionary contains the following keys <ul> <li>'events': A sub-dictionary with 'incoming' and 'outgoing' keys, representing the number of events in the respective queues.</li> <li>'tasks': The current value of the task counter.</li> <li>'errored': A boolean value indicating if the module is in an error state.</li> <li>'running': A boolean value indicating if the module is currently processing data.</li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary containing the current status of the module.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.status\n{'events': {'incoming': 5, 'outgoing': 2}, 'tasks': 3, 'errored': False, 'running': True}\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.__init__","title":"__init__","text":"<pre><code>__init__(scan)\n</code></pre> <p>Initializes a module instance.</p> <p>Parameters:</p> <ul> <li> <code>scan</code>           \u2013            <p>The 
BBOT scan object associated with this module instance.</p> </li> </ul> <p>Attributes:</p> <ul> <li> <code>scan</code>           \u2013            <p>The scan object associated with this module.</p> </li> <li> <code>errored</code>               (<code>bool</code>)           \u2013            <p>Whether the module has errored out. Default is False.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def __init__(self, scan):\n    \"\"\"Initializes a module instance.\n\n    Args:\n        scan: The BBOT scan object associated with this module instance.\n\n    Attributes:\n        scan: The scan object associated with this module.\n\n        errored (bool): Whether the module has errored out. Default is False.\n    \"\"\"\n    self.scan = scan\n    self.errored = False\n    self._log = None\n    self._incoming_event_queue = None\n    self._outgoing_event_queue = None\n    # track incoming events to prevent unwanted duplicates\n    self._incoming_dup_tracker = set()\n    # tracks which subprocesses are running under this module\n    self._proc_tracker = set()\n    # seconds since we've submitted a batch\n    self._last_submitted_batch = None\n    # additional callbacks to be executed alongside self.cleanup()\n    self.cleanup_callbacks = []\n    self._cleanedup = False\n    self._watched_events = None\n\n    self._task_counter = TaskCounter()\n\n    # string constant\n    self._custom_filter_criteria_msg = \"it did not meet custom filter criteria\"\n\n    self._api_keys = []\n\n    # track number of failures (for .api_request())\n    self._api_request_failures = 0\n\n    self._tasks = []\n    self._event_received = asyncio.Condition()\n    self._event_queued = asyncio.Condition()\n\n    # used for optional \"per host\" tracking\n    self._per_host_tracker = set()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.api_page_iter","title":"api_page_iter  <code>async</code>","text":"<pre><code>api_page_iter(url, page_size=100, json=True, next_key=None, **requests_kwargs)\n</code></pre> <p>An asynchronous generator function for iterating through paginated API data.</p> <p>This function continuously makes requests to a specified API URL, incrementing the page number or applying a custom pagination function, and yields the received data one page at a time. It is well-suited for APIs that provide paginated results.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.</p> </li> <li> <code>page_size</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>The number of items per page. Defaults to 100.</p> </li> <li> <code>json</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, attempts to deserialize the response content to a JSON object. Defaults to True.</p> </li> <li> <code>next_key</code>               (<code>callable</code>, default:                   <code>None</code> )           \u2013            <p>A function that takes the last page's data and returns the URL for the next page. 
Defaults to None.</p> </li> <li> <code>**requests_kwargs</code>           \u2013            <p>Arbitrary keyword arguments that will be forwarded to the HTTP request function.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.</p> </li> </ul> Note <p>The loop will continue indefinitely unless manually stopped. Make sure to break out of the loop once the last page has been received.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n&gt;&gt;&gt; try:\n&gt;&gt;&gt;     async for page in agen:\n&gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n&gt;&gt;&gt;         self.hugesuccess(subdomains)\n&gt;&gt;&gt;         if not subdomains:\n&gt;&gt;&gt;             break\n&gt;&gt;&gt; finally:\n&gt;&gt;&gt;     agen.aclose()\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>async def api_page_iter(self, url, page_size=100, json=True, next_key=None, **requests_kwargs):\n    \"\"\"\n    An asynchronous generator function for iterating through paginated API data.\n\n    This function continuously makes requests to a specified API URL, incrementing the page number\n    or applying a custom pagination function, and yields the received data one page at a time.\n    It is well-suited for APIs that provide paginated results.\n\n    Args:\n        url (str): The initial API URL. Can contain placeholders for 'page', 'page_size', and 'offset'.\n        page_size (int, optional): The number of items per page. Defaults to 100.\n        json (bool, optional): If True, attempts to deserialize the response content to a JSON object. Defaults to True.\n        next_key (callable, optional): A function that takes the last page's data and returns the URL for the next page. Defaults to None.\n        **requests_kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n    Yields:\n        dict or httpx.Response: If 'json' is True, yields a dictionary containing the parsed JSON data. Otherwise, yields the raw HTTP response.\n\n    Note:\n        The loop will continue indefinitely unless manually stopped. 
Make sure to break out of the loop once the last page has been received.\n\n    Examples:\n        &gt;&gt;&gt; agen = api_page_iter('https://api.example.com/data?page={page}&amp;page_size={page_size}')\n        &gt;&gt;&gt; try:\n        &gt;&gt;&gt;     async for page in agen:\n        &gt;&gt;&gt;         subdomains = page[\"subdomains\"]\n        &gt;&gt;&gt;         self.hugesuccess(subdomains)\n        &gt;&gt;&gt;         if not subdomains:\n        &gt;&gt;&gt;             break\n        &gt;&gt;&gt; finally:\n        &gt;&gt;&gt;     agen.aclose()\n    \"\"\"\n    page = 1\n    offset = 0\n    result = None\n    while 1:\n        if result and callable(next_key):\n            try:\n                new_url = next_key(result)\n            except Exception as e:\n                self.debug(f\"Failed to extract next page of results from {url}: {e}\")\n                self.debug(traceback.format_exc())\n        else:\n            new_url = self.helpers.safe_format(url, page=page, page_size=page_size, offset=offset)\n        result = await self.api_request(new_url, **requests_kwargs)\n        if result is None:\n            self.verbose(f\"api_page_iter() got no response for {url}\")\n            break\n        try:\n            if json:\n                result = result.json()\n            yield result\n        except Exception:\n            self.warning(f'Error in api_page_iter() for url: \"{new_url}\"')\n            self.trace(traceback.format_exc())\n            break\n        finally:\n            offset += page_size\n            page += 1\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.api_request","title":"api_request  <code>async</code>","text":"<pre><code>api_request(*args, **kwargs)\n</code></pre> Makes an HTTP request while automatically <ul> <li>avoiding rate limits (sleep/retry)</li> <li>cycling API keys</li> <li>cancelling after too many failed attempts</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def api_request(self, *args, **kwargs):\n    \"\"\"\n    Makes an HTTP request while automatically:\n        - avoiding rate limits (sleep/retry)\n        - cycling API keys\n        - cancelling after too many failed attempts\n    \"\"\"\n    url = args[0] if args else kwargs.pop(\"url\", \"\")\n\n    # loop until we have a successful request\n    for _ in range(self.api_retries):\n        if not \"headers\" in kwargs:\n            kwargs[\"headers\"] = {}\n        new_url, kwargs = self.prepare_api_request(url, kwargs)\n        kwargs[\"url\"] = new_url\n\n        r = await self.helpers.request(**kwargs)\n        success = False if r is None else r.is_success\n\n        if success:\n            self._api_request_failures = 0\n        else:\n            status_code = getattr(r, \"status_code\", 0)\n            response_text = getattr(r, \"text\", \"\")\n            self.trace(f\"API response to {url} failed with status code {status_code}: {response_text}\")\n            self._api_request_failures += 1\n            if self._api_request_failures &gt;= self.api_failure_abort_threshold:\n                self.set_error_state(\n                    f\"Setting error state due to {self._api_request_failures:,} failed HTTP requests\"\n                )\n            else:\n                # sleep for a bit if we're being rate limited\n                if status_code == 429:\n                    self.verbose(\n                        f\"Sleeping for {self._429_sleep_interval:,} seconds due to rate limit (HTTP status: 429)\"\n          
          )\n                    await asyncio.sleep(self._429_sleep_interval)\n                elif self._api_keys:\n                    # if request failed, cycle API keys and try again\n                    self.cycle_api_key()\n                continue\n        break\n\n    return r\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.cleanup","title":"cleanup  <code>async</code>","text":"<pre><code>cleanup()\n</code></pre> <p>Asynchronously performs final cleanup operations after the scan is complete.</p> <p>This method can be overridden to implement custom cleanup logic. It is called only once per scan and may not raise events.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>This method is called only once per scan and may not raise events.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def cleanup(self):\n    \"\"\"Asynchronously performs final cleanup operations after the scan is complete.\n\n    This method can be overridden to implement custom cleanup logic. It is called only once per scan and may not raise events.\n\n    Returns:\n        None\n\n    Note:\n        This method is called only once per scan and may not raise events.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.critical","title":"critical","text":"<pre><code>critical(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.critical(\"This is a critical message\")\n&gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def critical(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a whole message in emboldened red text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.critical(\"This is a critical message\")\n        &gt;&gt;&gt; self.critical(\"This is a critical message with a trace\", trace=False)\n    \"\"\"\n    self.log.critical(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.debug","title":"debug","text":"<pre><code>debug(*args, trace=False, **kwargs)\n</code></pre> <p>Logs debug messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.debug(\"This is a debug message\")\n&gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def debug(self, *args, trace=False, **kwargs):\n    \"\"\"Logs debug messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.debug(\"This is a debug message\")\n        &gt;&gt;&gt; self.debug(\"This is a debug message with a trace\", trace=True)\n    \"\"\"\n    self.log.debug(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.emit_event","title":"emit_event  <code>async</code>","text":"<pre><code>emit_event(*args, **kwargs)\n</code></pre> <p>Emit an event to the event queue and distribute it to interested modules.</p> <p>This is how modules \"return\" data.</p> <p>The method first creates an event object by calling <code>self.make_event()</code> with the provided arguments. Then, the event is queued for outgoing distribution using <code>self.queue_outgoing_event()</code>.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to <code>self.make_event()</code> for event creation.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to be passed for event creation or configuration of the emit action. 
<pre><code>- on_success_callback: Optional callback function to execute upon successful event emission.\n- abort_if: Optional condition under which the event emission should be aborted.\n- quick: Optional flag to indicate whether the event should be processed quickly.\n</code></pre></p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n</code></pre> <pre><code>&gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n&gt;&gt;&gt; await self.emit_event(new_event)\n</code></pre> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If the event cannot be validated (handled in <code>self.make_event()</code>).</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def emit_event(self, *args, **kwargs):\n    \"\"\"Emit an event to the event queue and distribute it to interested modules.\n\n    This is how modules \"return\" data.\n\n    The method first creates an event object by calling `self.make_event()` with the provided arguments.\n    Then, the event is queued for outgoing distribution using `self.queue_outgoing_event()`.\n\n    Args:\n        *args: Positional arguments to be passed to `self.make_event()` for event creation.\n        **kwargs: Keyword arguments to be passed for event creation or configuration of the emit action.\n            ```markdown\n            - on_success_callback: Optional callback function to execute upon successful event emission.\n            - abort_if: Optional condition under which the event emission should be aborted.\n            - quick: Optional flag to indicate whether the event should be processed quickly.\n            ```\n\n    Examples:\n        &gt;&gt;&gt; await self.emit_event(\"www.evilcorp.com\", parent=event, tags=[\"affiliate\"])\n\n        &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n        &gt;&gt;&gt; await self.emit_event(new_event)\n\n    Returns:\n        None\n\n    Raises:\n        ValidationError: If the event cannot be validated (handled in `self.make_event()`).\n    \"\"\"\n    event_kwargs = dict(kwargs)\n    emit_kwargs = {}\n    for o in (\"on_success_callback\", \"abort_if\", \"quick\"):\n        v = event_kwargs.pop(o, None)\n        if v is not None:\n            emit_kwargs[o] = v\n    event = self.make_event(*args, **event_kwargs)\n    if event:\n        await self.queue_outgoing_event(event, **emit_kwargs)\n    return event\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.error","title":"error","text":"<pre><code>error(*args, trace=True, **kwargs)\n</code></pre> <p>Logs an error message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.error(\"This is an error message\")\n&gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def error(self, *args, trace=True, **kwargs):\n    \"\"\"Logs an error message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.error(\"This is an error message\")\n        &gt;&gt;&gt; self.error(\"This is an error message with a trace\", trace=False)\n    \"\"\"\n    self.log.error(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.filter_event","title":"filter_event  <code>async</code>","text":"<pre><code>filter_event(event)\n</code></pre> <p>Asynchronously filters incoming events based on custom criteria.</p> <p>Override this method for more granular control over which events are accepted by your module. This method is called automatically before <code>handle_event()</code> for each incoming event that matches any in <code>watched_events</code>.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The incoming Event object to be filtered.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns <code>(True, None)</code> to indicate acceptance without reason.</p> </li> </ul> Note <p>This method should be overridden if the module requires custom logic for event filtering.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def filter_event(self, event):\n    \"\"\"Asynchronously filters incoming events based on custom criteria.\n\n    Override this method for more granular control over which events are accepted by your module. This method is called automatically before `handle_event()` for each incoming event that matches any in `watched_events`.\n\n    Args:\n        event (Event): The incoming Event object to be filtered.\n\n    Returns:\n        tuple: A 2-tuple where the first value is a bool indicating whether the event should be accepted, and the second value is a string explaining the reason for its acceptance or rejection. By default, returns `(True, None)` to indicate acceptance without reason.\n\n    Note:\n        This method should be overridden if the module requires custom logic for event filtering.\n    \"\"\"\n    return True\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.finish","title":"finish  <code>async</code>","text":"<pre><code>finish()\n</code></pre> <p>Asynchronously performs final tasks as the scan nears completion.</p> <p>This method can be overridden to execute any necessary finalization logic. 
For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def finish(self):\n    \"\"\"Asynchronously performs final tasks as the scan nears completion.\n\n    This method can be overridden to execute any necessary finalization logic. For example, if the module relies on a word cloud, you might wait for the scan to finish to ensure the word cloud is most complete before running an operation.\n\n    Returns:\n        None\n\n    Warnings:\n        This method may be called multiple times since it can raise events, which may re-trigger the \"finish\" phase of the scan. Optional to override.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_domain_hash","title":"get_per_domain_hash","text":"<pre><code>get_per_domain_hash(event)\n</code></pre> <p>Computes a per-domain hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>Events with the same root domain will receive the same hash value.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host, port, or parsed URL information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the domain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n&gt;&gt;&gt; self.get_per_domain_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_domain_hash(self, event):\n    \"\"\"\n    Computes a per-domain hash value for a given event. This method may be optionally overridden in subclasses.\n\n    Events with the same root domain will receive the same hash value.\n\n    Args:\n        event (Event): The event object containing host, port, or parsed URL information.\n\n    Returns:\n        int: The hash value computed for the domain.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://www.example.com:8443\")\n        &gt;&gt;&gt; self.get_per_domain_hash(event)\n    \"\"\"\n    _, domain = self.helpers.split_domain(event.host)\n    return hash(domain)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_host_hash","title":"get_per_host_hash","text":"<pre><code>get_per_host_hash(event)\n</code></pre> <p>Computes a per-host hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>The function uses the event's <code>host</code> to create a string to be hashed.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the host.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n&gt;&gt;&gt; self.get_per_host_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_host_hash(self, event):\n    \"\"\"\n    Computes a per-host hash value for a given event. 
This method may be optionally overridden in subclasses.\n\n    The function uses the event's `host` to create a string to be hashed.\n\n    Args:\n        event (Event): The event object containing host information.\n\n    Returns:\n        int: The hash value computed for the host.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n        &gt;&gt;&gt; self.get_per_host_hash(event)\n    \"\"\"\n    return hash(event.host)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_per_hostport_hash","title":"get_per_hostport_hash","text":"<pre><code>get_per_hostport_hash(event)\n</code></pre> <p>Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.</p> <p>The function uses the event's <code>host</code>, <code>port</code>, and <code>scheme</code> (for URLs) to create a string to be hashed. The hash value is used for distinguishing events related to the same host.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing host, port, or parsed URL information.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value computed for the host.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n&gt;&gt;&gt; self.get_per_hostport_hash(event)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_per_hostport_hash(self, event):\n    \"\"\"\n    Computes a per-host:port hash value for a given event. This method may be optionally overridden in subclasses.\n\n    The function uses the event's `host`, `port`, and `scheme` (for URLs) to create a string to be hashed.\n    The hash value is used for distinguishing events related to the same host.\n\n    Args:\n        event (Event): The event object containing host, port, or parsed URL information.\n\n    Returns:\n        int: The hash value computed for the host.\n\n    Examples:\n        &gt;&gt;&gt; event = self.make_event(\"https://example.com:8443\")\n        &gt;&gt;&gt; self.get_per_hostport_hash(event)\n    \"\"\"\n    parsed = getattr(event, \"parsed_url\", None)\n    if parsed is None:\n        to_hash = self.helpers.make_netloc(event.host, event.port)\n    else:\n        to_hash = f\"{parsed.scheme}://{parsed.netloc}/\"\n    return hash(to_hash)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.get_watched_events","title":"get_watched_events","text":"<pre><code>get_watched_events()\n</code></pre> <p>Retrieve the set of events that the module is interested in observing.</p> <p>Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.</p> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>The set of event types that this module will handle.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def get_watched_events(self):\n    \"\"\"Retrieve the set of events that the module is interested in observing.\n\n    Override this method if the set of events the module should watch needs to be determined dynamically, e.g., based on configuration options or other runtime conditions.\n\n    Returns:\n        set: The set of event types that this module will handle.\n    \"\"\"\n    if self._watched_events is None:\n        self._watched_events 
= set(self.watched_events)\n    return self._watched_events\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.handle_batch","title":"handle_batch  <code>async</code>","text":"<pre><code>handle_batch(*events)\n</code></pre> <p>Handles incoming events in batches for optimized processing.</p> <p>This method is automatically called when multiple events that match any in <code>watched_events</code> are encountered and the <code>batch_size</code> attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.</p> <p>Parameters:</p> <ul> <li> <code>*events</code>               (<code>Event</code>, default:                   <code>()</code> )           \u2013            <p>A variable number of Event objects to be processed in a batch.</p> </li> </ul> Note <p>This method should be overridden if the <code>batch_size</code> attribute of the module is set to a value greater than 1.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def handle_batch(self, *events):\n    \"\"\"Handles incoming events in batches for optimized processing.\n\n    This method is automatically called when multiple events that match any in `watched_events` are encountered and the `batch_size` attribute is set to a value greater than 1. Override this method to implement custom batch event-handling logic for your module.\n\n    Args:\n        *events (Event): A variable number of Event objects to be processed in a batch.\n\n    Note:\n        This method should be overridden if the `batch_size` attribute of the module is set to a value greater than 1.\n\n    Returns:\n        None\n    \"\"\"\n    pass\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.handle_event","title":"handle_event  <code>async</code>","text":"<pre><code>handle_event(event)\n</code></pre> <p>Asynchronously handles incoming events that the module is configured to watch.</p> <p>This method is automatically invoked when an event that matches any in <code>watched_events</code> is encountered during a scan. Override this method to implement custom event-handling logic for your module.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing details about the incoming event.</p> </li> </ul> Note <p>This method should be overridden if the <code>batch_size</code> attribute of the module is set to 1.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def handle_event(self, event):\n    \"\"\"Asynchronously handles incoming events that the module is configured to watch.\n\n    This method is automatically invoked when an event that matches any in `watched_events` is encountered during a scan. 
Override this method to implement custom event-handling logic for your module.\n\n    Args:\n        event (Event): The event object containing details about the incoming event.\n\n    Note:\n        This method should be overridden if the `batch_size` attribute of the module is set to 1.\n\n    Returns:\n        None\n    \"\"\"\n    pass\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugeinfo","title":"hugeinfo","text":"<pre><code>hugeinfo(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n&gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugeinfo(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened blue text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message\")\n        &gt;&gt;&gt; self.hugeinfo(\"This is a huge informational message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugeinfo(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugesuccess","title":"hugesuccess","text":"<pre><code>hugesuccess(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n&gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugesuccess(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened green text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message\")\n        &gt;&gt;&gt; self.hugesuccess(\"This is a huge success message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugesuccess(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugeverbose","title":"hugeverbose","text":"<pre><code>hugeverbose(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n&gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugeverbose(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a whole message in emboldened white text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. 
Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message\")\n        &gt;&gt;&gt; self.hugeverbose(\"This is a huge verbose message with a trace\", trace=True)\n    \"\"\"\n    self.log.hugeverbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.hugewarning","title":"hugewarning","text":"<pre><code>hugewarning(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n&gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def hugewarning(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a whole message in emboldened orange text, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message\")\n        &gt;&gt;&gt; self.hugewarning(\"This is a huge warning message with a trace\", trace=False)\n    \"\"\"\n    self.log.hugewarning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.info","title":"info","text":"<pre><code>info(*args, trace=False, **kwargs)\n</code></pre> <p>Logs informational messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.info(\"This is an informational message\")\n&gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def info(self, *args, trace=False, **kwargs):\n    \"\"\"Logs informational messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.info(\"This is an informational message\")\n        &gt;&gt;&gt; self.info(\"This is an informational message with a trace\", trace=True)\n    \"\"\"\n    self.log.info(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.log_table","title":"log_table","text":"<pre><code>log_table(*args, **kwargs)\n</code></pre> <p>Logs a table to the console and optionally writes it to a file.</p> <p>This function generates a table using <code>self.helpers.make_table</code>, then logs each line of the table as an info-level log. If a table_name is provided, it also writes the table to a file.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable length argument list to be passed to <code>self.helpers.make_table</code>.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments. If 'table_name' is specified, the table will be written to a file.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The generated table as a string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def log_table(self, *args, **kwargs):\n    \"\"\"Logs a table to the console and optionally writes it to a file.\n\n    This function generates a table using `self.helpers.make_table`, then logs each line\n    of the table as an info-level log. If a table_name is provided, it also writes the table to a file.\n\n    Args:\n        *args: Variable length argument list to be passed to `self.helpers.make_table`.\n        **kwargs: Arbitrary keyword arguments. 
If 'table_name' is specified, the table will be written to a file.\n\n    Returns:\n        str: The generated table as a string.\n\n    Examples:\n        &gt;&gt;&gt; self.log_table(['Header1', 'Header2'], [['row1col1', 'row1col2'], ['row2col1', 'row2col2']], table_name=\"my_table\")\n    \"\"\"\n    table_name = kwargs.pop(\"table_name\", None)\n    max_log_entries = kwargs.pop(\"max_log_entries\", None)\n    table = self.helpers.make_table(*args, **kwargs)\n    lines_logged = 0\n    for line in table.splitlines():\n        if max_log_entries is not None and lines_logged &gt; max_log_entries:\n            break\n        self.info(line)\n        lines_logged += 1\n    if table_name is not None:\n        date = self.helpers.make_date()\n        filename = self.scan.home / f\"{self.helpers.tagify(table_name)}-table-{date}.txt\"\n        with open(filename, \"w\") as f:\n            f.write(table)\n        self.verbose(f\"Wrote {table_name} to {filename}\")\n    return table\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.make_event","title":"make_event","text":"<pre><code>make_event(*args, **kwargs)\n</code></pre> <p>Create an event for the scan.</p> <p>Raises a validation error if the event could not be created, unless raise_error is set to False.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to the scan's make_event method.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to be passed to the scan's make_event method.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise a validation error if the event could not be created. Defaults to False.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n&gt;&gt;&gt; await self.emit_event(new_event)\n</code></pre> <p>Returns:</p> <ul> <li>           \u2013            <p>Event or None: The created event, or None if a validation error occurred and raise_error was False.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If the event could not be validated and raise_error is True.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def make_event(self, *args, **kwargs):\n    \"\"\"Create an event for the scan.\n\n    Raises a validation error if the event could not be created, unless raise_error is set to False.\n\n    Args:\n        *args: Positional arguments to be passed to the scan's make_event method.\n        **kwargs: Keyword arguments to be passed to the scan's make_event method.\n        raise_error (bool, optional): Whether to raise a validation error if the event could not be created. 
Defaults to False.\n\n    Examples:\n        &gt;&gt;&gt; new_event = self.make_event(\"1.2.3.4\", parent=event)\n        &gt;&gt;&gt; await self.emit_event(new_event)\n\n    Returns:\n        Event or None: The created event, or None if a validation error occurred and raise_error was False.\n\n    Raises:\n        ValidationError: If the event could not be validated and raise_error is True.\n    \"\"\"\n    raise_error = kwargs.pop(\"raise_error\", False)\n    module = kwargs.pop(\"module\", None)\n    if module is None:\n        if (not args) or getattr(args[0], \"module\", None) is None:\n            kwargs[\"module\"] = self\n    try:\n        event = self.scan.make_event(*args, **kwargs)\n    except ValidationError as e:\n        if raise_error:\n            raise\n        self.warning(f\"{e}\")\n        return\n    return event\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.ping","title":"ping  <code>async</code>","text":"<pre><code>ping(url=None)\n</code></pre> <p>Asynchronously checks the health of the configured API.</p> <p>This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.</p> <p>The method uses the <code>ping_url</code> attribute if defined, or falls back to a provided URL. If neither is available, no request is made.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>A specific URL to use for the ping request. If not provided, the method will use the <code>ping_url</code> attribute.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the API response is not successful (status code != 200).</p> </li> </ul> Example Usage <p>To use this method, simply define the <code>ping_url</code> attribute in your module:</p> <p>class MyModule(BaseModule):     ping_url = \"https://api.example.com/ping\"</p> <p>Alternatively, you can override this method for more complex health checks:</p> <p>async def ping(self):     r = await self.api_request(f\"{self.base_url}/complex-health-check\")     if r.status_code != 200 or r.json().get('status') != 'healthy':         raise ValueError(f\"API unhealthy: {r.text}\")</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def ping(self, url=None):\n    \"\"\"Asynchronously checks the health of the configured API.\n\n    This method is used in conjunction with require_api_key() to verify that the API is not just configured, but also responsive. It makes a test request to a known endpoint to validate the API's health.\n\n    The method uses the `ping_url` attribute if defined, or falls back to a provided URL. If neither is available, no request is made.\n\n    Args:\n        url (str, optional): A specific URL to use for the ping request. 
If not provided, the method will use the `ping_url` attribute.\n\n    Returns:\n        None\n\n    Raises:\n        ValueError: If the API response is not successful (status code != 200).\n\n    Example Usage:\n        To use this method, simply define the `ping_url` attribute in your module:\n\n        class MyModule(BaseModule):\n            ping_url = \"https://api.example.com/ping\"\n\n        Alternatively, you can override this method for more complex health checks:\n\n        async def ping(self):\n            r = await self.api_request(f\"{self.base_url}/complex-health-check\")\n            if r.status_code != 200 or r.json().get('status') != 'healthy':\n                raise ValueError(f\"API unhealthy: {r.text}\")\n    \"\"\"\n    if url is None:\n        url = getattr(self, \"ping_url\", \"\")\n    if url:\n        r = await self.api_request(url)\n        if getattr(r, \"status_code\", 0) != 200:\n            response_text = getattr(r, \"text\", \"no response from server\")\n            raise ValueError(response_text)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.prepare_api_request","title":"prepare_api_request","text":"<pre><code>prepare_api_request(url, kwargs)\n</code></pre> <p>Prepare an API request by adding the necessary authentication - header, bearer token, etc.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>def prepare_api_request(self, url, kwargs):\n    \"\"\"\n    Prepare an API request by adding the necessary authentication - header, bearer token, etc.\n    \"\"\"\n    if self.api_key:\n        url = url.format(api_key=self.api_key)\n        if not \"headers\" in kwargs:\n            kwargs[\"headers\"] = {}\n        kwargs[\"headers\"][\"Authorization\"] = f\"Bearer {self.api_key}\"\n    return url, kwargs\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.queue_event","title":"queue_event  <code>async</code>","text":"<pre><code>queue_event(event)\n</code></pre> <p>Asynchronously queues an incoming event to the module's event queue for further processing.</p> <p>The function performs an initial check to see if the event is acceptable for queuing. 
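<p>As a sketch (hypothetical module, not from the BBOT source), an API-based module might define a <code>ping_url</code> and override <code>prepare_api_request()</code> to send its key in a custom header instead of the default Bearer token:</p> <pre><code>class MyAPIModule(BaseModule):\n    # hypothetical endpoints\n    base_url = \"https://api.example.com\"\n    ping_url = \"https://api.example.com/ping\"\n\n    def prepare_api_request(self, url, kwargs):\n        # assumed header name; adjust for the real API\n        kwargs.setdefault(\"headers\", {})[\"X-API-Key\"] = self.api_key\n        return url, kwargs\n</code></pre>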
If the event passes the check, it is put into the <code>incoming_event_queue</code>.</p> <p>Parameters:</p> <ul> <li> <code>event</code>           \u2013            <p>The event object to be queued.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but modifies the state of the <code>incoming_event_queue</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.queue_event(some_event)\n</code></pre> <p>Raises:</p> <ul> <li> <code>AttributeError</code>             \u2013            <p>If the module is not in an acceptable state to queue incoming events.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def queue_event(self, event):\n    \"\"\"\n    Asynchronously queues an incoming event to the module's event queue for further processing.\n\n    The function performs an initial check to see if the event is acceptable for queuing.\n    If the event passes the check, it is put into the `incoming_event_queue`.\n\n    Args:\n        event: The event object to be queued.\n\n    Returns:\n        None: The function doesn't return anything but modifies the state of the `incoming_event_queue`.\n\n    Examples:\n        &gt;&gt;&gt; await self.queue_event(some_event)\n\n    Raises:\n        AttributeError: If the module is not in an acceptable state to queue incoming events.\n    \"\"\"\n    async with self._task_counter.count(\"queue_event()\", _log=False):\n        if self.incoming_event_queue is False:\n            self.debug(f\"Not in an acceptable state to queue incoming event\")\n            return\n        acceptable, reason = self._event_precheck(event)\n        if not acceptable:\n            if reason and reason != \"its type is not in watched_events\":\n                self.debug(f\"Not queueing {event} because {reason}\")\n            return\n        else:\n            self.debug(f\"Queueing {event} because {reason}\")\n        try:\n            self.incoming_event_queue.put_nowait(event)\n            async with self._event_received:\n                self._event_received.notify()\n            if event.type != \"FINISHED\":\n                self.scan._new_activity = True\n        except AttributeError:\n            self.debug(f\"Not in an acceptable state to queue incoming event\")\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.queue_outgoing_event","title":"queue_outgoing_event  <code>async</code>","text":"<pre><code>queue_outgoing_event(event, **kwargs)\n</code></pre> <p>Queues an outgoing event to the module's outgoing event queue for further processing.</p> <p>The function attempts to put the event into the <code>outgoing_event_queue</code> immediately. 
If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.</p> <p>Parameters:</p> <ul> <li> <code>event</code>           \u2013            <p>The event object to be queued.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to be associated with the event.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but modifies the state of the <code>outgoing_event_queue</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n</code></pre> <p>Raises:</p> <ul> <li> <code>AttributeError</code>             \u2013            <p>If the module is not in an acceptable state to queue outgoing events.</p> </li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def queue_outgoing_event(self, event, **kwargs):\n    \"\"\"\n    Queues an outgoing event to the module's outgoing event queue for further processing.\n\n    The function attempts to put the event into the `outgoing_event_queue` immediately.\n    If it's not possible due to the current state of the module, an AttributeError is raised, and a debug log is generated.\n\n    Args:\n        event: The event object to be queued.\n        **kwargs: Additional keyword arguments to be associated with the event.\n\n    Returns:\n        None: The function doesn't return anything but modifies the state of the `outgoing_event_queue`.\n\n    Examples:\n        &gt;&gt;&gt; self.queue_outgoing_event(some_outgoing_event, abort_if=lambda e: \"unresolved\" in e.tags)\n\n    Raises:\n        AttributeError: If the module is not in an acceptable state to queue outgoing events.\n    \"\"\"\n    try:\n        await self.outgoing_event_queue.put((event, kwargs))\n    except AttributeError:\n        self.debug(f\"Not in an acceptable state to queue outgoing event\")\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.report","title":"report  <code>async</code>","text":"<pre><code>report()\n</code></pre> <p>Asynchronously executes a final task after the scan is complete but before cleanup.</p> <p>This method can be overridden to aggregate data and raise summary events at the end of the scan.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>This method is called only once per scan.</p> Source code in <code>bbot/modules/base.py</code> <pre><code>async def report(self):\n    \"\"\"Asynchronously executes a final task after the scan is complete but before cleanup.\n\n    This method can be overridden to aggregate data and raise summary events at the end of the scan.\n\n    Returns:\n        None\n\n    Note:\n        This method is called only once per scan.\n    \"\"\"\n    return\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.require_api_key","title":"require_api_key  <code>async</code>","text":"<pre><code>require_api_key()\n</code></pre> <p>Asynchronously checks if an API key is required and valid.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>bool or tuple: Returns True if API key is valid and ready.           
Returns a tuple (None, \"error message\") otherwise.</p> </li> </ul> Notes <ul> <li>Fetches the API key from the configuration.</li> <li>Calls the 'ping()' method to test API accessibility.</li> <li>Sets the API key readiness status accordingly.</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>async def require_api_key(self):\n    \"\"\"\n    Asynchronously checks if an API key is required and valid.\n\n    Args:\n        None\n\n    Returns:\n        bool or tuple: Returns True if API key is valid and ready.\n                      Returns a tuple (None, \"error message\") otherwise.\n\n    Notes:\n        - Fetches the API key from the configuration.\n        - Calls the 'ping()' method to test API accessibility.\n        - Sets the API key readiness status accordingly.\n    \"\"\"\n    self.api_key = self.config.get(\"api_key\", \"\")\n    if self.auth_secret:\n        try:\n            await self.ping()\n            self.hugesuccess(f\"API is ready\")\n            return True, \"\"\n        except Exception as e:\n            self.trace(traceback.format_exc())\n            return None, f\"Error with API ({str(e).strip()})\"\n    else:\n        return None, \"No API key set\"\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.set_error_state","title":"set_error_state","text":"<pre><code>set_error_state(message=None, clear_outgoing_queue=False, critical=False)\n</code></pre> <p>Puts the module into an errored state where it cannot accept new events. Optionally logs a warning message.</p> <p>The function sets the module's <code>errored</code> attribute to True and logs a warning with the optional message. It also clears the incoming event queue to prevent further processing and updates its status to False.</p> <p>Parameters:</p> <ul> <li> <code>message</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Additional message to be logged along with the warning.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>None</code>          \u2013            <p>The function doesn't return anything but updates the <code>errored</code> state and clears the incoming event queue.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.set_error_state()\n&gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n</code></pre> Notes <ul> <li>The function sets <code>self._incoming_event_queue</code> to False to prevent its further use.</li> <li>If the module was already in an errored state, the function will not reset the error state or the queue.</li> </ul> Source code in <code>bbot/modules/base.py</code> <pre><code>def set_error_state(self, message=None, clear_outgoing_queue=False, critical=False):\n    \"\"\"\n    Puts the module into an errored state where it cannot accept new events. 
Optionally logs a warning message.\n\n    The function sets the module's `errored` attribute to True and logs a warning with the optional message.\n    It also clears the incoming event queue to prevent further processing and updates its status to False.\n\n    Args:\n        message (str, optional): Additional message to be logged along with the warning.\n\n    Returns:\n        None: The function doesn't return anything but updates the `errored` state and clears the incoming event queue.\n\n    Examples:\n        &gt;&gt;&gt; self.set_error_state()\n        &gt;&gt;&gt; self.set_error_state(\"Failed to connect to the server\")\n\n    Notes:\n        - The function sets `self._incoming_event_queue` to False to prevent its further use.\n        - If the module was already in an errored state, the function will not reset the error state or the queue.\n    \"\"\"\n    if not self.errored:\n        log_msg = \"Setting error state\"\n        if message is not None:\n            log_msg += f\": {message}\"\n        if critical:\n            log_fn = self.error\n        else:\n            log_fn = self.warning\n        log_fn(log_msg)\n        self.errored = True\n        # clear incoming queue\n        if self.incoming_event_queue is not False:\n            self.debug(f\"Emptying event_queue\")\n            with suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    self.incoming_event_queue.get_nowait()\n            # set queue to None to prevent its use\n            # if there are leftover objects in the queue, the scan will hang.\n            self._incoming_event_queue = False\n\n        if clear_outgoing_queue:\n            with suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    self.outgoing_event_queue.get_nowait()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.setup","title":"setup  <code>async</code>","text":"<pre><code>setup()\n</code></pre> <p>Performs one-time setup tasks for the module.</p> <p>This method is responsible for preparing the module for its operation, which may include tasks such as downloading necessary resources, validating configuration parameters, or other preliminary checks.</p> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <ul> <li>bool or None: A status indicating the outcome of the setup process. 
Returns <code>True</code> if the setup was successful, <code>None</code> for a soft-fail where the module setup did not succeed but the scan will continue with the module disabled, and <code>False</code> for a hard-fail where the setup failure causes the scan to abort.</li> <li>str, optional: A reason for the setup failure, provided only when the setup does not succeed (i.e., returns <code>None</code> or <code>False</code>).</li> </ul> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n&gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n&gt;&gt;&gt;         return None, \"No API key specified\"\n</code></pre> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     try:\n&gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n&gt;&gt;&gt;     except WordlistError as e:\n&gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n&gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n</code></pre> <pre><code>&gt;&gt;&gt; async def setup(self):\n&gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n&gt;&gt;&gt;     # Success: Setup completed without issues\n&gt;&gt;&gt;     return True\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>async def setup(self):\n    \"\"\"\n    Performs one-time setup tasks for the module.\n\n    This method is responsible for preparing the module for its operation, which may include tasks\n    such as downloading necessary resources, validating configuration parameters, or other preliminary\n    checks.\n\n    Returns:\n        tuple:\n            - bool or None: A status indicating the outcome of the setup process. 
Returns `True` if\n            the setup was successful, `None` for a soft-fail where the module setup did not succeed\n            but the scan will continue with the module disabled, and `False` for a hard-fail where\n            the setup failure causes the scan to abort.\n            - str, optional: A reason for the setup failure, provided only when the setup does not\n            succeed (i.e., returns `None` or `False`).\n\n    Examples:\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     if not self.config.get(\"api_key\"):\n        &gt;&gt;&gt;         # Soft-fail: Configuration missing an API key\n        &gt;&gt;&gt;         return None, \"No API key specified\"\n\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     try:\n        &gt;&gt;&gt;         wordlist = await self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n        &gt;&gt;&gt;     except WordlistError as e:\n        &gt;&gt;&gt;         # Hard-fail: Error retrieving wordlist\n        &gt;&gt;&gt;         return False, f\"Error retrieving wordlist: {e}\"\n\n        &gt;&gt;&gt; async def setup(self):\n        &gt;&gt;&gt;     self.timeout = self.config.get(\"timeout\", 5)\n        &gt;&gt;&gt;     # Success: Setup completed without issues\n        &gt;&gt;&gt;     return True\n    \"\"\"\n\n    return True\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.success","title":"success","text":"<pre><code>success(*args, trace=False, **kwargs)\n</code></pre> <p>Logs a success message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.success(\"Operation completed successfully\")\n&gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def success(self, *args, trace=False, **kwargs):\n    \"\"\"Logs a success message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.success(\"Operation completed successfully\")\n        &gt;&gt;&gt; self.success(\"Operation completed with a trace\", trace=True)\n    \"\"\"\n    self.log.success(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.trace","title":"trace","text":"<pre><code>trace(msg=None)\n</code></pre> <p>Logs the stack trace of the most recently caught exception.</p> <p>This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. 
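<p>Tying <code>setup()</code> and <code>require_api_key()</code> together, a sketch of how an API-key-gated module could implement its setup (hypothetical, but relying only on the return contracts documented above):</p> <pre><code>async def setup(self):\n    # require_api_key() returns (True, \"\") on success or (None, \"reason\") on failure,\n    # matching the (status, reason) tuple that setup() is documented to return\n    return await self.require_api_key()\n</code></pre>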
It is typically used for debugging purposes.</p> <p>Anything logged using this method will always be written to the scan's <code>debug.log</code>, even if debugging is not enabled.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n&gt;&gt;&gt;     1 / 0\n&gt;&gt;&gt; except ZeroDivisionError:\n&gt;&gt;&gt;     self.trace()\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def trace(self, msg=None):\n    \"\"\"Logs the stack trace of the most recently caught exception.\n\n    This method captures the type, value, and traceback of the most recent exception and logs it using the trace level. It is typically used for debugging purposes.\n\n    Anything logged using this method will always be written to the scan's `debug.log`, even if debugging is not enabled.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        &gt;&gt;&gt;     1 / 0\n        &gt;&gt;&gt; except ZeroDivisionError:\n        &gt;&gt;&gt;     self.trace()\n    \"\"\"\n    if msg is None:\n        e_type, e_val, e_traceback = exc_info()\n        if e_type is not None:\n            self.log.trace(traceback.format_exc())\n    else:\n        self.log.trace(msg)\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.verbose","title":"verbose","text":"<pre><code>verbose(*args, trace=False, **kwargs)\n</code></pre> <p>Logs messages and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n&gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def verbose(self, *args, trace=False, **kwargs):\n    \"\"\"Logs messages and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to False.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.verbose(\"This is a verbose message\")\n        &gt;&gt;&gt; self.verbose(\"This is a verbose message with a trace\", trace=True)\n    \"\"\"\n    self.log.verbose(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/basemodule/#bbot.modules.base.BaseModule.warning","title":"warning","text":"<pre><code>warning(*args, trace=True, **kwargs)\n</code></pre> <p>Logs a warning message, and optionally the stack trace of the most recent exception.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the logger.</p> </li> <li> <code>trace</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to log the stack trace of the most recently caught exception. 
Defaults to True.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the logger.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.warning(\"This is a warning message\")\n&gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n</code></pre> Source code in <code>bbot/modules/base.py</code> <pre><code>def warning(self, *args, trace=True, **kwargs):\n    \"\"\"Logs a warning message, and optionally the stack trace of the most recent exception.\n\n    Args:\n        *args: Variable-length argument list to pass to the logger.\n        trace (bool, optional): Whether to log the stack trace of the most recently caught exception. Defaults to True.\n        **kwargs: Arbitrary keyword arguments to pass to the logger.\n\n    Examples:\n        &gt;&gt;&gt; self.warning(\"This is a warning message\")\n        &gt;&gt;&gt; self.warning(\"This is a warning message with a trace\", trace=False)\n    \"\"\"\n    self.log.warning(*args, extra={\"scan_id\": self.scan.id}, **kwargs)\n    if trace:\n        self.trace()\n</code></pre>"},{"location":"dev/core/","title":"BBOTCore","text":""},{"location":"dev/core/#bbot.core.core.BBOTCore","title":"BBOTCore","text":"<p>This is the first thing that loads when you import BBOT.</p> <p>Unlike a Preset, BBOTCore holds only the config, not scan-specific stuff like targets, flags, modules, etc.</p> <p>Its main jobs are:</p> <ul> <li>set up logging</li> <li>keep separation between the <code>default</code> and <code>custom</code> config (this allows presets to only display the config options that have changed)</li> <li>allow for easy merging of configs</li> <li>load quickly</li> </ul> Source code in <code>bbot/core/core.py</code> <pre><code>class BBOTCore:\n    \"\"\"\n    This is the first thing that loads when you import BBOT.\n\n    Unlike a Preset, BBOTCore holds only the config, not scan-specific stuff like targets, flags, modules, etc.\n\n    Its main jobs are:\n\n    - set up logging\n    - keep separation between the `default` and `custom` config (this allows presets to only display the config options that have changed)\n    - allow for easy merging of configs\n    - load quickly\n    \"\"\"\n\n    # used for filtering out sensitive config values\n    secrets_strings = [\"api_key\", \"username\", \"password\", \"token\", \"secret\", \"_id\"]\n    # don't filter/remove entries under this key\n    secrets_exclude_keys = [\"modules\"]\n\n    def __init__(self):\n        self._logger = None\n        self._files_config = None\n\n        self._config = None\n        self._custom_config = None\n\n        # bare minimum == logging\n        self.logger\n        self.log = logging.getLogger(\"bbot.core\")\n\n        self._prep_multiprocessing()\n\n    def _prep_multiprocessing(self):\n        import multiprocessing\n        from .helpers.process import BBOTProcess\n\n        if SHARED_INTERPRETER_STATE.is_main_process:\n            # if this is the main bbot process, set the logger and queue for the first time\n            from functools import partialmethod\n\n            BBOTProcess.__init__ = partialmethod(\n                BBOTProcess.__init__, log_level=self.logger.log_level, log_queue=self.logger.queue\n            )\n\n        # this makes our process class the default for process pools, etc.\n        mp_context = multiprocessing.get_context(\"spawn\")\n        mp_context.Process = BBOTProcess\n\n    @property\n    def home(self):\n        return 
Path(self.config[\"home\"]).expanduser().resolve()\n\n    @property\n    def cache_dir(self):\n        return self.home / \"cache\"\n\n    @property\n    def tools_dir(self):\n        return self.home / \"tools\"\n\n    @property\n    def temp_dir(self):\n        return self.home / \"temp\"\n\n    @property\n    def lib_dir(self):\n        return self.home / \"lib\"\n\n    @property\n    def scans_dir(self):\n        return self.home / \"scans\"\n\n    @property\n    def config(self):\n        \"\"\"\n        .config is just .default_config + .custom_config merged together\n\n        any new values should be added to custom_config.\n        \"\"\"\n        if self._config is None:\n            self._config = OmegaConf.merge(self.default_config, self.custom_config)\n            # set read-only flag (change .custom_config instead)\n            OmegaConf.set_readonly(self._config, True)\n        return self._config\n\n    @property\n    def default_config(self):\n        \"\"\"\n        The default BBOT config (from `defaults.yml`). Read-only.\n        \"\"\"\n        global DEFAULT_CONFIG\n        if DEFAULT_CONFIG is None:\n            self.default_config = self.files_config.get_default_config()\n            # ensure bbot home dir\n            if not \"home\" in self.default_config:\n                self.default_config[\"home\"] = \"~/.bbot\"\n        return DEFAULT_CONFIG\n\n    @default_config.setter\n    def default_config(self, value):\n        # we temporarily clear out the config so it can be refreshed if/when default_config changes\n        global DEFAULT_CONFIG\n        self._config = None\n        DEFAULT_CONFIG = value\n        # set read-only flag (change .custom_config instead)\n        OmegaConf.set_readonly(DEFAULT_CONFIG, True)\n\n    @property\n    def custom_config(self):\n        \"\"\"\n        Custom BBOT config (from `~/.config/bbot/bbot.yml`)\n        \"\"\"\n        # we temporarily clear out the config so it can be refreshed if/when custom_config changes\n        self._config = None\n        if self._custom_config is None:\n            self.custom_config = self.files_config.get_custom_config()\n        return self._custom_config\n\n    @custom_config.setter\n    def custom_config(self, value):\n        # we temporarily clear out the config so it can be refreshed if/when custom_config changes\n        self._config = None\n        # ensure the modules key is always a dictionary\n        modules_entry = value.get(\"modules\", None)\n        if modules_entry is not None and not OmegaConf.is_dict(modules_entry):\n            value[\"modules\"] = {}\n        self._custom_config = value\n\n    def no_secrets_config(self, config):\n        from .helpers.misc import clean_dict\n\n        with suppress(ValueError):\n            config = OmegaConf.to_object(config)\n\n        return clean_dict(\n            config,\n            *self.secrets_strings,\n            fuzzy=True,\n            exclude_keys=self.secrets_exclude_keys,\n        )\n\n    def secrets_only_config(self, config):\n        from .helpers.misc import filter_dict\n\n        with suppress(ValueError):\n            config = OmegaConf.to_object(config)\n\n        return filter_dict(\n            config,\n            *self.secrets_strings,\n            fuzzy=True,\n            exclude_keys=self.secrets_exclude_keys,\n        )\n\n    def merge_custom(self, config):\n        \"\"\"\n        Merge a config into the custom config.\n        \"\"\"\n        self.custom_config = OmegaConf.merge(self.custom_config, 
OmegaConf.create(config))\n\n    def merge_default(self, config):\n        \"\"\"\n        Merge a config into the default config.\n        \"\"\"\n        self.default_config = OmegaConf.merge(self.default_config, OmegaConf.create(config))\n\n    def copy(self):\n        \"\"\"\n        Return a semi-shallow copy of self. (`custom_config` is copied, but `default_config` stays the same)\n        \"\"\"\n        core_copy = copy(self)\n        core_copy._custom_config = self._custom_config.copy()\n        return core_copy\n\n    @property\n    def files_config(self):\n        \"\"\"\n        Get the configs from `bbot.yml` and `defaults.yml`\n        \"\"\"\n        if self._files_config is None:\n            from .config import files\n\n            self.files = files\n            self._files_config = files.BBOTConfigFiles(self)\n        return self._files_config\n\n    def create_process(self, *args, **kwargs):\n        if os.environ.get(\"BBOT_TESTING\", \"\") == \"True\":\n            process = self.create_thread(*args, **kwargs)\n        else:\n            if SHARED_INTERPRETER_STATE.is_scan_process:\n                from .helpers.process import BBOTProcess\n\n                process = BBOTProcess(*args, **kwargs)\n            else:\n                import multiprocessing\n\n                raise BBOTError(f\"Tried to start server from process {multiprocessing.current_process().name}\")\n        process.daemon = True\n        return process\n\n    def create_thread(self, *args, **kwargs):\n        from .helpers.process import BBOTThread\n\n        return BBOTThread(*args, **kwargs)\n\n    @property\n    def logger(self):\n        self.config\n        if self._logger is None:\n            from .config.logger import BBOTLogger\n\n            self._logger = BBOTLogger(self)\n        return self._logger\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.config","title":"config  <code>property</code>","text":"<pre><code>config\n</code></pre> <p>.config is just .default_config + .custom_config merged together</p> <p>any new values should be added to custom_config.</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.custom_config","title":"custom_config  <code>property</code> <code>writable</code>","text":"<pre><code>custom_config\n</code></pre> <p>Custom BBOT config (from <code>~/.config/bbot/bbot.yml</code>)</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.default_config","title":"default_config  <code>property</code> <code>writable</code>","text":"<pre><code>default_config\n</code></pre> <p>The default BBOT config (from <code>defaults.yml</code>). Read-only.</p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.files_config","title":"files_config  <code>property</code>","text":"<pre><code>files_config\n</code></pre> <p>Get the configs from <code>bbot.yml</code> and <code>defaults.yml</code></p>"},{"location":"dev/core/#bbot.core.core.BBOTCore.copy","title":"copy","text":"<pre><code>copy()\n</code></pre> <p>Return a semi-shallow copy of self. (<code>custom_config</code> is copied, but <code>default_config</code> stays the same)</p> Source code in <code>bbot/core/core.py</code> <pre><code>def copy(self):\n    \"\"\"\n    Return a semi-shallow copy of self. 
(`custom_config` is copied, but `default_config` stays the same)\n    \"\"\"\n    core_copy = copy(self)\n    core_copy._custom_config = self._custom_config.copy()\n    return core_copy\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.merge_custom","title":"merge_custom","text":"<pre><code>merge_custom(config)\n</code></pre> <p>Merge a config into the custom config.</p> Source code in <code>bbot/core/core.py</code> <pre><code>def merge_custom(self, config):\n    \"\"\"\n    Merge a config into the custom config.\n    \"\"\"\n    self.custom_config = OmegaConf.merge(self.custom_config, OmegaConf.create(config))\n</code></pre>"},{"location":"dev/core/#bbot.core.core.BBOTCore.merge_default","title":"merge_default","text":"<pre><code>merge_default(config)\n</code></pre> <p>Merge a config into the default config.</p> Source code in <code>bbot/core/core.py</code> <pre><code>def merge_default(self, config):\n    \"\"\"\n    Merge a config into the default config.\n    \"\"\"\n    self.default_config = OmegaConf.merge(self.default_config, OmegaConf.create(config))\n</code></pre>"},{"location":"dev/dev_environment/","title":"Setting Up a Dev Environment","text":"<p>The following will show you how to set up a fully functioning python environment for devving on BBOT.</p>"},{"location":"dev/dev_environment/#installation-poetry","title":"Installation (Poetry)","text":"<p>Poetry is the recommended method of installation if you want to dev on BBOT. To set up a dev environment with Poetry, you can follow these steps:</p> <ul> <li>Fork BBOT on GitHub</li> <li>Clone your fork and set up a development environment with Poetry:</li> </ul> <pre><code># clone your forked repo and cd into it\ngit clone git@github.com:&lt;username&gt;/bbot.git\ncd bbot\n\n# install poetry\ncurl -sSL https://install.python-poetry.org | python3 -\n\n# install pip dependencies\npoetry install\n# install pre-commit hooks, etc.\npoetry run pre-commit install\n\n# enter virtual environment\npoetry shell\n\nbbot --help\n</code></pre> <ul> <li>Now, any changes you make in the code will be reflected in the <code>bbot</code> command.</li> <li>After making your changes, run the tests locally to ensure they pass.</li> </ul> <pre><code># auto-format code indentation, etc.\nblack .\n\n# run tests\n./bbot/test/run_tests.sh\n</code></pre> <ul> <li>Finally, commit and push your changes, and create a pull request to the <code>dev</code> branch of the main BBOT repo.</li> </ul>"},{"location":"dev/discord_bot/","title":"Discord Bot Example","text":"<p>Below is a simple Discord bot designed to run BBOT scans.</p> examples/discord_bot.py<pre><code>import discord\nfrom discord.ext import commands\n\nfrom bbot.scanner import Scanner\nfrom bbot.modules.output.discord import Discord\n\n\nclass BBOTDiscordBot(commands.Cog):\n    \"\"\"\n    A simple Discord bot capable of running a BBOT scan.\n\n    To set up:\n        1. Go to Discord Developer Portal (https://discord.com/developers)\n        2. Create a new application\n        3. Create an invite link for the bot, visit the link to invite it to your server\n            - Your Application --&gt; OAuth2 --&gt; URL Generator\n                - For Scopes, select \"bot\"\n                - For Bot Permissions, select:\n                    - Read Messages/View Channels\n                    - Send Messages\n        4. Turn on \"Message Content Intent\"\n            - Your Application --&gt; Bot --&gt; Privileged Gateway Intents --&gt; Message Content Intent\n        5. 
Copy your Discord Bot Token and put it at the top of this file\n            - Your Application --&gt; Bot --&gt; Reset Token\n        6. Run this script\n\n    To scan evilcorp.com, you would type:\n\n        /scan evilcorp.com\n\n    Results will be output to the same channel.\n    \"\"\"\n\n    def __init__(self):\n        self.current_scan = None\n\n    @commands.command(name=\"scan\", description=\"Scan a target with BBOT.\")\n    async def scan(self, ctx, target: str):\n        if self.current_scan is not None:\n            self.current_scan.stop()\n        await ctx.send(f\"Starting scan against {target}.\")\n\n        # creates scan instance\n        self.current_scan = Scanner(target, flags=\"subdomain-enum\")\n        discord_module = Discord(self.current_scan)\n\n        seen = set()\n        num_events = 0\n        # start scan and iterate through results\n        async for event in self.current_scan.async_start():\n            if hash(event) in seen:\n                continue\n            seen.add(hash(event))\n            await ctx.send(discord_module.format_message(event))\n            num_events += 1\n\n        await ctx.send(f\"Finished scan against {target}. {num_events:,} results.\")\n        self.current_scan = None\n\n\nif __name__ == \"__main__\":\n    intents = discord.Intents.default()\n    intents.message_content = True\n    bot = commands.Bot(command_prefix=\"/\", intents=intents)\n\n    @bot.event\n    async def on_ready():\n        print(f\"We have logged in as {bot.user}\")\n        await bot.add_cog(BBOTDiscordBot())\n\n    bot.run(\"DISCORD_BOT_TOKEN_HERE\")\n</code></pre>"},{"location":"dev/engine/","title":"Engine","text":""},{"location":"dev/engine/#bbot.core.engine.EngineBase","title":"EngineBase","text":"<p>Base Engine class for Server and Client.</p> <p>An Engine is a simple and lightweight RPC implementation that allows offloading async tasks to a separate process. It leverages ZeroMQ in a ROUTER-DEALER configuration.</p> <p>BBOT makes use of this by spawning a dedicated engine for DNS and HTTP tasks. This offloads I/O and helps free up the main event loop for other tasks.</p> <p>To use Engine, you must subclass both EngineClient and EngineServer.</p> <p>See the respective EngineClient and EngineServer classes for usage examples.</p> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineBase:\n    \"\"\"\n    Base Engine class for Server and Client.\n\n    An Engine is a simple and lightweight RPC implementation that allows offloading async tasks\n    to a separate process. 
It leverages ZeroMQ in a ROUTER-DEALER configuration.\n\n    BBOT makes use of this by spawning a dedicated engine for DNS and HTTP tasks.\n    This offloads I/O and helps free up the main event loop for other tasks.\n\n    To use Engine, you must subclass both EngineClient and EngineServer.\n\n    See the respective EngineClient and EngineServer classes for usage examples.\n    \"\"\"\n\n    ERROR_CLASS = BBOTEngineError\n\n    def __init__(self, debug=False):\n        self._shutdown_status = False\n        self.log = logging.getLogger(f\"bbot.core.{self.__class__.__name__.lower()}\")\n        self._engine_debug = debug\n\n    def pickle(self, obj):\n        try:\n            return pickle.dumps(obj)\n        except Exception as e:\n            self.log.error(f\"Error serializing object: {obj}: {e}\")\n            self.log.trace(traceback.format_exc())\n        return error_sentinel\n\n    def unpickle(self, binary):\n        try:\n            return pickle.loads(binary)\n        except Exception as e:\n            self.log.error(f\"Error deserializing binary: {e}\")\n            self.log.trace(f\"Offending binary: {binary}\")\n            self.log.trace(traceback.format_exc())\n        return error_sentinel\n\n    async def _infinite_retry(self, callback, *args, **kwargs):\n        interval = kwargs.pop(\"_interval\", 300)\n        context = kwargs.pop(\"_context\", \"\")\n        # default overall timeout of 10 minutes (300 second interval * 2 iterations)\n        max_retries = kwargs.pop(\"_max_retries\", 1)\n        if not context:\n            context = f\"{callback.__name__}({args}, {kwargs})\"\n        retries = 0\n        while not self._shutdown_status:\n            try:\n                return await asyncio.wait_for(callback(*args, **kwargs), timeout=interval)\n            except (TimeoutError, asyncio.exceptions.TimeoutError):\n                self.log.debug(f\"{self.name}: Timeout after {interval:,} seconds {context}, retrying...\")\n                retries += 1\n                if max_retries is not None and retries &gt; max_retries:\n                    raise TimeoutError(f\"Timed out after {(max_retries+1)*interval:,} seconds {context}\")\n\n    def engine_debug(self, *args, **kwargs):\n        if self._engine_debug:\n            self.log.trace(*args, **kwargs)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineClient","title":"EngineClient","text":"<p>               Bases: <code>EngineBase</code></p> <p>The client portion of BBOT's RPC Engine.</p> <p>To create an engine, you must create a subclass of this class and also define methods for each of your desired functions.</p> <p>Note that this only supports async functions. 
If you need to offload a synchronous function to another CPU, use BBOT's multiprocessing pool instead.</p> <p>Any CPU or I/O intense logic should be implemented in the EngineServer.</p> <p>These functions are typically stubs whose only job is to forward the arguments to the server.</p> <p>Functions with the same names should be defined on the EngineServer.</p> <p>The EngineClient must specify its associated server class via the <code>SERVER_CLASS</code> variable.</p> <p>Depending on whether your function is a generator, you will use either <code>run_and_return()</code>, or <code>run_and_yield</code>.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from bbot.core.engine import EngineClient\n&gt;&gt;&gt;\n&gt;&gt;&gt; class MyClient(EngineClient):\n&gt;&gt;&gt;     SERVER_CLASS = MyServer\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_function(self, **kwargs)\n&gt;&gt;&gt;         return await self.run_and_return(\"my_function\", **kwargs)\n&gt;&gt;&gt;\n&gt;&gt;&gt;     async def my_generator(self, **kwargs):\n&gt;&gt;&gt;         async for _ in self.run_and_yield(\"my_generator\", **kwargs):\n&gt;&gt;&gt;             yield _\n</code></pre> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineClient(EngineBase):\n    \"\"\"\n    The client portion of BBOT's RPC Engine.\n\n    To create an engine, you must create a subclass of this class and also\n    define methods for each of your desired functions.\n\n    Note that this only supports async functions. If you need to offload a synchronous function to another CPU, use BBOT's multiprocessing pool instead.\n\n    Any CPU or I/O intense logic should be implemented in the EngineServer.\n\n    These functions are typically stubs whose only job is to forward the arguments to the server.\n\n    Functions with the same names should be defined on the EngineServer.\n\n    The EngineClient must specify its associated server class via the `SERVER_CLASS` variable.\n\n    Depending on whether your function is a generator, you will use either `run_and_return()`, or `run_and_yield`.\n\n    Examples:\n        &gt;&gt;&gt; from bbot.core.engine import EngineClient\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; class MyClient(EngineClient):\n        &gt;&gt;&gt;     SERVER_CLASS = MyServer\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_function(self, **kwargs)\n        &gt;&gt;&gt;         return await self.run_and_return(\"my_function\", **kwargs)\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     async def my_generator(self, **kwargs):\n        &gt;&gt;&gt;         async for _ in self.run_and_yield(\"my_generator\", **kwargs):\n        &gt;&gt;&gt;             yield _\n    \"\"\"\n\n    SERVER_CLASS = None\n\n    def __init__(self, debug=False, **kwargs):\n        self.name = f\"EngineClient {self.__class__.__name__}\"\n        super().__init__(debug=debug)\n        self.process = None\n        if self.SERVER_CLASS is None:\n            raise ValueError(f\"Must set EngineClient SERVER_CLASS, {self.SERVER_CLASS}\")\n        self.CMDS = dict(self.SERVER_CLASS.CMDS)\n        for k, v in list(self.CMDS.items()):\n            self.CMDS[v] = k\n        self.socket_address = f\"zmq_{rand_string(8)}.sock\"\n        self.socket_path = Path(tempfile.gettempdir()) / self.socket_address\n        self.server_kwargs = kwargs.pop(\"server_kwargs\", {})\n        self._server_process = None\n        self.context = zmq.asyncio.Context()\n        self.context.setsockopt(zmq.LINGER, 0)\n        self.sockets = set()\n\n    def check_error(self, message):\n   
     if isinstance(message, dict) and len(message) == 1 and \"_e\" in message:\n            self.engine_debug(f\"{self.name}: got error message: {message}\")\n            error, trace = message[\"_e\"]\n            error = self.ERROR_CLASS(error)\n            error.engine_traceback = trace\n            self.engine_debug(f\"{self.name}: raising {error.__class__.__name__}\")\n            raise error\n        return False\n\n    async def run_and_return(self, command, *args, **kwargs):\n        fn_str = f\"{command}({args}, {kwargs})\"\n        self.engine_debug(f\"{self.name}: executing run-and-return {fn_str}\")\n        if self._shutdown_status and not command == \"_shutdown\":\n            self.log.verbose(f\"{self.name} has been shut down and is not accepting new tasks\")\n            return\n        async with self.new_socket() as socket:\n            try:\n                message = self.make_message(command, args=args, kwargs=kwargs)\n                if message is error_sentinel:\n                    return\n                await socket.send(message)\n                binary = await self._infinite_retry(socket.recv, _context=f\"waiting for return value from {fn_str}\")\n            except BaseException:\n                try:\n                    await self.send_cancel_message(socket, fn_str)\n                except Exception:\n                    self.log.debug(f\"{self.name}: {fn_str} failed to send cancel message after exception\")\n                    self.log.trace(traceback.format_exc())\n                raise\n        # self.log.debug(f\"{self.name}.{command}({kwargs}) got binary: {binary}\")\n        message = self.unpickle(binary)\n        self.engine_debug(f\"{self.name}: {fn_str} got return value: {message}\")\n        # error handling\n        if self.check_error(message):\n            return\n        return message\n\n    async def run_and_yield(self, command, *args, **kwargs):\n        fn_str = f\"{command}({args}, {kwargs})\"\n        self.engine_debug(f\"{self.name}: executing run-and-yield {fn_str}\")\n        if self._shutdown_status:\n            self.log.verbose(\"Engine has been shut down and is not accepting new tasks\")\n            return\n        message = self.make_message(command, args=args, kwargs=kwargs)\n        if message is error_sentinel:\n            return\n        async with self.new_socket() as socket:\n            # TODO: synchronize server-side generator by limiting qsize\n            # socket.setsockopt(zmq.RCVHWM, 1)\n            # socket.setsockopt(zmq.SNDHWM, 1)\n            await socket.send(message)\n            while 1:\n                try:\n                    binary = await self._infinite_retry(\n                        socket.recv, _context=f\"waiting for new iteration from {fn_str}\"\n                    )\n                    # self.log.debug(f\"{self.name}.{command}({kwargs}) got binary: {binary}\")\n                    message = self.unpickle(binary)\n                    self.engine_debug(f\"{self.name}: {fn_str} got iteration: {message}\")\n                    # error handling\n                    if self.check_error(message) or self.check_stop(message):\n                        break\n                    yield message\n                except (StopAsyncIteration, GeneratorExit) as e:\n                    exc_name = e.__class__.__name__\n                    self.engine_debug(f\"{self.name}.{command} got {exc_name}\")\n                    try:\n                        await self.send_cancel_message(socket, fn_str)\n                    
except Exception:\n                        self.engine_debug(f\"{self.name}.{command} failed to send cancel message after {exc_name}\")\n                        self.log.trace(traceback.format_exc())\n                    break\n\n    async def send_cancel_message(self, socket, context):\n        \"\"\"\n        Send a cancel message and wait for confirmation from the server\n        \"\"\"\n        # -1 == special \"cancel\" signal\n        message = pickle.dumps({\"c\": -1})\n        await self._infinite_retry(socket.send, message)\n        while 1:\n            response = await self._infinite_retry(\n                socket.recv, _context=f\"waiting for CANCEL_OK from {context}\", _max_retries=4\n            )\n            response = pickle.loads(response)\n            if isinstance(response, dict):\n                response = response.get(\"m\", \"\")\n                if response == \"CANCEL_OK\":\n                    break\n\n    async def send_shutdown_message(self):\n        async with self.new_socket() as socket:\n            # -99 == special shutdown message\n            message = pickle.dumps({\"c\": -99})\n            with suppress(TimeoutError, asyncio.exceptions.TimeoutError):\n                await asyncio.wait_for(socket.send(message), 0.5)\n            with suppress(TimeoutError, asyncio.exceptions.TimeoutError):\n                while 1:\n                    response = await asyncio.wait_for(socket.recv(), 0.5)\n                    response = pickle.loads(response)\n                    if isinstance(response, dict):\n                        response = response.get(\"m\", \"\")\n                        if response == \"SHUTDOWN_OK\":\n                            break\n\n    def check_stop(self, message):\n        if isinstance(message, dict) and len(message) == 1 and \"_s\" in message:\n            return True\n        return False\n\n    def make_message(self, command, args=None, kwargs=None):\n        try:\n            cmd_id = self.CMDS[command]\n        except KeyError:\n            raise KeyError(f'Command \"{command}\" not found. Available commands: {\",\".join(self.available_commands)}')\n        message = {\"c\": cmd_id}\n        if args:\n            message[\"a\"] = args\n        if kwargs:\n            message[\"k\"] = kwargs\n        return pickle.dumps(message)\n\n    @property\n    def available_commands(self):\n        return [s for s in self.CMDS if isinstance(s, str)]\n\n    def start_server(self):\n        process_name = multiprocessing.current_process().name\n        if SHARED_INTERPRETER_STATE.is_scan_process:\n            kwargs = dict(self.server_kwargs)\n            # if we're in tests, we use a single event loop to avoid weird race conditions\n            # this allows us to more easily mock http, etc.\n            if os.environ.get(\"BBOT_TESTING\", \"\") == \"True\":\n                kwargs[\"_loop\"] = get_event_loop()\n            kwargs[\"debug\"] = self._engine_debug\n            self.process = CORE.create_process(\n                target=self.server_process,\n                args=(\n                    self.SERVER_CLASS,\n                    self.socket_path,\n                ),\n                kwargs=kwargs,\n                custom_name=f\"BBOT {self.__class__.__name__}\",\n            )\n            self.process.start()\n            return self.process\n        else:\n            raise BBOTEngineError(\n                f\"Tried to start server from process {process_name}. 
Did you forget \\\"if __name__ == '__main__'?\\\"\"\n            )\n\n    @staticmethod\n    def server_process(server_class, socket_path, **kwargs):\n        try:\n            loop = kwargs.pop(\"_loop\", None)\n            engine_server = server_class(socket_path, **kwargs)\n            if loop is not None:\n                future = asyncio.run_coroutine_threadsafe(engine_server.worker(), loop)\n                future.result()\n            else:\n                asyncio.run(engine_server.worker())\n        except (asyncio.CancelledError, KeyboardInterrupt, CancelledError):\n            return\n        except Exception:\n            import traceback\n\n            log = logging.getLogger(\"bbot.core.engine.server\")\n            log.critical(f\"Unhandled error in {server_class.__name__} server process: {traceback.format_exc()}\")\n\n    @asynccontextmanager\n    async def new_socket(self):\n        if self._server_process is None:\n            self._server_process = self.start_server()\n            while not self.socket_path.exists():\n                self.engine_debug(f\"{self.name}: waiting for server process to start...\")\n                await asyncio.sleep(0.1)\n        socket = self.context.socket(zmq.DEALER)\n        socket.setsockopt(zmq.LINGER, 0)  # Discard pending messages immediately disconnect() or close()\n        socket.setsockopt(zmq.SNDHWM, 0)  # Unlimited send buffer\n        socket.setsockopt(zmq.RCVHWM, 0)  # Unlimited receive buffer\n        socket.connect(f\"ipc://{self.socket_path}\")\n        self.sockets.add(socket)\n        try:\n            yield socket\n        finally:\n            self.sockets.remove(socket)\n            with suppress(Exception):\n                socket.close()\n\n    async def shutdown(self):\n        if not self._shutdown_status:\n            self._shutdown_status = True\n            self.log.verbose(f\"{self.name}: shutting down...\")\n            # send shutdown signal\n            await self.send_shutdown_message()\n            # then terminate context\n            try:\n                self.context.destroy(linger=0)\n            except Exception:\n                print(traceback.format_exc(), file=sys.stderr)\n            try:\n                self.context.term()\n            except Exception:\n                print(traceback.format_exc(), file=sys.stderr)\n            # delete socket file on exit\n            self.socket_path.unlink(missing_ok=True)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineClient.send_cancel_message","title":"send_cancel_message  <code>async</code>","text":"<pre><code>send_cancel_message(socket, context)\n</code></pre> <p>Send a cancel message and wait for confirmation from the server</p> Source code in <code>bbot/core/engine.py</code> <pre><code>async def send_cancel_message(self, socket, context):\n    \"\"\"\n    Send a cancel message and wait for confirmation from the server\n    \"\"\"\n    # -1 == special \"cancel\" signal\n    message = pickle.dumps({\"c\": -1})\n    await self._infinite_retry(socket.send, message)\n    while 1:\n        response = await self._infinite_retry(\n            socket.recv, _context=f\"waiting for CANCEL_OK from {context}\", _max_retries=4\n        )\n        response = pickle.loads(response)\n        if isinstance(response, dict):\n            response = response.get(\"m\", \"\")\n            if response == \"CANCEL_OK\":\n                break\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer","title":"EngineServer","text":"<p>          
     Bases: <code>EngineBase</code></p> <p>The server portion of BBOT's RPC Engine.</p> <p>Methods defined here must match the methods in your EngineClient.</p> <p>To use the functions, you must create mappings for them in the CMDS attribute, as shown below.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from bbot.core.engine import EngineServer\n&gt;&gt;&gt;\n&gt;&gt;&gt; class MyServer(EngineServer):\n&gt;&gt;&gt;     CMDS = {\n&gt;&gt;&gt;         0: \"my_function\",\n&gt;&gt;&gt;         1: \"my_generator\",\n&gt;&gt;&gt;     }\n&gt;&gt;&gt;\n&gt;&gt;&gt;     def my_function(self, arg1=None):\n&gt;&gt;&gt;         await asyncio.sleep(1)\n&gt;&gt;&gt;         return str(arg1)\n&gt;&gt;&gt;\n&gt;&gt;&gt;     def my_generator(self):\n&gt;&gt;&gt;         for i in range(10):\n&gt;&gt;&gt;             await asyncio.sleep(1)\n&gt;&gt;&gt;             yield i\n</code></pre> Source code in <code>bbot/core/engine.py</code> <pre><code>class EngineServer(EngineBase):\n    \"\"\"\n    The server portion of BBOT's RPC Engine.\n\n    Methods defined here must match the methods in your EngineClient.\n\n    To use the functions, you must create mappings for them in the CMDS attribute, as shown below.\n\n    Examples:\n        &gt;&gt;&gt; from bbot.core.engine import EngineServer\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; class MyServer(EngineServer):\n        &gt;&gt;&gt;     CMDS = {\n        &gt;&gt;&gt;         0: \"my_function\",\n        &gt;&gt;&gt;         1: \"my_generator\",\n        &gt;&gt;&gt;     }\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     def my_function(self, arg1=None):\n        &gt;&gt;&gt;         await asyncio.sleep(1)\n        &gt;&gt;&gt;         return str(arg1)\n        &gt;&gt;&gt;\n        &gt;&gt;&gt;     def my_generator(self):\n        &gt;&gt;&gt;         for i in range(10):\n        &gt;&gt;&gt;             await asyncio.sleep(1)\n        &gt;&gt;&gt;             yield i\n    \"\"\"\n\n    CMDS = {}\n\n    def __init__(self, socket_path, debug=False):\n        self.name = f\"EngineServer {self.__class__.__name__}\"\n        super().__init__(debug=debug)\n        self.engine_debug(f\"{self.name}: finished setup 1 (_debug={self._engine_debug})\")\n        self.socket_path = socket_path\n        self.client_id_var = contextvars.ContextVar(\"client_id\", default=None)\n        # task &lt;--&gt; client id mapping\n        self.tasks = {}\n        # child tasks spawned by main tasks\n        self.child_tasks = {}\n        self.engine_debug(f\"{self.name}: finished setup 2 (_debug={self._engine_debug})\")\n        if self.socket_path is not None:\n            # create ZeroMQ context\n            self.context = zmq.asyncio.Context()\n            # ROUTER socket can handle multiple concurrent requests\n            self.socket = self.context.socket(zmq.ROUTER)\n            self.socket.setsockopt(zmq.LINGER, 0)  # Discard pending messages immediately disconnect() or close()\n            self.socket.setsockopt(zmq.SNDHWM, 0)  # Unlimited send buffer\n            self.socket.setsockopt(zmq.RCVHWM, 0)  # Unlimited receive buffer\n            # create socket file\n            self.socket.bind(f\"ipc://{self.socket_path}\")\n        self.engine_debug(f\"{self.name}: finished setup 3 (_debug={self._engine_debug})\")\n\n    @contextlib.contextmanager\n    def client_id_context(self, value):\n        token = self.client_id_var.set(value)\n        try:\n            yield\n        finally:\n            self.client_id_var.reset(token)\n\n    async def run_and_return(self, client_id, 
command_fn, *args, **kwargs):\n        fn_str = f\"{command_fn.__name__}({args}, {kwargs})\"\n        self.engine_debug(fn_str)\n        with self.client_id_context(client_id):\n            try:\n                self.engine_debug(f\"{self.name}: starting run-and-return {fn_str}\")\n                try:\n                    result = await command_fn(*args, **kwargs)\n                except BaseException as e:\n                    if in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        log_fn = self.log.debug\n                    else:\n                        log_fn = self.log.error\n                    error = f\"{self.name}: error in {fn_str}: {e}\"\n                    trace = traceback.format_exc()\n                    log_fn(error)\n                    self.log.trace(trace)\n                    result = {\"_e\": (error, trace)}\n                finally:\n                    self.tasks.pop(client_id, None)\n                    self.engine_debug(f\"{self.name}: sending response to {fn_str}: {result}\")\n                    await self.send_socket_multipart(client_id, result)\n            except BaseException as e:\n                self.log.critical(\n                    f\"Unhandled exception in {self.name}.run_and_return({client_id}, {command_fn}, {args}, {kwargs}): {e}\"\n                )\n                self.log.critical(traceback.format_exc())\n            finally:\n                self.engine_debug(f\"{self.name} finished run-and-return {fn_str}\")\n\n    async def run_and_yield(self, client_id, command_fn, *args, **kwargs):\n        fn_str = f\"{command_fn.__name__}({args}, {kwargs})\"\n        with self.client_id_context(client_id):\n            try:\n                self.engine_debug(f\"{self.name}: starting run-and-yield {fn_str}\")\n                try:\n                    async for _ in command_fn(*args, **kwargs):\n                        self.engine_debug(f\"{self.name}: sending iteration for {fn_str}: {_}\")\n                        await self.send_socket_multipart(client_id, _)\n                except BaseException as e:\n                    if in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        log_fn = self.log.debug\n                    else:\n                        log_fn = self.log.error\n                    error = f\"{self.name}: error in {fn_str}: {e}\"\n                    trace = traceback.format_exc()\n                    log_fn(error)\n                    self.log.trace(trace)\n                    result = {\"_e\": (error, trace)}\n                    await self.send_socket_multipart(client_id, result)\n                finally:\n                    self.engine_debug(f\"{self.name}: reached end of run-and-yield iteration for {fn_str}\")\n                    # _s == special signal that means StopIteration\n                    await self.send_socket_multipart(client_id, {\"_s\": None})\n                    self.tasks.pop(client_id, None)\n            except BaseException as e:\n                self.log.critical(\n                    f\"Unhandled exception in {self.name}.run_and_yield({client_id}, {command_fn}, {args}, {kwargs}): {e}\"\n                )\n                self.log.critical(traceback.format_exc())\n            finally:\n                self.engine_debug(f\"{self.name}: finished run-and-yield {fn_str}\")\n\n    async def send_socket_multipart(self, client_id, message):\n        try:\n            message = pickle.dumps(message)\n            await 
self._infinite_retry(self.socket.send_multipart, [client_id, message])\n        except Exception as e:\n            self.log.verbose(f\"{self.name}: error sending ZMQ message: {e}\")\n            self.log.trace(traceback.format_exc())\n\n    def check_error(self, message):\n        if message is error_sentinel:\n            return True\n\n    async def worker(self):\n        self.engine_debug(f\"{self.name}: starting worker\")\n        try:\n            while 1:\n                client_id, binary = await self.socket.recv_multipart()\n                message = self.unpickle(binary)\n                self.engine_debug(f\"{self.name} got message: {message}\")\n                if self.check_error(message):\n                    continue\n\n                cmd = message.get(\"c\", None)\n                if not isinstance(cmd, int):\n                    self.log.warning(f\"{self.name}: no command sent in message: {message}\")\n                    continue\n\n                # -1 == cancel task\n                if cmd == -1:\n                    self.engine_debug(f\"{self.name} got cancel signal\")\n                    await self.send_socket_multipart(client_id, {\"m\": \"CANCEL_OK\"})\n                    await self.cancel_task(client_id)\n                    continue\n\n                # -99 == shutdown task\n                if cmd == -99:\n                    self.log.verbose(f\"{self.name} got shutdown signal\")\n                    await self.send_socket_multipart(client_id, {\"m\": \"SHUTDOWN_OK\"})\n                    await self._shutdown()\n                    return\n\n                args = message.get(\"a\", ())\n                if not isinstance(args, tuple):\n                    self.log.warning(f\"{self.name}: received invalid args of type {type(args)}, should be tuple\")\n                    continue\n                kwargs = message.get(\"k\", {})\n                if not isinstance(kwargs, dict):\n                    self.log.warning(f\"{self.name}: received invalid kwargs of type {type(kwargs)}, should be dict\")\n                    continue\n\n                command_name = self.CMDS[cmd]\n                command_fn = getattr(self, command_name, None)\n\n                if command_fn is None:\n                    self.log.warning(f'{self.name} has no function named \"{command_fn}\"')\n                    continue\n\n                if inspect.isasyncgenfunction(command_fn):\n                    self.engine_debug(f\"{self.name}: creating run-and-yield coroutine for {command_name}()\")\n                    coroutine = self.run_and_yield(client_id, command_fn, *args, **kwargs)\n                else:\n                    self.engine_debug(f\"{self.name}: creating run-and-return coroutine for {command_name}()\")\n                    coroutine = self.run_and_return(client_id, command_fn, *args, **kwargs)\n\n                self.engine_debug(f\"{self.name}: creating task for {command_name}() coroutine\")\n                task = asyncio.create_task(coroutine)\n                self.tasks[client_id] = task, command_fn, args, kwargs\n                self.engine_debug(f\"{self.name}: finished creating task for {command_name}() coroutine\")\n        except BaseException as e:\n            await self._shutdown()\n            if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                self.log.error(f\"{self.name}: error in EngineServer worker: {e}\")\n                self.log.trace(traceback.format_exc())\n        finally:\n            
self.engine_debug(f\"{self.name}: finished worker()\")\n\n    async def _shutdown(self):\n        if not self._shutdown_status:\n            self.log.verbose(f\"{self.name}: shutting down...\")\n            self._shutdown_status = True\n            await self.cancel_all_tasks()\n            context = getattr(self, \"context\", None)\n            if context is not None:\n                try:\n                    context.destroy(linger=0)\n                except Exception:\n                    self.log.trace(traceback.format_exc())\n                try:\n                    context.term()\n                except Exception:\n                    self.log.trace(traceback.format_exc())\n            self.log.verbose(f\"{self.name}: finished shutting down\")\n\n    async def task_pool(self, fn, args_kwargs, threads=10, timeout=300, global_kwargs=None):\n        if global_kwargs is None:\n            global_kwargs = {}\n\n        tasks = {}\n        args_kwargs = list(args_kwargs)\n\n        def new_task():\n            if args_kwargs:\n                kwargs = {}\n                tracker = None\n                args = args_kwargs.pop(0)\n                if isinstance(args, (list, tuple)):\n                    # you can specify a custom tracker value if you want\n                    # this helps with correlating results\n                    with suppress(ValueError):\n                        args, kwargs, tracker = args\n                    # or you can just specify args/kwargs\n                    with suppress(ValueError):\n                        args, kwargs = args\n\n                if not isinstance(kwargs, dict):\n                    raise ValueError(f\"kwargs must be dict (got: {kwargs})\")\n                if not isinstance(args, (list, tuple)):\n                    args = [args]\n\n                task = self.new_child_task(fn(*args, **kwargs, **global_kwargs))\n                tasks[task] = (args, kwargs, tracker)\n\n        for _ in range(threads):  # Start initial batch of tasks\n            new_task()\n\n        while tasks:  # While there are tasks pending\n            # Wait for the first task to complete\n            finished = await self.finished_tasks(tasks, timeout=timeout)\n            for task in finished:\n                result = task.result()\n                (args, kwargs, tracker) = tasks.pop(task)\n                yield (args, kwargs, tracker), result\n                new_task()\n\n    def new_child_task(self, coro):\n        \"\"\"\n        Create a new asyncio task, making sure to track it based on the client id.\n\n        This allows the task to be automatically cancelled if its parent is cancelled.\n        \"\"\"\n        client_id = self.client_id_var.get()\n        task = asyncio.create_task(coro)\n\n        if client_id:\n\n            def remove_task(t):\n                tasks = self.child_tasks.get(client_id, set())\n                tasks.discard(t)\n                if not tasks:\n                    self.child_tasks.pop(client_id, None)\n\n            task.add_done_callback(remove_task)\n\n            try:\n                self.child_tasks[client_id].add(task)\n            except KeyError:\n                self.child_tasks[client_id] = {task}\n\n        return task\n\n    async def finished_tasks(self, tasks, timeout=None):\n        \"\"\"\n        Given a list of asyncio tasks, return the ones that are finished with an optional timeout\n        \"\"\"\n        if tasks:\n            try:\n                done, pending = await asyncio.wait(tasks, 
return_when=asyncio.FIRST_COMPLETED, timeout=timeout)\n                return done\n            except BaseException as e:\n                if isinstance(e, (TimeoutError, asyncio.exceptions.TimeoutError)):\n                    self.log.warning(f\"{self.name}: Timeout after {timeout:,} seconds in finished_tasks({tasks})\")\n                    for task in list(tasks):\n                        task.cancel()\n                        self._await_cancelled_task(task)\n                else:\n                    if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                        self.log.error(f\"{self.name}: Unhandled exception in finished_tasks({tasks}): {e}\")\n                        self.log.trace(traceback.format_exc())\n                    raise\n        return set()\n\n    async def cancel_task(self, client_id):\n        parent_task = self.tasks.pop(client_id, None)\n        if parent_task is None:\n            return\n        parent_task, _cmd, _args, _kwargs = parent_task\n        self.engine_debug(f\"{self.name}: Cancelling client id {client_id} (task: {parent_task})\")\n        parent_task.cancel()\n        child_tasks = self.child_tasks.pop(client_id, set())\n        if child_tasks:\n            self.engine_debug(f\"{self.name}: Cancelling {len(child_tasks):,} child tasks for client id {client_id}\")\n            for child_task in child_tasks:\n                child_task.cancel()\n\n        for task in [parent_task] + list(child_tasks):\n            await self._await_cancelled_task(task)\n\n    async def _await_cancelled_task(self, task):\n        try:\n            await asyncio.wait_for(task, timeout=10)\n        except (TimeoutError, asyncio.exceptions.TimeoutError):\n            self.log.trace(f\"{self.name}: Timeout cancelling task: {task}\")\n            return\n        except (KeyboardInterrupt, asyncio.CancelledError):\n            return\n        except BaseException as e:\n            self.log.error(f\"Unhandled error in {task.get_coro().__name__}(): {e}\")\n            self.log.trace(traceback.format_exc())\n\n    async def cancel_all_tasks(self):\n        for client_id in list(self.tasks):\n            await self.cancel_task(client_id)\n        for client_id, tasks in self.child_tasks.items():\n            for task in list(tasks):\n                await self._await_cancelled_task(task)\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer.finished_tasks","title":"finished_tasks  <code>async</code>","text":"<pre><code>finished_tasks(tasks, timeout=None)\n</code></pre> <p>Given a list of asyncio tasks, return the ones that are finished with an optional timeout</p> Source code in <code>bbot/core/engine.py</code> <pre><code>async def finished_tasks(self, tasks, timeout=None):\n    \"\"\"\n    Given a list of asyncio tasks, return the ones that are finished with an optional timeout\n    \"\"\"\n    if tasks:\n        try:\n            done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED, timeout=timeout)\n            return done\n        except BaseException as e:\n            if isinstance(e, (TimeoutError, asyncio.exceptions.TimeoutError)):\n                self.log.warning(f\"{self.name}: Timeout after {timeout:,} seconds in finished_tasks({tasks})\")\n                for task in list(tasks):\n                    task.cancel()\n                    self._await_cancelled_task(task)\n            else:\n                if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n           
         self.log.error(f\"{self.name}: Unhandled exception in finished_tasks({tasks}): {e}\")\n                    self.log.trace(traceback.format_exc())\n                raise\n    return set()\n</code></pre>"},{"location":"dev/engine/#bbot.core.engine.EngineServer.new_child_task","title":"new_child_task","text":"<pre><code>new_child_task(coro)\n</code></pre> <p>Create a new asyncio task, making sure to track it based on the client id.</p> <p>This allows the task to be automatically cancelled if its parent is cancelled.</p> Source code in <code>bbot/core/engine.py</code> <pre><code>def new_child_task(self, coro):\n    \"\"\"\n    Create a new asyncio task, making sure to track it based on the client id.\n\n    This allows the task to be automatically cancelled if its parent is cancelled.\n    \"\"\"\n    client_id = self.client_id_var.get()\n    task = asyncio.create_task(coro)\n\n    if client_id:\n\n        def remove_task(t):\n            tasks = self.child_tasks.get(client_id, set())\n            tasks.discard(t)\n            if not tasks:\n                self.child_tasks.pop(client_id, None)\n\n        task.add_done_callback(remove_task)\n\n        try:\n            self.child_tasks[client_id].add(task)\n        except KeyError:\n            self.child_tasks[client_id] = {task}\n\n    return task\n</code></pre>"},{"location":"dev/event/","title":"Event","text":"<p>This is a developer reference. For a high-level description of BBOT events including a full list of event types, see Events</p>"},{"location":"dev/event/#bbot.core.event.base.make_event","title":"make_event","text":"<pre><code>make_event(data, event_type=None, parent=None, context=None, module=None, scan=None, scans=None, tags=None, confidence=100, dummy=False, internal=None)\n</code></pre> <p>Creates and returns a new event object or modifies an existing one.</p> <p>This function serves as a factory for creating new event objects, either by generating a new <code>Event</code> object or by updating an existing event with additional metadata. If <code>data</code> is already an event, it updates the event based on the additional parameters provided.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>Union[str, dict, BaseEvent]</code>)           \u2013            <p>The primary data for the event or an existing event object.</p> </li> <li> <code>event_type</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Type of the event, e.g., 'IP_ADDRESS'. 
Auto-detected if not provided.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>, default:                   <code>None</code> )           \u2013            <p>Parent event leading to this event's discovery.</p> </li> <li> <code>context</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Description of circumstances leading to event's discovery.</p> </li> <li> <code>module</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Module that discovered the event.</p> </li> <li> <code>scan</code>               (<code>Scan</code>, default:                   <code>None</code> )           \u2013            <p>BBOT Scan object associated with the event.</p> </li> <li> <code>scans</code>               (<code>List[Scan]</code>, default:                   <code>None</code> )           \u2013            <p>Multiple BBOT Scan objects, primarily used for unserialization.</p> </li> <li> <code>tags</code>               (<code>Union[str, List[str]]</code>, default:                   <code>None</code> )           \u2013            <p>Descriptive tags for the event, as a list or a single string.</p> </li> <li> <code>confidence</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>Confidence level for the event, on a scale of 1-100. Defaults to 100.</p> </li> <li> <code>dummy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Disables data validations if set to True. Defaults to False.</p> </li> <li> <code>internal</code>               (<code>Any</code>, default:                   <code>None</code> )           \u2013            <p>Makes the event internal if set to True. Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>BaseEvent</code>          \u2013            <p>A new or updated event object.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>Raised when there's an error in event data or type sanitization.</p> </li> </ul> <p>Examples:</p> <p>If inside a module, e.g. 
from within its <code>handle_event()</code>:</p> <pre><code>&gt;&gt;&gt; self.make_event(\"1.2.3.4\", parent=event)\nIP_ADDRESS(\"1.2.3.4\", module=portscan, tags={'ipv4', 'distance-1'})\n</code></pre> <p>If you're outside a module but you have a scan object:</p> <pre><code>&gt;&gt;&gt; scan.make_event(\"1.2.3.4\", parent=scan.root_event)\nIP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4', 'distance-1'})\n</code></pre> <p>If you're outside a scan and just messing around:</p> <pre><code>&gt;&gt;&gt; from bbot.core.event.base import make_event\n&gt;&gt;&gt; make_event(\"1.2.3.4\", dummy=True)\nIP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4'})\n</code></pre> Note <p>When working within a module's <code>handle_event()</code>, use the instance method <code>self.make_event()</code> instead of calling this function directly.</p> Source code in <code>bbot/core/event/base.py</code> <pre><code>def make_event(\n    data,\n    event_type=None,\n    parent=None,\n    context=None,\n    module=None,\n    scan=None,\n    scans=None,\n    tags=None,\n    confidence=100,\n    dummy=False,\n    internal=None,\n):\n    \"\"\"\n    Creates and returns a new event object or modifies an existing one.\n\n    This function serves as a factory for creating new event objects, either by generating a new `Event`\n    object or by updating an existing event with additional metadata. If `data` is already an event,\n    it updates the event based on the additional parameters provided.\n\n    Parameters:\n        data (Union[str, dict, BaseEvent]): The primary data for the event or an existing event object.\n        event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'. Auto-detected if not provided.\n        parent (BaseEvent, optional): Parent event leading to this event's discovery.\n        context (str, optional): Description of circumstances leading to event's discovery.\n        module (str, optional): Module that discovered the event.\n        scan (Scan, optional): BBOT Scan object associated with the event.\n        scans (List[Scan], optional): Multiple BBOT Scan objects, primarily used for unserialization.\n        tags (Union[str, List[str]], optional): Descriptive tags for the event, as a list or a single string.\n        confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n        dummy (bool, optional): Disables data validations if set to True. Defaults to False.\n        internal (Any, optional): Makes the event internal if set to True. Defaults to None.\n\n    Returns:\n        BaseEvent: A new or updated event object.\n\n    Raises:\n        ValidationError: Raised when there's an error in event data or type sanitization.\n\n    Examples:\n        If inside a module, e.g. 
from within its `handle_event()`:\n        &gt;&gt;&gt; self.make_event(\"1.2.3.4\", parent=event)\n        IP_ADDRESS(\"1.2.3.4\", module=portscan, tags={'ipv4', 'distance-1'})\n\n        If you're outside a module but you have a scan object:\n        &gt;&gt;&gt; scan.make_event(\"1.2.3.4\", parent=scan.root_event)\n        IP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4', 'distance-1'})\n\n        If you're outside a scan and just messing around:\n        &gt;&gt;&gt; from bbot.core.event.base import make_event\n        &gt;&gt;&gt; make_event(\"1.2.3.4\", dummy=True)\n        IP_ADDRESS(\"1.2.3.4\", module=None, tags={'ipv4'})\n\n    Note:\n        When working within a module's `handle_event()`, use the instance method\n        `self.make_event()` instead of calling this function directly.\n    \"\"\"\n\n    # allow tags to be either a string or an array\n    if not tags:\n        tags = []\n    elif isinstance(tags, str):\n        tags = [tags]\n    tags = set(tags)\n\n    if is_event(data):\n        event = copy(data)\n        if scan is not None and not event.scan:\n            event.scan = scan\n        if scans is not None and not event.scans:\n            event.scans = scans\n        if module is not None:\n            event.module = module\n        if parent is not None:\n            event.parent = parent\n        if context is not None:\n            event.discovery_context = context\n        if internal == True:\n            event.internal = True\n        if tags:\n            event.tags = tags.union(event.tags)\n        event_type = data.type\n        return event\n    else:\n        if event_type is None:\n            event_type, data = get_event_type(data)\n            if not dummy:\n                log.debug(f'Autodetected event type \"{event_type}\" based on data: \"{data}\"')\n\n        event_type = str(event_type).strip().upper()\n\n        # Catch these common whoopsies\n        if event_type in (\"DNS_NAME\", \"IP_ADDRESS\"):\n            # DNS_NAME &lt;--&gt; EMAIL_ADDRESS confusion\n            if validators.soft_validate(data, \"email\"):\n                event_type = \"EMAIL_ADDRESS\"\n            else:\n                # DNS_NAME &lt;--&gt; IP_ADDRESS confusion\n                try:\n                    data = validators.validate_host(data)\n                except Exception as e:\n                    log.trace(traceback.format_exc())\n                    raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{event_type}\": {e}')\n                data_is_ip = is_ip(data)\n                if event_type == \"DNS_NAME\" and data_is_ip:\n                    event_type = \"IP_ADDRESS\"\n                elif event_type == \"IP_ADDRESS\" and not data_is_ip:\n                    event_type = \"DNS_NAME\"\n        # USERNAME &lt;--&gt; EMAIL_ADDRESS confusion\n        if event_type == \"USERNAME\" and validators.soft_validate(data, \"email\"):\n            event_type = \"EMAIL_ADDRESS\"\n            tags.add(\"affiliate\")\n        # Convert single-host IP_RANGE to IP_ADDRESS\n        if event_type == \"IP_RANGE\":\n            with suppress(Exception):\n                net = ipaddress.ip_network(data, strict=False)\n                if net.prefixlen == net.max_prefixlen:\n                    event_type = \"IP_ADDRESS\"\n                    data = net.network_address\n\n        event_class = globals().get(event_type, DefaultEvent)\n\n        return event_class(\n            data,\n            event_type=event_type,\n            parent=parent,\n          
  context=context,\n            module=module,\n            scan=scan,\n            scans=scans,\n            tags=tags,\n            confidence=confidence,\n            _dummy=dummy,\n            _internal=internal,\n        )\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.event_from_json","title":"event_from_json","text":"<pre><code>event_from_json(j, siem_friendly=False)\n</code></pre> <p>Creates an event object from a JSON dictionary.</p> <p>This function deserializes a JSON dictionary to create a new event object, using the <code>make_event</code> function for the actual object creation. It sets additional attributes such as the timestamp and scope distance based on the input JSON.</p> <p>Parameters:</p> <ul> <li> <code>j</code>               (<code>Dict</code>)           \u2013            <p>JSON dictionary containing the event attributes.       Must include keys \"data\" and \"type\".</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>BaseEvent</code>          \u2013            <p>A new event object initialized with attributes from the JSON dictionary.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>Raised when the JSON dictionary is missing required fields.</p> </li> </ul> Note <p>The function assumes that the input JSON dictionary is valid and may raise exceptions if required keys are missing. Make sure to validate the JSON input beforehand.</p> Source code in <code>bbot/core/event/base.py</code> <pre><code>def event_from_json(j, siem_friendly=False):\n    \"\"\"\n    Creates an event object from a JSON dictionary.\n\n    This function deserializes a JSON dictionary to create a new event object, using the `make_event` function\n    for the actual object creation. It sets additional attributes such as the timestamp and scope distance\n    based on the input JSON.\n\n    Parameters:\n        j (Dict): JSON dictionary containing the event attributes.\n                  Must include keys \"data\" and \"type\".\n\n    Returns:\n        BaseEvent: A new event object initialized with attributes from the JSON dictionary.\n\n    Raises:\n        ValidationError: Raised when the JSON dictionary is missing required fields.\n\n    Note:\n        The function assumes that the input JSON dictionary is valid and may raise exceptions\n        if required keys are missing. 
Make sure to validate the JSON input beforehand.\n    \"\"\"\n    try:\n        event_type = j[\"type\"]\n        kwargs = {\n            \"event_type\": event_type,\n            \"scans\": j.get(\"scans\", []),\n            \"tags\": j.get(\"tags\", []),\n            \"confidence\": j.get(\"confidence\", 100),\n            \"context\": j.get(\"discovery_context\", None),\n            \"dummy\": True,\n        }\n        if siem_friendly:\n            data = j[\"data\"][event_type]\n        else:\n            data = j[\"data\"]\n        kwargs[\"data\"] = data\n        event = make_event(**kwargs)\n        event_uuid = j.get(\"uuid\", None)\n        if event_uuid is not None:\n            event._uuid = uuid.UUID(event_uuid.split(\":\")[-1])\n\n        resolved_hosts = j.get(\"resolved_hosts\", [])\n        event._resolved_hosts = set(resolved_hosts)\n\n        event.timestamp = datetime.datetime.fromisoformat(j[\"timestamp\"])\n        event.scope_distance = j[\"scope_distance\"]\n        parent_id = j.get(\"parent\", None)\n        if parent_id is not None:\n            event._parent_id = parent_id\n        parent_uuid = j.get(\"parent_uuid\", None)\n        if parent_uuid is not None:\n            parent_type, parent_uuid = parent_uuid.split(\":\", 1)\n            event._parent_uuid = parent_type + \":\" + str(uuid.UUID(parent_uuid))\n        return event\n    except KeyError as e:\n        raise ValidationError(f\"Event missing required field: {e}\")\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent","title":"BaseEvent","text":"<p>Represents a piece of data discovered during a BBOT scan.</p> <p>An Event contains various attributes that provide metadata about the discovered data. The attributes assist in understanding the context of the Event and facilitate further filtering and querying. Events are integral in the construction of visual graphs and are the cornerstone of data exchange between BBOT modules.</p> <p>You can inherit from this class when creating a new event type. However, it's not always necessary. You only need to subclass if you want to layer additional functionality on top of the base class.</p> <p>Attributes:</p> <ul> <li> <code>type</code>               (<code>str</code>)           \u2013            <p>Specifies the type of the event, e.g., <code>IP_ADDRESS</code>, <code>DNS_NAME</code>.</p> </li> <li> <code>id</code>               (<code>str</code>)           \u2013            <p>An identifier for the event (event type + sha1 hash of data). NOT universally unique.</p> </li> <li> <code>uuid</code>               (<code>UUID</code>)           \u2013            <p>A universally unique identifier for the event.</p> </li> <li> <code>data</code>               (<code>str or dict</code>)           \u2013            <p>The main data for the event, e.g., a URL or IP address.</p> </li> <li> <code>data_graph</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> for graph nodes (e.g. 
Neo4j).</p> </li> <li> <code>data_human</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> for human output.</p> </li> <li> <code>data_id</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> used to calculate the event's ID (and ultimately its hash, which is used for deduplication)</p> </li> <li> <code>data_json</code>               (<code>str</code>)           \u2013            <p>Representation of <code>self.data</code> to be used in JSON serialization.</p> </li> <li> <code>host</code>               (<code>str, IPvXAddress, or IPvXNetwork</code>)           \u2013            <p>The associated IP address or hostname for the event</p> </li> <li> <code>host_stem</code>               (<code>str</code>)           \u2013            <p>An abbreviated representation of hostname that removes the TLD, e.g. \"www.evilcorp\". Used by the word cloud.</p> </li> <li> <code>port</code>               (<code>int or None</code>)           \u2013            <p>The port associated with the event, if applicable, else None.</p> </li> <li> <code>words</code>               (<code>set</code>)           \u2013            <p>A list of relevant keywords extracted from the event. Used by the word cloud.</p> </li> <li> <code>scope_distance</code>               (<code>int</code>)           \u2013            <p>Indicates how many hops the event is from the main scope; 0 means in-scope.</p> </li> <li> <code>web_spider_distance</code>               (<code>int</code>)           \u2013            <p>The spider distance from the web root, specific to web crawling.</p> </li> <li> <code>scan</code>               (<code>Scanner</code>)           \u2013            <p>The scan object that generated the event.</p> </li> <li> <code>timestamp</code>               (<code>datetime</code>)           \u2013            <p>The time at which the data was discovered.</p> </li> <li> <code>resolved_hosts</code>               (<code>list of str</code>)           \u2013            <p>List of hosts to which the event data resolves, applicable for URLs and DNS names.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>)           \u2013            <p>The parent event that led to the discovery of this event.</p> </li> <li> <code>parent_id</code>               (<code>str</code>)           \u2013            <p>The <code>id</code> attribute of the parent event.</p> </li> <li> <code>parent_uuid</code>               (<code>str</code>)           \u2013            <p>The <code>uuid</code> attribute of the parent event.</p> </li> <li> <code>tags</code>               (<code>set of str</code>)           \u2013            <p>Descriptive tags for the event, e.g., <code>mx-record</code>, <code>in-scope</code>.</p> </li> <li> <code>module</code>               (<code>BaseModule</code>)           \u2013            <p>The module that discovered the event.</p> </li> <li> <code>module_sequence</code>               (<code>str</code>)           \u2013            <p>The sequence of modules that participated in the discovery.</p> </li> </ul> <p>Examples:</p> <pre><code>{\n    \"type\": \"URL\",\n    \"id\": \"URL:017ec8e5dc158c0fd46f07169f8577fb4b45e89a\",\n    \"data\": \"http://www.blacklanternsecurity.com/\",\n    \"web_spider_distance\": 0,\n    \"scope_distance\": 0,\n    \"scan\": \"SCAN:4d786912dbc97be199da13074699c318e2067a7f\",\n    \"timestamp\": 1688526222.723366,\n    \"resolved_hosts\": [\"185.199.108.153\"],\n    
\"parent\": \"OPEN_TCP_PORT:cf7e6a937b161217eaed99f0c566eae045d094c7\",\n    \"tags\": [\"in-scope\", \"distance-0\", \"dir\", \"ip-185-199-108-153\", \"status-301\", \"http-title-301-moved-permanently\"],\n    \"module\": \"httpx\",\n    \"module_sequence\": \"httpx\"\n}\n</code></pre> Source code in <code>bbot/core/event/base.py</code> <pre><code>class BaseEvent:\n    \"\"\"\n    Represents a piece of data discovered during a BBOT scan.\n\n    An Event contains various attributes that provide metadata about the discovered data.\n    The attributes assist in understanding the context of the Event and facilitate further\n    filtering and querying. Events are integral in the construction of visual graphs and\n    are the cornerstone of data exchange between BBOT modules.\n\n    You can inherit from this class when creating a new event type. However, it's not always\n    necessary. You only need to subclass if you want to layer additional functionality on\n    top of the base class.\n\n    Attributes:\n        type (str): Specifies the type of the event, e.g., `IP_ADDRESS`, `DNS_NAME`.\n        id (str): An identifier for the event (event type + sha1 hash of data). NOT universally unique.\n        uuid (UUID): A universally unique identifier for the event.\n        data (str or dict): The main data for the event, e.g., a URL or IP address.\n        data_graph (str): Representation of `self.data` for graph nodes (e.g. Neo4j).\n        data_human (str): Representation of `self.data` for human output.\n        data_id (str): Representation of `self.data` used to calculate the event's ID (and ultimately its hash, which is used for deduplication)\n        data_json (str): Representation of `self.data` to be used in JSON serialization.\n        host (str, IPvXAddress, or IPvXNetwork): The associated IP address or hostname for the event\n        host_stem (str): An abbreviated representation of hostname that removes the TLD, e.g. \"www.evilcorp\". Used by the word cloud.\n        port (int or None): The port associated with the event, if applicable, else None.\n        words (set): A list of relevant keywords extracted from the event. 
Used by the word cloud.\n        scope_distance (int): Indicates how many hops the event is from the main scope; 0 means in-scope.\n        web_spider_distance (int): The spider distance from the web root, specific to web crawling.\n        scan (Scanner): The scan object that generated the event.\n        timestamp (datetime.datetime): The time at which the data was discovered.\n        resolved_hosts (list of str): List of hosts to which the event data resolves, applicable for URLs and DNS names.\n        parent (BaseEvent): The parent event that led to the discovery of this event.\n        parent_id (str): The `id` attribute of the parent event.\n        parent_uuid (str): The `uuid` attribute of the parent event.\n        tags (set of str): Descriptive tags for the event, e.g., `mx-record`, `in-scope`.\n        module (BaseModule): The module that discovered the event.\n        module_sequence (str): The sequence of modules that participated in the discovery.\n\n    Examples:\n        ```json\n        {\n            \"type\": \"URL\",\n            \"id\": \"URL:017ec8e5dc158c0fd46f07169f8577fb4b45e89a\",\n            \"data\": \"http://www.blacklanternsecurity.com/\",\n            \"web_spider_distance\": 0,\n            \"scope_distance\": 0,\n            \"scan\": \"SCAN:4d786912dbc97be199da13074699c318e2067a7f\",\n            \"timestamp\": 1688526222.723366,\n            \"resolved_hosts\": [\"185.199.108.153\"],\n            \"parent\": \"OPEN_TCP_PORT:cf7e6a937b161217eaed99f0c566eae045d094c7\",\n            \"tags\": [\"in-scope\", \"distance-0\", \"dir\", \"ip-185-199-108-153\", \"status-301\", \"http-title-301-moved-permanently\"],\n            \"module\": \"httpx\",\n            \"module_sequence\": \"httpx\"\n        }\n        ```\n    \"\"\"\n\n    # Always emit this event type even if it's not in scope\n    _always_emit = False\n    # Always emit events with these tags even if they're not in scope\n    _always_emit_tags = [\"affiliate\", \"target\"]\n    # Bypass scope checking and dns resolution, distribute immediately to modules\n    # This is useful for \"end-of-line\" events like FINDING and VULNERABILITY\n    _quick_emit = False\n    # Whether this event has been retroactively marked as part of an important discovery chain\n    _graph_important = False\n    # Disables certain data validations\n    _dummy = False\n    # Data validation, if data is a dictionary\n    _data_validator = None\n    # Whether to increment scope distance if the child and parent hosts are the same\n    _scope_distance_increment_same_host = False\n    # Don't allow duplicates to occur within a parent chain\n    # In other words, don't emit the event if the same one already exists in its discovery context\n    _suppress_chain_dupes = False\n\n    def __init__(\n        self,\n        data,\n        event_type,\n        parent=None,\n        context=None,\n        module=None,\n        scan=None,\n        scans=None,\n        tags=None,\n        confidence=100,\n        timestamp=None,\n        _dummy=False,\n        _internal=None,\n    ):\n        \"\"\"\n        Initializes an Event object with the given parameters.\n\n        In most cases, you should use `make_event()` instead of instantiating this class directly.\n        `make_event()` is much friendlier, and can auto-detect the event type for you.\n\n        Attributes:\n            data (str, dict): The primary data for the event.\n            event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'.\n            parent 
(BaseEvent, optional): Parent event that led to this event's discovery. Defaults to None.\n            module (str, optional): Module that discovered the event. Defaults to None.\n            scan (Scan, optional): BBOT Scan object. Required unless _dummy is True. Defaults to None.\n            scans (list of Scan, optional): BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.\n            tags (list of str, optional): Descriptive tags for the event. Defaults to None.\n            confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n            timestamp (datetime, optional): Time of event discovery. Defaults to current UTC time.\n            _dummy (bool, optional): If True, disables certain data validations. Defaults to False.\n            _internal (Any, optional): If specified, makes the event internal. Defaults to None.\n\n        Raises:\n            ValidationError: If either `scan` or `parent` are not specified and `_dummy` is False.\n        \"\"\"\n        self._uuid = uuid.uuid4()\n        self._id = None\n        self._hash = None\n        self._data = None\n        self.__host = None\n        self._tags = set()\n        self._port = None\n        self._omit = False\n        self.__words = None\n        self._parent = None\n        self._priority = None\n        self._parent_id = None\n        self._parent_uuid = None\n        self._host_original = None\n        self._scope_distance = None\n        self._module_priority = None\n        self._resolved_hosts = set()\n        self.dns_children = dict()\n        self.raw_dns_records = dict()\n        self._discovery_context = \"\"\n        self._discovery_context_regex = re.compile(r\"\\{(?:event|module)[^}]*\\}\")\n        self.web_spider_distance = 0\n\n        # for creating one-off events without enforcing parent requirement\n        self._dummy = _dummy\n        self.module = module\n        self._type = event_type\n\n        # keep track of whether this event has been recorded by the scan\n        self._stats_recorded = False\n\n        if timestamp is not None:\n            self.timestamp = timestamp\n        else:\n            try:\n                self.timestamp = datetime.datetime.now(datetime.UTC)\n            except AttributeError:\n                self.timestamp = datetime.datetime.utcnow()\n\n        self.confidence = int(confidence)\n        self._internal = False\n\n        # self.scan holds the instantiated scan object (for helpers, etc.)\n        self.scan = scan\n        if (not self.scan) and (not self._dummy):\n            raise ValidationError(f\"Must specify scan\")\n        # self.scans holds a list of scan IDs from scans that encountered this event\n        self.scans = []\n        if scans is not None:\n            self.scans = scans\n        if self.scan:\n            self.scans = list(set([self.scan.id] + self.scans))\n\n        try:\n            self.data = self._sanitize_data(data)\n        except Exception as e:\n            log.trace(traceback.format_exc())\n            raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{self.type}\": {e}')\n\n        if not self.data:\n            raise ValidationError(f'Invalid event data \"{data}\" for type \"{self.type}\"')\n\n        self.parent = parent\n        if (not self.parent) and (not self._dummy):\n            raise ValidationError(f\"Must specify event parent\")\n\n        if tags is not None:\n            for tag in tags:\n               
 self.add_tag(tag)\n\n        # internal events are not ingested by output modules\n        if not self._dummy:\n            # removed this second part because it was making certain sslcert events internal\n            if _internal:  # or parent._internal:\n                self.internal = True\n\n        if not context:\n            context = getattr(self.module, \"default_discovery_context\", \"\")\n        if context:\n            self.discovery_context = context\n\n    @property\n    def data(self):\n        return self._data\n\n    @property\n    def confidence(self):\n        return self._confidence\n\n    @confidence.setter\n    def confidence(self, confidence):\n        self._confidence = min(100, max(1, int(confidence)))\n\n    @property\n    def cumulative_confidence(self):\n        \"\"\"\n        Considers the confidence of parent events. This is useful for filtering out speculative/unreliable events.\n\n        E.g. an event with a confidence of 50 whose parent is also 50 would have a cumulative confidence of 25.\n\n        A confidence of 100 will reset the cumulative confidence to 100.\n        \"\"\"\n        if self._confidence == 100 or self.parent is None or self.parent is self:\n            return self._confidence\n        return int(self._confidence * self.parent.cumulative_confidence / 100)\n\n    @property\n    def resolved_hosts(self):\n        if is_ip(self.host):\n            return {\n                self.host,\n            }\n        return self._resolved_hosts\n\n    @data.setter\n    def data(self, data):\n        self._hash = None\n        self._data_hash = None\n        self._id = None\n        self.__host = None\n        self._port = None\n        self._data = data\n\n    @property\n    def internal(self):\n        return self._internal\n\n    @internal.setter\n    def internal(self, value):\n        \"\"\"\n        Marks the event as internal, excluding it from output but allowing normal exchange between scan modules.\n\n        Internal events are typically speculative and may not be interesting by themselves but can lead to\n        the discovery of interesting events. This method sets the `_internal` attribute to True and adds the\n        \"internal\" tag.\n\n        Examples of internal events include `OPEN_TCP_PORT`s from the `speculate` module,\n        `IP_ADDRESS`es from the `ipneighbor` module, or out-of-scope `DNS_NAME`s that originate\n        from DNS resolutions.\n\n        The purpose of internal events is to enable speculative/explorative discovery without cluttering\n        the console with irrelevant or uninteresting events.\n        \"\"\"\n        if not value in (True, False):\n            raise ValueError(f'\"internal\" must be boolean, not {type(value)}')\n        if value == True:\n            self.add_tag(\"internal\")\n        else:\n            self.remove_tag(\"internal\")\n        self._internal = value\n\n    @property\n    def host(self):\n        \"\"\"\n        An abbreviated representation of the data that allows comparison with other events.\n        For host types, this is a hostname.\n        This allows comparison of an email or a URL with a domain, and vice versa\n            bob@evilcorp.com        --&gt; evilcorp.com\n            https://evilcorp.com    --&gt; evilcorp.com\n            evilcorp.com:80         --&gt; evilcorp.com\n\n        For IP_* types, this is an instantiated object representing the event's data\n        E.g. 
for IP_ADDRESS, it could be an ipaddress.IPv4Address() or IPv6Address() object\n        \"\"\"\n        if self.__host is None:\n            self.host = self._host()\n        return self.__host\n\n    @host.setter\n    def host(self, host):\n        if self._host_original is None:\n            self._host_original = host\n        self.__host = host\n\n    @property\n    def host_original(self):\n        \"\"\"\n        Original host data, in case it was changed due to a wildcard DNS, etc.\n        \"\"\"\n        if self._host_original is None:\n            return self.host\n        return self._host_original\n\n    @property\n    def host_filterable(self):\n        \"\"\"\n        A string version of the event that's used for regex-based blacklisting.\n\n        For example, the user can specify \"REGEX:.*.evilcorp.com\" in their blacklist, and this regex\n        will be applied against this property.\n        \"\"\"\n        parsed_url = getattr(self, \"parsed_url\", None)\n        if parsed_url is not None:\n            return parsed_url.geturl()\n        if self.host is not None:\n            return str(self.host)\n        return \"\"\n\n    @property\n    def port(self):\n        self.host\n        if getattr(self, \"parsed_url\", None):\n            if self.parsed_url.port is not None:\n                return self.parsed_url.port\n            elif self.parsed_url.scheme == \"https\":\n                return 443\n            elif self.parsed_url.scheme == \"http\":\n                return 80\n        return self._port\n\n    @property\n    def netloc(self):\n        if self.host and is_ip_type(self.host, network=False):\n            return make_netloc(self.host, self.port)\n        return None\n\n    @property\n    def host_stem(self):\n        \"\"\"\n        An abbreviated representation of hostname that removes the TLD\n            E.g. 
www.evilcorp.com --&gt; www.evilcorp\n        \"\"\"\n        if self.host and type(self.host) == str:\n            return domain_stem(self.host)\n        else:\n            return f\"{self.host}\"\n\n    @property\n    def discovery_context(self):\n        return self._discovery_context\n\n    @discovery_context.setter\n    def discovery_context(self, context):\n        def replace(match):\n            s = match.group()\n            return s.format(module=self.module, event=self)\n\n        try:\n            self._discovery_context = self._discovery_context_regex.sub(replace, context)\n        except Exception as e:\n            log.trace(f\"Error formatting discovery context for {self}: {e} (context: '{context}')\")\n            self._discovery_context = context\n\n    @property\n    def discovery_path(self):\n        \"\"\"\n        This event's full discovery context, including those of all its parents\n        \"\"\"\n        discovery_path = []\n        if self.parent is not None and self.parent is not self:\n            discovery_path = self.parent.discovery_path\n        return discovery_path + [self.discovery_context]\n\n    @property\n    def parent_chain(self):\n        \"\"\"\n        This event's full discovery context, including those of all its parents\n        \"\"\"\n        parent_chain = []\n        if self.parent is not None and self.parent is not self:\n            parent_chain = self.parent.parent_chain\n        return parent_chain + [str(self.uuid)]\n\n    @property\n    def words(self):\n        if self.__words is None:\n            self.__words = set(self._words())\n        return self.__words\n\n    def _words(self):\n        return set()\n\n    @property\n    def tags(self):\n        return self._tags\n\n    @tags.setter\n    def tags(self, tags):\n        self._tags = set()\n        if isinstance(tags, str):\n            tags = (tags,)\n        for tag in tags:\n            self.add_tag(tag)\n\n    def add_tag(self, tag):\n        self._tags.add(tagify(tag))\n\n    def add_tags(self, tags):\n        for tag in set(tags):\n            self.add_tag(tag)\n\n    def remove_tag(self, tag):\n        with suppress(KeyError):\n            self._tags.remove(tagify(tag))\n\n    @property\n    def always_emit(self):\n        \"\"\"\n        If this returns True, the event will always be distributed to output modules regardless of scope distance\n        \"\"\"\n        always_emit_tags = any(t in self.tags for t in self._always_emit_tags)\n        no_host_information = not bool(self.host)\n        return self._always_emit or always_emit_tags or no_host_information\n\n    @property\n    def id(self):\n        \"\"\"\n        A uniquely identifiable hash of the event from the event type + a SHA1 of its data\n        \"\"\"\n        if self._id is None:\n            self._id = f\"{self.type}:{self.data_hash.hex()}\"\n        return self._id\n\n    @property\n    def uuid(self):\n        \"\"\"\n        A universally unique identifier for the event\n        \"\"\"\n        return f\"{self.type}:{self._uuid}\"\n\n    @property\n    def data_hash(self):\n        \"\"\"\n        A raw byte hash of the event's data\n        \"\"\"\n        if self._data_hash is None:\n            self._data_hash = sha1(self.data_id).digest()\n        return self._data_hash\n\n    @property\n    def scope_distance(self):\n        return self._scope_distance\n\n    @scope_distance.setter\n    def scope_distance(self, scope_distance):\n        \"\"\"\n        Setter for the scope_distance attribute, 
ensuring it only decreases.\n\n        The scope_distance attribute is designed to never increase; it can only be set to smaller values than\n        the current one. If a larger value is provided, it is ignored. The setter also updates the event's\n        tags to reflect the new scope distance.\n\n        Parameters:\n            scope_distance (int): The new scope distance to set, must be a non-negative integer.\n\n        Note:\n            The method will automatically update the relevant 'distance-' tags associated with the event.\n        \"\"\"\n        if scope_distance &lt; 0:\n            raise ValueError(f\"Invalid scope distance: {scope_distance}\")\n        # ensure scope distance does not increase (only allow setting to smaller values)\n        if self.scope_distance is None:\n            new_scope_distance = scope_distance\n        else:\n            new_scope_distance = min(self.scope_distance, scope_distance)\n        if self._scope_distance != new_scope_distance:\n            # remove old scope distance tags\n            for t in list(self.tags):\n                if t.startswith(\"distance-\"):\n                    self.remove_tag(t)\n            if self.host:\n                if scope_distance == 0:\n                    self.add_tag(\"in-scope\")\n                    self.remove_tag(\"affiliate\")\n                else:\n                    self.remove_tag(\"in-scope\")\n                    self.add_tag(f\"distance-{new_scope_distance}\")\n            self._scope_distance = new_scope_distance\n            # apply recursively to parent events\n            parent_scope_distance = getattr(self.parent, \"scope_distance\", None)\n            if parent_scope_distance is not None and self.parent is not self:\n                self.parent.scope_distance = new_scope_distance + 1\n\n    @property\n    def scope_description(self):\n        \"\"\"\n        Returns a single word describing the scope of the event.\n\n        \"in-scope\" if the event is in scope, \"affiliate\" if it's an affiliate, otherwise \"distance-{scope_distance}\"\n        \"\"\"\n        if self.scope_distance == 0:\n            return \"in-scope\"\n        elif \"affiliate\" in self.tags:\n            return \"affiliate\"\n        return f\"distance-{self.scope_distance}\"\n\n    @property\n    def parent(self):\n        return self._parent\n\n    @parent.setter\n    def parent(self, parent):\n        \"\"\"\n        Setter for the parent attribute, ensuring it's a valid event and updating scope distance.\n\n        Sets the parent of the event and automatically adjusts the scope distance based on the parent event's\n        scope distance. The scope distance is incremented by 1 if the host of the parent event is different\n        from the current event's host.\n\n        Parameters:\n            parent (BaseEvent): The new parent event to set. 
Must be a valid event object.\n\n        Note:\n            If an invalid parent is provided and the event is not a dummy, a warning will be logged.\n        \"\"\"\n        if is_event(parent):\n            self._parent = parent\n            hosts_are_same = (self.host and parent.host) and (self.host == parent.host)\n            new_scope_distance = int(parent.scope_distance)\n            if self.host and parent.scope_distance is not None:\n                # only increment the scope distance if the host changes\n                if self._scope_distance_increment_same_host or not hosts_are_same:\n                    new_scope_distance += 1\n            self.scope_distance = new_scope_distance\n            # inherit certain tags\n            if hosts_are_same:\n                # inherit web spider distance from parent\n                self.web_spider_distance = getattr(parent, \"web_spider_distance\", 0)\n                event_has_url = getattr(self, \"parsed_url\", None) is not None\n                for t in parent.tags:\n                    if t in (\"affiliate\",):\n                        self.add_tag(t)\n                    elif t.startswith(\"mutation-\"):\n                        self.add_tag(t)\n                    # only add these tags if the event has a URL\n                    if event_has_url:\n                        if t in (\"spider-danger\", \"spider-max\"):\n                            self.add_tag(t)\n        elif not self._dummy:\n            log.warning(f\"Tried to set invalid parent on {self}: (got: {parent})\")\n\n    @property\n    def parent_id(self):\n        parent_id = getattr(self.get_parent(), \"id\", None)\n        if parent_id is not None:\n            return parent_id\n        return self._parent_id\n\n    @property\n    def parent_uuid(self):\n        parent_uuid = getattr(self.get_parent(), \"uuid\", None)\n        if parent_uuid is not None:\n            return parent_uuid\n        return self._parent_uuid\n\n    @property\n    def validators(self):\n        \"\"\"\n        Depending on whether the scan attribute is accessible, return either a config-aware or non-config-aware validator\n\n        This exists to prevent a chicken-and-egg scenario during the creation of certain events such as URLs,\n        whose sanitization behavior is different depending on the config.\n\n        However, thanks to this property, validation can still work in the absence of a config.\n        \"\"\"\n        if self.scan is not None:\n            return self.scan.helpers.config_aware_validators\n        return validators\n\n    def get_parent(self):\n        \"\"\"\n        Takes into account events with the _omit flag\n        \"\"\"\n        if getattr(self.parent, \"_omit\", False):\n            return self.parent.get_parent()\n        return self.parent\n\n    def get_parents(self, omit=False, include_self=False):\n        parents = []\n        e = self\n        if include_self:\n            parents.append(self)\n        while 1:\n            if omit:\n                parent = e.get_parent()\n            else:\n                parent = e.parent\n            if parent is None:\n                break\n            if e == parent:\n                break\n            parents.append(parent)\n            e = parent\n        return parents\n\n    def _host(self):\n        return \"\"\n\n    def _sanitize_data(self, data):\n        \"\"\"\n        Validates and sanitizes the event's data during instantiation.\n\n        By default, uses the '_data_load' method to pre-process the 
data and then applies the '_data_validator'\n        to validate and create a sanitized dictionary. Raises a ValidationError if any of the validations fail.\n        Subclasses can override this method to provide custom validation logic.\n\n        Returns:\n            Any: The sanitized data.\n\n        Raises:\n            ValidationError: If the data fails to validate.\n        \"\"\"\n        data = self._data_load(data)\n        if self._data_validator is not None:\n            if not isinstance(data, dict):\n                raise ValidationError(f\"data is not of type dict: {data}\")\n            data = self._data_validator(**data).model_dump(exclude_none=True)\n        return self.sanitize_data(data)\n\n    def sanitize_data(self, data):\n        return data\n\n    @property\n    def data_human(self):\n        \"\"\"\n        Human representation of event.data\n        \"\"\"\n        return self._data_human()\n\n    def _data_human(self):\n        if isinstance(self.data, (dict, list)):\n            with suppress(Exception):\n                return json.dumps(self.data, sort_keys=True)\n        return smart_decode(self.data)\n\n    def _data_load(self, data):\n        \"\"\"\n        How to load the event data (JSON-decode it, etc.)\n        \"\"\"\n        return data\n\n    @property\n    def data_id(self):\n        \"\"\"\n        Representation of the event.data used to calculate the event's ID\n        \"\"\"\n        return self._data_id()\n\n    def _data_id(self):\n        return self.data\n\n    @property\n    def pretty_string(self):\n        \"\"\"\n        A human-friendly representation of the event's data. Used for graph representation.\n\n        If the event's data is a dictionary, the function will try to return a JSON-formatted string.\n        Otherwise, it will use smart_decode to convert the data into a string representation.\n\n        Override if necessary.\n\n        Returns:\n            str: The graphical representation of the event's data.\n        \"\"\"\n        return self._pretty_string()\n\n    def _pretty_string(self):\n        return self._data_human()\n\n    @property\n    def data_graph(self):\n        \"\"\"\n        Representation of event.data for neo4j graph nodes\n        \"\"\"\n        return self.pretty_string\n\n    @property\n    def data_json(self):\n        \"\"\"\n        JSON representation of event.data\n        \"\"\"\n        return self.data\n\n    def __contains__(self, other):\n        \"\"\"\n        Allows events to be compared using the \"in\" operator:\n        E.g.:\n            if some_event in other_event:\n                ...\n        \"\"\"\n        try:\n            other = make_event(other, dummy=True)\n        except ValidationError:\n            return False\n        # if hashes match\n        if other == self:\n            return True\n        # if hosts match\n        if self.host and other.host:\n            if self.host == other.host:\n                return True\n            # hostnames and IPs\n            radixtarget = RadixTarget()\n            radixtarget.insert(self.host)\n            return bool(radixtarget.search(other.host))\n        return False\n\n    def json(self, mode=\"json\", siem_friendly=False):\n        \"\"\"\n        Serializes the event object to a JSON-compatible dictionary.\n\n        By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present.\n        Additional specific attributes can be serialized based on the mode specified.\n\n   
     Parameters:\n            mode (str): Specifies the data serialization mode. Default is \"json\". Other options include \"graph\", \"human\", and \"id\".\n            siem_friendly (bool): Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).\n\n        Returns:\n            dict: JSON-serializable dictionary representation of the event object.\n        \"\"\"\n        j = dict()\n        # type, ID, scope description\n        for i in (\"type\", \"id\", \"uuid\", \"scope_description\", \"netloc\"):\n            v = getattr(self, i, \"\")\n            if v:\n                j.update({i: str(v)})\n        # event data\n        data_attr = getattr(self, f\"data_{mode}\", None)\n        if data_attr is not None:\n            data = data_attr\n        else:\n            data = smart_decode(self.data)\n        if siem_friendly:\n            j[\"data\"] = {self.type: data}\n        else:\n            j[\"data\"] = data\n        # host, dns children\n        if self.host:\n            j[\"host\"] = str(self.host)\n            j[\"resolved_hosts\"] = sorted(str(h) for h in self.resolved_hosts)\n            j[\"dns_children\"] = {k: list(v) for k, v in self.dns_children.items()}\n        if isinstance(self.port, int):\n            j[\"port\"] = self.port\n        # web spider distance\n        web_spider_distance = getattr(self, \"web_spider_distance\", None)\n        if web_spider_distance is not None:\n            j[\"web_spider_distance\"] = web_spider_distance\n        # scope distance\n        j[\"scope_distance\"] = self.scope_distance\n        # scan\n        if self.scan:\n            j[\"scan\"] = self.scan.id\n        # timestamp\n        j[\"timestamp\"] = self.timestamp.isoformat()\n        # parent event\n        parent_id = self.parent_id\n        if parent_id:\n            j[\"parent\"] = parent_id\n        parent_uuid = self.parent_uuid\n        if parent_uuid:\n            j[\"parent_uuid\"] = parent_uuid\n        # tags\n        if self.tags:\n            j.update({\"tags\": list(self.tags)})\n        # parent module\n        if self.module:\n            j.update({\"module\": str(self.module)})\n        # sequence of modules that led to discovery\n        if self.module_sequence:\n            j.update({\"module_sequence\": str(self.module_sequence)})\n        # discovery context\n        j[\"discovery_context\"] = self.discovery_context\n        j[\"discovery_path\"] = self.discovery_path\n        j[\"parent_chain\"] = self.parent_chain\n\n        # normalize non-primitive python objects\n        for k, v in list(j.items()):\n            if k == \"data\":\n                continue\n            if type(v) not in (str, int, float, bool, list, dict, type(None)):\n                try:\n                    j[k] = json.dumps(v, sort_keys=True)\n                except Exception:\n                    j[k] = smart_decode(v)\n        return j\n\n    @staticmethod\n    def from_json(j):\n        \"\"\"\n        Convenience shortcut to create an Event object from a JSON-compatible dictionary.\n\n        Calls the `event_from_json()` function to deserialize the event.\n\n        Parameters:\n            j (dict): The JSON-compatible dictionary containing event data.\n\n        Returns:\n            Event: The deserialized Event object.\n        \"\"\"\n        return event_from_json(j)\n\n    @property\n    def module_sequence(self):\n        \"\"\"\n        Get a 
human-friendly string that represents the sequence of modules responsible for generating this event.\n\n        Includes the names of omitted parent events to provide a complete view of the module sequence leading to this event.\n\n        Returns:\n            str: The module sequence in human-friendly format.\n        \"\"\"\n        module_name = getattr(self.module, \"name\", \"\")\n        if getattr(self.parent, \"_omit\", False):\n            module_name = f\"{self.parent.module_sequence}-&gt;{module_name}\"\n        return module_name\n\n    @property\n    def module_priority(self):\n        if self._module_priority is None:\n            module = getattr(self, \"module\", None)\n            self._module_priority = int(max(1, min(5, getattr(module, \"priority\", 3))))\n        return self._module_priority\n\n    @module_priority.setter\n    def module_priority(self, priority):\n        self._module_priority = int(max(1, min(5, priority)))\n\n    @property\n    def priority(self):\n        if self._priority is None:\n            timestamp = self.timestamp.timestamp()\n            if self.parent.timestamp == self.timestamp:\n                self._priority = (timestamp,)\n            else:\n                self._priority = getattr(self.parent, \"priority\", ()) + (timestamp,)\n\n        return self._priority\n\n    @property\n    def type(self):\n        return self._type\n\n    @type.setter\n    def type(self, val):\n        self._type = val\n        self._hash = None\n        self._id = None\n\n    @property\n    def _host_size(self):\n        \"\"\"\n        Used for sorting events by their host size, so that parent ones (e.g. IP subnets) come first\n        \"\"\"\n        if self.host:\n            if isinstance(self.host, str):\n                # smaller domains should come first\n                return len(self.host)\n            else:\n                try:\n                    # bigger IP subnets should come first\n                    return -self.host.num_addresses\n                except AttributeError:\n                    # IP addresses default to 1\n                    return 1\n        return 0\n\n    def __iter__(self):\n        \"\"\"\n        For dict(event)\n        \"\"\"\n        yield from self.json().items()\n\n    def __lt__(self, other):\n        \"\"\"\n        For queue sorting\n        \"\"\"\n        return self.priority &lt; getattr(other, \"priority\", (0,))\n\n    def __gt__(self, other):\n        \"\"\"\n        For queue sorting\n        \"\"\"\n        return self.priority &gt; getattr(other, \"priority\", (0,))\n\n    def __eq__(self, other):\n        try:\n            other = make_event(other, dummy=True)\n        except ValidationError:\n            return False\n        return hash(self) == hash(other)\n\n    def __hash__(self):\n        if self._hash is None:\n            self._hash = hash(self.id)\n        return self._hash\n\n    def __str__(self):\n        max_event_len = 80\n        d = str(self.data).replace(\"\\n\", \"\\\\n\")\n        return f'{self.type}(\"{d[:max_event_len]}{(\"...\" if len(d) &gt; max_event_len else \"\")}\", module={self.module}, tags={self.tags})'\n\n    def __repr__(self):\n        return str(self)\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.pretty_string","title":"pretty_string  <code>property</code>","text":"<pre><code>pretty_string\n</code></pre> <p>A human-friendly representation of the event's data. 
Used for graph representation.</p> <p>If the event's data is a dictionary, the function will try to return a JSON-formatted string. Otherwise, it will use smart_decode to convert the data into a string representation.</p> <p>Override if necessary.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The graphical representation of the event's data.</p> </li> </ul>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.module_sequence","title":"module_sequence  <code>property</code>","text":"<pre><code>module_sequence\n</code></pre> <p>Get a human-friendly string that represents the sequence of modules responsible for generating this event.</p> <p>Includes the names of omitted parent events to provide a complete view of the module sequence leading to this event.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The module sequence in human-friendly format.</p> </li> </ul>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.__init__","title":"__init__","text":"<pre><code>__init__(data, event_type, parent=None, context=None, module=None, scan=None, scans=None, tags=None, confidence=100, timestamp=None, _dummy=False, _internal=None)\n</code></pre> <p>Initializes an Event object with the given parameters.</p> <p>In most cases, you should use <code>make_event()</code> instead of instantiating this class directly. <code>make_event()</code> is much friendlier, and can auto-detect the event type for you.</p> <p>Attributes:</p> <ul> <li> <code>data</code>               (<code>(str, dict)</code>)           \u2013            <p>The primary data for the event.</p> </li> <li> <code>event_type</code>               (<code>str</code>)           \u2013            <p>Type of the event, e.g., 'IP_ADDRESS'.</p> </li> <li> <code>parent</code>               (<code>BaseEvent</code>)           \u2013            <p>Parent event that led to this event's discovery. Defaults to None.</p> </li> <li> <code>module</code>               (<code>str</code>)           \u2013            <p>Module that discovered the event. Defaults to None.</p> </li> <li> <code>scan</code>               (<code>Scan</code>)           \u2013            <p>BBOT Scan object. Required unless _dummy is True. Defaults to None.</p> </li> <li> <code>scans</code>               (<code>list of Scan</code>)           \u2013            <p>BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.</p> </li> <li> <code>tags</code>               (<code>list of str</code>)           \u2013            <p>Descriptive tags for the event. Defaults to None.</p> </li> <li> <code>confidence</code>               (<code>int</code>)           \u2013            <p>Confidence level for the event, on a scale of 1-100. Defaults to 100.</p> </li> <li> <code>timestamp</code>               (<code>datetime</code>)           \u2013            <p>Time of event discovery. Defaults to current UTC time.</p> </li> <li> <code>_dummy</code>               (<code>bool</code>)           \u2013            <p>If True, disables certain data validations. Defaults to False.</p> </li> <li> <code>_internal</code>               (<code>Any</code>)           \u2013            <p>If specified, makes the event internal. 
Defaults to None.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValidationError</code>             \u2013            <p>If either <code>scan</code> or <code>parent</code> are not specified and <code>_dummy</code> is False.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>def __init__(\n    self,\n    data,\n    event_type,\n    parent=None,\n    context=None,\n    module=None,\n    scan=None,\n    scans=None,\n    tags=None,\n    confidence=100,\n    timestamp=None,\n    _dummy=False,\n    _internal=None,\n):\n    \"\"\"\n    Initializes an Event object with the given parameters.\n\n    In most cases, you should use `make_event()` instead of instantiating this class directly.\n    `make_event()` is much friendlier, and can auto-detect the event type for you.\n\n    Attributes:\n        data (str, dict): The primary data for the event.\n        event_type (str, optional): Type of the event, e.g., 'IP_ADDRESS'.\n        parent (BaseEvent, optional): Parent event that led to this event's discovery. Defaults to None.\n        module (str, optional): Module that discovered the event. Defaults to None.\n        scan (Scan, optional): BBOT Scan object. Required unless _dummy is True. Defaults to None.\n        scans (list of Scan, optional): BBOT Scan objects, used primarily when unserializing an Event from the database. Defaults to None.\n        tags (list of str, optional): Descriptive tags for the event. Defaults to None.\n        confidence (int, optional): Confidence level for the event, on a scale of 1-100. Defaults to 100.\n        timestamp (datetime, optional): Time of event discovery. Defaults to current UTC time.\n        _dummy (bool, optional): If True, disables certain data validations. Defaults to False.\n        _internal (Any, optional): If specified, makes the event internal. 
Defaults to None.\n\n    Raises:\n        ValidationError: If either `scan` or `parent` are not specified and `_dummy` is False.\n    \"\"\"\n    self._uuid = uuid.uuid4()\n    self._id = None\n    self._hash = None\n    self._data = None\n    self.__host = None\n    self._tags = set()\n    self._port = None\n    self._omit = False\n    self.__words = None\n    self._parent = None\n    self._priority = None\n    self._parent_id = None\n    self._parent_uuid = None\n    self._host_original = None\n    self._scope_distance = None\n    self._module_priority = None\n    self._resolved_hosts = set()\n    self.dns_children = dict()\n    self.raw_dns_records = dict()\n    self._discovery_context = \"\"\n    self._discovery_context_regex = re.compile(r\"\\{(?:event|module)[^}]*\\}\")\n    self.web_spider_distance = 0\n\n    # for creating one-off events without enforcing parent requirement\n    self._dummy = _dummy\n    self.module = module\n    self._type = event_type\n\n    # keep track of whether this event has been recorded by the scan\n    self._stats_recorded = False\n\n    if timestamp is not None:\n        self.timestamp = timestamp\n    else:\n        try:\n            self.timestamp = datetime.datetime.now(datetime.UTC)\n        except AttributeError:\n            self.timestamp = datetime.datetime.utcnow()\n\n    self.confidence = int(confidence)\n    self._internal = False\n\n    # self.scan holds the instantiated scan object (for helpers, etc.)\n    self.scan = scan\n    if (not self.scan) and (not self._dummy):\n        raise ValidationError(f\"Must specify scan\")\n    # self.scans holds a list of scan IDs from scans that encountered this event\n    self.scans = []\n    if scans is not None:\n        self.scans = scans\n    if self.scan:\n        self.scans = list(set([self.scan.id] + self.scans))\n\n    try:\n        self.data = self._sanitize_data(data)\n    except Exception as e:\n        log.trace(traceback.format_exc())\n        raise ValidationError(f'Error sanitizing event data \"{data}\" for type \"{self.type}\": {e}')\n\n    if not self.data:\n        raise ValidationError(f'Invalid event data \"{data}\" for type \"{self.type}\"')\n\n    self.parent = parent\n    if (not self.parent) and (not self._dummy):\n        raise ValidationError(f\"Must specify event parent\")\n\n    if tags is not None:\n        for tag in tags:\n            self.add_tag(tag)\n\n    # internal events are not ingested by output modules\n    if not self._dummy:\n        # removed this second part because it was making certain sslcert events internal\n        if _internal:  # or parent._internal:\n            self.internal = True\n\n    if not context:\n        context = getattr(self.module, \"default_discovery_context\", \"\")\n    if context:\n        self.discovery_context = context\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.json","title":"json","text":"<pre><code>json(mode='json', siem_friendly=False)\n</code></pre> <p>Serializes the event object to a JSON-compatible dictionary.</p> <p>By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present. Additional specific attributes can be serialized based on the mode specified.</p> <p>Parameters:</p> <ul> <li> <code>mode</code>               (<code>str</code>, default:                   <code>'json'</code> )           \u2013            <p>Specifies the data serialization mode. Default is \"json\". 
Other options include \"graph\", \"human\", and \"id\".</p> </li> <li> <code>siem_friendly</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>JSON-serializable dictionary representation of the event object.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>def json(self, mode=\"json\", siem_friendly=False):\n    \"\"\"\n    Serializes the event object to a JSON-compatible dictionary.\n\n    By default, it includes attributes such as 'type', 'id', 'data', 'scope_distance', and others that are present.\n    Additional specific attributes can be serialized based on the mode specified.\n\n    Parameters:\n        mode (str): Specifies the data serialization mode. Default is \"json\". Other options include \"graph\", \"human\", and \"id\".\n        siem_friendly (bool): Whether to format the JSON in a way that's friendly to SIEM ingestion by Elastic, Splunk, etc. This ensures the value of \"data\" is always the same type (a dictionary).\n\n    Returns:\n        dict: JSON-serializable dictionary representation of the event object.\n    \"\"\"\n    j = dict()\n    # type, ID, scope description\n    for i in (\"type\", \"id\", \"uuid\", \"scope_description\", \"netloc\"):\n        v = getattr(self, i, \"\")\n        if v:\n            j.update({i: str(v)})\n    # event data\n    data_attr = getattr(self, f\"data_{mode}\", None)\n    if data_attr is not None:\n        data = data_attr\n    else:\n        data = smart_decode(self.data)\n    if siem_friendly:\n        j[\"data\"] = {self.type: data}\n    else:\n        j[\"data\"] = data\n    # host, dns children\n    if self.host:\n        j[\"host\"] = str(self.host)\n        j[\"resolved_hosts\"] = sorted(str(h) for h in self.resolved_hosts)\n        j[\"dns_children\"] = {k: list(v) for k, v in self.dns_children.items()}\n    if isinstance(self.port, int):\n        j[\"port\"] = self.port\n    # web spider distance\n    web_spider_distance = getattr(self, \"web_spider_distance\", None)\n    if web_spider_distance is not None:\n        j[\"web_spider_distance\"] = web_spider_distance\n    # scope distance\n    j[\"scope_distance\"] = self.scope_distance\n    # scan\n    if self.scan:\n        j[\"scan\"] = self.scan.id\n    # timestamp\n    j[\"timestamp\"] = self.timestamp.isoformat()\n    # parent event\n    parent_id = self.parent_id\n    if parent_id:\n        j[\"parent\"] = parent_id\n    parent_uuid = self.parent_uuid\n    if parent_uuid:\n        j[\"parent_uuid\"] = parent_uuid\n    # tags\n    if self.tags:\n        j.update({\"tags\": list(self.tags)})\n    # parent module\n    if self.module:\n        j.update({\"module\": str(self.module)})\n    # sequence of modules that led to discovery\n    if self.module_sequence:\n        j.update({\"module_sequence\": str(self.module_sequence)})\n    # discovery context\n    j[\"discovery_context\"] = self.discovery_context\n    j[\"discovery_path\"] = self.discovery_path\n    j[\"parent_chain\"] = self.parent_chain\n\n    # normalize non-primitive python objects\n    for k, v in list(j.items()):\n        if k == \"data\":\n            continue\n        if type(v) not in (str, int, float, bool, list, dict, type(None)):\n            try:\n      
          j[k] = json.dumps(v, sort_keys=True)\n            except Exception:\n                j[k] = smart_decode(v)\n    return j\n</code></pre>"},{"location":"dev/event/#bbot.core.event.base.BaseEvent.from_json","title":"from_json  <code>staticmethod</code>","text":"<pre><code>from_json(j)\n</code></pre> <p>Convenience shortcut to create an Event object from a JSON-compatible dictionary.</p> <p>Calls the <code>event_from_json()</code> function to deserialize the event.</p> <p>Parameters:</p> <ul> <li> <code>j</code>               (<code>dict</code>)           \u2013            <p>The JSON-compatible dictionary containing event data.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Event</code>          \u2013            <p>The deserialized Event object.</p> </li> </ul> Source code in <code>bbot/core/event/base.py</code> <pre><code>@staticmethod\ndef from_json(j):\n    \"\"\"\n    Convenience shortcut to create an Event object from a JSON-compatible dictionary.\n\n    Calls the `event_from_json()` function to deserialize the event.\n\n    Parameters:\n        j (dict): The JSON-compatible dictionary containing event data.\n\n    Returns:\n        Event: The deserialized Event object.\n    \"\"\"\n    return event_from_json(j)\n</code></pre>"},{"location":"dev/module_howto/","title":"How to Write a BBOT Module","text":"<p>Here we'll go over a basic example of writing a custom BBOT module.</p>"},{"location":"dev/module_howto/#create-the-python-file","title":"Create the python file","text":"<ol> <li>Create a new <code>.py</code> file in <code>bbot/modules</code> (or in a custom module directory)</li> <li>At the top of the file, import <code>BaseModule</code></li> <li>Declare a class that inherits from <code>BaseModule</code></li> <li>the class must have the same name as your file (case-insensitive)</li> <li>Define in <code>watched_events</code> what type of data your module will consume</li> <li>Define in <code>produced_events</code> what type of data your module will produce</li> <li>Define (via <code>flags</code>) whether your module is <code>active</code> or <code>passive</code>, and whether it's <code>safe</code> or <code>aggressive</code></li> <li>Put your main logic in <code>.handle_event()</code></li> </ol> <p>Here is an example of a simple module that performs whois lookups:</p> bbot/modules/whois.py<pre><code>from bbot.modules.base import BaseModule\n\nclass whois(BaseModule):\n    watched_events = [\"DNS_NAME\"] # watch for DNS_NAME events\n    produced_events = [\"WHOIS\"] # we produce WHOIS events\n    flags = [\"passive\", \"safe\"]\n    meta = {\"description\": \"Query WhoisXMLAPI for WHOIS data\"}\n    options = {\"api_key\": \"\"} # module config options\n    options_desc = {\"api_key\": \"WhoisXMLAPI Key\"}\n    per_domain_only = True # only run once per domain\n\n    base_url = \"https://www.whoisxmlapi.com/whoisserver/WhoisService\"\n\n    # one-time setup - runs at the beginning of the scan\n    async def setup(self):\n        self.api_key = self.config.get(\"api_key\")\n        if not self.api_key:\n            # soft-fail if no API key is set\n            return None, \"Must set API key\"\n\n    async def handle_event(self, event):\n        self.hugesuccess(f\"Got {event} (event.data: {event.data})\")\n        _, domain = self.helpers.split_domain(event.data)\n        url = f\"{self.base_url}?apiKey={self.api_key}&amp;domainName={domain}&amp;outputFormat=JSON\"\n        self.hugeinfo(f\"Visiting {url}\")\n        response = await self.helpers.request(url)\n        if 
response is not None:\n            await self.emit_event(response.json(), \"WHOIS\", parent=event)\n</code></pre>"},{"location":"dev/module_howto/#test-your-new-module","title":"Test your new module","text":"<p>After saving the module, you can run it with <code>-m</code>:</p> <pre><code># run a scan enabling the module in bbot/modules/whois.py\nbbot -t evilcorp.com -m whois\n</code></pre>"},{"location":"dev/module_howto/#debugging-your-module","title":"Debugging Your Module","text":"<p>BBOT has a variety of colorful logging functions like <code>self.hugesuccess()</code> that can be useful for debugging.</p> <p>BBOT log levels:</p> <ul> <li><code>critical</code>: bright red</li> <li><code>hugesuccess</code>: bright green</li> <li><code>hugewarning</code>: bright orange</li> <li><code>hugeinfo</code>: bright blue</li> <li><code>error</code>: red</li> <li><code>warning</code>: orange</li> <li><code>info</code>: blue</li> <li><code>verbose</code>: grey (must enable <code>-v</code> to see)</li> <li><code>debug</code>: grey (must enable <code>-d</code> to see)</li> </ul> <p>For details on how tests are written, see Unit Tests.</p>"},{"location":"dev/module_howto/#handle_event-and-emit_event","title":"<code>handle_event()</code> and <code>emit_event()</code>","text":"<p>The <code>handle_event()</code> method is the most important part of the module. By overriding this method, you control what the module does. During a scan, when an event from your <code>watched_events</code> is encountered (a <code>DNS_NAME</code> in this example), <code>handle_event()</code> is automatically called with that event as its argument.</p> <p>The <code>emit_event()</code> method is how modules return data. When you call <code>emit_event()</code>, it creates an event and outputs it, sending it to any modules that are interested in that data type.</p>"},{"location":"dev/module_howto/#setup","title":"<code>setup()</code>","text":"<p>A module's <code>setup()</code> method is used for performing one-time setup at the start of the scan, like downloading a wordlist or checking to make sure an API key is valid. It needs to return either:</p> <ol> <li><code>True</code> - module setup succeeded</li> <li><code>None</code> - module setup soft-failed (scan will continue but module will be disabled)</li> <li><code>False</code> - module setup hard-failed (scan will abort)</li> </ol> <p>Optionally, it can also return a reason. Here are some examples:</p> <pre><code>async def setup(self):\n    if not self.config.get(\"api_key\"):\n        # soft-fail\n        return None, \"No API key specified\"\n\nasync def setup(self):\n    try:\n        wordlist = self.helpers.wordlist(\"https://raw.githubusercontent.com/user/wordlist.txt\")\n    except WordlistError as e:\n        # hard-fail\n        return False, f\"Error downloading wordlist: {e}\"\n\nasync def setup(self):\n    self.timeout = self.config.get(\"timeout\", 5)\n    # success\n    return True\n</code></pre>"},{"location":"dev/module_howto/#module-config-options","title":"Module Config Options","text":"<p>Each module can have its own set of config options. These live in the <code>options</code> and <code>options_desc</code> attributes on your class. Both are dictionaries; <code>options</code> is for defaults and <code>options_desc</code> is for descriptions. 
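As a minimal sketch, the two attributes might look like this (the <code>timeout</code> option is hypothetical, for illustration only):</p> <pre><code>from bbot.modules.base import BaseModule\n\nclass mymodule(BaseModule):\n    # default value for each option (\"timeout\" is a hypothetical option)\n    options = {\"timeout\": 5}\n    # a short, human-readable description for each option\n    options_desc = {\"timeout\": \"HTTP request timeout in seconds\"}\n</code></pre> <p>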
Here is a typical example:</p> bbot/modules/nmap.py<pre><code>class nmap(BaseModule):\n    # ...\n    options = {\n        \"top_ports\": 100,\n        \"ports\": \"\",\n        \"timing\": \"T4\",\n        \"skip_host_discovery\": True,\n    }\n    options_desc = {\n        \"top_ports\": \"Top ports to scan (default 100) (to override, specify 'ports')\",\n        \"ports\": \"Ports to scan\",\n        \"timing\": \"-T&lt;0-5&gt;: Set timing template (higher is faster)\",\n        \"skip_host_discovery\": \"skip host discovery (-Pn)\",\n    }\n\n    async def setup(self):\n        self.ports = self.config.get(\"ports\", \"\")\n        self.timing = self.config.get(\"timing\", \"T4\")\n        self.top_ports = self.config.get(\"top_ports\", 100)\n        self.skip_host_discovery = self.config.get(\"skip_host_discovery\", True)\n        return True\n</code></pre> <p>Once you've defined these variables, you can pass the options via <code>-c</code>:</p> <pre><code>bbot -m nmap -c modules.nmap.top_ports=250\n</code></pre> <p>... or via the config:</p> ~/.config/bbot/bbot.yml<pre><code>modules:\n  nmap:\n    top_ports: 250\n</code></pre> <p>Inside the module, you access them via <code>self.config</code>, e.g.:</p> <pre><code>self.config.get(\"top_ports\")\n</code></pre>"},{"location":"dev/module_howto/#module-dependencies","title":"Module Dependencies","text":"<p>BBOT automates module dependencies with Ansible. If your module relies on a third-party binary, OS package, or python library, you can specify them in the <code>deps_*</code> attributes of your module.</p> <pre><code>class MyModule(BaseModule):\n    ...\n    deps_apt = [\"chromium-browser\"]\n    deps_ansible = [\n        {\n            \"name\": \"install dev tools\",\n            \"package\": {\"name\": [\"gcc\", \"git\", \"make\"], \"state\": \"present\"},\n            \"become\": True,\n            \"ignore_errors\": True,\n        },\n        {\n            \"name\": \"Download massdns source code\",\n            \"git\": {\n                \"repo\": \"https://github.com/blechschmidt/massdns.git\",\n                \"dest\": \"#{BBOT_TEMP}/massdns\",\n                \"single_branch\": True,\n                \"version\": \"master\",\n            },\n        },\n        {\n            \"name\": \"Build massdns\",\n            \"command\": {\"chdir\": \"#{BBOT_TEMP}/massdns\", \"cmd\": \"make\", \"creates\": \"#{BBOT_TEMP}/massdns/bin/massdns\"},\n        },\n        {\n            \"name\": \"Install massdns\",\n            \"copy\": {\"src\": \"#{BBOT_TEMP}/massdns/bin/massdns\", \"dest\": \"#{BBOT_TOOLS}/\", \"mode\": \"u+x,g+x,o+x\"},\n        },\n    ]\n</code></pre>"},{"location":"dev/module_howto/#load-modules-from-custom-locations","title":"Load Modules from Custom Locations","text":"<p>If you have a custom module and you want to use it with BBOT, you can add its parent folder to <code>module_dirs</code>. This saves you from having to copy it into the BBOT install location. To add a custom module directory, add it to <code>module_dirs</code> in your preset:</p> my_preset.yml<pre><code># load BBOT modules from these additional paths\nmodule_dirs:\n  - /home/user/my_modules\n</code></pre>"},{"location":"dev/presets/","title":"Presets","text":""},{"location":"dev/presets/#bbot.scanner.Preset","title":"Preset","text":"<p>A preset is the central config for a BBOT scan. 
It contains everything a scan needs to run --     targets, modules, flags, config options like API keys, etc.</p> <p>You can create a preset manually and pass it into <code>Scanner(preset=preset)</code>.     Or, you can pass <code>Preset</code>'s kwargs into <code>Scanner()</code> and it will create the preset for you implicitly.</p> <p>Presets can include other presets (which can in turn include other presets, and so on).     This works by merging each preset in turn using <code>Preset.merge()</code>.     The order matters. In case of a conflict, the last preset to be merged wins priority.</p> <p>Presets can be loaded from or saved to YAML. BBOT has a number of ready-made presets for common tasks like subdomain enumeration, web spidering, dirbusting, etc.</p> <p>Presets are highly customizable via <code>conditions</code>, which use the Jinja2 templating engine.     Using <code>conditions</code>, you can define custom logic to inspect the final preset before the scan starts, and change it if need be.     Based on the state of the preset, you can print a warning message, abort the scan, enable/disable modules, etc..</p> <p>Attributes:</p> <ul> <li> <code>target</code>               (<code>Target</code>)           \u2013            <p>Target(s) of scan.</p> </li> <li> <code>whitelist</code>               (<code>Target</code>)           \u2013            <p>Scan whitelist (by default this is the same as <code>target</code>).</p> </li> <li> <code>blacklist</code>               (<code>Target</code>)           \u2013            <p>Scan blacklist (this takes ultimate precedence).</p> </li> <li> <code>helpers</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>Helper containing various reusable functions, regexes, etc.</p> </li> <li> <code>output_dir</code>               (<code>Path</code>)           \u2013            <p>Output directory for scan.</p> </li> <li> <code>scan_name</code>               (<code>str</code>)           \u2013            <p>Name of scan. Defaults to random value, e.g. \"demonic_jimmy\".</p> </li> <li> <code>name</code>               (<code>str</code>)           \u2013            <p>Human-friendly name of preset. Used mainly for logging purposes.</p> </li> <li> <code>description</code>               (<code>str</code>)           \u2013            <p>Description of preset.</p> </li> <li> <code>modules</code>               (<code>set</code>)           \u2013            <p>Combined modules to enable for the scan. Includes scan modules, internal modules, and output modules.</p> </li> <li> <code>scan_modules</code>               (<code>set</code>)           \u2013            <p>Modules to enable for the scan.</p> </li> <li> <code>output_modules</code>               (<code>set</code>)           \u2013            <p>Output modules to enable for the scan. (note: if no output modules are specified, this is not populated until .bake())</p> </li> <li> <code>internal_modules</code>               (<code>set</code>)           \u2013            <p>Internal modules for the scan. (note: not populated until .bake())</p> </li> <li> <code>exclude_modules</code>               (<code>set</code>)           \u2013            <p>Modules to exclude from the scan. When set, automatically removes excluded modules.</p> </li> <li> <code>flags</code>               (<code>set</code>)           \u2013            <p>Flags to enable for the scan. 
When set, automatically enables modules.</p> </li> <li> <code>require_flags</code>               (<code>set</code>)           \u2013            <p>Require modules to have these flags. When set, automatically removes offending modules.</p> </li> <li> <code>exclude_flags</code>               (<code>set</code>)           \u2013            <p>Exclude modules that have any of these flags. When set, automatically removes offending modules.</p> </li> <li> <code>module_dirs</code>               (<code>set</code>)           \u2013            <p>Custom directories from which to load modules (alias to <code>self.module_loader.module_dirs</code>). When set, automatically preloads contained modules.</p> </li> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>BBOT config (alias to <code>core.config</code>)</p> </li> <li> <code>core</code>               (<code>BBOTCore</code>)           \u2013            <p>Local copy of BBOTCore object.</p> </li> <li> <code>verbose</code>               (<code>bool</code>)           \u2013            <p>Whether log level is currently set to verbose. When set, updates log level for all BBOT log handlers.</p> </li> <li> <code>debug</code>               (<code>bool</code>)           \u2013            <p>Whether log level is currently set to debug. When set, updates log level for all BBOT log handlers.</p> </li> <li> <code>silent</code>               (<code>bool</code>)           \u2013            <p>Whether logging is currently disabled. When set to True, silences all stderr.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(\n        \"evilcorp.com\",\n        \"1.2.3.0/24\",\n        flags=[\"subdomain-enum\"],\n        modules=[\"nuclei\"],\n        config={\"web\": {\"http_proxy\": \"http://127.0.0.1\"}}\n    )\n&gt;&gt;&gt; scan = Scanner(preset=preset)\n</code></pre> <pre><code>&gt;&gt;&gt; preset = Preset.from_yaml_file(\"my_preset.yml\")\n&gt;&gt;&gt; scan = Scanner(preset=preset)\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>class Preset:\n    \"\"\"\n    A preset is the central config for a BBOT scan. It contains everything a scan needs to run --\n        targets, modules, flags, config options like API keys, etc.\n\n    You can create a preset manually and pass it into `Scanner(preset=preset)`.\n        Or, you can pass `Preset`'s kwargs into `Scanner()` and it will create the preset for you implicitly.\n\n    Presets can include other presets (which can in turn include other presets, and so on).\n        This works by merging each preset in turn using `Preset.merge()`.\n        The order matters. In case of a conflict, the last preset to be merged wins priority.\n\n    Presets can be loaded from or saved to YAML. 
BBOT has a number of ready-made presets for common tasks like\n    subdomain enumeration, web spidering, dirbusting, etc.\n\n    Presets are highly customizable via `conditions`, which use the Jinja2 templating engine.\n        Using `conditions`, you can define custom logic to inspect the final preset before the scan starts, and change it if need be.\n        Based on the state of the preset, you can print a warning message, abort the scan, enable/disable modules, etc..\n\n    Attributes:\n        target (Target): Target(s) of scan.\n        whitelist (Target): Scan whitelist (by default this is the same as `target`).\n        blacklist (Target): Scan blacklist (this takes ultimate precedence).\n        helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc.\n        output_dir (pathlib.Path): Output directory for scan.\n        scan_name (str): Name of scan. Defaults to random value, e.g. \"demonic_jimmy\".\n        name (str): Human-friendly name of preset. Used mainly for logging purposes.\n        description (str): Description of preset.\n        modules (set): Combined modules to enable for the scan. Includes scan modules, internal modules, and output modules.\n        scan_modules (set): Modules to enable for the scan.\n        output_modules (set): Output modules to enable for the scan. (note: if no output modules are specified, this is not populated until .bake())\n        internal_modules (set): Internal modules for the scan. (note: not populated until .bake())\n        exclude_modules (set): Modules to exclude from the scan. When set, automatically removes excluded modules.\n        flags (set): Flags to enable for the scan. When set, automatically enables modules.\n        require_flags (set): Require modules to have these flags. When set, automatically removes offending modules.\n        exclude_flags (set): Exclude modules that have any of these flags. When set, automatically removes offending modules.\n        module_dirs (set): Custom directories from which to load modules (alias to `self.module_loader.module_dirs`). When set, automatically preloads contained modules.\n        config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `core.config`)\n        core (BBOTCore): Local copy of BBOTCore object.\n        verbose (bool): Whether log level is currently set to verbose. When set, updates log level for all BBOT log handlers.\n        debug (bool): Whether log level is currently set to debug. When set, updates log level for all BBOT log handlers.\n        silent (bool): Whether logging is currently disabled. 
When set to True, silences all stderr.\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(\n                \"evilcorp.com\",\n                \"1.2.3.0/24\",\n                flags=[\"subdomain-enum\"],\n                modules=[\"nuclei\"],\n                config={\"web\": {\"http_proxy\": \"http://127.0.0.1\"}}\n            )\n        &gt;&gt;&gt; scan = Scanner(preset=preset)\n\n        &gt;&gt;&gt; preset = Preset.from_yaml_file(\"my_preset.yml\")\n        &gt;&gt;&gt; scan = Scanner(preset=preset)\n    \"\"\"\n\n    def __init__(\n        self,\n        *targets,\n        whitelist=None,\n        blacklist=None,\n        modules=None,\n        output_modules=None,\n        exclude_modules=None,\n        flags=None,\n        require_flags=None,\n        exclude_flags=None,\n        config=None,\n        module_dirs=None,\n        include=None,\n        presets=None,\n        output_dir=None,\n        scan_name=None,\n        name=None,\n        description=None,\n        conditions=None,\n        force_start=False,\n        verbose=False,\n        debug=False,\n        silent=False,\n        _exclude=None,\n        _log=True,\n    ):\n        \"\"\"\n        Initializes the Preset class.\n\n        Args:\n            *targets (str): Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.\n            whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.\n            blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.\n            modules (list[str], optional): List of scan modules to enable for the scan. Defaults to empty list.\n            output_modules (list[str], optional): List of output modules to use. Defaults to csv, human, and json.\n            exclude_modules (list[str], optional): List of modules to exclude from the scan.\n            require_flags (list[str], optional): Only enable modules if they have these flags.\n            exclude_flags (list[str], optional): Don't enable modules if they have any of these flags.\n            module_dirs (list[str], optional): additional directories to load modules from.\n            config (dict, optional): Additional scan configuration settings.\n            include (list[str], optional): names or filenames of other presets to include.\n            presets (list[str], optional): an alias for `include`.\n            output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).\n            scan_name (str, optional): Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".\n            name (str, optional): Human-readable name of the preset. Used mainly for logging.\n            description (str, optional): Description of the preset.\n            conditions (list[str], optional): Custom conditions to be executed before scan start. Written in Jinja2.\n            force_start (bool, optional): If True, ignore conditional aborts and failed module setups. Just run the scan!\n            verbose (bool, optional): Set the BBOT logger to verbose mode.\n            debug (bool, optional): Set the BBOT logger to debug mode.\n            silent (bool, optional): Silence all stderr (effectively disables the BBOT logger).\n            _exclude (list[Path], optional): Preset filenames to exclude from inclusion. 
Used internally to prevent infinite recursion in circular or self-referencing presets.\n            _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n        \"\"\"\n        # internal variables\n        self._cli = False\n        self._log = _log\n        self.scan = None\n        self._args = None\n        self._environ = None\n        self._helpers = None\n        self._module_loader = None\n        self._yaml_str = \"\"\n        self._baked = False\n\n        self._default_output_modules = None\n        self._default_internal_modules = None\n\n        # modules / flags\n        self.modules = set()\n        self.exclude_modules = set()\n        self.flags = set()\n        self.exclude_flags = set()\n        self.require_flags = set()\n\n        # modules + flags\n        if modules is None:\n            modules = []\n        if isinstance(modules, str):\n            modules = [modules]\n        if output_modules is None:\n            output_modules = []\n        if isinstance(output_modules, str):\n            output_modules = [output_modules]\n        if exclude_modules is None:\n            exclude_modules = []\n        if isinstance(exclude_modules, str):\n            exclude_modules = [exclude_modules]\n        if flags is None:\n            flags = []\n        if isinstance(flags, str):\n            flags = [flags]\n        if exclude_flags is None:\n            exclude_flags = []\n        if isinstance(exclude_flags, str):\n            exclude_flags = [exclude_flags]\n        if require_flags is None:\n            require_flags = []\n        if isinstance(require_flags, str):\n            require_flags = [require_flags]\n\n        # these are used only for preserving the modules as specified in the original preset\n        # this is to ensure the preset looks the same when reserialized\n        self.explicit_scan_modules = set() if modules is None else set(modules)\n        self.explicit_output_modules = set() if output_modules is None else set(output_modules)\n\n        # whether to force-start the scan (ignoring conditional aborts and failed module setups)\n        self.force_start = force_start\n\n        # scan output directory\n        self.output_dir = output_dir\n        # name of scan\n        self.scan_name = scan_name\n\n        # name of preset, default blank\n        self.name = name or \"\"\n        # preset description, default blank\n        self.description = description or \"\"\n\n        # custom conditions, evaluated during .bake()\n        self.conditions = []\n        if conditions is not None:\n            for condition in conditions:\n                self.conditions.append((self.name, condition))\n\n        # keeps track of loaded preset files to prevent infinite circular inclusions\n        self._preset_files_loaded = set()\n        if _exclude is not None:\n            for _filename in _exclude:\n                self._preset_files_loaded.add(Path(_filename).resolve())\n\n        # bbot core config\n        self.core = CORE.copy()\n        if config is None:\n            config = omegaconf.OmegaConf.create({})\n        # merge custom configs if specified by the user\n        self.core.merge_custom(config)\n\n        # log verbosity\n        # actual log verbosity isn't set until .bake()\n        self.verbose = verbose\n        self.debug = debug\n        self.silent = silent\n\n        # custom module directories\n        self._module_dirs = set()\n        self.module_dirs = 
module_dirs\n\n        # target / whitelist / blacklist\n        # these are temporary receptacles until they all get .baked() together\n        self._seeds = set(targets if targets else [])\n        self._whitelist = set(whitelist) if whitelist else whitelist\n        self._blacklist = set(blacklist if blacklist else [])\n\n        self._target = None\n\n        # \"presets\" is alias to \"include\"\n        if presets and include:\n            raise ValueError(\n                'Cannot use both \"presets\" and \"include\" args at the same time (presets is an alias to include). Please pick one or the other :)'\n            )\n        if presets and not include:\n            include = presets\n        # include other presets\n        if include and not isinstance(include, (list, tuple, set)):\n            include = [include]\n        if include:\n            for included_preset in include:\n                self.include_preset(included_preset)\n\n        # we don't fill self.modules yet (that happens in .bake())\n        self.explicit_scan_modules.update(set(modules))\n        self.explicit_output_modules.update(set(output_modules))\n        self.exclude_modules.update(set(exclude_modules))\n        self.flags.update(set(flags))\n        self.exclude_flags.update(set(exclude_flags))\n        self.require_flags.update(set(require_flags))\n\n    @property\n    def bbot_home(self):\n        return Path(self.config.get(\"home\", \"~/.bbot\")).expanduser().resolve()\n\n    @property\n    def target(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access target before preset is baked (use ._seeds instead)\")\n        return self._target\n\n    @property\n    def seeds(self):\n        if self._seeds is None:\n            raise ValueError(\"Cannot access target before preset is baked (use ._seeds instead)\")\n        return self.target.seeds\n\n    @property\n    def whitelist(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access whitelist before preset is baked (use ._whitelist instead)\")\n        return self.target.whitelist\n\n    @property\n    def blacklist(self):\n        if self._target is None:\n            raise ValueError(\"Cannot access blacklist before preset is baked (use ._blacklist instead)\")\n        return self.target.blacklist\n\n    @property\n    def preset_dir(self):\n        return self.bbot_home / \"presets\"\n\n    @property\n    def default_output_modules(self):\n        if self._default_output_modules is not None:\n            output_modules = self._default_output_modules\n        else:\n            output_modules = [\"python\", \"csv\", \"txt\", \"json\"]\n            if self._cli:\n                output_modules.append(\"stdout\")\n        return output_modules\n\n    @property\n    def default_internal_modules(self):\n        preloaded_internal = self.module_loader.preloaded(type=\"internal\")\n        if self._default_internal_modules is not None:\n            internal_modules = self._default_internal_modules\n        else:\n            internal_modules = list(preloaded_internal)\n        return {k: preloaded_internal[k] for k in internal_modules}\n\n    def merge(self, other):\n        \"\"\"\n        Merge another preset into this one.\n\n        If there are any config conflicts, `other` will win over `self`.\n\n        Args:\n            other (Preset): The preset to merge into this one.\n\n        Examples:\n            &gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n            &gt;&gt;&gt; 
preset1.scan_modules\n            ['portscan']\n            &gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n            &gt;&gt;&gt; preset2.scan_modules\n            ['sslcert']\n            &gt;&gt;&gt; preset1.merge(preset2)\n            &gt;&gt;&gt; preset1.scan_modules\n            ['portscan', 'sslcert']\n        \"\"\"\n        self.log_debug(f'Merging preset \"{other.name}\" into \"{self.name}\"')\n        # config\n        self.core.merge_custom(other.core.custom_config)\n        self.module_loader.core = self.core\n        # module dirs\n        # modules + flags\n        # establish requirements / exclusions first\n        self.exclude_modules.update(other.exclude_modules)\n        self.require_flags.update(other.require_flags)\n        self.exclude_flags.update(other.exclude_flags)\n        # then it's okay to start enabling modules\n        self.explicit_scan_modules.update(other.explicit_scan_modules)\n        self.explicit_output_modules.update(other.explicit_output_modules)\n        self.flags.update(other.flags)\n\n        # target / scope\n        self._seeds.update(other._seeds)\n        # leave whitelist as None until we encounter one\n        if other._whitelist is not None:\n            if self._whitelist is None:\n                self._whitelist = set(other._whitelist)\n            else:\n                self._whitelist.update(other._whitelist)\n        self._blacklist.update(other._blacklist)\n\n        # module dirs\n        self.module_dirs = self.module_dirs.union(other.module_dirs)\n\n        # log verbosity\n        if other.silent:\n            self.silent = other.silent\n        if other.verbose:\n            self.verbose = other.verbose\n        if other.debug:\n            self.debug = other.debug\n        # scan name\n        if other.scan_name is not None:\n            self.scan_name = other.scan_name\n        if other.output_dir is not None:\n            self.output_dir = other.output_dir\n        # conditions\n        if other.conditions:\n            self.conditions.extend(other.conditions)\n        # misc\n        self.force_start = self.force_start | other.force_start\n        self._cli = self._cli | other._cli\n        # transfer args\n        if other._args is not None:\n            self._args = other._args\n\n    def bake(self, scan=None):\n        \"\"\"\n        Return a \"baked\" copy of this preset, ready for use by a BBOT scan.\n\n        Baking a preset finalizes it by populating `preset.modules` based on flags,\n        performing final validations, and substituting environment variables in preloaded modules.\n        It also evaluates custom `conditions` as specified in the preset.\n\n        This function is automatically called in Scanner.__init__(). There is no need to call it manually.\n        \"\"\"\n        self.log_debug(\"Getting baked\")\n        # create a copy of self\n        baked_preset = copy(self)\n        baked_preset.scan = scan\n        # copy core\n        baked_preset.core = self.core.copy()\n        # copy module loader\n        baked_preset._module_loader = self.module_loader.copy()\n        # prepare os environment\n        os_environ = baked_preset.environ.prepare()\n        # find and replace preloaded modules with os environ\n        # this is different from the config variable substitution because it modifies\n        #  the preloaded modules, i.e. 
their ansible playbooks\n        baked_preset.module_loader.find_and_replace(**os_environ)\n        # update os environ\n        os.environ.clear()\n        os.environ.update(os_environ)\n\n        # validate flags, config options\n        baked_preset.validate()\n\n        # validate log level options\n        baked_preset.apply_log_level(apply_core=scan is not None)\n\n        # assign baked preset to our scan\n        if scan is not None:\n            scan.preset = baked_preset\n\n        # now that our requirements / exclusions are validated, we can start enabling modules\n        # enable scan modules\n        for module in baked_preset.explicit_scan_modules:\n            baked_preset.add_module(module, module_type=\"scan\")\n\n        # enable output modules\n        output_modules_to_enable = set(baked_preset.explicit_output_modules)\n        default_output_modules = self.default_output_modules\n        output_module_override = any(m in default_output_modules for m in output_modules_to_enable)\n        # if none of the default output modules have been explicitly specified, enable them all\n        if not output_module_override:\n            output_modules_to_enable.update(self.default_output_modules)\n        for module in output_modules_to_enable:\n            baked_preset.add_module(module, module_type=\"output\", raise_error=False)\n\n        # enable internal modules\n        for internal_module, preloaded in self.default_internal_modules.items():\n            is_enabled = baked_preset.config.get(internal_module, True)\n            is_excluded = internal_module in baked_preset.exclude_modules\n            if is_enabled and not is_excluded:\n                baked_preset.add_module(internal_module, module_type=\"internal\", raise_error=False)\n\n        # disable internal modules if requested\n        for internal_module in baked_preset.internal_modules:\n            if baked_preset.config.get(internal_module, True) == False:\n                baked_preset.exclude_modules.add(internal_module)\n\n        # enable modules by flag\n        for flag in baked_preset.flags:\n            for module, preloaded in baked_preset.module_loader.preloaded().items():\n                module_flags = preloaded.get(\"flags\", [])\n                module_type = preloaded.get(\"type\", \"scan\")\n                if flag in module_flags:\n                    self.log_debug(f'Enabling module \"{module}\" because it has flag \"{flag}\"')\n                    baked_preset.add_module(module, module_type, raise_error=False)\n\n        # ensure we have output modules\n        if not baked_preset.output_modules:\n            for output_module in self.default_output_modules:\n                baked_preset.add_module(output_module, module_type=\"output\", raise_error=False)\n\n        # create target object\n        from bbot.scanner.target import BBOTTarget\n\n        baked_preset._target = BBOTTarget(\n            *list(self._seeds),\n            whitelist=self._whitelist,\n            blacklist=self._blacklist,\n            strict_scope=self.strict_scope,\n            scan=scan,\n        )\n\n        # evaluate conditions\n        if baked_preset.conditions:\n            from .conditions import ConditionEvaluator\n\n            evaluator = ConditionEvaluator(baked_preset)\n            evaluator.evaluate()\n\n        self._baked = True\n        return baked_preset\n\n    def parse_args(self):\n        \"\"\"\n        Parse CLI arguments, and merge them into this preset.\n\n        Used in `cli.py`.\n        
\"\"\"\n        self._cli = True\n        self.merge(self.args.preset_from_args())\n\n    @property\n    def module_dirs(self):\n        return self.module_loader.module_dirs\n\n    @module_dirs.setter\n    def module_dirs(self, module_dirs):\n        if module_dirs:\n            if isinstance(module_dirs, str):\n                module_dirs = [module_dirs]\n            for m in module_dirs:\n                self.module_loader.add_module_dir(m)\n                self._module_dirs.add(m)\n\n    @property\n    def scan_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"scan\"]\n\n    @property\n    def output_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"output\"]\n\n    @property\n    def internal_modules(self):\n        return [m for m in self.modules if self.preloaded_module(m).get(\"type\", \"scan\") == \"internal\"]\n\n    def add_module(self, module_name, module_type=\"scan\", raise_error=True):\n        self.log_debug(f'Adding module \"{module_name}\" of type \"{module_type}\"')\n        is_valid, reason, preloaded = self._is_valid_module(module_name, module_type, raise_error=raise_error)\n        if not is_valid:\n            self.log_debug(f'Unable to add {module_type} module \"{module_name}\": {reason}')\n            return\n        self.modules.add(module_name)\n        for module_dep in preloaded.get(\"deps\", {}).get(\"modules\", []):\n            if module_dep != module_name and module_dep not in self.modules:\n                self.log_verbose(f'Adding module \"{module_dep}\" because {module_name} depends on it')\n                self.add_module(module_dep, raise_error=False)\n\n    def preloaded_module(self, module):\n        return self.module_loader.preloaded()[module]\n\n    @property\n    def config(self):\n        return self.core.config\n\n    @property\n    def web_config(self):\n        return self.core.config.get(\"web\", {})\n\n    @property\n    def scope_config(self):\n        return self.config.get(\"scope\", {})\n\n    @property\n    def strict_scope(self):\n        return self.scope_config.get(\"strict\", False)\n\n    def apply_log_level(self, apply_core=False):\n        # silent takes precedence\n        if self.silent:\n            self.verbose = False\n            self.debug = False\n            if apply_core:\n                self.core.logger.log_level = \"CRITICAL\"\n                for key in (\"verbose\", \"debug\"):\n                    with suppress(omegaconf.errors.ConfigKeyError):\n                        del self.core.custom_config[key]\n        else:\n            # then debug\n            if self.debug:\n                self.verbose = False\n                if apply_core:\n                    self.core.logger.log_level = \"DEBUG\"\n                    with suppress(omegaconf.errors.ConfigKeyError):\n                        del self.core.custom_config[\"verbose\"]\n            else:\n                # finally verbose\n                if self.verbose and apply_core:\n                    self.core.logger.log_level = \"VERBOSE\"\n\n    @property\n    def helpers(self):\n        if self._helpers is None:\n            from bbot.core.helpers.helper import ConfigAwareHelper\n\n            self._helpers = ConfigAwareHelper(preset=self)\n        return self._helpers\n\n    @property\n    def module_loader(self):\n        self.environ\n        if self._module_loader is None:\n            from bbot.core.modules import MODULE_LOADER\n\n 
           self._module_loader = MODULE_LOADER\n            self._module_loader.ensure_config_files()\n\n        return self._module_loader\n\n    @property\n    def environ(self):\n        if self._environ is None:\n            from .environ import BBOTEnviron\n\n            self._environ = BBOTEnviron(self)\n        return self._environ\n\n    @property\n    def args(self):\n        if self._args is None:\n            from .args import BBOTArgs\n\n            self._args = BBOTArgs(self)\n        return self._args\n\n    def in_scope(self, host):\n        return self.target.in_scope(host)\n\n    def blacklisted(self, host):\n        return self.target.blacklisted(host)\n\n    def whitelisted(self, host):\n        return self.target.whitelisted(host)\n\n    @classmethod\n    def from_dict(cls, preset_dict, name=None, _exclude=None, _log=False):\n        \"\"\"\n        Create a preset from a Python dictionary object.\n\n        Args:\n            preset_dict (dict): Preset in dictionary form\n            name (str, optional): Name of preset\n            _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n            _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n\n        Returns:\n            Preset: The loaded preset\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n        \"\"\"\n        new_preset = cls(\n            *preset_dict.get(\"target\", []),\n            whitelist=preset_dict.get(\"whitelist\"),\n            blacklist=preset_dict.get(\"blacklist\"),\n            modules=preset_dict.get(\"modules\"),\n            output_modules=preset_dict.get(\"output_modules\"),\n            exclude_modules=preset_dict.get(\"exclude_modules\"),\n            flags=preset_dict.get(\"flags\"),\n            require_flags=preset_dict.get(\"require_flags\"),\n            exclude_flags=preset_dict.get(\"exclude_flags\"),\n            verbose=preset_dict.get(\"verbose\", False),\n            debug=preset_dict.get(\"debug\", False),\n            silent=preset_dict.get(\"silent\", False),\n            config=preset_dict.get(\"config\"),\n            module_dirs=preset_dict.get(\"module_dirs\", []),\n            include=list(preset_dict.get(\"include\", [])),\n            scan_name=preset_dict.get(\"scan_name\"),\n            output_dir=preset_dict.get(\"output_dir\"),\n            name=preset_dict.get(\"name\", name),\n            description=preset_dict.get(\"description\"),\n            conditions=preset_dict.get(\"conditions\", []),\n            _exclude=_exclude,\n            _log=_log,\n        )\n        return new_preset\n\n    def include_preset(self, filename):\n        \"\"\"\n        Load a preset from a yaml file and merge it into this one.\n\n        If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Args:\n            filename (Path): The preset YAML file to merge\n\n        Examples:\n            &gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n        \"\"\"\n        self.log_debug(f'Including preset \"{filename}\"')\n        preset_filename = PRESET_PATH.find(filename)\n        preset_from_yaml = self.from_yaml_file(preset_filename, _exclude=self._preset_files_loaded)\n        if preset_from_yaml is not False:\n     
       self.merge(preset_from_yaml)\n            self._preset_files_loaded.add(preset_filename)\n\n    @classmethod\n    def from_yaml_file(cls, filename, _exclude=None, _log=False):\n        \"\"\"\n        Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n        \"\"\"\n        filename = Path(filename).resolve()\n        try:\n            return _preset_cache[filename]\n        except KeyError:\n            if _exclude is None:\n                _exclude = set()\n            if _exclude is not None and filename in _exclude:\n                log.debug(f\"Not loading {filename} because it was already loaded {_exclude}\")\n                return False\n            log.debug(f\"Loading {filename} because it's not in excluded list ({_exclude})\")\n            _exclude = set(_exclude)\n            _exclude.add(filename)\n            try:\n                yaml_str = open(filename).read()\n            except FileNotFoundError:\n                raise PresetNotFoundError(f'Could not find preset at \"{filename}\" - file does not exist')\n            preset = cls.from_dict(\n                omegaconf.OmegaConf.create(yaml_str), name=filename.stem, _exclude=_exclude, _log=_log\n            )\n            preset._yaml_str = yaml_str\n            _preset_cache[filename] = preset\n            return preset\n\n    @classmethod\n    def from_yaml_string(cls, yaml_preset):\n        \"\"\"\n        Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n        The file extension is optional.\n\n        Examples:\n            &gt;&gt;&gt; yaml_string = '''\n            &gt;&gt;&gt; target:\n            &gt;&gt;&gt; - evilcorp.com\n            &gt;&gt;&gt; modules:\n            &gt;&gt;&gt; - portscan'''\n            &gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n        \"\"\"\n        return cls.from_dict(omegaconf.OmegaConf.create(yaml_preset))\n\n    def to_dict(self, include_target=False, full_config=False, redact_secrets=False):\n        \"\"\"\n        Convert this preset into a Python dictionary.\n\n        Args:\n            include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n            full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n\n        Returns:\n            dict: The preset in dictionary form\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n            &gt;&gt;&gt; preset.to_dict()\n            {\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n        \"\"\"\n        preset_dict = {}\n\n        if self.description:\n            preset_dict[\"description\"] = self.description\n\n        # config\n        if full_config:\n            config = self.core.config\n        else:\n            config = self.core.custom_config\n        config = omegaconf.OmegaConf.to_object(config)\n        if redact_secrets:\n            config = self.core.no_secrets_config(config)\n        if config:\n            preset_dict[\"config\"] = config\n\n        # scope\n        if include_target:\n            target = sorted(self.target.seeds.inputs)\n            whitelist = []\n            if self.target.whitelist is not None:\n             
   whitelist = sorted(self.target.whitelist.inputs)\n            blacklist = sorted(self.target.blacklist.inputs)\n            if target:\n                preset_dict[\"target\"] = target\n            if whitelist and whitelist != target:\n                preset_dict[\"whitelist\"] = whitelist\n            if blacklist:\n                preset_dict[\"blacklist\"] = blacklist\n\n        # flags + modules\n        if self.require_flags:\n            preset_dict[\"require_flags\"] = sorted(self.require_flags)\n        if self.exclude_flags:\n            preset_dict[\"exclude_flags\"] = sorted(self.exclude_flags)\n        if self.exclude_modules:\n            preset_dict[\"exclude_modules\"] = sorted(self.exclude_modules)\n        if self.flags:\n            preset_dict[\"flags\"] = sorted(self.flags)\n        if self.explicit_scan_modules:\n            preset_dict[\"modules\"] = sorted(self.explicit_scan_modules)\n        if self.explicit_output_modules:\n            preset_dict[\"output_modules\"] = sorted(self.explicit_output_modules)\n\n        # log verbosity\n        if self.verbose:\n            preset_dict[\"verbose\"] = True\n        if self.debug:\n            preset_dict[\"debug\"] = True\n        if self.silent:\n            preset_dict[\"silent\"] = True\n\n        # misc scan options\n        if self.scan_name:\n            preset_dict[\"scan_name\"] = self.scan_name\n        if self.scan_name:\n            preset_dict[\"output_dir\"] = self.output_dir\n\n        # conditions\n        if self.conditions:\n            preset_dict[\"conditions\"] = [c[-1] for c in self.conditions]\n\n        return preset_dict\n\n    def to_yaml(self, include_target=False, full_config=False, sort_keys=False):\n        \"\"\"\n        Return the preset in the form of a YAML string.\n\n        Args:\n            include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n            full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n            sort_keys (bool, optional): If True, sort YAML keys alphabetically\n\n        Returns:\n            str: The preset in the form of a YAML string\n\n        Examples:\n            &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n            &gt;&gt;&gt; print(preset.to_yaml())\n            flags:\n            - subdomain-enum\n            modules:\n            - portscan\n        \"\"\"\n        preset_dict = self.to_dict(include_target=include_target, full_config=full_config)\n        return yaml.dump(preset_dict, sort_keys=sort_keys)\n\n    def _is_valid_module(self, module, module_type, name_only=False, raise_error=True):\n        if module_type == \"scan\":\n            module_choices = self.module_loader.scan_module_choices\n        elif module_type == \"output\":\n            module_choices = self.module_loader.output_module_choices\n        elif module_type == \"internal\":\n            module_choices = self.module_loader.internal_module_choices\n        else:\n            raise ValidationError(f'Unknown module type \"{module}\"')\n\n        if not module in module_choices:\n            raise ValidationError(get_closest_match(module, module_choices, msg=f\"{module_type} module\"))\n\n        try:\n            preloaded = self.module_loader.preloaded()[module]\n        except KeyError:\n            raise ValidationError(f'Unknown module \"{module}\"')\n\n        if name_only:\n            return True, \"\", preloaded\n\n    
    if module in self.exclude_modules:\n            reason = \"the module has been excluded\"\n            return False, reason, {}\n\n        module_flags = preloaded.get(\"flags\", [])\n        _module_type = preloaded.get(\"type\", \"scan\")\n        if module_type:\n            if _module_type != module_type:\n                reason = f'its type ({_module_type}) is not \"{module_type}\"'\n                if raise_error:\n                    raise ValidationError(f'Unable to add {module_type} module \"{module}\" because {reason}')\n                return False, reason, preloaded\n\n        if _module_type == \"scan\":\n            if self.exclude_flags:\n                for f in module_flags:\n                    if f in self.exclude_flags:\n                        return False, f'it has excluded flag, \"{f}\"', preloaded\n            if self.require_flags and not all(f in module_flags for f in self.require_flags):\n                return False, f'it doesn\\'t have the required flags ({\",\".join(self.require_flags)})', preloaded\n\n        return True, \"\", preloaded\n\n    def validate(self):\n        \"\"\"\n        Validate module/flag exclusions/requirements, and CLI config options if applicable.\n        \"\"\"\n        if self._cli:\n            self.args.validate()\n\n        # validate excluded modules\n        for excluded_module in self.exclude_modules:\n            if not excluded_module in self.module_loader.all_module_choices:\n                raise ValidationError(\n                    get_closest_match(excluded_module, self.module_loader.all_module_choices, msg=\"module\")\n                )\n        # validate excluded flags\n        for excluded_flag in self.exclude_flags:\n            if not excluded_flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(excluded_flag, self.module_loader.flag_choices, msg=\"flag\"))\n        # validate required flags\n        for required_flag in self.require_flags:\n            if not required_flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(required_flag, self.module_loader.flag_choices, msg=\"flag\"))\n        # validate flags\n        for flag in self.flags:\n            if not flag in self.module_loader.flag_choices:\n                raise ValidationError(get_closest_match(flag, self.module_loader.flag_choices, msg=\"flag\"))\n\n    @property\n    def all_presets(self):\n        \"\"\"\n        Recursively find all the presets and return them as a dictionary\n        \"\"\"\n        preset_dir = self.preset_dir\n        home_dir = Path.home()\n\n        # first, add local preset dir to PRESET_PATH\n        PRESET_PATH.add_path(self.preset_dir)\n\n        # ensure local preset directory exists\n        mkdir(preset_dir)\n\n        global DEFAULT_PRESETS\n        if DEFAULT_PRESETS is None:\n            presets = dict()\n            for ext in (\"yml\", \"yaml\"):\n                for preset_path in PRESET_PATH:\n                    # for every yaml file\n                    for original_filename in preset_path.rglob(f\"**/*.{ext}\"):\n                        # not including symlinks\n                        if original_filename.is_symlink():\n                            continue\n\n                        # try to load it as a preset\n                        try:\n                            loaded_preset = self.from_yaml_file(original_filename, _log=True)\n                            if loaded_preset is False:\n                            
    continue\n                        except Exception as e:\n                            log.warning(f'Failed to load preset at \"{original_filename}\": {e}')\n                            log.trace(traceback.format_exc())\n                            continue\n\n                        # category is the parent folder(s), if any\n                        category = str(original_filename.relative_to(preset_path).parent)\n                        if category == \".\":\n                            category = \"\"\n\n                        local_preset = original_filename\n                        # populate symlinks in local preset dir\n                        if not original_filename.is_relative_to(preset_dir):\n                            relative_preset = original_filename.relative_to(preset_path)\n                            local_preset = preset_dir / relative_preset\n                            mkdir(local_preset.parent, check_writable=False)\n                            if not local_preset.exists():\n                                local_preset.symlink_to(original_filename)\n\n                        # collapse home directory into \"~\"\n                        if local_preset.is_relative_to(home_dir):\n                            local_preset = Path(\"~\") / local_preset.relative_to(home_dir)\n\n                        presets[local_preset] = (loaded_preset, category, preset_path, original_filename)\n\n            # sort by name\n            DEFAULT_PRESETS = dict(sorted(presets.items(), key=lambda x: x[-1][0].name))\n        return DEFAULT_PRESETS\n\n    def presets_table(self, include_modules=True):\n        \"\"\"\n        Return a table of all the presets in the form of a string\n        \"\"\"\n        table = []\n        header = [\"Preset\", \"Category\", \"Description\", \"# Modules\"]\n        if include_modules:\n            header.append(\"Modules\")\n        for yaml_file, (loaded_preset, category, preset_path, original_file) in self.all_presets.items():\n            loaded_preset = loaded_preset.bake()\n            num_modules = f\"{len(loaded_preset.scan_modules):,}\"\n            row = [loaded_preset.name, category, loaded_preset.description, num_modules]\n            if include_modules:\n                row.append(\", \".join(sorted(loaded_preset.scan_modules)))\n            table.append(row)\n        return make_table(table, header)\n\n    def log_verbose(self, msg):\n        if self._log:\n            log.verbose(f\"Preset {self.name}: {msg}\")\n\n    def log_debug(self, msg):\n        if self._log:\n            log.debug(f\"Preset {self.name}: {msg}\")\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.all_presets","title":"all_presets  <code>property</code>","text":"<pre><code>all_presets\n</code></pre> <p>Recursively find all the presets and return them as a dictionary</p>"},{"location":"dev/presets/#bbot.scanner.Preset.__init__","title":"__init__","text":"<pre><code>__init__(*targets, whitelist=None, blacklist=None, modules=None, output_modules=None, exclude_modules=None, flags=None, require_flags=None, exclude_flags=None, config=None, module_dirs=None, include=None, presets=None, output_dir=None, scan_name=None, name=None, description=None, conditions=None, force_start=False, verbose=False, debug=False, silent=False, _exclude=None, _log=True)\n</code></pre> <p>Initializes the Preset class.</p> <p>Parameters:</p> <ul> <li> <code>*targets</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            
<p>Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.</p> </li> <li> <code>whitelist</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>Whitelisted target(s) to scan. Defaults to the same as <code>targets</code>.</p> </li> <li> <code>blacklist</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>Blacklisted target(s). Takes ultimate precedence. Defaults to empty.</p> </li> <li> <code>modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of scan modules to enable for the scan. Defaults to empty list.</p> </li> <li> <code>output_modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of output modules to use. Defaults to csv, human, and json.</p> </li> <li> <code>exclude_modules</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>List of modules to exclude from the scan.</p> </li> <li> <code>require_flags</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Only enable modules if they have these flags.</p> </li> <li> <code>exclude_flags</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Don't enable modules if they have any of these flags.</p> </li> <li> <code>module_dirs</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>additional directories to load modules from.</p> </li> <li> <code>config</code>               (<code>dict</code>, default:                   <code>None</code> )           \u2013            <p>Additional scan configuration settings.</p> </li> <li> <code>include</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>names or filenames of other presets to include.</p> </li> <li> <code>presets</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>an alias for <code>include</code>.</p> </li> <li> <code>output_dir</code>               (<code>str or Path</code>, default:                   <code>None</code> )           \u2013            <p>Directory to store scan output. Defaults to BBOT home directory (<code>~/.bbot</code>).</p> </li> <li> <code>scan_name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".</p> </li> <li> <code>name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Human-readable name of the preset. Used mainly for logging.</p> </li> <li> <code>description</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Description of the preset.</p> </li> <li> <code>conditions</code>               (<code>list[str]</code>, default:                   <code>None</code> )           \u2013            <p>Custom conditions to be executed before scan start. 
Written in Jinja2.</p> </li> <li> <code>force_start</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, ignore conditional aborts and failed module setups. Just run the scan!</p> </li> <li> <code>verbose</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Set the BBOT logger to verbose mode.</p> </li> <li> <code>debug</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Set the BBOT logger to debug mode.</p> </li> <li> <code>silent</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Silence all stderr (effectively disables the BBOT logger).</p> </li> <li> <code>_exclude</code>               (<code>list[Path]</code>, default:                   <code>None</code> )           \u2013            <p>Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.</p> </li> <li> <code>_log</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.</p> </li> </ul> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def __init__(\n    self,\n    *targets,\n    whitelist=None,\n    blacklist=None,\n    modules=None,\n    output_modules=None,\n    exclude_modules=None,\n    flags=None,\n    require_flags=None,\n    exclude_flags=None,\n    config=None,\n    module_dirs=None,\n    include=None,\n    presets=None,\n    output_dir=None,\n    scan_name=None,\n    name=None,\n    description=None,\n    conditions=None,\n    force_start=False,\n    verbose=False,\n    debug=False,\n    silent=False,\n    _exclude=None,\n    _log=True,\n):\n    \"\"\"\n    Initializes the Preset class.\n\n    Args:\n        *targets (str): Target(s) to scan. Types supported: hostnames, IPs, CIDRs, emails, open ports.\n        whitelist (list, optional): Whitelisted target(s) to scan. Defaults to the same as `targets`.\n        blacklist (list, optional): Blacklisted target(s). Takes ultimate precedence. Defaults to empty.\n        modules (list[str], optional): List of scan modules to enable for the scan. Defaults to empty list.\n        output_modules (list[str], optional): List of output modules to use. Defaults to csv, human, and json.\n        exclude_modules (list[str], optional): List of modules to exclude from the scan.\n        require_flags (list[str], optional): Only enable modules if they have these flags.\n        exclude_flags (list[str], optional): Don't enable modules if they have any of these flags.\n        module_dirs (list[str], optional): additional directories to load modules from.\n        config (dict, optional): Additional scan configuration settings.\n        include (list[str], optional): names or filenames of other presets to include.\n        presets (list[str], optional): an alias for `include`.\n        output_dir (str or Path, optional): Directory to store scan output. Defaults to BBOT home directory (`~/.bbot`).\n        scan_name (str, optional): Human-readable name of the scan. If not specified, it will be random, e.g. \"demonic_jimmy\".\n        name (str, optional): Human-readable name of the preset. 
Used mainly for logging.\n        description (str, optional): Description of the preset.\n        conditions (list[str], optional): Custom conditions to be executed before scan start. Written in Jinja2.\n        force_start (bool, optional): If True, ignore conditional aborts and failed module setups. Just run the scan!\n        verbose (bool, optional): Set the BBOT logger to verbose mode.\n        debug (bool, optional): Set the BBOT logger to debug mode.\n        silent (bool, optional): Silence all stderr (effectively disables the BBOT logger).\n        _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n        _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n    \"\"\"\n    # internal variables\n    self._cli = False\n    self._log = _log\n    self.scan = None\n    self._args = None\n    self._environ = None\n    self._helpers = None\n    self._module_loader = None\n    self._yaml_str = \"\"\n    self._baked = False\n\n    self._default_output_modules = None\n    self._default_internal_modules = None\n\n    # modules / flags\n    self.modules = set()\n    self.exclude_modules = set()\n    self.flags = set()\n    self.exclude_flags = set()\n    self.require_flags = set()\n\n    # modules + flags\n    if modules is None:\n        modules = []\n    if isinstance(modules, str):\n        modules = [modules]\n    if output_modules is None:\n        output_modules = []\n    if isinstance(output_modules, str):\n        output_modules = [output_modules]\n    if exclude_modules is None:\n        exclude_modules = []\n    if isinstance(exclude_modules, str):\n        exclude_modules = [exclude_modules]\n    if flags is None:\n        flags = []\n    if isinstance(flags, str):\n        flags = [flags]\n    if exclude_flags is None:\n        exclude_flags = []\n    if isinstance(exclude_flags, str):\n        exclude_flags = [exclude_flags]\n    if require_flags is None:\n        require_flags = []\n    if isinstance(require_flags, str):\n        require_flags = [require_flags]\n\n    # these are used only for preserving the modules as specified in the original preset\n    # this is to ensure the preset looks the same when reserialized\n    self.explicit_scan_modules = set() if modules is None else set(modules)\n    self.explicit_output_modules = set() if output_modules is None else set(output_modules)\n\n    # whether to force-start the scan (ignoring conditional aborts and failed module setups)\n    self.force_start = force_start\n\n    # scan output directory\n    self.output_dir = output_dir\n    # name of scan\n    self.scan_name = scan_name\n\n    # name of preset, default blank\n    self.name = name or \"\"\n    # preset description, default blank\n    self.description = description or \"\"\n\n    # custom conditions, evaluated during .bake()\n    self.conditions = []\n    if conditions is not None:\n        for condition in conditions:\n            self.conditions.append((self.name, condition))\n\n    # keeps track of loaded preset files to prevent infinite circular inclusions\n    self._preset_files_loaded = set()\n    if _exclude is not None:\n        for _filename in _exclude:\n            self._preset_files_loaded.add(Path(_filename).resolve())\n\n    # bbot core config\n    self.core = CORE.copy()\n    if config is None:\n        config = omegaconf.OmegaConf.create({})\n    # merge custom configs if 
specified by the user\n    self.core.merge_custom(config)\n\n    # log verbosity\n    # actual log verbosity isn't set until .bake()\n    self.verbose = verbose\n    self.debug = debug\n    self.silent = silent\n\n    # custom module directories\n    self._module_dirs = set()\n    self.module_dirs = module_dirs\n\n    # target / whitelist / blacklist\n    # these are temporary receptacles until they all get .baked() together\n    self._seeds = set(targets if targets else [])\n    self._whitelist = set(whitelist) if whitelist else whitelist\n    self._blacklist = set(blacklist if blacklist else [])\n\n    self._target = None\n\n    # \"presets\" is alias to \"include\"\n    if presets and include:\n        raise ValueError(\n            'Cannot use both \"presets\" and \"include\" args at the same time (presets is an alias to include). Please pick one or the other :)'\n        )\n    if presets and not include:\n        include = presets\n    # include other presets\n    if include and not isinstance(include, (list, tuple, set)):\n        include = [include]\n    if include:\n        for included_preset in include:\n            self.include_preset(included_preset)\n\n    # we don't fill self.modules yet (that happens in .bake())\n    self.explicit_scan_modules.update(set(modules))\n    self.explicit_output_modules.update(set(output_modules))\n    self.exclude_modules.update(set(exclude_modules))\n    self.flags.update(set(flags))\n    self.exclude_flags.update(set(exclude_flags))\n    self.require_flags.update(set(require_flags))\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.bake","title":"bake","text":"<pre><code>bake(scan=None)\n</code></pre> <p>Return a \"baked\" copy of this preset, ready for use by a BBOT scan.</p> <p>Baking a preset finalizes it by populating <code>preset.modules</code> based on flags, performing final validations, and substituting environment variables in preloaded modules. It also evaluates custom <code>conditions</code> as specified in the preset.</p> <p>This function is automatically called in Scanner.init(). There is no need to call it manually.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def bake(self, scan=None):\n    \"\"\"\n    Return a \"baked\" copy of this preset, ready for use by a BBOT scan.\n\n    Baking a preset finalizes it by populating `preset.modules` based on flags,\n    performing final validations, and substituting environment variables in preloaded modules.\n    It also evaluates custom `conditions` as specified in the preset.\n\n    This function is automatically called in Scanner.__init__(). There is no need to call it manually.\n    \"\"\"\n    self.log_debug(\"Getting baked\")\n    # create a copy of self\n    baked_preset = copy(self)\n    baked_preset.scan = scan\n    # copy core\n    baked_preset.core = self.core.copy()\n    # copy module loader\n    baked_preset._module_loader = self.module_loader.copy()\n    # prepare os environment\n    os_environ = baked_preset.environ.prepare()\n    # find and replace preloaded modules with os environ\n    # this is different from the config variable substitution because it modifies\n    #  the preloaded modules, i.e. 
their ansible playbooks\n    baked_preset.module_loader.find_and_replace(**os_environ)\n    # update os environ\n    os.environ.clear()\n    os.environ.update(os_environ)\n\n    # validate flags, config options\n    baked_preset.validate()\n\n    # validate log level options\n    baked_preset.apply_log_level(apply_core=scan is not None)\n\n    # assign baked preset to our scan\n    if scan is not None:\n        scan.preset = baked_preset\n\n    # now that our requirements / exclusions are validated, we can start enabling modules\n    # enable scan modules\n    for module in baked_preset.explicit_scan_modules:\n        baked_preset.add_module(module, module_type=\"scan\")\n\n    # enable output modules\n    output_modules_to_enable = set(baked_preset.explicit_output_modules)\n    default_output_modules = self.default_output_modules\n    output_module_override = any(m in default_output_modules for m in output_modules_to_enable)\n    # if none of the default output modules have been explicitly specified, enable them all\n    if not output_module_override:\n        output_modules_to_enable.update(self.default_output_modules)\n    for module in output_modules_to_enable:\n        baked_preset.add_module(module, module_type=\"output\", raise_error=False)\n\n    # enable internal modules\n    for internal_module, preloaded in self.default_internal_modules.items():\n        is_enabled = baked_preset.config.get(internal_module, True)\n        is_excluded = internal_module in baked_preset.exclude_modules\n        if is_enabled and not is_excluded:\n            baked_preset.add_module(internal_module, module_type=\"internal\", raise_error=False)\n\n    # disable internal modules if requested\n    for internal_module in baked_preset.internal_modules:\n        if baked_preset.config.get(internal_module, True) == False:\n            baked_preset.exclude_modules.add(internal_module)\n\n    # enable modules by flag\n    for flag in baked_preset.flags:\n        for module, preloaded in baked_preset.module_loader.preloaded().items():\n            module_flags = preloaded.get(\"flags\", [])\n            module_type = preloaded.get(\"type\", \"scan\")\n            if flag in module_flags:\n                self.log_debug(f'Enabling module \"{module}\" because it has flag \"{flag}\"')\n                baked_preset.add_module(module, module_type, raise_error=False)\n\n    # ensure we have output modules\n    if not baked_preset.output_modules:\n        for output_module in self.default_output_modules:\n            baked_preset.add_module(output_module, module_type=\"output\", raise_error=False)\n\n    # create target object\n    from bbot.scanner.target import BBOTTarget\n\n    baked_preset._target = BBOTTarget(\n        *list(self._seeds),\n        whitelist=self._whitelist,\n        blacklist=self._blacklist,\n        strict_scope=self.strict_scope,\n        scan=scan,\n    )\n\n    # evaluate conditions\n    if baked_preset.conditions:\n        from .conditions import ConditionEvaluator\n\n        evaluator = ConditionEvaluator(baked_preset)\n        evaluator.evaluate()\n\n    self._baked = True\n    return baked_preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_dict","title":"from_dict  <code>classmethod</code>","text":"<pre><code>from_dict(preset_dict, name=None, _exclude=None, _log=False)\n</code></pre> <p>Create a preset from a Python dictionary object.</p> <p>Parameters:</p> <ul> <li> <code>preset_dict</code>               (<code>dict</code>)           \u2013            <p>Preset 
in dictionary form</p> </li> <li> <code>name</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Name of preset</p> </li> <li> <code>_exclude</code>               (<code>list[Path]</code>, default:                   <code>None</code> )           \u2013            <p>Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.</p> </li> <li> <code>_log</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Preset</code>          \u2013            <p>The loaded preset</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_dict(cls, preset_dict, name=None, _exclude=None, _log=False):\n    \"\"\"\n    Create a preset from a Python dictionary object.\n\n    Args:\n        preset_dict (dict): Preset in dictionary form\n        name (str, optional): Name of preset\n        _exclude (list[Path], optional): Preset filenames to exclude from inclusion. Used internally to prevent infinite recursion in circular or self-referencing presets.\n        _log (bool, optional): Whether to enable logging for the preset. This will record which modules/flags are enabled, etc.\n\n    Returns:\n        Preset: The loaded preset\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset.from_dict({\"target\": [\"evilcorp.com\"], \"modules\": [\"portscan\"]})\n    \"\"\"\n    new_preset = cls(\n        *preset_dict.get(\"target\", []),\n        whitelist=preset_dict.get(\"whitelist\"),\n        blacklist=preset_dict.get(\"blacklist\"),\n        modules=preset_dict.get(\"modules\"),\n        output_modules=preset_dict.get(\"output_modules\"),\n        exclude_modules=preset_dict.get(\"exclude_modules\"),\n        flags=preset_dict.get(\"flags\"),\n        require_flags=preset_dict.get(\"require_flags\"),\n        exclude_flags=preset_dict.get(\"exclude_flags\"),\n        verbose=preset_dict.get(\"verbose\", False),\n        debug=preset_dict.get(\"debug\", False),\n        silent=preset_dict.get(\"silent\", False),\n        config=preset_dict.get(\"config\"),\n        module_dirs=preset_dict.get(\"module_dirs\", []),\n        include=list(preset_dict.get(\"include\", [])),\n        scan_name=preset_dict.get(\"scan_name\"),\n        output_dir=preset_dict.get(\"output_dir\"),\n        name=preset_dict.get(\"name\", name),\n        description=preset_dict.get(\"description\"),\n        conditions=preset_dict.get(\"conditions\", []),\n        _exclude=_exclude,\n        _log=_log,\n    )\n    return new_preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_yaml_file","title":"from_yaml_file  <code>classmethod</code>","text":"<pre><code>from_yaml_file(filename, _exclude=None, _log=False)\n</code></pre> <p>Create a preset from a YAML file. 
If the full path is not specified, BBOT will look in all the usual places for it.</p> <p>The file extension is optional.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_yaml_file(cls, filename, _exclude=None, _log=False):\n    \"\"\"\n    Create a preset from a YAML file. If the full path is not specified, BBOT will look in all the usual places for it.\n\n    The file extension is optional.\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset.from_yaml_file(\"/home/user/my_preset.yml\")\n    \"\"\"\n    filename = Path(filename).resolve()\n    try:\n        return _preset_cache[filename]\n    except KeyError:\n        if _exclude is None:\n            _exclude = set()\n        if _exclude is not None and filename in _exclude:\n            log.debug(f\"Not loading {filename} because it was already loaded {_exclude}\")\n            return False\n        log.debug(f\"Loading {filename} because it's not in excluded list ({_exclude})\")\n        _exclude = set(_exclude)\n        _exclude.add(filename)\n        try:\n            yaml_str = open(filename).read()\n        except FileNotFoundError:\n            raise PresetNotFoundError(f'Could not find preset at \"{filename}\" - file does not exist')\n        preset = cls.from_dict(\n            omegaconf.OmegaConf.create(yaml_str), name=filename.stem, _exclude=_exclude, _log=_log\n        )\n        preset._yaml_str = yaml_str\n        _preset_cache[filename] = preset\n        return preset\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.from_yaml_string","title":"from_yaml_string  <code>classmethod</code>","text":"<pre><code>from_yaml_string(yaml_preset)\n</code></pre> <p>Create a preset from a YAML string.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; yaml_string = '''\n&gt;&gt;&gt; target:\n&gt;&gt;&gt; - evilcorp.com\n&gt;&gt;&gt; modules:\n&gt;&gt;&gt; - portscan'''\n&gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>@classmethod\ndef from_yaml_string(cls, yaml_preset):\n    \"\"\"\n    Create a preset from a YAML file. 
If the full path is not specified, BBOT will look in all the usual places for it.\n\n    The file extension is optional.\n\n    Examples:\n        &gt;&gt;&gt; yaml_string = '''\n        &gt;&gt;&gt; target:\n        &gt;&gt;&gt; - evilcorp.com\n        &gt;&gt;&gt; modules:\n        &gt;&gt;&gt; - portscan'''\n        &gt;&gt;&gt; preset = Preset.from_yaml_string(yaml_string)\n    \"\"\"\n    return cls.from_dict(omegaconf.OmegaConf.create(yaml_preset))\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.include_preset","title":"include_preset","text":"<pre><code>include_preset(filename)\n</code></pre> <p>Load a preset from a yaml file and merge it into this one.</p> <p>If the full path is not specified, BBOT will look in all the usual places for it.</p> <p>The file extension is optional.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>Path</code>)           \u2013            <p>The preset YAML file to merge</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def include_preset(self, filename):\n    \"\"\"\n    Load a preset from a yaml file and merge it into this one.\n\n    If the full path is not specified, BBOT will look in all the usual places for it.\n\n    The file extension is optional.\n\n    Args:\n        filename (Path): The preset YAML file to merge\n\n    Examples:\n        &gt;&gt;&gt; preset.include_preset(\"/home/user/my_preset.yml\")\n    \"\"\"\n    self.log_debug(f'Including preset \"{filename}\"')\n    preset_filename = PRESET_PATH.find(filename)\n    preset_from_yaml = self.from_yaml_file(preset_filename, _exclude=self._preset_files_loaded)\n    if preset_from_yaml is not False:\n        self.merge(preset_from_yaml)\n        self._preset_files_loaded.add(preset_filename)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.merge","title":"merge","text":"<pre><code>merge(other)\n</code></pre> <p>Merge another preset into this one.</p> <p>If there are any config conflicts, <code>other</code> will win over <code>self</code>.</p> <p>Parameters:</p> <ul> <li> <code>other</code>               (<code>Preset</code>)           \u2013            <p>The preset to merge into this one.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n&gt;&gt;&gt; preset1.scan_modules\n['portscan']\n&gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n&gt;&gt;&gt; preset2.scan_modules\n['sslcert']\n&gt;&gt;&gt; preset1.merge(preset2)\n&gt;&gt;&gt; preset1.scan_modules\n['portscan', 'sslcert']\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def merge(self, other):\n    \"\"\"\n    Merge another preset into this one.\n\n    If there are any config conflicts, `other` will win over `self`.\n\n    Args:\n        other (Preset): The preset to merge into this one.\n\n    Examples:\n        &gt;&gt;&gt; preset1 = Preset(modules=[\"portscan\"])\n        &gt;&gt;&gt; preset1.scan_modules\n        ['portscan']\n        &gt;&gt;&gt; preset2 = Preset(modules=[\"sslcert\"])\n        &gt;&gt;&gt; preset2.scan_modules\n        ['sslcert']\n        &gt;&gt;&gt; preset1.merge(preset2)\n        &gt;&gt;&gt; preset1.scan_modules\n        ['portscan', 'sslcert']\n    \"\"\"\n    self.log_debug(f'Merging preset \"{other.name}\" into \"{self.name}\"')\n    # config\n    self.core.merge_custom(other.core.custom_config)\n    self.module_loader.core = 
self.core\n    # module dirs\n    # modules + flags\n    # establish requirements / exclusions first\n    self.exclude_modules.update(other.exclude_modules)\n    self.require_flags.update(other.require_flags)\n    self.exclude_flags.update(other.exclude_flags)\n    # then it's okay to start enabling modules\n    self.explicit_scan_modules.update(other.explicit_scan_modules)\n    self.explicit_output_modules.update(other.explicit_output_modules)\n    self.flags.update(other.flags)\n\n    # target / scope\n    self._seeds.update(other._seeds)\n    # leave whitelist as None until we encounter one\n    if other._whitelist is not None:\n        if self._whitelist is None:\n            self._whitelist = set(other._whitelist)\n        else:\n            self._whitelist.update(other._whitelist)\n    self._blacklist.update(other._blacklist)\n\n    # module dirs\n    self.module_dirs = self.module_dirs.union(other.module_dirs)\n\n    # log verbosity\n    if other.silent:\n        self.silent = other.silent\n    if other.verbose:\n        self.verbose = other.verbose\n    if other.debug:\n        self.debug = other.debug\n    # scan name\n    if other.scan_name is not None:\n        self.scan_name = other.scan_name\n    if other.output_dir is not None:\n        self.output_dir = other.output_dir\n    # conditions\n    if other.conditions:\n        self.conditions.extend(other.conditions)\n    # misc\n    self.force_start = self.force_start | other.force_start\n    self._cli = self._cli | other._cli\n    # transfer args\n    if other._args is not None:\n        self._args = other._args\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.parse_args","title":"parse_args","text":"<pre><code>parse_args()\n</code></pre> <p>Parse CLI arguments, and merge them into this preset.</p> <p>Used in <code>cli.py</code>.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def parse_args(self):\n    \"\"\"\n    Parse CLI arguments, and merge them into this preset.\n\n    Used in `cli.py`.\n    \"\"\"\n    self._cli = True\n    self.merge(self.args.preset_from_args())\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.presets_table","title":"presets_table","text":"<pre><code>presets_table(include_modules=True)\n</code></pre> <p>Return a table of all the presets in the form of a string</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def presets_table(self, include_modules=True):\n    \"\"\"\n    Return a table of all the presets in the form of a string\n    \"\"\"\n    table = []\n    header = [\"Preset\", \"Category\", \"Description\", \"# Modules\"]\n    if include_modules:\n        header.append(\"Modules\")\n    for yaml_file, (loaded_preset, category, preset_path, original_file) in self.all_presets.items():\n        loaded_preset = loaded_preset.bake()\n        num_modules = f\"{len(loaded_preset.scan_modules):,}\"\n        row = [loaded_preset.name, category, loaded_preset.description, num_modules]\n        if include_modules:\n            row.append(\", \".join(sorted(loaded_preset.scan_modules)))\n        table.append(row)\n    return make_table(table, header)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.to_dict","title":"to_dict","text":"<pre><code>to_dict(include_target=False, full_config=False, redact_secrets=False)\n</code></pre> <p>Convert this preset into a Python dictionary.</p> <p>Parameters:</p> <ul> <li> <code>include_target</code>               (<code>bool</code>, default:                   <code>False</code> )     
      \u2013            <p>If True, include target, whitelist, and blacklist in the dictionary</p> </li> <li> <code>full_config</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include the entire config, not just what's changed from the defaults.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>The preset in dictionary form</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n&gt;&gt;&gt; preset.to_dict()\n{\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def to_dict(self, include_target=False, full_config=False, redact_secrets=False):\n    \"\"\"\n    Convert this preset into a Python dictionary.\n\n    Args:\n        include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n        full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n\n    Returns:\n        dict: The preset in dictionary form\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n        &gt;&gt;&gt; preset.to_dict()\n        {\"flags\": [\"subdomain-enum\"], \"modules\": [\"portscan\"]}\n    \"\"\"\n    preset_dict = {}\n\n    if self.description:\n        preset_dict[\"description\"] = self.description\n\n    # config\n    if full_config:\n        config = self.core.config\n    else:\n        config = self.core.custom_config\n    config = omegaconf.OmegaConf.to_object(config)\n    if redact_secrets:\n        config = self.core.no_secrets_config(config)\n    if config:\n        preset_dict[\"config\"] = config\n\n    # scope\n    if include_target:\n        target = sorted(self.target.seeds.inputs)\n        whitelist = []\n        if self.target.whitelist is not None:\n            whitelist = sorted(self.target.whitelist.inputs)\n        blacklist = sorted(self.target.blacklist.inputs)\n        if target:\n            preset_dict[\"target\"] = target\n        if whitelist and whitelist != target:\n            preset_dict[\"whitelist\"] = whitelist\n        if blacklist:\n            preset_dict[\"blacklist\"] = blacklist\n\n    # flags + modules\n    if self.require_flags:\n        preset_dict[\"require_flags\"] = sorted(self.require_flags)\n    if self.exclude_flags:\n        preset_dict[\"exclude_flags\"] = sorted(self.exclude_flags)\n    if self.exclude_modules:\n        preset_dict[\"exclude_modules\"] = sorted(self.exclude_modules)\n    if self.flags:\n        preset_dict[\"flags\"] = sorted(self.flags)\n    if self.explicit_scan_modules:\n        preset_dict[\"modules\"] = sorted(self.explicit_scan_modules)\n    if self.explicit_output_modules:\n        preset_dict[\"output_modules\"] = sorted(self.explicit_output_modules)\n\n    # log verbosity\n    if self.verbose:\n        preset_dict[\"verbose\"] = True\n    if self.debug:\n        preset_dict[\"debug\"] = True\n    if self.silent:\n        preset_dict[\"silent\"] = True\n\n    # misc scan options\n    if self.scan_name:\n        preset_dict[\"scan_name\"] = self.scan_name\n    if self.scan_name:\n        preset_dict[\"output_dir\"] = self.output_dir\n\n    # conditions\n    if self.conditions:\n        preset_dict[\"conditions\"] = [c[-1] for c in self.conditions]\n\n    return 
preset_dict\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.to_yaml","title":"to_yaml","text":"<pre><code>to_yaml(include_target=False, full_config=False, sort_keys=False)\n</code></pre> <p>Return the preset in the form of a YAML string.</p> <p>Parameters:</p> <ul> <li> <code>include_target</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include target, whitelist, and blacklist in the dictionary</p> </li> <li> <code>full_config</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, include the entire config, not just what's changed from the defaults.</p> </li> <li> <code>sort_keys</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If True, sort YAML keys alphabetically</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The preset in the form of a YAML string</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n&gt;&gt;&gt; print(preset.to_yaml())\nflags:\n- subdomain-enum\nmodules:\n- portscan\n</code></pre> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def to_yaml(self, include_target=False, full_config=False, sort_keys=False):\n    \"\"\"\n    Return the preset in the form of a YAML string.\n\n    Args:\n        include_target (bool, optional): If True, include target, whitelist, and blacklist in the dictionary\n        full_config (bool, optional): If True, include the entire config, not just what's changed from the defaults.\n        sort_keys (bool, optional): If True, sort YAML keys alphabetically\n\n    Returns:\n        str: The preset in the form of a YAML string\n\n    Examples:\n        &gt;&gt;&gt; preset = Preset(flags=[\"subdomain-enum\"], modules=[\"portscan\"])\n        &gt;&gt;&gt; print(preset.to_yaml())\n        flags:\n        - subdomain-enum\n        modules:\n        - portscan\n    \"\"\"\n    preset_dict = self.to_dict(include_target=include_target, full_config=full_config)\n    return yaml.dump(preset_dict, sort_keys=sort_keys)\n</code></pre>"},{"location":"dev/presets/#bbot.scanner.Preset.validate","title":"validate","text":"<pre><code>validate()\n</code></pre> <p>Validate module/flag exclusions/requirements, and CLI config options if applicable.</p> Source code in <code>bbot/scanner/preset/preset.py</code> <pre><code>def validate(self):\n    \"\"\"\n    Validate module/flag exclusions/requirements, and CLI config options if applicable.\n    \"\"\"\n    if self._cli:\n        self.args.validate()\n\n    # validate excluded modules\n    for excluded_module in self.exclude_modules:\n        if not excluded_module in self.module_loader.all_module_choices:\n            raise ValidationError(\n                get_closest_match(excluded_module, self.module_loader.all_module_choices, msg=\"module\")\n            )\n    # validate excluded flags\n    for excluded_flag in self.exclude_flags:\n        if not excluded_flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(excluded_flag, self.module_loader.flag_choices, msg=\"flag\"))\n    # validate required flags\n    for required_flag in self.require_flags:\n        if not required_flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(required_flag, 
self.module_loader.flag_choices, msg=\"flag\"))\n    # validate flags\n    for flag in self.flags:\n        if not flag in self.module_loader.flag_choices:\n            raise ValidationError(get_closest_match(flag, self.module_loader.flag_choices, msg=\"flag\"))\n</code></pre>"},{"location":"dev/scanner/","title":"Scanner","text":""},{"location":"dev/scanner/#bbot.scanner.Scanner","title":"Scanner","text":"<p>A class representing a single BBOT scan</p> <p>Examples:</p> <p>Create scan with multiple targets:</p> <pre><code>&gt;&gt;&gt; my_scan = Scanner(\"evilcorp.com\", \"1.2.3.0/24\", modules=[\"portscan\", \"sslcert\", \"httpx\"])\n</code></pre> <p>Create scan with custom config:</p> <pre><code>&gt;&gt;&gt; config = {\"http_proxy\": \"http://127.0.0.1:8080\", \"modules\": {\"portscan\": {\"top_ports\": 2000}}}\n&gt;&gt;&gt; my_scan = Scanner(\"www.evilcorp.com\", modules=[\"portscan\", \"httpx\"], config=config)\n</code></pre> <p>Start the scan, iterating over events as they're discovered (synchronous):</p> <pre><code>&gt;&gt;&gt; for event in my_scan.start():\n&gt;&gt;&gt;     print(event)\n</code></pre> <p>Start the scan, iterating over events as they're discovered (asynchronous):</p> <pre><code>&gt;&gt;&gt; async for event in my_scan.async_start():\n&gt;&gt;&gt;     print(event)\n</code></pre> <p>Start the scan without consuming events (synchronous):</p> <pre><code>&gt;&gt;&gt; my_scan.start_without_generator()\n</code></pre> <p>Start the scan without consuming events (asynchronous):</p> <pre><code>&gt;&gt;&gt; await my_scan.async_start_without_generator()\n</code></pre> <p>Attributes:</p> <ul> <li> <code>status</code>               (<code>str</code>)           \u2013            <p>Status of scan, representing its current state. It can take on the following string values, each of which is mapped to an integer code in <code>_status_codes</code>: <pre><code>- \"NOT_STARTED\" (0): Initial status before the scan starts.\n- \"STARTING\" (1): Status when the scan is initializing.\n- \"RUNNING\" (2): Status when the scan is in progress.\n- \"FINISHING\" (3): Status when the scan is in the process of finalizing.\n- \"CLEANING_UP\" (4): Status when the scan is cleaning up resources.\n- \"ABORTING\" (5): Status when the scan is in the process of being aborted.\n- \"ABORTED\" (6): Status when the scan has been aborted.\n- \"FAILED\" (7): Status when the scan has encountered a failure.\n- \"FINISHED\" (8): Status when the scan has successfully completed.\n</code></pre></p> </li> <li> <code>_status_code</code>               (<code>int</code>)           \u2013            <p>The numerical representation of the current scan status, stored for internal use. 
It is mapped according to the values in <code>_status_codes</code>.</p> </li> <li> <code>target</code>               (<code>Target</code>)           \u2013            <p>Target of scan (alias to <code>self.preset.target</code>).</p> </li> <li> <code>preset</code>               (<code>Preset</code>)           \u2013            <p>The main scan Preset in its baked form.</p> </li> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>BBOT config (alias to <code>self.preset.config</code>).</p> </li> <li> <code>whitelist</code>               (<code>Target</code>)           \u2013            <p>Scan whitelist (by default this is the same as <code>target</code>) (alias to <code>self.preset.whitelist</code>).</p> </li> <li> <code>blacklist</code>               (<code>Target</code>)           \u2013            <p>Scan blacklist (this takes ultimate precedence) (alias to <code>self.preset.blacklist</code>).</p> </li> <li> <code>helpers</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>Helper containing various reusable functions, regexes, etc. (alias to <code>self.preset.helpers</code>).</p> </li> <li> <code>output_dir</code>               (<code>Path</code>)           \u2013            <p>Output directory for scan (alias to <code>self.preset.output_dir</code>).</p> </li> <li> <code>name</code>               (<code>str</code>)           \u2013            <p>Name of scan (alias to <code>self.preset.scan_name</code>).</p> </li> <li> <code>dispatcher</code>               (<code>Dispatcher</code>)           \u2013            <p>Triggers certain events when the scan <code>status</code> changes.</p> </li> <li> <code>modules</code>               (<code>dict</code>)           \u2013            <p>Holds all loaded modules in this format: <code>{\"module_name\": Module()}</code>.</p> </li> <li> <code>stats</code>               (<code>ScanStats</code>)           \u2013            <p>Holds high-level scan statistics such as how many events have been produced and consumed by each module.</p> </li> <li> <code>home</code>               (<code>Path</code>)           \u2013            <p>Base output directory of the scan (default: <code>~/.bbot/scans/&lt;scan_name&gt;</code>).</p> </li> <li> <code>running</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently running.</p> </li> <li> <code>stopping</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently stopping.</p> </li> <li> <code>stopped</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is currently stopped.</p> </li> <li> <code>aborting</code>               (<code>bool</code>)           \u2013            <p>Whether the scan is aborted or currently aborting.</p> </li> </ul> Notes <ul> <li>The status is read-only once set to \"ABORTING\" until it transitions to \"ABORTED.\"</li> <li>Invalid statuses are logged but not applied.</li> <li>Setting a status will trigger the <code>on_status</code> event in the dispatcher.</li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>class Scanner:\n    \"\"\"A class representing a single BBOT scan\n\n    Examples:\n        Create scan with multiple targets:\n        &gt;&gt;&gt; my_scan = Scanner(\"evilcorp.com\", \"1.2.3.0/24\", modules=[\"portscan\", \"sslcert\", \"httpx\"])\n\n        Create scan with custom config:\n        &gt;&gt;&gt; config = {\"http_proxy\": \"http://127.0.0.1:8080\", 
\"modules\": {\"portscan\": {\"top_ports\": 2000}}}\n        &gt;&gt;&gt; my_scan = Scanner(\"www.evilcorp.com\", modules=[\"portscan\", \"httpx\"], config=config)\n\n        Start the scan, iterating over events as they're discovered (synchronous):\n        &gt;&gt;&gt; for event in my_scan.start():\n        &gt;&gt;&gt;     print(event)\n\n        Start the scan, iterating over events as they're discovered (asynchronous):\n        &gt;&gt;&gt; async for event in my_scan.async_start():\n        &gt;&gt;&gt;     print(event)\n\n        Start the scan without consuming events (synchronous):\n        &gt;&gt;&gt; my_scan.start_without_generator()\n\n        Start the scan without consuming events (asynchronous):\n        &gt;&gt;&gt; await my_scan.async_start_without_generator()\n\n    Attributes:\n        status (str): Status of scan, representing its current state. It can take on the following string values, each of which is mapped to an integer code in `_status_codes`:\n            ```markdown\n            - \"NOT_STARTED\" (0): Initial status before the scan starts.\n            - \"STARTING\" (1): Status when the scan is initializing.\n            - \"RUNNING\" (2): Status when the scan is in progress.\n            - \"FINISHING\" (3): Status when the scan is in the process of finalizing.\n            - \"CLEANING_UP\" (4): Status when the scan is cleaning up resources.\n            - \"ABORTING\" (5): Status when the scan is in the process of being aborted.\n            - \"ABORTED\" (6): Status when the scan has been aborted.\n            - \"FAILED\" (7): Status when the scan has encountered a failure.\n            - \"FINISHED\" (8): Status when the scan has successfully completed.\n            ```\n        _status_code (int): The numerical representation of the current scan status, stored for internal use. It is mapped according to the values in `_status_codes`.\n        target (Target): Target of scan (alias to `self.preset.target`).\n        preset (Preset): The main scan Preset in its baked form.\n        config (omegaconf.dictconfig.DictConfig): BBOT config (alias to `self.preset.config`).\n        whitelist (Target): Scan whitelist (by default this is the same as `target`) (alias to `self.preset.whitelist`).\n        blacklist (Target): Scan blacklist (this takes ultimate precedence) (alias to `self.preset.blacklist`).\n        helpers (ConfigAwareHelper): Helper containing various reusable functions, regexes, etc. 
(alias to `self.preset.helpers`).\n        output_dir (pathlib.Path): Output directory for scan (alias to `self.preset.output_dir`).\n        name (str): Name of scan (alias to `self.preset.scan_name`).\n        dispatcher (Dispatcher): Triggers certain events when the scan `status` changes.\n        modules (dict): Holds all loaded modules in this format: `{\"module_name\": Module()}`.\n        stats (ScanStats): Holds high-level scan statistics such as how many events have been produced and consumed by each module.\n        home (pathlib.Path): Base output directory of the scan (default: `~/.bbot/scans/&lt;scan_name&gt;`).\n        running (bool): Whether the scan is currently running.\n        stopping (bool): Whether the scan is currently stopping.\n        stopped (bool): Whether the scan is currently stopped.\n        aborting (bool): Whether the scan is aborted or currently aborting.\n\n    Notes:\n        - The status is read-only once set to \"ABORTING\" until it transitions to \"ABORTED.\"\n        - Invalid statuses are logged but not applied.\n        - Setting a status will trigger the `on_status` event in the dispatcher.\n    \"\"\"\n\n    _status_codes = {\n        \"NOT_STARTED\": 0,\n        \"STARTING\": 1,\n        \"RUNNING\": 2,\n        \"FINISHING\": 3,\n        \"CLEANING_UP\": 4,\n        \"ABORTING\": 5,\n        \"ABORTED\": 6,\n        \"FAILED\": 7,\n        \"FINISHED\": 8,\n    }\n\n    def __init__(\n        self,\n        *targets,\n        scan_id=None,\n        dispatcher=None,\n        **kwargs,\n    ):\n        \"\"\"\n        Initializes the Scanner class.\n\n        If a premade `preset` is specified, it will be used for the scan.\n        Otherwise, `Scan` accepts the same arguments as `Preset`, which are passed through and used to create a new preset.\n\n        Args:\n            *targets (list[str], optional): Scan targets (passed through to `Preset`).\n            preset (Preset, optional): Preset to use for the scan.\n            scan_id (str, optional): Unique identifier for the scan. Auto-generates if None.\n            dispatcher (Dispatcher, optional): Dispatcher object to use. 
Defaults to new Dispatcher.\n            **kwargs (list[str], optional): Additional keyword arguments (passed through to `Preset`).\n        \"\"\"\n        self._root_event = None\n        self._finish_event = None\n        self.start_time = None\n        self.end_time = None\n        self.duration = None\n        self.duration_human = None\n        self.duration_seconds = None\n\n        self._success = False\n\n        if scan_id is not None:\n            self.id = str(scan_id)\n        else:\n            self.id = f\"SCAN:{sha1(rand_string(20)).hexdigest()}\"\n\n        custom_preset = kwargs.pop(\"preset\", None)\n        kwargs[\"_log\"] = True\n\n        from .preset import Preset\n\n        base_preset = Preset(*targets, **kwargs)\n\n        if custom_preset is not None:\n            if not isinstance(custom_preset, Preset):\n                raise ValidationError(f'Preset must be of type Preset, not \"{type(custom_preset).__name__}\"')\n            base_preset.merge(custom_preset)\n\n        self.preset = base_preset.bake(self)\n\n        # scan name\n        if self.preset.scan_name is None:\n            tries = 0\n            while 1:\n                if tries &gt; 5:\n                    scan_name = f\"{rand_string(4)}_{rand_string(4)}\"\n                    break\n                scan_name = random_name()\n                if self.preset.output_dir is not None:\n                    home_path = Path(self.preset.output_dir).resolve() / scan_name\n                else:\n                    home_path = self.preset.bbot_home / \"scans\" / scan_name\n                if not home_path.exists():\n                    break\n                tries += 1\n        else:\n            scan_name = str(self.preset.scan_name)\n        self.name = scan_name.replace(\"/\", \"_\")\n\n        # make sure the preset has a description\n        if not self.preset.description:\n            self.preset.description = self.name\n\n        # scan output dir\n        if self.preset.output_dir is not None:\n            self.home = Path(self.preset.output_dir).resolve() / self.name\n        else:\n            self.home = self.preset.bbot_home / \"scans\" / self.name\n\n        self._status = \"NOT_STARTED\"\n        self._status_code = 0\n\n        self.modules = OrderedDict({})\n        self._modules_loaded = False\n        self.dummy_modules = {}\n\n        if dispatcher is None:\n            from .dispatcher import Dispatcher\n\n            self.dispatcher = Dispatcher()\n        else:\n            self.dispatcher = dispatcher\n        self.dispatcher.set_scan(self)\n\n        # scope distance\n        self.scope_config = self.config.get(\"scope\", {})\n        self.scope_search_distance = max(0, int(self.scope_config.get(\"search_distance\", 0)))\n        self.scope_report_distance = int(self.scope_config.get(\"report_distance\", 1))\n\n        # web config\n        self.web_config = self.config.get(\"web\", {})\n        self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n        self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n        self.web_spider_links_per_page = self.web_config.get(\"spider_links_per_page\", 20)\n        max_redirects = self.web_config.get(\"http_max_redirects\", 5)\n        self.web_max_redirects = max(max_redirects, self.web_spider_distance)\n        self.http_proxy = self.web_config.get(\"http_proxy\", \"\")\n        self.http_timeout = self.web_config.get(\"http_timeout\", 10)\n        self.httpx_timeout = self.web_config.get(\"httpx_timeout\", 
5)\n        self.http_retries = self.web_config.get(\"http_retries\", 1)\n        self.httpx_retries = self.web_config.get(\"httpx_retries\", 1)\n        self.useragent = self.web_config.get(\"user_agent\", \"BBOT\")\n        # custom HTTP headers warning\n        self.custom_http_headers = self.web_config.get(\"http_headers\", {})\n        if self.custom_http_headers:\n            self.warning(\n                \"You have enabled custom HTTP headers. These will be attached to all in-scope requests and all requests made by httpx.\"\n            )\n\n        # url file extensions\n        self.url_extension_blacklist = set(e.lower() for e in self.config.get(\"url_extension_blacklist\", []))\n        self.url_extension_httpx_only = set(e.lower() for e in self.config.get(\"url_extension_httpx_only\", []))\n\n        # url querystring behavior\n        self.url_querystring_remove = self.config.get(\"url_querystring_remove\", True)\n\n        # blob inclusion\n        self._file_blobs = self.config.get(\"file_blobs\", False)\n        self._folder_blobs = self.config.get(\"folder_blobs\", False)\n\n        # how often to print scan status\n        self.status_frequency = self.config.get(\"status_frequency\", 15)\n\n        from .stats import ScanStats\n\n        self.stats = ScanStats(self)\n\n        self._prepped = False\n        self._finished_init = False\n        self._new_activity = False\n        self._cleanedup = False\n        self._omitted_event_types = None\n\n        self.__loop = None\n        self._manager_worker_loop_tasks = []\n        self.init_events_task = None\n        self.ticker_task = None\n        self.dispatcher_tasks = []\n\n        self._stopping = False\n\n        self._dns_strings = None\n        self._dns_regexes = None\n        self._dns_regexes_yara = None\n        self._dns_yara_rules_uncompiled = None\n        self._dns_yara_rules = None\n\n        self.__log_handlers = None\n        self._log_handler_backup = []\n\n    async def _prep(self):\n        \"\"\"\n        Creates the scan's output folder, loads its modules, and calls their .setup() methods.\n        \"\"\"\n\n        # update the master PID\n        SHARED_INTERPRETER_STATE.update_scan_pid()\n\n        self.helpers.mkdir(self.home)\n        if not self._prepped:\n            # save scan preset\n            with open(self.home / \"preset.yml\", \"w\") as f:\n                f.write(self.preset.to_yaml())\n\n            # log scan overview\n            start_msg = f\"Scan seeded with {len(self.seeds):,} targets\"\n            details = []\n            if self.whitelist != self.target:\n                details.append(f\"{len(self.whitelist):,} in whitelist\")\n            if self.blacklist:\n                details.append(f\"{len(self.blacklist):,} in blacklist\")\n            if details:\n                start_msg += f\" ({', '.join(details)})\"\n            self.hugeinfo(start_msg)\n\n            # load scan modules (this imports and instantiates them)\n            # up to this point they were only preloaded\n            await self.load_modules()\n\n            # run each module's .setup() method\n            succeeded, hard_failed, soft_failed = await self.setup_modules()\n\n            # intercept modules get sewn together like human centipede\n            self.intercept_modules = [m for m in self.modules.values() if m._intercept]\n            for i, intercept_module in enumerate(self.intercept_modules[1:]):\n                prev_intercept_module = self.intercept_modules[i]\n                
self.debug(\n                    f\"Setting intercept module {intercept_module.name}._incoming_event_queue to previous intercept module {prev_intercept_module.name}.outgoing_event_queue\"\n                )\n                interqueue = asyncio.Queue()\n                intercept_module._incoming_event_queue = interqueue\n                prev_intercept_module._outgoing_event_queue = interqueue\n\n            # abort if there are no output modules\n            num_output_modules = len([m for m in self.modules.values() if m._type == \"output\"])\n            if num_output_modules &lt; 1:\n                raise ScanError(\"Failed to load output modules. Aborting.\")\n            # abort if any of the module .setup()s hard-failed (i.e. they errored or returned False)\n            total_failed = len(hard_failed + soft_failed)\n            if hard_failed:\n                msg = f\"Setup hard-failed for {len(hard_failed):,} modules ({','.join(hard_failed)})\"\n                self._fail_setup(msg)\n\n            total_modules = total_failed + len(self.modules)\n            success_msg = f\"Setup succeeded for {len(self.modules):,}/{total_modules:,} modules.\"\n\n            self.success(success_msg)\n            self._prepped = True\n\n    def start(self):\n        for event in async_to_sync_gen(self.async_start()):\n            yield event\n\n    def start_without_generator(self):\n        for event in async_to_sync_gen(self.async_start()):\n            pass\n\n    async def async_start_without_generator(self):\n        async for event in self.async_start():\n            pass\n\n    async def async_start(self):\n        \"\"\" \"\"\"\n        self.start_time = datetime.now()\n        self.root_event.data[\"started_at\"] = self.start_time.isoformat()\n        try:\n            await self._prep()\n\n            self._start_log_handlers()\n            self.trace(f'Ran BBOT {__version__} at {self.start_time}, command: {\" \".join(sys.argv)}')\n            self.trace(f\"Target: {self.preset.target.json}\")\n            self.trace(f\"Preset: {self.preset.to_dict(redact_secrets=True)}\")\n\n            if not self.target:\n                self.warning(f\"No scan targets specified\")\n\n            # start status ticker\n            self.ticker_task = asyncio.create_task(\n                self._status_ticker(self.status_frequency), name=f\"{self.name}._status_ticker()\"\n            )\n\n            self.status = \"STARTING\"\n\n            if not self.modules:\n                self.error(f\"No modules loaded\")\n                self.status = \"FAILED\"\n                return\n            else:\n                self.hugesuccess(f\"Starting scan {self.name}\")\n\n            await self.dispatcher.on_start(self)\n\n            self.status = \"RUNNING\"\n            self._start_modules()\n            self.verbose(f\"{len(self.modules):,} modules started\")\n\n            # distribute seed events\n            self.init_events_task = asyncio.create_task(\n                self.ingress_module.init_events(self.target.seeds.events),\n                name=f\"{self.name}.ingress_module.init_events()\",\n            )\n\n            # main scan loop\n            while 1:\n                # abort if we're aborting\n                if self.aborting:\n                    self._drain_queues()\n                    break\n\n                # yield events as they come (async for event in scan.async_start())\n                if \"python\" in self.modules:\n                    events, finish = await 
self.modules[\"python\"]._events_waiting(batch_size=-1)\n                    for e in events:\n                        yield e\n                    if events:\n                        continue\n\n                # break if initialization finished and the scan is no longer active\n                if self._finished_init and self.modules_finished:\n                    new_activity = await self.finish()\n                    if not new_activity:\n                        self._success = True\n                        scan_finish_event = await self._mark_finished()\n                        yield scan_finish_event\n                        break\n\n                await asyncio.sleep(0.1)\n\n            self._success = True\n\n        except BaseException as e:\n            if self.helpers.in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n                self.stop()\n                self._success = True\n            else:\n                try:\n                    raise\n                except ScanError as e:\n                    self.error(f\"{e}\")\n\n                except BBOTError as e:\n                    self.critical(f\"Error during scan: {e}\")\n\n                except Exception:\n                    self.critical(f\"Unexpected error during scan:\\n{traceback.format_exc()}\")\n\n        finally:\n            tasks = self._cancel_tasks()\n            self.debug(f\"Awaiting {len(tasks):,} tasks\")\n            for task in tasks:\n                # self.debug(f\"Awaiting {task}\")\n                with contextlib.suppress(BaseException):\n                    await asyncio.wait_for(task, timeout=0.1)\n            self.debug(f\"Awaited {len(tasks):,} tasks\")\n            await self._report()\n            await self._cleanup()\n\n            await self.dispatcher.on_finish(self)\n\n            self._stop_log_handlers()\n\n    async def _mark_finished(self):\n        log_fn = self.hugesuccess\n        if self.status == \"ABORTING\":\n            status = \"ABORTED\"\n            log_fn = self.hugewarning\n        elif not self._success:\n            status = \"FAILED\"\n            log_fn = self.critical\n        else:\n            status = \"FINISHED\"\n\n        self.end_time = datetime.now()\n        self.duration = self.end_time - self.start_time\n        self.duration_seconds = self.duration.total_seconds()\n        self.duration_human = self.helpers.human_timedelta(self.duration)\n\n        status_message = f\"Scan {self.name} completed in {self.duration_human} with status {status}\"\n\n        scan_finish_event = self.finish_event(status_message, status)\n\n        # queue final scan event with output modules\n        output_modules = [m for m in self.modules.values() if m._type == \"output\" and m.name != \"python\"]\n        for m in output_modules:\n            await m.queue_event(scan_finish_event)\n        # wait until output modules are flushed\n        while 1:\n            modules_finished = all([m.finished for m in output_modules])\n            if modules_finished:\n                break\n            await asyncio.sleep(0.05)\n\n        self.status = status\n        log_fn(status_message)\n        return scan_finish_event\n\n    def _start_modules(self):\n        self.verbose(f\"Starting module worker loops\")\n        for module in self.modules.values():\n            module.start()\n\n    async def setup_modules(self, remove_failed=True):\n        \"\"\"Asynchronously initializes all loaded modules by invoking their `setup()` methods.\n\n        Args:\n       
     remove_failed (bool): Flag indicating whether to remove modules that fail setup.\n\n        Returns:\n            tuple:\n                succeeded - List of modules that successfully set up.\n                hard_failed - List of modules that encountered a hard failure during setup.\n                soft_failed - List of modules that encountered a soft failure during setup.\n\n        Raises:\n            ScanError: If no output modules could be loaded.\n\n        Notes:\n            Hard-failed modules are set to an error state and removed if `remove_failed` is True.\n            Soft-failed modules are not set to an error state but are also removed if `remove_failed` is True.\n        \"\"\"\n        await self.load_modules()\n        self.verbose(f\"Setting up modules\")\n        succeeded = []\n        hard_failed = []\n        soft_failed = []\n\n        async for task in self.helpers.as_completed([m._setup() for m in self.modules.values()]):\n            module, status, msg = await task\n            if status == True:\n                self.debug(f\"Setup succeeded for {module.name} ({msg})\")\n                succeeded.append(module.name)\n            elif status == False:\n                self.warning(f\"Setup hard-failed for {module.name}: {msg}\")\n                self.modules[module.name].set_error_state()\n                hard_failed.append(module.name)\n            else:\n                self.info(f\"Setup soft-failed for {module.name}: {msg}\")\n                soft_failed.append(module.name)\n            if (not status) and (module._intercept or remove_failed):\n                # if a intercept module fails setup, we always remove it\n                self.modules.pop(module.name)\n\n        return succeeded, hard_failed, soft_failed\n\n    async def load_modules(self):\n        \"\"\"Asynchronously import and instantiate all scan modules, including internal and output modules.\n\n        This method is automatically invoked by `setup_modules()`. It performs several key tasks in the following sequence:\n\n        1. Install dependencies for each module via `self.helpers.depsinstaller.install()`.\n        2. Load scan modules and updates the `modules` dictionary.\n        3. Load internal modules and updates the `modules` dictionary.\n        4. Load output modules and updates the `modules` dictionary.\n        5. 
Sorts modules based on their `_priority` attribute.\n\n        If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is True).\n\n        Attributes:\n            succeeded, failed (tuple): A tuple containing lists of modules that succeeded or failed during the dependency installation.\n            loaded_modules, loaded_internal_modules, loaded_output_modules (dict): Dictionaries of successfully loaded modules.\n            failed, failed_internal, failed_output (list): Lists of module names that failed to load.\n\n        Raises:\n            ScanError: If any module dependencies fail to install or modules fail to load, and if `self.force_start` is False.\n\n        Returns:\n            None\n\n        Note:\n            After all modules are loaded, they are sorted by `_priority` and stored in the `modules` dictionary.\n        \"\"\"\n        if not self._modules_loaded:\n            if not self.preset.modules:\n                self.warning(f\"No modules to load\")\n                return\n\n            if not self.preset.scan_modules:\n                self.warning(f\"No scan modules to load\")\n\n            # install module dependencies\n            succeeded, failed = await self.helpers.depsinstaller.install(*self.preset.modules)\n            if failed:\n                msg = f\"Failed to install dependencies for {len(failed):,} modules: {','.join(failed)}\"\n                self._fail_setup(msg)\n            modules = sorted([m for m in self.preset.scan_modules if m in succeeded])\n            output_modules = sorted([m for m in self.preset.output_modules if m in succeeded])\n            internal_modules = sorted([m for m in self.preset.internal_modules if m in succeeded])\n\n            # Load scan modules\n            self.verbose(f\"Loading {len(modules):,} scan modules: {','.join(modules)}\")\n            loaded_modules, failed = self._load_modules(modules)\n            self.modules.update(loaded_modules)\n            if len(failed) &gt; 0:\n                msg = f\"Failed to load {len(failed):,} scan modules: {','.join(failed)}\"\n                self._fail_setup(msg)\n            if loaded_modules:\n                self.info(\n                    f\"Loaded {len(loaded_modules):,}/{len(self.preset.scan_modules):,} scan modules ({','.join(loaded_modules)})\"\n                )\n\n            # Load internal modules\n            self.verbose(f\"Loading {len(internal_modules):,} internal modules: {','.join(internal_modules)}\")\n            loaded_internal_modules, failed_internal = self._load_modules(internal_modules)\n            self.modules.update(loaded_internal_modules)\n            if len(failed_internal) &gt; 0:\n                msg = f\"Failed to load {len(failed_internal):,} internal modules: {','.join(failed_internal)}\"\n                self._fail_setup(msg)\n            if loaded_internal_modules:\n                self.info(\n                    f\"Loaded {len(loaded_internal_modules):,}/{len(self.preset.internal_modules):,} internal modules ({','.join(loaded_internal_modules)})\"\n                )\n\n            # Load output modules\n            self.verbose(f\"Loading {len(output_modules):,} output modules: {','.join(output_modules)}\")\n            loaded_output_modules, failed_output = self._load_modules(output_modules)\n            self.modules.update(loaded_output_modules)\n            if len(failed_output) &gt; 0:\n                msg = f\"Failed to load 
{len(failed_output):,} output modules: {','.join(failed_output)}\"\n                self._fail_setup(msg)\n            if loaded_output_modules:\n                self.info(\n                    f\"Loaded {len(loaded_output_modules):,}/{len(self.preset.output_modules):,} output modules, ({','.join(loaded_output_modules)})\"\n                )\n\n            # builtin intercept modules\n            self.ingress_module = ScanIngress(self)\n            self.egress_module = ScanEgress(self)\n            self.modules[self.ingress_module.name] = self.ingress_module\n            self.modules[self.egress_module.name] = self.egress_module\n\n            # sort modules by priority\n            self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], \"priority\", 3)))\n\n            self._modules_loaded = True\n\n    @property\n    def modules_finished(self):\n        finished_modules = [m.finished for m in self.modules.values()]\n        return all(finished_modules)\n\n    def kill_module(self, module_name, message=None):\n        from signal import SIGINT\n\n        module = self.modules[module_name]\n        if module._intercept:\n            self.warning(f'Cannot kill module \"{module_name}\" because it is critical to the scan')\n            return\n        module.set_error_state(message=message, clear_outgoing_queue=True)\n        for proc in module._proc_tracker:\n            with contextlib.suppress(Exception):\n                proc.send_signal(SIGINT)\n        self.helpers.cancel_tasks_sync(module._tasks)\n\n    @property\n    def incoming_event_queues(self):\n        return self.ingress_module.incoming_queues\n\n    @property\n    def num_queued_events(self):\n        total = 0\n        for q in self.incoming_event_queues:\n            total += len(q._queue)\n        return total\n\n    def modules_status(self, _log=False):\n        finished = True\n        status = {\"modules\": {}}\n\n        sorted_modules = []\n        for module_name, module in self.modules.items():\n            if module_name.startswith(\"_\"):\n                continue\n            sorted_modules.append(module)\n            mod_status = module.status\n            if mod_status[\"running\"]:\n                finished = False\n            status[\"modules\"][module_name] = mod_status\n\n        # sort modules by name\n        sorted_modules.sort(key=lambda m: m.name)\n\n        status[\"finished\"] = finished\n\n        modules_errored = [m for m, s in status[\"modules\"].items() if s[\"errored\"]]\n\n        max_mem_percent = 90\n        mem_status = self.helpers.memory_status()\n        # abort if we don't have the memory\n        mem_percent = mem_status.percent\n        if mem_percent &gt; max_mem_percent:\n            free_memory = mem_status.available\n            free_memory_human = self.helpers.bytes_to_human(free_memory)\n            self.warning(f\"System memory is at {mem_percent:.1f}% ({free_memory_human} remaining)\")\n\n        if _log:\n            modules_status = []\n            for m, s in status[\"modules\"].items():\n                running = s[\"running\"]\n                incoming = s[\"events\"][\"incoming\"]\n                outgoing = s[\"events\"][\"outgoing\"]\n                tasks = s[\"tasks\"]\n                total = sum([incoming, outgoing, tasks])\n                if running or total &gt; 0:\n                    modules_status.append((m, running, incoming, outgoing, tasks, total))\n            modules_status.sort(key=lambda x: x[-1], reverse=True)\n\n         
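   # Note: each entry appended above is a tuple of (module_name, running, incoming, outgoing, tasks, total),\n            # sorted below by total activity; the summary line renders each one as name(incoming:tasks:outgoing),\n            # which corresponds to the incoming:processing:outgoing counts named in the log message.\n         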
   if modules_status:\n                modules_status_str = \", \".join([f\"{m}({i:,}:{t:,}:{o:,})\" for m, r, i, o, t, _ in modules_status])\n                self.info(f\"{self.name}: Modules running (incoming:processing:outgoing) {modules_status_str}\")\n            else:\n                self.info(f\"{self.name}: No modules running\")\n            event_type_summary = sorted(self.stats.events_emitted_by_type.items(), key=lambda x: x[-1], reverse=True)\n            if event_type_summary:\n                self.info(\n                    f'{self.name}: Events produced so far: {\", \".join([f\"{k}: {v}\" for k,v in event_type_summary])}'\n                )\n            else:\n                self.info(f\"{self.name}: No events produced yet\")\n\n            if modules_errored:\n                self.verbose(\n                    f'{self.name}: Modules errored: {len(modules_errored):,} ({\", \".join([m for m in modules_errored])})'\n                )\n\n            num_queued_events = self.num_queued_events\n            if num_queued_events:\n                self.info(\n                    f\"{self.name}: {num_queued_events:,} events in queue ({self.stats.speedometer.speed:,} processed in the past {self.status_frequency} seconds)\"\n                )\n            else:\n                self.info(\n                    f\"{self.name}: No events in queue ({self.stats.speedometer.speed:,} processed in the past {self.status_frequency} seconds)\"\n                )\n\n            if self.log_level &lt;= logging.DEBUG:\n                # status debugging\n                scan_active_status = []\n                scan_active_status.append(f\"scan._finished_init: {self._finished_init}\")\n                scan_active_status.append(f\"scan.modules_finished: {self.modules_finished}\")\n                for m in sorted_modules:\n                    running = m.running\n                    scan_active_status.append(f\"    {m}:\")\n                    # scan_active_status.append(f\"        running: {running}\")\n                    if running:\n                        # scan_active_status.append(f\"        tasks:\")\n                        for task in list(m._task_counter.tasks.values()):\n                            scan_active_status.append(f\"        - {task}:\")\n                    # scan_active_status.append(f\"        incoming_queue_size: {m.num_incoming_events}\")\n                    # scan_active_status.append(f\"        outgoing_queue_size: {m.outgoing_event_queue.qsize()}\")\n                for line in scan_active_status:\n                    self.debug(line)\n\n                # log module memory usage\n                module_memory_usage = []\n                for module in sorted_modules:\n                    memory_usage = module.memory_usage\n                    module_memory_usage.append((module.name, memory_usage))\n                module_memory_usage.sort(key=lambda x: x[-1], reverse=True)\n                self.debug(f\"MODULE MEMORY USAGE:\")\n                for module_name, usage in module_memory_usage:\n                    self.debug(f\"    - {module_name}: {self.helpers.bytes_to_human(usage)}\")\n\n        status.update({\"modules_errored\": len(modules_errored)})\n\n        return status\n\n    def stop(self):\n        \"\"\"Stops the in-progress scan and performs necessary cleanup.\n\n        This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. 
It also kills child processes spawned during the scan.\n\n        Returns:\n            None\n        \"\"\"\n        if not self._stopping:\n            self._stopping = True\n            self.status = \"ABORTING\"\n            self.hugewarning(\"Aborting scan\")\n            self.trace()\n            self._cancel_tasks()\n            self._drain_queues()\n            self.helpers.kill_children()\n            self._drain_queues()\n            self.helpers.kill_children()\n            self.debug(\"Finished aborting scan\")\n\n    async def finish(self):\n        \"\"\"Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected.\n\n        The method is idempotent and will return False if no new activity has been recorded since the last invocation.\n\n        Returns:\n            bool: True if new activity has been detected and the `finished()` method is invoked on all modules.\n                  False if no new activity has been detected since the last invocation.\n\n        Notes:\n            This method alters the scan's status to \"FINISHING\" if new activity is detected.\n        \"\"\"\n        # if new events were generated since last time we were here\n        if self._new_activity:\n            self._new_activity = False\n            self.status = \"FINISHING\"\n            # Trigger .finished() on every module and start over\n            log.info(\"Finishing scan\")\n            for module in self.modules.values():\n                finished_event = self.make_event(f\"FINISHED\", \"FINISHED\", dummy=True, tags={module.name})\n                await module.queue_event(finished_event)\n            self.verbose(\"Completed finish()\")\n            return True\n        self.verbose(\"Completed final finish()\")\n        # Return False if no new events were generated since last time\n        return False\n\n    def _drain_queues(self):\n        \"\"\"Empties all the event queues for each loaded module and the manager's incoming event queue.\n\n        This method iteratively empties both the incoming and outgoing event queues of each module, as well as the incoming event queue of the scan manager.\n\n        Returns:\n            None\n        \"\"\"\n        self.debug(\"Draining queues\")\n        for module in self.modules.values():\n            with contextlib.suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    if module.incoming_event_queue not in (None, False):\n                        module.incoming_event_queue.get_nowait()\n            with contextlib.suppress(asyncio.queues.QueueEmpty):\n                while 1:\n                    if module.outgoing_event_queue not in (None, False):\n                        module.outgoing_event_queue.get_nowait()\n        self.debug(\"Finished draining queues\")\n\n    def _cancel_tasks(self):\n        \"\"\"Cancels all asynchronous tasks and shuts down the process pool.\n\n        This method collects all pending tasks from each module, the dispatcher,\n        and the scan manager. After collecting these tasks, it cancels them synchronously\n        using a helper function. 
Finally, it shuts down the process pool, canceling any\n        pending futures.\n\n        Returns:\n            None\n        \"\"\"\n        self.debug(\"Cancelling all scan tasks\")\n        tasks = []\n        # module workers\n        for m in self.modules.values():\n            tasks += getattr(m, \"_tasks\", [])\n        # init events\n        if self.init_events_task:\n            tasks.append(self.init_events_task)\n        # ticker\n        if self.ticker_task:\n            tasks.append(self.ticker_task)\n        # dispatcher\n        tasks += self.dispatcher_tasks\n        # manager worker loops\n        tasks += self._manager_worker_loop_tasks\n        self.helpers.cancel_tasks_sync(tasks)\n        # process pool\n        self.helpers.process_pool.shutdown(cancel_futures=True)\n        self.debug(\"Finished cancelling all scan tasks\")\n        return tasks\n\n    async def _report(self):\n        \"\"\"Asynchronously executes the `report()` method for each module in the scan.\n\n        This method is called once at the end of each scan and is responsible for\n        triggering the `report()` function for each module. It executes irrespective\n        of whether the scan was aborted or completed successfully. The method makes\n        use of an asynchronous context manager (`_acatch`) to handle exceptions and\n        a task counter to keep track of the task's context.\n\n        Returns:\n            None\n        \"\"\"\n        for mod in self.modules.values():\n            context = f\"{mod.name}.report()\"\n            async with self._acatch(context), mod._task_counter.count(context):\n                await mod.report()\n\n    async def _cleanup(self):\n        \"\"\"Asynchronously executes the `cleanup()` method for each module in the scan.\n\n        This method is called once at the end of the scan to perform resource cleanup\n        tasks. It is executed regardless of whether the scan was aborted or completed\n        successfully. 
The scan status is set to \"CLEANING_UP\" during the execution.\n        After calling the `cleanup()` method for each module, it performs additional\n        cleanup tasks such as removing the scan's home directory if empty and cleaning\n        old scans.\n\n        Returns:\n            None\n        \"\"\"\n        # clean up self\n        if not self._cleanedup:\n            self._cleanedup = True\n            self.status = \"CLEANING_UP\"\n            # clean up dns engine\n            if self.helpers._dns is not None:\n                await self.helpers.dns.shutdown()\n            # clean up web engine\n            if self.helpers._web is not None:\n                await self.helpers.web.shutdown()\n            # clean up modules\n            for mod in self.modules.values():\n                await mod._cleanup()\n            with contextlib.suppress(Exception):\n                self.home.rmdir()\n            self.helpers.clean_old_scans()\n\n    def in_scope(self, *args, **kwargs):\n        return self.preset.in_scope(*args, **kwargs)\n\n    def whitelisted(self, *args, **kwargs):\n        return self.preset.whitelisted(*args, **kwargs)\n\n    def blacklisted(self, *args, **kwargs):\n        return self.preset.blacklisted(*args, **kwargs)\n\n    @property\n    def core(self):\n        return self.preset.core\n\n    @property\n    def config(self):\n        return self.preset.core.config\n\n    @property\n    def target(self):\n        return self.preset.target\n\n    @property\n    def seeds(self):\n        return self.preset.seeds\n\n    @property\n    def whitelist(self):\n        return self.preset.whitelist\n\n    @property\n    def blacklist(self):\n        return self.preset.blacklist\n\n    @property\n    def helpers(self):\n        return self.preset.helpers\n\n    @property\n    def force_start(self):\n        return self.preset.force_start\n\n    @property\n    def word_cloud(self):\n        return self.helpers.word_cloud\n\n    @property\n    def stopping(self):\n        return not self.running\n\n    @property\n    def stopped(self):\n        return self._status_code &gt; 5\n\n    @property\n    def running(self):\n        return 0 &lt; self._status_code &lt; 4\n\n    @property\n    def aborting(self):\n        return 5 &lt;= self._status_code &lt;= 6\n\n    @property\n    def status(self):\n        return self._status\n\n    @property\n    def omitted_event_types(self):\n        if self._omitted_event_types is None:\n            self._omitted_event_types = self.config.get(\"omit_event_types\", [])\n        return self._omitted_event_types\n\n    @status.setter\n    def status(self, status):\n        \"\"\"\n        Block setting after status has been aborted\n        \"\"\"\n        status = str(status).strip().upper()\n        if status in self._status_codes:\n            if self.status == \"ABORTING\" and not status == \"ABORTED\":\n                self.debug(f'Attempt to set invalid status \"{status}\" on aborted scan')\n            else:\n                if status != self._status:\n                    self._status = status\n                    self._status_code = self._status_codes[status]\n                    self.dispatcher_tasks.append(\n                        asyncio.create_task(\n                            self.dispatcher.catch(self.dispatcher.on_status, self._status, self.id),\n                            name=f\"{self.name}.dispatcher.on_status({status})\",\n                        )\n                    )\n                else:\n                    
self.debug(f'Scan status is already \"{status}\"')\n        else:\n            self.debug(f'Attempt to set invalid status \"{status}\" on scan')\n\n    def make_event(self, *args, **kwargs):\n        kwargs[\"scan\"] = self\n        event = make_event(*args, **kwargs)\n        return event\n\n    @property\n    def root_event(self):\n        \"\"\"\n        The root scan event, e.g.:\n            ```json\n            {\n              \"type\": \"SCAN\",\n              \"id\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"data\": \"pixilated_kathryn (SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54)\",\n              \"scope_distance\": 0,\n              \"scan\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"timestamp\": 1694548779.616255,\n              \"parent\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n              \"tags\": [\n                \"distance-0\"\n              ],\n              \"module\": \"TARGET\",\n              \"module_sequence\": \"TARGET\"\n            }\n            ```\n        \"\"\"\n        if self._root_event is None:\n            self._root_event = self.make_root_event(f\"Scan {self.name} started at {self.start_time}\")\n        self._root_event.data[\"status\"] = self.status\n        return self._root_event\n\n    def finish_event(self, context=None, status=None):\n        if self._finish_event is None:\n            if context is None or status is None:\n                raise ValueError(\"Must specify context and status\")\n            self._finish_event = self.make_root_event(context)\n            self._finish_event.data[\"status\"] = status\n        return self._finish_event\n\n    def make_root_event(self, context):\n        root_event = self.make_event(data=self.json, event_type=\"SCAN\", dummy=True, context=context)\n        root_event._id = self.id\n        root_event.scope_distance = 0\n        root_event.parent = root_event\n        root_event.module = self._make_dummy_module(name=\"TARGET\", _type=\"TARGET\")\n        return root_event\n\n    @property\n    def dns_strings(self):\n        \"\"\"\n        A list of DNS hostname strings generated from the scan target\n        \"\"\"\n        if self._dns_strings is None:\n            dns_whitelist = set(t.host for t in self.whitelist if t.host and isinstance(t.host, str))\n            dns_whitelist = sorted(dns_whitelist, key=len)\n            dns_whitelist_set = set()\n            dns_strings = []\n            for t in dns_whitelist:\n                if not any(x in dns_whitelist_set for x in self.helpers.domain_parents(t, include_self=True)):\n                    dns_whitelist_set.add(t)\n                    dns_strings.append(t)\n            self._dns_strings = dns_strings\n        return self._dns_strings\n\n    def _generate_dns_regexes(self, pattern):\n        \"\"\"\n        Generates a list of compiled DNS hostname regexes based on the provided pattern.\n        This method centralizes the regex compilation to avoid redundancy in the dns_regexes and dns_regexes_yara methods.\n\n        Args:\n            pattern (str):\n        Returns:\n            list[re.Pattern]: A list of compiled regex patterns if enabled, otherwise an empty list.\n        \"\"\"\n\n        dns_regexes = []\n        for t in self.dns_strings:\n            regex_pattern = re.compile(f\"{pattern}{re.escape(t)})\", re.I)\n            log.debug(f\"Generated Regex [{regex_pattern.pattern}] for domain {t}\")\n            dns_regexes.append(regex_pattern)\n        return 
dns_regexes\n\n    @property\n    def dns_regexes(self):\n        \"\"\"\n        A list of DNS hostname regexes generated from the scan target\n        For the purpose of extracting hostnames\n\n        Examples:\n            Extract hostnames from text:\n            &gt;&gt;&gt; for regex in scan.dns_regexes:\n            ...     for match in regex.finditer(response.text):\n            ...         hostname = match.group().lower()\n        \"\"\"\n        if self._dns_regexes is None:\n            self._dns_regexes = self._generate_dns_regexes(r\"((?:(?:[\\w-]+)\\.)+\")\n        return self._dns_regexes\n\n    @property\n    def dns_regexes_yara(self):\n        \"\"\"\n        Returns a list of DNS hostname regexes formatted specifically for compatibility with YARA rules.\n        \"\"\"\n        if self._dns_regexes_yara is None:\n            self._dns_regexes_yara = self._generate_dns_regexes(r\"(([a-z0-9-]+\\.)*\")\n        return self._dns_regexes_yara\n\n    @property\n    def dns_yara_rules_uncompiled(self):\n        if self._dns_yara_rules_uncompiled is None:\n            regexes_component_list = []\n            for i, r in enumerate(self.dns_regexes_yara):\n                regexes_component_list.append(rf\"$dns_name_{i} = /\\b{r.pattern}/ nocase\")\n            if regexes_component_list:\n                regexes_component = \" \".join(regexes_component_list)\n                self._dns_yara_rules_uncompiled = f'rule hostname_extraction {{meta: description = \"matches DNS hostname pattern derived from target(s)\" strings: {regexes_component} condition: any of them}}'\n        return self._dns_yara_rules_uncompiled\n\n    async def dns_yara_rules(self):\n        if self._dns_yara_rules is None:\n            if self.dns_yara_rules_uncompiled is not None:\n                import yara\n\n                self._dns_yara_rules = await self.helpers.run_in_executor(\n                    yara.compile, source=self.dns_yara_rules_uncompiled\n                )\n        return self._dns_yara_rules\n\n    async def extract_in_scope_hostnames(self, s):\n        \"\"\"\n        Given a string, uses yara to extract hostnames matching scan targets\n\n        Examples:\n            &gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n            ... 
{\"www.evilcorp.com\"}\n        \"\"\"\n        matches = set()\n        dns_yara_rules = await self.dns_yara_rules()\n        if dns_yara_rules is not None:\n            for match in await self.helpers.run_in_executor(dns_yara_rules.match, data=s):\n                for string in match.strings:\n                    for instance in string.instances:\n                        matches.add(str(instance))\n        return matches\n\n    @property\n    def json(self):\n        \"\"\"\n        A dictionary representation of the scan including its name, ID, targets, whitelist, blacklist, and modules\n        \"\"\"\n        j = dict()\n        for i in (\"id\", \"name\"):\n            v = getattr(self, i, \"\")\n            if v:\n                j.update({i: v})\n        j[\"target\"] = self.preset.target.json\n        j[\"preset\"] = self.preset.to_dict(redact_secrets=True)\n        if self.start_time is not None:\n            j[\"started_at\"] = self.start_time.isoformat()\n        if self.end_time is not None:\n            j[\"finished_at\"] = self.end_time.isoformat()\n        if self.duration is not None:\n            j[\"duration_seconds\"] = self.duration_seconds\n        if self.duration_human is not None:\n            j[\"duration\"] = self.duration_human\n        return j\n\n    def debug(self, *args, trace=False, **kwargs):\n        log.debug(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def verbose(self, *args, trace=False, **kwargs):\n        log.verbose(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeverbose(self, *args, trace=False, **kwargs):\n        log.hugeverbose(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def info(self, *args, trace=False, **kwargs):\n        log.info(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugeinfo(self, *args, trace=False, **kwargs):\n        log.hugeinfo(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def success(self, *args, trace=False, **kwargs):\n        log.success(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugesuccess(self, *args, trace=False, **kwargs):\n        log.hugesuccess(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def warning(self, *args, trace=True, **kwargs):\n        log.warning(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def hugewarning(self, *args, trace=True, **kwargs):\n        log.hugewarning(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def error(self, *args, trace=True, **kwargs):\n        log.error(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    def trace(self, msg=None):\n        if msg is None:\n            e_type, e_val, e_traceback = exc_info()\n            if e_type is not None:\n                log.trace(traceback.format_exc())\n        else:\n            log.trace(msg)\n\n    def critical(self, *args, trace=True, **kwargs):\n        log.critical(*args, extra={\"scan_id\": self.id}, **kwargs)\n        if trace:\n            self.trace()\n\n    @property\n    def log_level(self):\n        \"\"\"\n        Return the current log level, e.g. 
logging.INFO\n        \"\"\"\n        return self.core.logger.log_level\n\n    @property\n    def _log_handlers(self):\n        if self.__log_handlers is None:\n            self.helpers.mkdir(self.home)\n            main_handler = logging.handlers.TimedRotatingFileHandler(\n                str(self.home / \"scan.log\"), when=\"d\", interval=1, backupCount=14\n            )\n            main_handler.addFilter(lambda x: x.levelno != logging.TRACE and x.levelno &gt;= logging.VERBOSE)\n            debug_handler = logging.handlers.TimedRotatingFileHandler(\n                str(self.home / \"debug.log\"), when=\"d\", interval=1, backupCount=14\n            )\n            debug_handler.addFilter(lambda x: x.levelno &gt;= logging.DEBUG)\n            self.__log_handlers = [main_handler, debug_handler]\n        return self.__log_handlers\n\n    def _start_log_handlers(self):\n        # add log handlers\n        for handler in self._log_handlers:\n            self.core.logger.add_log_handler(handler)\n        # temporarily disable main ones\n        for handler_name in (\"file_main\", \"file_debug\"):\n            handler = self.core.logger.log_handlers.get(handler_name, None)\n            if handler is not None and handler not in self._log_handler_backup:\n                self._log_handler_backup.append(handler)\n                self.core.logger.remove_log_handler(handler)\n\n    def _stop_log_handlers(self):\n        # remove log handlers\n        for handler in self._log_handlers:\n            self.core.logger.remove_log_handler(handler)\n        # restore main ones\n        for handler in self._log_handler_backup:\n            self.core.logger.add_log_handler(handler)\n\n    def _fail_setup(self, msg):\n        msg = str(msg)\n        if self.force_start:\n            self.error(msg)\n        else:\n            msg += \" (--force to run module anyway)\"\n            raise ScanError(msg)\n\n    def _load_modules(self, modules):\n        modules = [str(m) for m in modules]\n        loaded_modules = {}\n        failed = set()\n        for module_name, module_class in self.preset.module_loader.load_modules(modules).items():\n            if module_class:\n                try:\n                    loaded_modules[module_name] = module_class(self)\n                    self.verbose(f'Loaded module \"{module_name}\"')\n                    continue\n                except Exception:\n                    self.warning(f\"Failed to load module {module_class}\")\n            else:\n                self.warning(f'Failed to load unknown module \"{module_name}\"')\n            failed.add(module_name)\n        return loaded_modules, failed\n\n    async def _status_ticker(self, interval=15):\n        async with self._acatch():\n            while 1:\n                await asyncio.sleep(interval)\n                self.modules_status(_log=True)\n\n    @contextlib.asynccontextmanager\n    async def _acatch(self, context=\"scan\", finally_callback=None, unhandled_is_critical=False):\n        \"\"\"\n        Async version of catch()\n\n        async with catch():\n            await do_stuff()\n        \"\"\"\n        try:\n            yield\n        except BaseException as e:\n            self._handle_exception(e, context=context, unhandled_is_critical=unhandled_is_critical)\n\n    def _handle_exception(self, e, context=\"scan\", finally_callback=None, unhandled_is_critical=False):\n        if callable(context):\n            context = f\"{context.__qualname__}()\"\n        filename, lineno, funcname = 
self.helpers.get_traceback_details(e)\n        if self.helpers.in_exception_chain(e, (KeyboardInterrupt,)):\n            log.debug(f\"Interrupted\")\n            self.stop()\n        elif isinstance(e, BrokenPipeError):\n            log.debug(f\"BrokenPipeError in {filename}:{lineno}:{funcname}(): {e}\")\n        elif isinstance(e, asyncio.CancelledError):\n            raise\n        elif isinstance(e, Exception):\n            traceback_str = getattr(e, \"engine_traceback\", None)\n            if traceback_str is None:\n                traceback_str = traceback.format_exc()\n            if unhandled_is_critical:\n                log.critical(f\"Error in {context}: {filename}:{lineno}:{funcname}(): {e}\")\n                log.critical(traceback_str)\n            else:\n                log.error(f\"Error in {context}: {filename}:{lineno}:{funcname}(): {e}\")\n                log.trace(traceback_str)\n        if callable(finally_callback):\n            finally_callback(e)\n\n    def _make_dummy_module(self, name, _type=\"scan\"):\n        \"\"\"\n        Construct a dummy module, for attachment to events\n        \"\"\"\n        try:\n            return self.dummy_modules[name]\n        except KeyError:\n            dummy = DummyModule(scan=self, name=name, _type=_type)\n            self.dummy_modules[name] = dummy\n            return dummy\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_regexes","title":"dns_regexes  <code>property</code>","text":"<pre><code>dns_regexes\n</code></pre> <p>A list of DNS hostname regexes generated from the scan target For the purpose of extracting hostnames</p> <p>Examples:</p> <p>Extract hostnames from text:</p> <pre><code>&gt;&gt;&gt; for regex in scan.dns_regexes:\n...     for match in regex.finditer(response.text):\n...         hostname = match.group().lower()\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_regexes_yara","title":"dns_regexes_yara  <code>property</code>","text":"<pre><code>dns_regexes_yara\n</code></pre> <p>Returns a list of DNS hostname regexes formatted specifically for compatibility with YARA rules.</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.dns_strings","title":"dns_strings  <code>property</code>","text":"<pre><code>dns_strings\n</code></pre> <p>A list of DNS hostname strings generated from the scan target</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.json","title":"json  <code>property</code>","text":"<pre><code>json\n</code></pre> <p>A dictionary representation of the scan including its name, ID, targets, whitelist, blacklist, and modules</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.log_level","title":"log_level  <code>property</code>","text":"<pre><code>log_level\n</code></pre> <p>Return the current log level, e.g. 
logging.INFO</p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.root_event","title":"root_event  <code>property</code>","text":"<pre><code>root_event\n</code></pre> <p>The root scan event, e.g.:     <pre><code>{\n  \"type\": \"SCAN\",\n  \"id\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"data\": \"pixilated_kathryn (SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54)\",\n  \"scope_distance\": 0,\n  \"scan\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"timestamp\": 1694548779.616255,\n  \"parent\": \"SCAN:1188928d942ace8e3befae0bdb9c3caa22705f54\",\n  \"tags\": [\n    \"distance-0\"\n  ],\n  \"module\": \"TARGET\",\n  \"module_sequence\": \"TARGET\"\n}\n</code></pre></p>"},{"location":"dev/scanner/#bbot.scanner.Scanner.__init__","title":"__init__","text":"<pre><code>__init__(*targets, scan_id=None, dispatcher=None, **kwargs)\n</code></pre> <p>Initializes the Scanner class.</p> <p>If a premade <code>preset</code> is specified, it will be used for the scan. Otherwise, <code>Scan</code> accepts the same arguments as <code>Preset</code>, which are passed through and used to create a new preset.</p> <p>Parameters:</p> <ul> <li> <code>*targets</code>               (<code>list[str]</code>, default:                   <code>()</code> )           \u2013            <p>Scan targets (passed through to <code>Preset</code>).</p> </li> <li> <code>preset</code>               (<code>Preset</code>)           \u2013            <p>Preset to use for the scan.</p> </li> <li> <code>scan_id</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Unique identifier for the scan. Auto-generates if None.</p> </li> <li> <code>dispatcher</code>               (<code>Dispatcher</code>, default:                   <code>None</code> )           \u2013            <p>Dispatcher object to use. Defaults to new Dispatcher.</p> </li> <li> <code>**kwargs</code>               (<code>list[str]</code>, default:                   <code>{}</code> )           \u2013            <p>Additional keyword arguments (passed through to <code>Preset</code>).</p> </li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>def __init__(\n    self,\n    *targets,\n    scan_id=None,\n    dispatcher=None,\n    **kwargs,\n):\n    \"\"\"\n    Initializes the Scanner class.\n\n    If a premade `preset` is specified, it will be used for the scan.\n    Otherwise, `Scan` accepts the same arguments as `Preset`, which are passed through and used to create a new preset.\n\n    Args:\n        *targets (list[str], optional): Scan targets (passed through to `Preset`).\n        preset (Preset, optional): Preset to use for the scan.\n        scan_id (str, optional): Unique identifier for the scan. Auto-generates if None.\n        dispatcher (Dispatcher, optional): Dispatcher object to use. 
Defaults to new Dispatcher.\n        **kwargs (list[str], optional): Additional keyword arguments (passed through to `Preset`).\n    \"\"\"\n    self._root_event = None\n    self._finish_event = None\n    self.start_time = None\n    self.end_time = None\n    self.duration = None\n    self.duration_human = None\n    self.duration_seconds = None\n\n    self._success = False\n\n    if scan_id is not None:\n        self.id = str(scan_id)\n    else:\n        self.id = f\"SCAN:{sha1(rand_string(20)).hexdigest()}\"\n\n    custom_preset = kwargs.pop(\"preset\", None)\n    kwargs[\"_log\"] = True\n\n    from .preset import Preset\n\n    base_preset = Preset(*targets, **kwargs)\n\n    if custom_preset is not None:\n        if not isinstance(custom_preset, Preset):\n            raise ValidationError(f'Preset must be of type Preset, not \"{type(custom_preset).__name__}\"')\n        base_preset.merge(custom_preset)\n\n    self.preset = base_preset.bake(self)\n\n    # scan name\n    if self.preset.scan_name is None:\n        tries = 0\n        while 1:\n            if tries &gt; 5:\n                scan_name = f\"{rand_string(4)}_{rand_string(4)}\"\n                break\n            scan_name = random_name()\n            if self.preset.output_dir is not None:\n                home_path = Path(self.preset.output_dir).resolve() / scan_name\n            else:\n                home_path = self.preset.bbot_home / \"scans\" / scan_name\n            if not home_path.exists():\n                break\n            tries += 1\n    else:\n        scan_name = str(self.preset.scan_name)\n    self.name = scan_name.replace(\"/\", \"_\")\n\n    # make sure the preset has a description\n    if not self.preset.description:\n        self.preset.description = self.name\n\n    # scan output dir\n    if self.preset.output_dir is not None:\n        self.home = Path(self.preset.output_dir).resolve() / self.name\n    else:\n        self.home = self.preset.bbot_home / \"scans\" / self.name\n\n    self._status = \"NOT_STARTED\"\n    self._status_code = 0\n\n    self.modules = OrderedDict({})\n    self._modules_loaded = False\n    self.dummy_modules = {}\n\n    if dispatcher is None:\n        from .dispatcher import Dispatcher\n\n        self.dispatcher = Dispatcher()\n    else:\n        self.dispatcher = dispatcher\n    self.dispatcher.set_scan(self)\n\n    # scope distance\n    self.scope_config = self.config.get(\"scope\", {})\n    self.scope_search_distance = max(0, int(self.scope_config.get(\"search_distance\", 0)))\n    self.scope_report_distance = int(self.scope_config.get(\"report_distance\", 1))\n\n    # web config\n    self.web_config = self.config.get(\"web\", {})\n    self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n    self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n    self.web_spider_links_per_page = self.web_config.get(\"spider_links_per_page\", 20)\n    max_redirects = self.web_config.get(\"http_max_redirects\", 5)\n    self.web_max_redirects = max(max_redirects, self.web_spider_distance)\n    self.http_proxy = self.web_config.get(\"http_proxy\", \"\")\n    self.http_timeout = self.web_config.get(\"http_timeout\", 10)\n    self.httpx_timeout = self.web_config.get(\"httpx_timeout\", 5)\n    self.http_retries = self.web_config.get(\"http_retries\", 1)\n    self.httpx_retries = self.web_config.get(\"httpx_retries\", 1)\n    self.useragent = self.web_config.get(\"user_agent\", \"BBOT\")\n    # custom HTTP headers warning\n    self.custom_http_headers = 
self.web_config.get(\"http_headers\", {})\n    if self.custom_http_headers:\n        self.warning(\n            \"You have enabled custom HTTP headers. These will be attached to all in-scope requests and all requests made by httpx.\"\n        )\n\n    # url file extensions\n    self.url_extension_blacklist = set(e.lower() for e in self.config.get(\"url_extension_blacklist\", []))\n    self.url_extension_httpx_only = set(e.lower() for e in self.config.get(\"url_extension_httpx_only\", []))\n\n    # url querystring behavior\n    self.url_querystring_remove = self.config.get(\"url_querystring_remove\", True)\n\n    # blob inclusion\n    self._file_blobs = self.config.get(\"file_blobs\", False)\n    self._folder_blobs = self.config.get(\"folder_blobs\", False)\n\n    # how often to print scan status\n    self.status_frequency = self.config.get(\"status_frequency\", 15)\n\n    from .stats import ScanStats\n\n    self.stats = ScanStats(self)\n\n    self._prepped = False\n    self._finished_init = False\n    self._new_activity = False\n    self._cleanedup = False\n    self._omitted_event_types = None\n\n    self.__loop = None\n    self._manager_worker_loop_tasks = []\n    self.init_events_task = None\n    self.ticker_task = None\n    self.dispatcher_tasks = []\n\n    self._stopping = False\n\n    self._dns_strings = None\n    self._dns_regexes = None\n    self._dns_regexes_yara = None\n    self._dns_yara_rules_uncompiled = None\n    self._dns_yara_rules = None\n\n    self.__log_handlers = None\n    self._log_handler_backup = []\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.async_start","title":"async_start  <code>async</code>","text":"<pre><code>async_start()\n</code></pre> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def async_start(self):\n    \"\"\" \"\"\"\n    self.start_time = datetime.now()\n    self.root_event.data[\"started_at\"] = self.start_time.isoformat()\n    try:\n        await self._prep()\n\n        self._start_log_handlers()\n        self.trace(f'Ran BBOT {__version__} at {self.start_time}, command: {\" \".join(sys.argv)}')\n        self.trace(f\"Target: {self.preset.target.json}\")\n        self.trace(f\"Preset: {self.preset.to_dict(redact_secrets=True)}\")\n\n        if not self.target:\n            self.warning(f\"No scan targets specified\")\n\n        # start status ticker\n        self.ticker_task = asyncio.create_task(\n            self._status_ticker(self.status_frequency), name=f\"{self.name}._status_ticker()\"\n        )\n\n        self.status = \"STARTING\"\n\n        if not self.modules:\n            self.error(f\"No modules loaded\")\n            self.status = \"FAILED\"\n            return\n        else:\n            self.hugesuccess(f\"Starting scan {self.name}\")\n\n        await self.dispatcher.on_start(self)\n\n        self.status = \"RUNNING\"\n        self._start_modules()\n        self.verbose(f\"{len(self.modules):,} modules started\")\n\n        # distribute seed events\n        self.init_events_task = asyncio.create_task(\n            self.ingress_module.init_events(self.target.seeds.events),\n            name=f\"{self.name}.ingress_module.init_events()\",\n        )\n\n        # main scan loop\n        while 1:\n            # abort if we're aborting\n            if self.aborting:\n                self._drain_queues()\n                break\n\n            # yield events as they come (async for event in scan.async_start())\n            if \"python\" in self.modules:\n                events, finish = await 
self.modules[\"python\"]._events_waiting(batch_size=-1)\n                for e in events:\n                    yield e\n                if events:\n                    continue\n\n            # break if initialization finished and the scan is no longer active\n            if self._finished_init and self.modules_finished:\n                new_activity = await self.finish()\n                if not new_activity:\n                    self._success = True\n                    scan_finish_event = await self._mark_finished()\n                    yield scan_finish_event\n                    break\n\n            await asyncio.sleep(0.1)\n\n        self._success = True\n\n    except BaseException as e:\n        if self.helpers.in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n            self.stop()\n            self._success = True\n        else:\n            try:\n                raise\n            except ScanError as e:\n                self.error(f\"{e}\")\n\n            except BBOTError as e:\n                self.critical(f\"Error during scan: {e}\")\n\n            except Exception:\n                self.critical(f\"Unexpected error during scan:\\n{traceback.format_exc()}\")\n\n    finally:\n        tasks = self._cancel_tasks()\n        self.debug(f\"Awaiting {len(tasks):,} tasks\")\n        for task in tasks:\n            # self.debug(f\"Awaiting {task}\")\n            with contextlib.suppress(BaseException):\n                await asyncio.wait_for(task, timeout=0.1)\n        self.debug(f\"Awaited {len(tasks):,} tasks\")\n        await self._report()\n        await self._cleanup()\n\n        await self.dispatcher.on_finish(self)\n\n        self._stop_log_handlers()\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.extract_in_scope_hostnames","title":"extract_in_scope_hostnames  <code>async</code>","text":"<pre><code>extract_in_scope_hostnames(s)\n</code></pre> <p>Given a string, uses yara to extract hostnames matching scan targets</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n... {\"www.evilcorp.com\"}\n</code></pre> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def extract_in_scope_hostnames(self, s):\n    \"\"\"\n    Given a string, uses yara to extract hostnames matching scan targets\n\n    Examples:\n        &gt;&gt;&gt; await self.scan.extract_in_scope_hostnames(\"http://www.evilcorp.com\")\n        ... {\"www.evilcorp.com\"}\n    \"\"\"\n    matches = set()\n    dns_yara_rules = await self.dns_yara_rules()\n    if dns_yara_rules is not None:\n        for match in await self.helpers.run_in_executor(dns_yara_rules.match, data=s):\n            for string in match.strings:\n                for instance in string.instances:\n                    matches.add(str(instance))\n    return matches\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.finish","title":"finish  <code>async</code>","text":"<pre><code>finish()\n</code></pre> <p>Finalizes the scan by invoking the <code>finished()</code> method on all active modules if new activity is detected.</p> <p>The method is idempotent and will return False if no new activity has been recorded since the last invocation.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if new activity has been detected and the <code>finished()</code> method is invoked on all modules.   
False if no new activity has been detected since the last invocation.</p> </li> </ul> Notes <p>This method alters the scan's status to \"FINISHING\" if new activity is detected.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def finish(self):\n    \"\"\"Finalizes the scan by invoking the `finished()` method on all active modules if new activity is detected.\n\n    The method is idempotent and will return False if no new activity has been recorded since the last invocation.\n\n    Returns:\n        bool: True if new activity has been detected and the `finished()` method is invoked on all modules.\n              False if no new activity has been detected since the last invocation.\n\n    Notes:\n        This method alters the scan's status to \"FINISHING\" if new activity is detected.\n    \"\"\"\n    # if new events were generated since last time we were here\n    if self._new_activity:\n        self._new_activity = False\n        self.status = \"FINISHING\"\n        # Trigger .finished() on every module and start over\n        log.info(\"Finishing scan\")\n        for module in self.modules.values():\n            finished_event = self.make_event(f\"FINISHED\", \"FINISHED\", dummy=True, tags={module.name})\n            await module.queue_event(finished_event)\n        self.verbose(\"Completed finish()\")\n        return True\n    self.verbose(\"Completed final finish()\")\n    # Return False if no new events were generated since last time\n    return False\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.load_modules","title":"load_modules  <code>async</code>","text":"<pre><code>load_modules()\n</code></pre> <p>Asynchronously import and instantiate all scan modules, including internal and output modules.</p> <p>This method is automatically invoked by <code>setup_modules()</code>. 
It performs several key tasks in the following sequence:</p> <ol> <li>Install dependencies for each module via <code>self.helpers.depsinstaller.install()</code>.</li> <li>Load scan modules and updates the <code>modules</code> dictionary.</li> <li>Load internal modules and updates the <code>modules</code> dictionary.</li> <li>Load output modules and updates the <code>modules</code> dictionary.</li> <li>Sorts modules based on their <code>_priority</code> attribute.</li> </ol> <p>If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless <code>self.force_start</code> is True).</p> <p>Attributes:</p> <ul> <li> <code>succeeded,</code>               (<code>failed (tuple</code>)           \u2013            <p>A tuple containing lists of modules that succeeded or failed during the dependency installation.</p> </li> <li> <code>loaded_modules,</code>               (<code>loaded_internal_modules, loaded_output_modules (dict</code>)           \u2013            <p>Dictionaries of successfully loaded modules.</p> </li> <li> <code>failed,</code>               (<code>failed_internal, failed_output (list</code>)           \u2013            <p>Lists of module names that failed to load.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ScanError</code>             \u2013            <p>If any module dependencies fail to install or modules fail to load, and if <code>self.force_start</code> is False.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Note <p>After all modules are loaded, they are sorted by <code>_priority</code> and stored in the <code>modules</code> dictionary.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def load_modules(self):\n    \"\"\"Asynchronously import and instantiate all scan modules, including internal and output modules.\n\n    This method is automatically invoked by `setup_modules()`. It performs several key tasks in the following sequence:\n\n    1. Install dependencies for each module via `self.helpers.depsinstaller.install()`.\n    2. Load scan modules and updates the `modules` dictionary.\n    3. Load internal modules and updates the `modules` dictionary.\n    4. Load output modules and updates the `modules` dictionary.\n    5. 
Sorts modules based on their `_priority` attribute.\n\n    If any modules fail to load or their dependencies fail to install, a ScanError will be raised (unless `self.force_start` is True).\n\n    Attributes:\n        succeeded, failed (tuple): A tuple containing lists of modules that succeeded or failed during the dependency installation.\n        loaded_modules, loaded_internal_modules, loaded_output_modules (dict): Dictionaries of successfully loaded modules.\n        failed, failed_internal, failed_output (list): Lists of module names that failed to load.\n\n    Raises:\n        ScanError: If any module dependencies fail to install or modules fail to load, and if `self.force_start` is False.\n\n    Returns:\n        None\n\n    Note:\n        After all modules are loaded, they are sorted by `_priority` and stored in the `modules` dictionary.\n    \"\"\"\n    if not self._modules_loaded:\n        if not self.preset.modules:\n            self.warning(f\"No modules to load\")\n            return\n\n        if not self.preset.scan_modules:\n            self.warning(f\"No scan modules to load\")\n\n        # install module dependencies\n        succeeded, failed = await self.helpers.depsinstaller.install(*self.preset.modules)\n        if failed:\n            msg = f\"Failed to install dependencies for {len(failed):,} modules: {','.join(failed)}\"\n            self._fail_setup(msg)\n        modules = sorted([m for m in self.preset.scan_modules if m in succeeded])\n        output_modules = sorted([m for m in self.preset.output_modules if m in succeeded])\n        internal_modules = sorted([m for m in self.preset.internal_modules if m in succeeded])\n\n        # Load scan modules\n        self.verbose(f\"Loading {len(modules):,} scan modules: {','.join(modules)}\")\n        loaded_modules, failed = self._load_modules(modules)\n        self.modules.update(loaded_modules)\n        if len(failed) &gt; 0:\n            msg = f\"Failed to load {len(failed):,} scan modules: {','.join(failed)}\"\n            self._fail_setup(msg)\n        if loaded_modules:\n            self.info(\n                f\"Loaded {len(loaded_modules):,}/{len(self.preset.scan_modules):,} scan modules ({','.join(loaded_modules)})\"\n            )\n\n        # Load internal modules\n        self.verbose(f\"Loading {len(internal_modules):,} internal modules: {','.join(internal_modules)}\")\n        loaded_internal_modules, failed_internal = self._load_modules(internal_modules)\n        self.modules.update(loaded_internal_modules)\n        if len(failed_internal) &gt; 0:\n            msg = f\"Failed to load {len(failed_internal):,} internal modules: {','.join(failed_internal)}\"\n            self._fail_setup(msg)\n        if loaded_internal_modules:\n            self.info(\n                f\"Loaded {len(loaded_internal_modules):,}/{len(self.preset.internal_modules):,} internal modules ({','.join(loaded_internal_modules)})\"\n            )\n\n        # Load output modules\n        self.verbose(f\"Loading {len(output_modules):,} output modules: {','.join(output_modules)}\")\n        loaded_output_modules, failed_output = self._load_modules(output_modules)\n        self.modules.update(loaded_output_modules)\n        if len(failed_output) &gt; 0:\n            msg = f\"Failed to load {len(failed_output):,} output modules: {','.join(failed_output)}\"\n            self._fail_setup(msg)\n        if loaded_output_modules:\n            self.info(\n                f\"Loaded 
{len(loaded_output_modules):,}/{len(self.preset.output_modules):,} output modules, ({','.join(loaded_output_modules)})\"\n            )\n\n        # builtin intercept modules\n        self.ingress_module = ScanIngress(self)\n        self.egress_module = ScanEgress(self)\n        self.modules[self.ingress_module.name] = self.ingress_module\n        self.modules[self.egress_module.name] = self.egress_module\n\n        # sort modules by priority\n        self.modules = OrderedDict(sorted(self.modules.items(), key=lambda x: getattr(x[-1], \"priority\", 3)))\n\n        self._modules_loaded = True\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.setup_modules","title":"setup_modules  <code>async</code>","text":"<pre><code>setup_modules(remove_failed=True)\n</code></pre> <p>Asynchronously initializes all loaded modules by invoking their <code>setup()</code> methods.</p> <p>Parameters:</p> <ul> <li> <code>remove_failed</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Flag indicating whether to remove modules that fail setup.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>succeeded - List of modules that successfully set up. hard_failed - List of modules that encountered a hard failure during setup. soft_failed - List of modules that encountered a soft failure during setup.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ScanError</code>             \u2013            <p>If no output modules could be loaded.</p> </li> </ul> Notes <p>Hard-failed modules are set to an error state and removed if <code>remove_failed</code> is True. Soft-failed modules are not set to an error state but are also removed if <code>remove_failed</code> is True.</p> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>async def setup_modules(self, remove_failed=True):\n    \"\"\"Asynchronously initializes all loaded modules by invoking their `setup()` methods.\n\n    Args:\n        remove_failed (bool): Flag indicating whether to remove modules that fail setup.\n\n    Returns:\n        tuple:\n            succeeded - List of modules that successfully set up.\n            hard_failed - List of modules that encountered a hard failure during setup.\n            soft_failed - List of modules that encountered a soft failure during setup.\n\n    Raises:\n        ScanError: If no output modules could be loaded.\n\n    Notes:\n        Hard-failed modules are set to an error state and removed if `remove_failed` is True.\n        Soft-failed modules are not set to an error state but are also removed if `remove_failed` is True.\n    \"\"\"\n    await self.load_modules()\n    self.verbose(f\"Setting up modules\")\n    succeeded = []\n    hard_failed = []\n    soft_failed = []\n\n    async for task in self.helpers.as_completed([m._setup() for m in self.modules.values()]):\n        module, status, msg = await task\n        if status == True:\n            self.debug(f\"Setup succeeded for {module.name} ({msg})\")\n            succeeded.append(module.name)\n        elif status == False:\n            self.warning(f\"Setup hard-failed for {module.name}: {msg}\")\n            self.modules[module.name].set_error_state()\n            hard_failed.append(module.name)\n        else:\n            self.info(f\"Setup soft-failed for {module.name}: {msg}\")\n            soft_failed.append(module.name)\n        if (not status) and (module._intercept or remove_failed):\n            # if a intercept module fails setup, 
we always remove it\n            self.modules.pop(module.name)\n\n    return succeeded, hard_failed, soft_failed\n</code></pre>"},{"location":"dev/scanner/#bbot.scanner.Scanner.stop","title":"stop","text":"<pre><code>stop()\n</code></pre> <p>Stops the in-progress scan and performs necessary cleanup.</p> <p>This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. It also kills child processes spawned during the scan.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>None</p> </li> </ul> Source code in <code>bbot/scanner/scanner.py</code> <pre><code>def stop(self):\n    \"\"\"Stops the in-progress scan and performs necessary cleanup.\n\n    This method sets the scan's status to \"ABORTING,\" cancels any pending tasks, and drains event queues. It also kills child processes spawned during the scan.\n\n    Returns:\n        None\n    \"\"\"\n    if not self._stopping:\n        self._stopping = True\n        self.status = \"ABORTING\"\n        self.hugewarning(\"Aborting scan\")\n        self.trace()\n        self._cancel_tasks()\n        self._drain_queues()\n        self.helpers.kill_children()\n        self._drain_queues()\n        self.helpers.kill_children()\n        self.debug(\"Finished aborting scan\")\n</code></pre>"},{"location":"dev/target/","title":"Target","text":""},{"location":"dev/target/#bbot.scanner.target.BaseTarget","title":"BaseTarget","text":"<p>               Bases: <code>RadixTarget</code></p> <p>A collection of BBOT events that represent a scan target.</p> <p>Based on radixtarget, which allows extremely fast IP and DNS lookups.</p> This class is inherited by all three components of the BBOT target <ul> <li>Whitelist</li> <li>Blacklist</li> <li>Seeds</li> </ul> Source code in <code>bbot/scanner/target.py</code> <pre><code>class BaseTarget(RadixTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan target.\n\n    Based on radixtarget, which allows extremely fast IP and DNS lookups.\n\n    This class is inherited by all three components of the BBOT target:\n        - Whitelist\n        - Blacklist\n        - Seeds\n    \"\"\"\n\n    special_target_types = {\n        # regex-callback pairs for handling special target types\n        # these aren't defined explicitly; instead they are decorated with @special_target_type\n        # the function must return a list of events\n    }\n    tags = []\n\n    def __init__(self, *targets, scan=None, **kwargs):\n        self.scan = scan\n        self.events = set()\n        self.inputs = set()\n        # Register decorated methods\n        for method in dir(self):\n            if callable(getattr(self, method, None)):\n                func = getattr(self, method)\n                if hasattr(func, \"_regex\"):\n                    self.special_target_types[func._regex] = func\n\n        super().__init__(*targets, **kwargs)\n\n    def get(self, event, **kwargs):\n        \"\"\"\n        Override default .get() to accept events\n        \"\"\"\n        if is_event(event):\n            host = event.host\n        # save resources by checking if the event is an IP or DNS name\n        elif is_ip(event, include_network=True) or is_dns_name(event):\n            host = event\n        elif isinstance(event, str):\n            event = self.make_event(event)\n            host = event.host\n        else:\n            raise ValueError(f\"Invalid host/event: {event} ({type(event)})\")\n        if not host:\n            if kwargs.get(\"raise_error\", False):\n                
raise KeyError(f\"Host not found: '{event}'\")\n            return None\n        results = super().get(host, **kwargs)\n        return results\n\n    def make_event(self, *args, **kwargs):\n        # if it's already an event, return it\n        if args and is_event(args[0]):\n            return args[0]\n        # otherwise make a new one\n        if not \"tags\" in kwargs:\n            kwargs[\"tags\"] = set()\n        kwargs[\"tags\"].update(self.tags)\n        return make_event(*args, dummy=True, scan=self.scan, **kwargs)\n\n    def add(self, targets):\n        if not isinstance(targets, (list, set, tuple)):\n            targets = [targets]\n        events = set()\n        for target in targets:\n            _events = []\n            special_target_type, _events = self.check_special_target_types(str(target))\n            if special_target_type:\n                self.inputs.add(str(target))\n            else:\n                event = self.make_event(target)\n                if event:\n                    _events = [event]\n            for event in _events:\n                self.inputs.add(event.data)\n                events.add(event)\n\n        # sort by host size to ensure consistency\n        events = sorted(events, key=lambda e: (0 if not e.host else host_size_key(e.host)))\n        for event in events:\n            self.events.add(event)\n            self._add(event.host, data=event)\n\n    def check_special_target_types(self, target):\n        for regex, callback in self.special_target_types.items():\n            match = regex.match(target)\n            if match:\n                return True, callback(match)\n        return False, []\n\n    def __iter__(self):\n        yield from self.events\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BaseTarget.get","title":"get","text":"<pre><code>get(event, **kwargs)\n</code></pre> <p>Override default .get() to accept events</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>def get(self, event, **kwargs):\n    \"\"\"\n    Override default .get() to accept events\n    \"\"\"\n    if is_event(event):\n        host = event.host\n    # save resources by checking if the event is an IP or DNS name\n    elif is_ip(event, include_network=True) or is_dns_name(event):\n        host = event\n    elif isinstance(event, str):\n        event = self.make_event(event)\n        host = event.host\n    else:\n        raise ValueError(f\"Invalid host/event: {event} ({type(event)})\")\n    if not host:\n        if kwargs.get(\"raise_error\", False):\n            raise KeyError(f\"Host not found: '{event}'\")\n        return None\n    results = super().get(host, **kwargs)\n    return results\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanSeeds","title":"ScanSeeds","text":"<p>               Bases: <code>BaseTarget</code></p> <p>Initial events used to seed a scan.</p> <p>These are the targets specified by the user, e.g. via <code>-t</code> on the CLI.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanSeeds(BaseTarget):\n    \"\"\"\n    Initial events used to seed a scan.\n\n    These are the targets specified by the user, e.g. 
via `-t` on the CLI.\n    \"\"\"\n\n    tags = [\"target\"]\n\n    @special_target_type(r\"^(?:ORG|ORG_STUB):(.*)\")\n    def handle_org_stub(self, match):\n        org_stub_event = self.make_event(match.group(1), event_type=\"ORG_STUB\")\n        if org_stub_event:\n            return [org_stub_event]\n        return []\n\n    @special_target_type(r\"^(?:USER|USERNAME):(.*)\")\n    def handle_username(self, match):\n        username_event = self.make_event(match.group(1), event_type=\"USERNAME\")\n        if username_event:\n            return [username_event]\n        return []\n\n    def get(self, event, single=True, **kwargs):\n        results = super().get(event, **kwargs)\n        if results and single:\n            return next(iter(results))\n        return results\n\n    def _add(self, host, data):\n        \"\"\"\n        Overrides the base method to enable having multiple events for the same host.\n\n        The \"data\" attribute of the node is now a set of events.\n        \"\"\"\n        if host:\n            try:\n                event_set = self.get(host, raise_error=True, single=False)\n                event_set.add(data)\n            except KeyError:\n                event_set = {data}\n            super()._add(host, data=event_set)\n\n    def _hash_value(self):\n        # seeds get hashed by event data\n        return sorted(str(e.data).encode() for e in self.events)\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanWhitelist","title":"ScanWhitelist","text":"<p>               Bases: <code>ACLTarget</code></p> <p>A collection of BBOT events that represent a scan's whitelist.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanWhitelist(ACLTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan's whitelist.\n    \"\"\"\n\n    pass\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanBlacklist","title":"ScanBlacklist","text":"<p>               Bases: <code>ACLTarget</code></p> <p>A collection of BBOT events that represent a scan's blacklist.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class ScanBlacklist(ACLTarget):\n    \"\"\"\n    A collection of BBOT events that represent a scan's blacklist.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        self.blacklist_regexes = set()\n        super().__init__(*args, **kwargs)\n\n    @special_target_type(r\"^(?:RE|REGEX):(.*)\")\n    def handle_regex(self, match):\n        pattern = match.group(1)\n        log.info(f\"Blacklisting by custom regex: {pattern}\")\n        blacklist_regex = re.compile(pattern, re.IGNORECASE)\n        self.blacklist_regexes.add(blacklist_regex)\n        return []\n\n    def get(self, event, **kwargs):\n        \"\"\"\n        Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user\n        \"\"\"\n        event = self.make_event(event)\n        # first, check event's host against blacklist\n        try:\n            event_result = super().get(event, raise_error=True)\n        except KeyError:\n            event_result = None\n        if event_result is not None:\n            return event_result\n        # next, check event's host against regexes\n        host_or_url = event.host_filterable\n        if host_or_url:\n            for regex in self.blacklist_regexes:\n                if regex.search(str(host_or_url)):\n                    return event\n        if kwargs.get(\"raise_error\", False):\n            raise KeyError(f\"Host not found: 
'{event.data}'\")\n        return None\n\n    def _hash_value(self):\n        # regexes are included in blacklist hash\n        regex_patterns = [str(r.pattern).encode() for r in self.blacklist_regexes]\n        hosts = [str(h).encode() for h in self.sorted_hosts]\n        return hosts + regex_patterns\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.ScanBlacklist.get","title":"get","text":"<pre><code>get(event, **kwargs)\n</code></pre> <p>Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>def get(self, event, **kwargs):\n    \"\"\"\n    Here, for the blacklist, we modify this method to also consider any special regex patterns specified by the user\n    \"\"\"\n    event = self.make_event(event)\n    # first, check event's host against blacklist\n    try:\n        event_result = super().get(event, raise_error=True)\n    except KeyError:\n        event_result = None\n    if event_result is not None:\n        return event_result\n    # next, check event's host against regexes\n    host_or_url = event.host_filterable\n    if host_or_url:\n        for regex in self.blacklist_regexes:\n            if regex.search(str(host_or_url)):\n                return event\n    if kwargs.get(\"raise_error\", False):\n        raise KeyError(f\"Host not found: '{event.data}'\")\n    return None\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget","title":"BBOTTarget","text":"A convenient abstraction of a scan target that contains three subtargets <ul> <li>seeds</li> <li>whitelist</li> <li>blacklist</li> </ul> <p>Provides high-level functions like in_scope(), which includes both whitelist and blacklist checks.</p> Source code in <code>bbot/scanner/target.py</code> <pre><code>class BBOTTarget:\n    \"\"\"\n    A convenient abstraction of a scan target that contains three subtargets:\n        - seeds\n        - whitelist\n        - blacklist\n\n    Provides high-level functions like in_scope(), which includes both whitelist and blacklist checks.\n    \"\"\"\n\n    def __init__(self, *seeds, whitelist=None, blacklist=None, strict_scope=False, scan=None):\n        self.scan = scan\n        self.strict_scope = strict_scope\n        self.seeds = ScanSeeds(*seeds, strict_dns_scope=strict_scope, scan=scan)\n        if whitelist is None:\n            whitelist = self.seeds.hosts\n        self.whitelist = ScanWhitelist(*whitelist, strict_dns_scope=strict_scope, scan=scan)\n        if blacklist is None:\n            blacklist = []\n        self.blacklist = ScanBlacklist(*blacklist, scan=scan)\n\n    @property\n    def json(self):\n        return {\n            \"seeds\": sorted([e.data for e in self.seeds]),\n            \"whitelist\": sorted([e.data for e in self.whitelist]),\n            \"blacklist\": sorted([e.data for e in self.blacklist]),\n            \"strict_scope\": self.strict_scope,\n            \"hash\": self.hash.hex(),\n            \"seed_hash\": self.seeds.hash.hex(),\n            \"whitelist_hash\": self.whitelist.hash.hex(),\n            \"blacklist_hash\": self.blacklist.hash.hex(),\n            \"scope_hash\": self.scope_hash.hex(),\n        }\n\n    @property\n    def hash(self):\n        sha1_hash = sha1()\n        for target_hash in [t.hash for t in (self.seeds, self.whitelist, self.blacklist)]:\n            sha1_hash.update(target_hash)\n        return sha1_hash.digest()\n\n    @property\n    def scope_hash(self):\n        sha1_hash = 
sha1()\n        # Consider only the hash values of the whitelist and blacklist\n        for target_hash in [t.hash for t in (self.whitelist, self.blacklist)]:\n            sha1_hash.update(target_hash)\n        return sha1_hash.digest()\n\n    def in_scope(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is in scope.\n        Accepts either events or string data.\n\n        Checks whitelist and blacklist.\n        If `host` is an event and its scope distance is zero, it will automatically be considered in-scope.\n\n        Examples:\n            Check if a URL is in scope:\n            &gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        try:\n            e = make_event(host, dummy=True)\n        except ValidationError:\n            return False\n        in_scope = e.scope_distance == 0 or self.whitelisted(e)\n        return in_scope and not self.blacklisted(e)\n\n    def blacklisted(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is blacklisted.\n\n        Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n        Args:\n            host (str or IPAddress or Event): The host to check against the blacklist\n\n        Examples:\n            Check if a URL's host is blacklisted:\n            &gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        return host in self.blacklist\n\n    def whitelisted(self, host):\n        \"\"\"\n        Check whether a hostname, url, IP, etc. is whitelisted.\n\n        Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n        Args:\n            host (str or IPAddress or Event): The host to check against the whitelist\n\n        Examples:\n            Check if a URL's host is whitelisted:\n            &gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\n            True\n        \"\"\"\n        return host in self.whitelist\n\n    @property\n    def minimal(self):\n        \"\"\"\n        A slimmer, serializable version of the target designed for simple scope checks\n\n        This version doesn't have the events, only their hosts. This allows it to be passed across process boundaries.\n        \"\"\"\n        return self.__class__(\n            whitelist=self.whitelist.inputs,\n            blacklist=self.blacklist.inputs,\n            strict_scope=self.strict_scope,\n        )\n\n    def __eq__(self, other):\n        return self.hash == other.hash\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.minimal","title":"minimal  <code>property</code>","text":"<pre><code>minimal\n</code></pre> <p>A slimmer, serializable version of the target designed for simple scope checks</p> <p>This version doesn't have the events, only their hosts. This allows it to be passed across process boundaries.</p>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.blacklisted","title":"blacklisted","text":"<pre><code>blacklisted(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. 
is blacklisted.</p> <p>Note that <code>host</code> can be a hostname, IP address, CIDR, email address, or any BBOT <code>Event</code> with the <code>host</code> attribute.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str or IPAddress or Event</code>)           \u2013            <p>The host to check against the blacklist</p> </li> </ul> <p>Examples:</p> <p>Check if a URL's host is blacklisted:</p> <pre><code>&gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def blacklisted(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. is blacklisted.\n\n    Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n    Args:\n        host (str or IPAddress or Event): The host to check against the blacklist\n\n    Examples:\n        Check if a URL's host is blacklisted:\n        &gt;&gt;&gt; preset.blacklisted(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    return host in self.blacklist\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.in_scope","title":"in_scope","text":"<pre><code>in_scope(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. is in scope. Accepts either events or string data.</p> <p>Checks whitelist and blacklist. If <code>host</code> is an event and its scope distance is zero, it will automatically be considered in-scope.</p> <p>Examples:</p> <p>Check if a URL is in scope:</p> <pre><code>&gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def in_scope(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. is in scope.\n    Accepts either events or string data.\n\n    Checks whitelist and blacklist.\n    If `host` is an event and its scope distance is zero, it will automatically be considered in-scope.\n\n    Examples:\n        Check if a URL is in scope:\n        &gt;&gt;&gt; preset.in_scope(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    try:\n        e = make_event(host, dummy=True)\n    except ValidationError:\n        return False\n    in_scope = e.scope_distance == 0 or self.whitelisted(e)\n    return in_scope and not self.blacklisted(e)\n</code></pre>"},{"location":"dev/target/#bbot.scanner.target.BBOTTarget.whitelisted","title":"whitelisted","text":"<pre><code>whitelisted(host)\n</code></pre> <p>Check whether a hostname, url, IP, etc. is whitelisted.</p> <p>Note that <code>host</code> can be a hostname, IP address, CIDR, email address, or any BBOT <code>Event</code> with the <code>host</code> attribute.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str or IPAddress or Event</code>)           \u2013            <p>The host to check against the whitelist</p> </li> </ul> <p>Examples:</p> <p>Check if a URL's host is whitelisted:</p> <pre><code>&gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\nTrue\n</code></pre> Source code in <code>bbot/scanner/target.py</code> <pre><code>def whitelisted(self, host):\n    \"\"\"\n    Check whether a hostname, url, IP, etc. 
is whitelisted.\n\n    Note that `host` can be a hostname, IP address, CIDR, email address, or any BBOT `Event` with the `host` attribute.\n\n    Args:\n        host (str or IPAddress or Event): The host to check against the whitelist\n\n    Examples:\n        Check if a URL's host is whitelisted:\n        &gt;&gt;&gt; preset.whitelisted(\"http://www.evilcorp.com\")\n        True\n    \"\"\"\n    return host in self.whitelist\n</code></pre>"},{"location":"dev/tests/","title":"Unit Tests","text":"<p>BBOT takes tests seriously. Every module must have a custom-written test that actually tests its functionality. Don't worry if you want to contribute but you aren't used to writing tests. If you open a draft PR, we will help write them :)</p> <p>We use black for code formatting, flake8 for linting, and pytest for tests.</p>"},{"location":"dev/tests/#running-tests-locally","title":"Running tests locally","text":"<p>We have GitHub Actions that automatically run tests whenever you open a Pull Request. However, you can also run the tests locally with <code>pytest</code>:</p> <pre><code># format code with black\npoetry run black .\n\n# lint with flake8\npoetry run flake8\n\n# run all tests with pytest (takes roughly 30 minutes)\npoetry run pytest\n</code></pre>"},{"location":"dev/tests/#running-specific-tests","title":"Running specific tests","text":"<p>If you only want to run a single test, you can select it with <code>-k</code>:</p> <pre><code># run only the sslcert test\npoetry run pytest -k test_module_sslcert\n</code></pre> <p>You can also filter like this: <pre><code># run all the module tests except for sslcert\npoetry run pytest -k \"test_module_ and not test_module_sslcert\"\n</code></pre></p> <p>If you want to see the output of your module, you can enable <code>--log-cli-level</code>: <pre><code>poetry run pytest --log-cli-level=DEBUG\n</code></pre></p>"},{"location":"dev/tests/#example-writing-a-module-test","title":"Example: Writing a Module Test","text":"<p>To write a test for your module, create a new Python file in <code>bbot/test/test_step_2/module_tests</code>. 
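As you work on it, you can combine the pytest flags shown above to run only your new test with live debug output; for the example module below, that would look something like this: <pre><code># run only your new module's test, with debug logging visible\npoetry run pytest -k test_module_mymodule --log-cli-level=DEBUG\n</code></pre> 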
Your filename must be <code>test_module_&lt;module_name&gt;</code>:</p> test_module_mymodule.py<pre><code>from .base import ModuleTestBase\n\n\nclass TestMyModule(ModuleTestBase):\n    targets = [\"blacklanternsecurity.com\"]\n    config_overrides = {\"modules\": {\"mymodule\": {\"api_key\": \"deadbeef\"}}}\n\n    async def setup_after_prep(self, module_test):\n        # mock HTTP response\n        module_test.httpx_mock.add_response(\n            url=\"https://api.com/sudomains?apikey=deadbeef&amp;domain=blacklanternsecurity.com\",\n            json={\n                \"subdomains\": [\n                    \"www.blacklanternsecurity.com\",\n                    \"dev.blacklanternsecurity.com\"\n                ],\n            },\n        )\n        # mock DNS\n        await module_test.mock_dns(\n            {\n                \"blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n                \"www.blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n                \"dev.blacklanternsecurity.com\": {\"A\": [\"1.2.3.4\"]},\n            }\n        )\n\n    def check(self, module_test, events):\n        # here is where we check to make sure it worked\n        dns_names = [e.data for e in events if e.type == \"DNS_NAME\"]\n        # temporary log messages for debugging\n        for e in dns_names:\n            self.log.critical(e)\n        assert \"www.blacklanternsecurity.com\" in dns_names, \"failed to find subdomain #1\"\n        assert \"dev.blacklanternsecurity.com\" in dns_names, \"failed to find subdomain #2\"\n</code></pre>"},{"location":"dev/tests/#debugging-a-test","title":"Debugging a test","text":"<p>Similar to debugging from within a module, you can debug from within a test using <code>self.log.critical()</code>, etc:</p> <pre><code>    def check(self, module_test, events):\n        for e in events:\n            # bright red\n            self.log.critical(e.type)\n            # bright green\n            self.log.hugesuccess(e.data)\n            # bright orange\n            self.log.hugewarning(e.tags)\n            # bright blue\n            self.log.hugeinfo(e.parent)\n</code></pre>"},{"location":"dev/tests/#more-advanced-tests","title":"More advanced tests","text":"<p>If you have questions about tests or need to write a more advanced test, come talk to us on GitHub or Discord.</p> <p>It's also a good idea to look through our existing tests. BBOT has over a hundred of them, so you might find one that's similar to what you're trying to do.</p>"},{"location":"dev/helpers/","title":"BBOT Helpers","text":"<p>In this section are various helper functions that are designed to make your life easier when devving on BBOT. 
Whether you're extending BBOT by writing a module or working on its core engine, these functions are designed to act as useful machine parts to perform essential tasks, such as making a web request or executing a DNS query.</p> <p>The vast majority of these helpers can be accessed directly from the <code>.helpers</code> attribute of a scan or module, like so:</p> <pre><code>class MyModule(BaseModule):\n\n    ...\n\n    async def handle_event(self, event):\n        # Web Request\n        response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        # DNS query\n        for ip in await self.helpers.resolve(\"www.evilcorp.com\"):\n            self.hugesuccess(str(ip))\n\n        # Execute shell command\n        completed_process = await self.run_process(\"ls\", \"-l\")\n        self.hugesuccess(completed_process.stdout)\n\n        # Split a DNS name into subdomain / domain\n        self.helpers.split_domain(\"www.internal.evilcorp.co.uk\")\n        # (\"www.internal\", \"evilcorp.co.uk\")\n</code></pre> <p>Next Up: Command Helpers --&gt;</p>"},{"location":"dev/helpers/command/","title":"Command Helpers","text":"<p>These are helpers related to executing shell commands. They are used throughout BBOT and its modules for executing various binaries such as <code>masscan</code>, <code>nuclei</code>, etc.</p> <p>These helpers can be invoked directly from <code>self.helpers</code>, but inside a module they should always use <code>self.run_process()</code> or <code>self.run_process_live()</code>. These are light wrappers which ensure the running process is tracked by the module so that it can be easily terminated should the user need to kill the module:</p> <pre><code># simple subprocess\nls_result = await self.run_process(\"ls\", \"-l\")\nfor line in ls_result.stdout.splitlines():\n    # ...\n\n# iterate through each line in real time\nasync for line in self.run_process_live([\"grep\", \"-R\"]):\n    # ...\n</code></pre>"},{"location":"dev/helpers/command/#bbot.core.helpers.command.run","title":"run  <code>async</code>","text":"<pre><code>run(self, *command, check=False, text=True, idle_timeout=None, **kwargs)\n</code></pre> <p>Runs a command asynchronously and gets its output as a string.</p> <pre><code>This method is a simple helper for executing a command and capturing its output.\nIf an error occurs during execution, it can optionally raise an error or just log the stderr.\n\nArgs:\n    *command (str): The command to run as separate arguments.\n    check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                            Defaults to False.\n    text (bool, optional): If set to True, decodes the subprocess output to string. 
Defaults to True.\n    idle_timeout (int, optional): Sets a limit on the number of seconds the process can run before throwing a TimeoutError\n    **kwargs (dict): Additional keyword arguments for the subprocess.\n\nReturns:\n    CompletedProcess: A completed process object with attributes for the command, return code, stdout, and stderr.\n\nRaises:\n    CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\nExamples:\n    &gt;&gt;&gt; process = await run([\"ls\", \"/tmp\"])\n    &gt;&gt;&gt; process.stdout\n    \"file1.txt\n</code></pre> <p>file2.txt\"</p> Source code in <code>bbot/core/helpers/command.py</code> <pre><code>async def run(self, *command, check=False, text=True, idle_timeout=None, **kwargs):\n    \"\"\"Runs a command asynchronously and gets its output as a string.\n\n    This method is a simple helper for executing a command and capturing its output.\n    If an error occurs during execution, it can optionally raise an error or just log the stderr.\n\n    Args:\n        *command (str): The command to run as separate arguments.\n        check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                                Defaults to False.\n        text (bool, optional): If set to True, decodes the subprocess output to string. Defaults to True.\n        idle_timeout (int, optional): Sets a limit on the number of seconds the process can run before throwing a TimeoutError\n        **kwargs (dict): Additional keyword arguments for the subprocess.\n\n    Returns:\n        CompletedProcess: A completed process object with attributes for the command, return code, stdout, and stderr.\n\n    Raises:\n        CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\n    Examples:\n        &gt;&gt;&gt; process = await run([\"ls\", \"/tmp\"])\n        &gt;&gt;&gt; process.stdout\n        \"file1.txt\\nfile2.txt\"\n    \"\"\"\n    # proc_tracker optionally keeps track of which processes are running under which modules\n    # this allows for graceful SIGINTing of a module's processes in the case when it's killed\n    proc_tracker = kwargs.pop(\"_proc_tracker\", set())\n    log_stderr = kwargs.pop(\"_log_stderr\", True)\n    proc, _input, command = await self._spawn_proc(*command, **kwargs)\n    if proc is not None:\n        proc_tracker.add(proc)\n        try:\n            if _input is not None:\n                if isinstance(_input, (list, tuple)):\n                    _input = b\"\\n\".join(smart_encode(i) for i in _input) + b\"\\n\"\n                else:\n                    _input = smart_encode(_input)\n\n            try:\n                if idle_timeout is not None:\n                    stdout, stderr = await asyncio.wait_for(proc.communicate(_input), timeout=idle_timeout)\n                else:\n                    stdout, stderr = await proc.communicate(_input)\n            except asyncio.exceptions.TimeoutError:\n                proc.send_signal(SIGINT)\n                raise\n\n            # surface stderr\n            if text:\n                if stderr is not None:\n                    stderr = smart_decode(stderr)\n                if stdout is not None:\n                    stdout = smart_decode(stdout)\n            if proc.returncode:\n                if check:\n                    raise CalledProcessError(proc.returncode, command, output=stdout, stderr=stderr)\n                if stderr and log_stderr:\n                    command_str = \" \".join(command)\n     
               log.warning(f\"Stderr for run({command_str}):\\n\\t{stderr}\")\n\n            return CompletedProcess(command, proc.returncode, stdout, stderr)\n        finally:\n            proc_tracker.remove(proc)\n</code></pre>"},{"location":"dev/helpers/command/#bbot.core.helpers.command.run_live","title":"run_live  <code>async</code>","text":"<pre><code>run_live(self, *command, check=False, text=True, idle_timeout=None, **kwargs)\n</code></pre> <p>Runs a command asynchronously and iterates through its output line by line in realtime.</p> <p>This method is useful for executing a command and capturing its output on-the-fly, as it is generated. If an error occurs during execution, it can optionally raise an error or just log the stderr.</p> <p>Parameters:</p> <ul> <li> <code>*command</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            <p>The command to run as separate arguments.</p> </li> <li> <code>check</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>If set to True, raises an error if the subprocess exits with a non-zero status.                     Defaults to False.</p> </li> <li> <code>text</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If set to True, decodes the subprocess output to string. Defaults to True.</p> </li> <li> <code>idle_timeout</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>Sets a limit on the number of seconds the process can remain idle (no lines sent to stdout) before throwing a TimeoutError</p> </li> <li> <code>**kwargs</code>               (<code>dict</code>, default:                   <code>{}</code> )           \u2013            <p>Additional keyword arguments for the subprocess.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>str or bytes: The output lines of the command, either as a decoded string (if <code>text=True</code>)           or as bytes (if <code>text=False</code>).</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>CalledProcessError</code>             \u2013            <p>If the subprocess exits with a non-zero status and <code>check=True</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async for line in run_live([\"tail\", \"-f\", \"/var/log/auth.log\"]):\n...     log.info(line)\n</code></pre> Source code in <code>bbot/core/helpers/command.py</code> <pre><code>async def run_live(self, *command, check=False, text=True, idle_timeout=None, **kwargs):\n    \"\"\"Runs a command asynchronously and iterates through its output line by line in realtime.\n\n    This method is useful for executing a command and capturing its output on-the-fly, as it is generated.\n    If an error occurs during execution, it can optionally raise an error or just log the stderr.\n\n    Args:\n        *command (str): The command to run as separate arguments.\n        check (bool, optional): If set to True, raises an error if the subprocess exits with a non-zero status.\n                                Defaults to False.\n        text (bool, optional): If set to True, decodes the subprocess output to string. 
Defaults to True.\n        idle_timeout (int, optional): Sets a limit on the number of seconds the process can remain idle (no lines sent to stdout) before throwing a TimeoutError\n        **kwargs (dict): Additional keyword arguments for the subprocess.\n\n    Yields:\n        str or bytes: The output lines of the command, either as a decoded string (if `text=True`)\n                      or as bytes (if `text=False`).\n\n    Raises:\n        CalledProcessError: If the subprocess exits with a non-zero status and `check=True`.\n\n    Examples:\n        &gt;&gt;&gt; async for line in run_live([\"tail\", \"-f\", \"/var/log/auth.log\"]):\n        ...     log.info(line)\n    \"\"\"\n    # proc_tracker optionally keeps track of which processes are running under which modules\n    # this allows for graceful SIGINTing of a module's processes in the case when it's killed\n    proc_tracker = kwargs.pop(\"_proc_tracker\", set())\n    log_stderr = kwargs.pop(\"_log_stderr\", True)\n    proc, _input, command = await self._spawn_proc(*command, **kwargs)\n    if proc is not None:\n        proc_tracker.add(proc)\n        try:\n            input_task = None\n            if _input is not None:\n                input_task = asyncio.create_task(_write_stdin(proc, _input))\n\n            while 1:\n                try:\n                    if idle_timeout is not None:\n                        line = await asyncio.wait_for(proc.stdout.readline(), timeout=idle_timeout)\n                    else:\n                        line = await proc.stdout.readline()\n                except asyncio.exceptions.TimeoutError:\n                    proc.send_signal(SIGINT)\n                    raise\n                except ValueError as e:\n                    command_str = \" \".join([str(c) for c in command])\n                    log.warning(f\"Error executing command {command_str}: {e}\")\n                    log.trace(traceback.format_exc())\n                    continue\n                if not line:\n                    break\n                if text:\n                    line = smart_decode(line).rstrip(\"\\r\\n\")\n                else:\n                    line = line.rstrip(b\"\\r\\n\")\n                yield line\n\n            if input_task is not None:\n                try:\n                    await input_task\n                except ConnectionError:\n                    log.trace(f\"ConnectionError in command: {command}, kwargs={kwargs}\")\n                    log.trace(traceback.format_exc())\n            await proc.wait()\n\n            if proc.returncode:\n                stdout, stderr = await proc.communicate()\n                if text:\n                    if stderr is not None:\n                        stderr = smart_decode(stderr)\n                    if stdout is not None:\n                        stdout = smart_decode(stdout)\n                if check:\n                    raise CalledProcessError(proc.returncode, command, output=stdout, stderr=stderr)\n                # surface stderr\n                if stderr and log_stderr:\n                    command_str = \" \".join(command)\n                    log.warning(f\"Stderr for run_live({command_str}):\\n\\t{stderr}\")\n        finally:\n            proc_tracker.remove(proc)\n</code></pre>"},{"location":"dev/helpers/dns/","title":"DNS","text":"<p>These are helpers related to DNS resolution. 
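A minimal one-liner is shown a little further down; for orientation, here is also a slightly fuller sketch of how a module might combine a few of these helpers (illustrative only, not taken from the BBOT source; exact signatures and return shapes are in the <code>DNSHelper</code> reference below): <pre><code>async def handle_event(self, event):\n    hostname = \"www.evilcorp.com\"\n\n    # resolve() returns an iterable of answers (IP addresses, etc.)\n    for answer in await self.helpers.resolve(hostname):\n        self.hugesuccess(f\"{hostname} -&gt; {answer}\")\n\n    # is_wildcard() returns a per-rdtype verdict, e.g. {\"A\": (True, \"evilcorp.com\")}\n    wildcard_info = await self.helpers.is_wildcard(hostname, rdtypes=[\"A\", \"AAAA\"])\n    for rdtype, (is_wildcard, parent) in wildcard_info.items():\n        if is_wildcard:\n            self.hugewarning(f\"{hostname} is a wildcard under {parent} ({rdtype})\")\n</code></pre> 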
They are used throughout BBOT and its modules for performing DNS lookups and detecting DNS wildcards, etc.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.resolve(\"evilcorp.com\")\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper","title":"DNSHelper","text":"<p>               Bases: <code>EngineClient</code></p> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>class DNSHelper(EngineClient):\n\n    SERVER_CLASS = DNSEngine\n    ERROR_CLASS = DNSError\n\n    \"\"\"Helper class for DNS-related operations within BBOT.\n\n    This class provides mechanisms for host resolution, wildcard domain detection, event tagging, and more.\n    It centralizes all DNS-related activities in BBOT, offering both synchronous and asynchronous methods\n    for DNS resolution, as well as various utilities for batch resolution and DNS query filtering.\n\n    Attributes:\n        parent_helper: A reference to the instantiated `ConfigAwareHelper` (typically `scan.helpers`).\n        resolver (BBOTAsyncResolver): An asynchronous DNS resolver tailored for BBOT with rate-limiting capabilities.\n        timeout (int): The timeout value for DNS queries. Defaults to 5 seconds.\n        retries (int): The number of retries for failed DNS queries. Defaults to 1.\n        abort_threshold (int): The threshold for aborting after consecutive failed queries. Defaults to 50.\n        runaway_limit (int): Maximum allowed distance for consecutive DNS resolutions. Defaults to 5.\n        all_rdtypes (list): A list of DNS record types to be considered during operations.\n        wildcard_ignore (tuple): Domains to be ignored during wildcard detection.\n        wildcard_tests (int): Number of tests to be run for wildcard detection. Defaults to 5.\n        _wildcard_cache (dict): Cache for wildcard detection results.\n        _dns_cache (LRUCache): Cache for DNS resolution results, limited in size.\n        resolver_file (Path): File containing system's current resolver nameservers.\n        filter_bad_ptrs (bool): Whether to filter out DNS names that appear to be auto-generated PTR records. 
Defaults to True.\n\n    Args:\n        parent_helper: The parent helper object with configuration details and utilities.\n\n    Raises:\n        DNSError: If an issue arises when creating the BBOTAsyncResolver instance.\n\n    Examples:\n        &gt;&gt;&gt; dns_helper = DNSHelper(parent_config)\n        &gt;&gt;&gt; resolved_host = dns_helper.resolver.resolve(\"example.com\")\n    \"\"\"\n\n    def __init__(self, parent_helper):\n        self.parent_helper = parent_helper\n        self.config = self.parent_helper.config\n        self.dns_config = self.config.get(\"dns\", {})\n        engine_debug = self.config.get(\"engine\", {}).get(\"debug\", False)\n        super().__init__(server_kwargs={\"config\": self.config}, debug=engine_debug)\n\n        # resolver\n        self.timeout = self.dns_config.get(\"timeout\", 5)\n        self.resolver = dns.asyncresolver.Resolver()\n        self.resolver.rotate = True\n        self.resolver.timeout = self.timeout\n        self.resolver.lifetime = self.timeout\n\n        self.runaway_limit = self.dns_config.get(\"runaway_limit\", 5)\n\n        # wildcard handling\n        self.wildcard_disable = self.dns_config.get(\"wildcard_disable\", False)\n        self.wildcard_ignore = RadixTarget()\n        for d in self.dns_config.get(\"wildcard_ignore\", []):\n            self.wildcard_ignore.insert(d)\n\n        # copy the system's current resolvers to a text file for tool use\n        self.system_resolvers = dns.resolver.Resolver().nameservers\n        # TODO: DNS server speed test (start in background task)\n        self.resolver_file = self.parent_helper.tempfile(self.system_resolvers, pipe=False)\n\n        # brute force helper\n        self._brute = None\n\n        self._is_wildcard_cache = LFUCache(maxsize=1000)\n        self._is_wildcard_domain_cache = LFUCache(maxsize=1000)\n\n    async def resolve(self, query, **kwargs):\n        return await self.run_and_return(\"resolve\", query=query, **kwargs)\n\n    async def resolve_raw(self, query, **kwargs):\n        return await self.run_and_return(\"resolve_raw\", query=query, **kwargs)\n\n    async def resolve_batch(self, queries, **kwargs):\n        agen = self.run_and_yield(\"resolve_batch\", queries=queries, **kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    async def resolve_raw_batch(self, queries):\n        agen = self.run_and_yield(\"resolve_raw_batch\", queries=queries)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    @property\n    def brute(self):\n        if self._brute is None:\n            from .brute import DNSBrute\n\n            self._brute = DNSBrute(self.parent_helper)\n        return self._brute\n\n    @async_cachedmethod(\n        lambda self: self._is_wildcard_cache,\n        key=lambda query, rdtypes, raw_dns_records: (query, tuple(sorted(rdtypes)), bool(raw_dns_records)),\n    )\n    async def is_wildcard(self, query, rdtypes, raw_dns_records=None):\n        \"\"\"\n        Use this method to check whether a *host* is a wildcard entry\n\n        This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.\n\n        If you want to know whether a domain is using wildcard DNS, use `is_wildcard_domain()` instead.\n\n        
Args:\n            query (str): The hostname to check for a wildcard entry.\n            ips (list, optional): List of IPs to compare against, typically obtained from a previous DNS resolution of the query.\n            rdtype (str, optional): The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.\n\n        Returns:\n            dict: A dictionary indicating if the query is a wildcard for each checked DNS record type.\n                Keys are DNS record types like \"A\", \"AAAA\", etc.\n                Values are tuples where the first element is a boolean indicating if the query is a wildcard,\n                and the second element is the wildcard parent if it's a wildcard.\n\n        Raises:\n            ValueError: If only one of `ips` or `rdtype` is specified or if no valid IPs are specified.\n\n        Examples:\n            &gt;&gt;&gt; is_wildcard(\"www.github.io\")\n            {\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n\n            &gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n            {\"A\": (False, \"evilcorp.com\")}\n\n        Note:\n            `is_wildcard` can be True, False, or None (indicating that wildcard detection was inconclusive)\n        \"\"\"\n        query = self._wildcard_prevalidation(query)\n        if not query:\n            return {}\n\n        # skip check if the query is a domain\n        if is_domain(query):\n            return {}\n\n        return await self.run_and_return(\"is_wildcard\", query=query, rdtypes=rdtypes, raw_dns_records=raw_dns_records)\n\n    @async_cachedmethod(\n        lambda self: self._is_wildcard_domain_cache, key=lambda domain, rdtypes: (domain, tuple(sorted(rdtypes)))\n    )\n    async def is_wildcard_domain(self, domain, rdtypes):\n        domain = self._wildcard_prevalidation(domain)\n        if not domain:\n            return {}\n\n        return await self.run_and_return(\"is_wildcard_domain\", domain=domain, rdtypes=rdtypes)\n\n    def _wildcard_prevalidation(self, host):\n        if self.wildcard_disable:\n            return False\n\n        host = clean_dns_record(host)\n        # skip check if it's an IP or a plain hostname\n        if is_ip(host) or not \".\" in host:\n            return False\n\n        # skip if query isn't a dns name\n        if not is_dns_name(host):\n            return False\n\n        # skip check if the query's parent domain is excluded in the config\n        wildcard_ignore = self.wildcard_ignore.search(host)\n        if wildcard_ignore:\n            log.debug(f\"Skipping wildcard detection on {host} because {wildcard_ignore} is excluded in the config\")\n            return False\n\n        return host\n\n    async def _mock_dns(self, mock_data, custom_lookup_fn=None):\n        from .mock import MockResolver\n\n        self.resolver = MockResolver(mock_data, custom_lookup_fn=custom_lookup_fn)\n        await self.run_and_return(\"_mock_dns\", mock_data=mock_data, custom_lookup_fn=custom_lookup_fn)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve","title":"resolve  <code>async</code>","text":"<pre><code>resolve(query, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve(self, query, **kwargs):\n    return await self.run_and_return(\"resolve\", query=query, **kwargs)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve_batch","title":"resolve_batch  
<code>async</code>","text":"<pre><code>resolve_batch(queries, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve_batch(self, queries, **kwargs):\n    agen = self.run_and_yield(\"resolve_batch\", queries=queries, **kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.resolve_raw","title":"resolve_raw  <code>async</code>","text":"<pre><code>resolve_raw(query, **kwargs)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>async def resolve_raw(self, query, **kwargs):\n    return await self.run_and_return(\"resolve_raw\", query=query, **kwargs)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.is_wildcard","title":"is_wildcard  <code>async</code>","text":"<pre><code>is_wildcard(query, rdtypes, raw_dns_records=None)\n</code></pre> <p>Use this method to check whether a host is a wildcard entry</p> <p>This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.</p> <p>If you want to know whether a domain is using wildcard DNS, use <code>is_wildcard_domain()</code> instead.</p> <p>Parameters:</p> <ul> <li> <code>query</code>               (<code>str</code>)           \u2013            <p>The hostname to check for a wildcard entry.</p> </li> <li> <code>ips</code>               (<code>list</code>)           \u2013            <p>List of IPs to compare against, typically obtained from a previous DNS resolution of the query.</p> </li> <li> <code>rdtype</code>               (<code>str</code>)           \u2013            <p>The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary indicating if the query is a wildcard for each checked DNS record type. Keys are DNS record types like \"A\", \"AAAA\", etc. 
Values are tuples where the first element is a boolean indicating if the query is a wildcard, and the second element is the wildcard parent if it's a wildcard.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If only one of <code>ips</code> or <code>rdtype</code> is specified or if no valid IPs are specified.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_wildcard(\"www.github.io\")\n{\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n</code></pre> <pre><code>&gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n{\"A\": (False, \"evilcorp.com\")}\n</code></pre> Note <p><code>is_wildcard</code> can be True, False, or None (indicating that wildcard detection was inconclusive)</p> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>@async_cachedmethod(\n    lambda self: self._is_wildcard_cache,\n    key=lambda query, rdtypes, raw_dns_records: (query, tuple(sorted(rdtypes)), bool(raw_dns_records)),\n)\nasync def is_wildcard(self, query, rdtypes, raw_dns_records=None):\n    \"\"\"\n    Use this method to check whether a *host* is a wildcard entry\n\n    This can reliably tell the difference between a valid DNS record and a wildcard within a wildcard domain.\n\n    If you want to know whether a domain is using wildcard DNS, use `is_wildcard_domain()` instead.\n\n    Args:\n        query (str): The hostname to check for a wildcard entry.\n        ips (list, optional): List of IPs to compare against, typically obtained from a previous DNS resolution of the query.\n        rdtype (str, optional): The DNS record type (e.g., \"A\", \"AAAA\") to consider during the check.\n\n    Returns:\n        dict: A dictionary indicating if the query is a wildcard for each checked DNS record type.\n            Keys are DNS record types like \"A\", \"AAAA\", etc.\n            Values are tuples where the first element is a boolean indicating if the query is a wildcard,\n            and the second element is the wildcard parent if it's a wildcard.\n\n    Raises:\n        ValueError: If only one of `ips` or `rdtype` is specified or if no valid IPs are specified.\n\n    Examples:\n        &gt;&gt;&gt; is_wildcard(\"www.github.io\")\n        {\"A\": (True, \"github.io\"), \"AAAA\": (True, \"github.io\")}\n\n        &gt;&gt;&gt; is_wildcard(\"www.evilcorp.com\", ips=[\"93.184.216.34\"], rdtype=\"A\")\n        {\"A\": (False, \"evilcorp.com\")}\n\n    Note:\n        `is_wildcard` can be True, False, or None (indicating that wildcard detection was inconclusive)\n    \"\"\"\n    query = self._wildcard_prevalidation(query)\n    if not query:\n        return {}\n\n    # skip check if the query is a domain\n    if is_domain(query):\n        return {}\n\n    return await self.run_and_return(\"is_wildcard\", query=query, rdtypes=rdtypes, raw_dns_records=raw_dns_records)\n</code></pre>"},{"location":"dev/helpers/dns/#bbot.core.helpers.dns.DNSHelper.is_wildcard_domain","title":"is_wildcard_domain  <code>async</code>","text":"<pre><code>is_wildcard_domain(domain, rdtypes)\n</code></pre> Source code in <code>bbot/core/helpers/dns/dns.py</code> <pre><code>@async_cachedmethod(\n    lambda self: self._is_wildcard_domain_cache, key=lambda domain, rdtypes: (domain, tuple(sorted(rdtypes)))\n)\nasync def is_wildcard_domain(self, domain, rdtypes):\n    domain = self._wildcard_prevalidation(domain)\n    if not domain:\n        return {}\n\n    return await self.run_and_return(\"is_wildcard_domain\", domain=domain, 
rdtypes=rdtypes)\n</code></pre>"},{"location":"dev/helpers/interactsh/","title":"Interact.sh","text":"<p>A pure python implementation of ProjectDiscovery's interact.sh.</p> <p>\"Interactsh is an open-source tool for detecting out-of-band interactions. It is a tool designed to detect vulnerabilities that cause external interactions.\"</p> <ul> <li>https://app.interactsh.com</li> <li>https://github.com/projectdiscovery/interactsh</li> </ul> <p>This class facilitates interactions with the interact.sh service for out-of-band data exfiltration and vulnerability confirmation. It allows for customization by accepting server and token parameters from the configuration provided by <code>parent_helper</code>.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>               (<code>ConfigAwareHelper</code>)           \u2013            <p>An instance of a helper class containing configuration data.</p> </li> <li> <code>server</code>               (<code>str</code>)           \u2013            <p>The server to be used. If None (the default), a random server will be chosen from a predetermined list.</p> </li> <li> <code>correlation_id</code>               (<code>str</code>)           \u2013            <p>An identifier to correlate requests and responses. Default is None.</p> </li> <li> <code>custom_server</code>               (<code>str</code>)           \u2013            <p>Optional. A custom interact.sh server. Loaded from configuration.</p> </li> <li> <code>token</code>               (<code>str</code>)           \u2013            <p>Optional. A token for interact.sh API. Loaded from configuration.</p> </li> <li> <code>_poll_task</code>               (<code>AsyncTask</code>)           \u2013            <p>The task responsible for polling the interact.sh server.</p> </li> </ul> <p>Examples:</p> <pre><code># instantiate interact.sh client (no requests are sent yet)\n&gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n# register with an interact.sh server\n&gt;&gt;&gt; interactsh_domain = await interactsh_client.register()\n[INFO] Registering with interact.sh server: oast.me\n[INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n# simulate an out-of-band interaction\n&gt;&gt;&gt; await self.helpers.request(f\"https://{interactsh_domain}/test\")\n# wait for out-of-band interaction to be registered\n&gt;&gt;&gt; await asyncio.sleep(10)\n&gt;&gt;&gt; data_list = await interactsh_client.poll()\n&gt;&gt;&gt; print(data_list)\n[\n    {\n        \"protocol\": \"dns\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"q-type\": \"A\",\n        \"raw-request\": \"...\",\n        \"remote-address\": \"1.2.3.4\",\n        \"timestamp\": \"2023-09-15T21:09:23.187226851Z\"\n    },\n    {\n        \"protocol\": \"http\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        \"raw-request\": \"GET /test HTTP/1.1 ...\",\n        \"remote-address\": \"1.2.3.4\",\n        \"timestamp\": \"2023-09-15T21:09:24.155677967Z\"\n    }\n]\n# finally, shut down the client\n&gt;&gt;&gt; await interactsh_client.deregister()\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>class Interactsh:\n    \"\"\"\n    A pure python implementation of ProjectDiscovery's interact.sh.\n\n    *\"Interactsh is an open-source tool for detecting 
out-of-band interactions. It is a tool designed to detect vulnerabilities that cause external interactions.\"*\n\n    - https://app.interactsh.com\n    - https://github.com/projectdiscovery/interactsh\n\n    This class facilitates interactions with the interact.sh service for\n    out-of-band data exfiltration and vulnerability confirmation. It allows\n    for customization by accepting server and token parameters from the\n    configuration provided by `parent_helper`.\n\n    Attributes:\n        parent_helper (ConfigAwareHelper): An instance of a helper class containing configuration data.\n        server (str): The server to be used. If None (the default), a random server will be chosen from a predetermined list.\n        correlation_id (str): An identifier to correlate requests and responses. Default is None.\n        custom_server (str): Optional. A custom interact.sh server. Loaded from configuration.\n        token (str): Optional. A token for interact.sh API. Loaded from configuration.\n        _poll_task (AsyncTask): The task responsible for polling the interact.sh server.\n\n    Examples:\n        ```python\n        # instantiate interact.sh client (no requests are sent yet)\n        &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n        # register with an interact.sh server\n        &gt;&gt;&gt; interactsh_domain = await interactsh_client.register()\n        [INFO] Registering with interact.sh server: oast.me\n        [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n        # simulate an out-of-band interaction\n        &gt;&gt;&gt; await self.helpers.request(f\"https://{interactsh_domain}/test\")\n        # wait for out-of-band interaction to be registered\n        &gt;&gt;&gt; await asyncio.sleep(10)\n        &gt;&gt;&gt; data_list = await interactsh_client.poll()\n        &gt;&gt;&gt; print(data_list)\n        [\n            {\n                \"protocol\": \"dns\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"q-type\": \"A\",\n                \"raw-request\": \"...\",\n                \"remote-address\": \"1.2.3.4\",\n                \"timestamp\": \"2023-09-15T21:09:23.187226851Z\"\n            },\n            {\n                \"protocol\": \"http\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"full-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                \"raw-request\": \"GET /test HTTP/1.1 ...\",\n                \"remote-address\": \"1.2.3.4\",\n                \"timestamp\": \"2023-09-15T21:09:24.155677967Z\"\n            }\n        ]\n        # finally, shut down the client\n        &gt;&gt;&gt; await interactsh_client.deregister()\n        ```\n    \"\"\"\n\n    def __init__(self, parent_helper, poll_interval=10):\n        self.parent_helper = parent_helper\n        self.server = None\n        self.correlation_id = None\n        self.custom_server = self.parent_helper.config.get(\"interactsh_server\", None)\n        self.token = self.parent_helper.config.get(\"interactsh_token\", None)\n        self.poll_interval = poll_interval\n        self._poll_task = None\n\n    async def register(self, callback=None):\n        \"\"\"\n        Registers the instance with an interact.sh server and sets up polling.\n\n        Generates RSA keys for secure communication, builds a correlation ID,\n        and 
sends a POST request to an interact.sh server to register. Optionally,\n        starts an asynchronous polling task to listen for interactions.\n\n        Args:\n            callback (callable, optional): A function to be called each time new interactions are received.\n\n        Returns:\n            str: The registered domain for out-of-band interactions.\n\n        Raises:\n            InteractshError: If registration with an interact.sh server fails.\n\n        Examples:\n            &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n            &gt;&gt;&gt; registered_domain = await interactsh_client.register()\n            [INFO] Registering with interact.sh server: oast.me\n            [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n        \"\"\"\n        rsa = RSA.generate(1024)\n\n        self.public_key = rsa.publickey().exportKey()\n        self.private_key = rsa.exportKey()\n\n        encoded_public_key = base64.b64encode(self.public_key).decode(\"utf8\")\n\n        uuid = uuid4().hex.ljust(33, \"a\")\n        guid = \"\".join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in uuid)\n\n        self.correlation_id = guid[:20]\n        self.secret = str(uuid4())\n        headers = {}\n\n        if self.custom_server:\n            if not self.token:\n                log.verbose(\"Interact.sh token is not set\")\n            else:\n                headers[\"Authorization\"] = self.token\n            self.server_list = [str(self.custom_server)]\n        else:\n            self.server_list = random.sample(server_list, k=len(server_list))\n        for server in self.server_list:\n            log.info(f\"Registering with interact.sh server: {server}\")\n            data = {\n                \"public-key\": encoded_public_key,\n                \"secret-key\": self.secret,\n                \"correlation-id\": self.correlation_id,\n            }\n            r = await self.parent_helper.request(\n                f\"https://{server}/register\", headers=headers, json=data, method=\"POST\"\n            )\n            if r is None:\n                continue\n            try:\n                msg = r.json().get(\"message\", \"\")\n                assert \"registration successful\" in msg\n            except Exception:\n                log.debug(f\"Failed to register with interactsh server {self.server}\")\n                continue\n            self.server = server\n            self.domain = f\"{guid}.{self.server}\"\n            break\n\n        if not self.server:\n            raise InteractshError(f\"Failed to register with an interactsh server\")\n\n        log.info(\n            f\"Successfully registered to interactsh server {self.server} with correlation_id {self.correlation_id} [{self.domain}]\"\n        )\n\n        if callable(callback):\n            self._poll_task = asyncio.create_task(self.poll_loop(callback))\n\n        return self.domain\n\n    async def deregister(self):\n        \"\"\"\n        Deregisters the instance from the interact.sh server and cancels the polling task.\n\n        Sends a POST request to the server to deregister, using the correlation ID\n        and secret key generated during registration. 
Optionally, if a polling\n        task was started, it is cancelled.\n\n        Raises:\n            InteractshError: If required information is missing or if deregistration fails.\n\n        Examples:\n            &gt;&gt;&gt; await interactsh_client.deregister()\n        \"\"\"\n        if not self.server or not self.correlation_id or not self.secret:\n            raise InteractshError(f\"Missing required information to deregister\")\n\n        headers = {}\n        if self.token:\n            headers[\"Authorization\"] = self.token\n\n        data = {\"secret-key\": self.secret, \"correlation-id\": self.correlation_id}\n\n        r = await self.parent_helper.request(\n            f\"https://{self.server}/deregister\", headers=headers, json=data, method=\"POST\"\n        )\n\n        if self._poll_task is not None:\n            self._poll_task.cancel()\n\n        if \"success\" not in getattr(r, \"text\", \"\"):\n            raise InteractshError(f\"Failed to de-register with interactsh server {self.server}\")\n\n    async def poll(self):\n        \"\"\"\n        Polls the interact.sh server for interactions tied to the current instance.\n\n        Sends a GET request to the server to fetch interactions associated with the\n        current correlation_id and secret key. Returned interactions are decrypted\n        using an AES key provided by the server response.\n\n        Raises:\n            InteractshError: If required information for polling is missing.\n\n        Returns:\n            list: A list of decrypted interaction data dictionaries.\n\n        Examples:\n            &gt;&gt;&gt; data_list = await interactsh_client.poll()\n            &gt;&gt;&gt; print(data_list)\n            [\n                {\n                    \"protocol\": \"dns\",\n                    \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                    ...\n                },\n                ...\n            ]\n        \"\"\"\n        if not self.server or not self.correlation_id or not self.secret:\n            raise InteractshError(f\"Missing required information to poll\")\n\n        headers = {}\n        if self.token:\n            headers[\"Authorization\"] = self.token\n\n        try:\n            r = await self.parent_helper.request(\n                f\"https://{self.server}/poll?id={self.correlation_id}&amp;secret={self.secret}\", headers=headers\n            )\n            if r is None:\n                raise InteractshError(\"Error polling interact.sh: No response from server\")\n\n            ret = []\n            data_list = r.json().get(\"data\", None)\n            if data_list:\n                aes_key = r.json()[\"aes_key\"]\n\n                for data in data_list:\n                    decrypted_data = self._decrypt(aes_key, data)\n                    ret.append(decrypted_data)\n            return ret\n        except Exception as e:\n            raise InteractshError(f\"Error polling interact.sh: {e}\")\n\n    async def poll_loop(self, callback):\n        \"\"\"\n        Starts a polling loop to continuously check for interactions with the interact.sh server.\n\n        Continuously polls the interact.sh server for interactions tied to the current instance,\n        using the `poll` method. 
When interactions are received, it executes the given callback\n        function with each interaction data.\n\n        Parameters:\n            callback (callable): The function to be called for every interaction received from the server.\n\n        Returns:\n            awaitable: An awaitable object that executes the internal `_poll_loop` method.\n\n        Examples:\n            &gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n        \"\"\"\n        async with self.parent_helper.scan._acatch(context=self._poll_loop):\n            return await self._poll_loop(callback)\n\n    async def _poll_loop(self, callback):\n        while 1:\n            if self.parent_helper.scan.stopping:\n                await asyncio.sleep(1)\n                continue\n            data_list = []\n            try:\n                data_list = await self.poll()\n            except InteractshError as e:\n                log.warning(e)\n                log.trace(traceback.format_exc())\n            if not data_list:\n                await asyncio.sleep(self.poll_interval)\n                continue\n            for data in data_list:\n                if data:\n                    await self.parent_helper.execute_sync_or_async(callback, data)\n\n    def _decrypt(self, aes_key, data):\n        \"\"\"\n        Decrypts and returns the data received from the interact.sh server.\n\n        Uses RSA and AES for decrypting the data. RSA with PKCS1_OAEP and SHA256 is used to decrypt the AES key,\n        and then AES (CFB mode) is used to decrypt the actual data payload.\n\n        Parameters:\n            aes_key (str): The AES key for decryption, encrypted with RSA and base64 encoded.\n            data (str): The data payload to decrypt, which is base64 encoded and AES encrypted.\n\n        Returns:\n            dict: The decrypted data, loaded as a JSON object.\n\n        Examples:\n            &gt;&gt;&gt; decrypted_data = self._decrypt(aes_key, data)\n        \"\"\"\n        private_key = RSA.importKey(self.private_key)\n        cipher = PKCS1_OAEP.new(private_key, hashAlgo=SHA256)\n        aes_plain_key = cipher.decrypt(base64.b64decode(aes_key))\n        decode = base64.b64decode(data)\n        bs = AES.block_size\n        iv = decode[:bs]\n        cryptor = AES.new(key=aes_plain_key, mode=AES.MODE_CFB, IV=iv, segment_size=128)\n        plain_text = cryptor.decrypt(decode)\n        return json.loads(plain_text[16:])\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.deregister","title":"deregister  <code>async</code>","text":"<pre><code>deregister()\n</code></pre> <p>Deregisters the instance from the interact.sh server and cancels the polling task.</p> <p>Sends a POST request to the server to deregister, using the correlation ID and secret key generated during registration. Optionally, if a polling task was started, it is cancelled.</p> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If required information is missing or if deregistration fails.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await interactsh_client.deregister()\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def deregister(self):\n    \"\"\"\n    Deregisters the instance from the interact.sh server and cancels the polling task.\n\n    Sends a POST request to the server to deregister, using the correlation ID\n    and secret key generated during registration. 
Optionally, if a polling\n    task was started, it is cancelled.\n\n    Raises:\n        InteractshError: If required information is missing or if deregistration fails.\n\n    Examples:\n        &gt;&gt;&gt; await interactsh_client.deregister()\n    \"\"\"\n    if not self.server or not self.correlation_id or not self.secret:\n        raise InteractshError(f\"Missing required information to deregister\")\n\n    headers = {}\n    if self.token:\n        headers[\"Authorization\"] = self.token\n\n    data = {\"secret-key\": self.secret, \"correlation-id\": self.correlation_id}\n\n    r = await self.parent_helper.request(\n        f\"https://{self.server}/deregister\", headers=headers, json=data, method=\"POST\"\n    )\n\n    if self._poll_task is not None:\n        self._poll_task.cancel()\n\n    if \"success\" not in getattr(r, \"text\", \"\"):\n        raise InteractshError(f\"Failed to de-register with interactsh server {self.server}\")\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.poll","title":"poll  <code>async</code>","text":"<pre><code>poll()\n</code></pre> <p>Polls the interact.sh server for interactions tied to the current instance.</p> <p>Sends a GET request to the server to fetch interactions associated with the current correlation_id and secret key. Returned interactions are decrypted using an AES key provided by the server response.</p> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If required information for polling is missing.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of decrypted interaction data dictionaries.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; data_list = await interactsh_client.poll()\n&gt;&gt;&gt; print(data_list)\n[\n    {\n        \"protocol\": \"dns\",\n        \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n        ...\n    },\n    ...\n]\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def poll(self):\n    \"\"\"\n    Polls the interact.sh server for interactions tied to the current instance.\n\n    Sends a GET request to the server to fetch interactions associated with the\n    current correlation_id and secret key. 
Returned interactions are decrypted\n    using an AES key provided by the server response.\n\n    Raises:\n        InteractshError: If required information for polling is missing.\n\n    Returns:\n        list: A list of decrypted interaction data dictionaries.\n\n    Examples:\n        &gt;&gt;&gt; data_list = await interactsh_client.poll()\n        &gt;&gt;&gt; print(data_list)\n        [\n            {\n                \"protocol\": \"dns\",\n                \"unique-id\": \"rg99x2f860h5466ou3so86i07n1m3013k\",\n                ...\n            },\n            ...\n        ]\n    \"\"\"\n    if not self.server or not self.correlation_id or not self.secret:\n        raise InteractshError(f\"Missing required information to poll\")\n\n    headers = {}\n    if self.token:\n        headers[\"Authorization\"] = self.token\n\n    try:\n        r = await self.parent_helper.request(\n            f\"https://{self.server}/poll?id={self.correlation_id}&amp;secret={self.secret}\", headers=headers\n        )\n        if r is None:\n            raise InteractshError(\"Error polling interact.sh: No response from server\")\n\n        ret = []\n        data_list = r.json().get(\"data\", None)\n        if data_list:\n            aes_key = r.json()[\"aes_key\"]\n\n            for data in data_list:\n                decrypted_data = self._decrypt(aes_key, data)\n                ret.append(decrypted_data)\n        return ret\n    except Exception as e:\n        raise InteractshError(f\"Error polling interact.sh: {e}\")\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.poll_loop","title":"poll_loop  <code>async</code>","text":"<pre><code>poll_loop(callback)\n</code></pre> <p>Starts a polling loop to continuously check for interactions with the interact.sh server.</p> <p>Continuously polls the interact.sh server for interactions tied to the current instance, using the <code>poll</code> method. When interactions are received, it executes the given callback function with each interaction data.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>callable</code>)           \u2013            <p>The function to be called for every interaction received from the server.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>awaitable</code>          \u2013            <p>An awaitable object that executes the internal <code>_poll_loop</code> method.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def poll_loop(self, callback):\n    \"\"\"\n    Starts a polling loop to continuously check for interactions with the interact.sh server.\n\n    Continuously polls the interact.sh server for interactions tied to the current instance,\n    using the `poll` method. 
When interactions are received, it executes the given callback\n    function with each interaction data.\n\n    Parameters:\n        callback (callable): The function to be called for every interaction received from the server.\n\n    Returns:\n        awaitable: An awaitable object that executes the internal `_poll_loop` method.\n\n    Examples:\n        &gt;&gt;&gt; await interactsh_client.poll_loop(my_callback)\n    \"\"\"\n    async with self.parent_helper.scan._acatch(context=self._poll_loop):\n        return await self._poll_loop(callback)\n</code></pre>"},{"location":"dev/helpers/interactsh/#bbot.core.helpers.interactsh.Interactsh.register","title":"register  <code>async</code>","text":"<pre><code>register(callback=None)\n</code></pre> <p>Registers the instance with an interact.sh server and sets up polling.</p> <p>Generates RSA keys for secure communication, builds a correlation ID, and sends a POST request to an interact.sh server to register. Optionally, starts an asynchronous polling task to listen for interactions.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>callable</code>, default:                   <code>None</code> )           \u2013            <p>A function to be called each time new interactions are received.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The registered domain for out-of-band interactions.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>InteractshError</code>             \u2013            <p>If registration with an interact.sh server fails.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n&gt;&gt;&gt; registered_domain = await interactsh_client.register()\n[INFO] Registering with interact.sh server: oast.me\n[INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n</code></pre> Source code in <code>bbot/core/helpers/interactsh.py</code> <pre><code>async def register(self, callback=None):\n    \"\"\"\n    Registers the instance with an interact.sh server and sets up polling.\n\n    Generates RSA keys for secure communication, builds a correlation ID,\n    and sends a POST request to an interact.sh server to register. 
Optionally,\n    starts an asynchronous polling task to listen for interactions.\n\n    Args:\n        callback (callable, optional): A function to be called each time new interactions are received.\n\n    Returns:\n        str: The registered domain for out-of-band interactions.\n\n    Raises:\n        InteractshError: If registration with an interact.sh server fails.\n\n    Examples:\n        &gt;&gt;&gt; interactsh_client = self.helpers.interactsh()\n        &gt;&gt;&gt; registered_domain = await interactsh_client.register()\n        [INFO] Registering with interact.sh server: oast.me\n        [INFO] Successfully registered to interactsh server oast.me with correlation_id rg99x2f860h5466ou3so [rg99x2f860h5466ou3so86i07n1m3013k.oast.me]\n    \"\"\"\n    rsa = RSA.generate(1024)\n\n    self.public_key = rsa.publickey().exportKey()\n    self.private_key = rsa.exportKey()\n\n    encoded_public_key = base64.b64encode(self.public_key).decode(\"utf8\")\n\n    uuid = uuid4().hex.ljust(33, \"a\")\n    guid = \"\".join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in uuid)\n\n    self.correlation_id = guid[:20]\n    self.secret = str(uuid4())\n    headers = {}\n\n    if self.custom_server:\n        if not self.token:\n            log.verbose(\"Interact.sh token is not set\")\n        else:\n            headers[\"Authorization\"] = self.token\n        self.server_list = [str(self.custom_server)]\n    else:\n        self.server_list = random.sample(server_list, k=len(server_list))\n    for server in self.server_list:\n        log.info(f\"Registering with interact.sh server: {server}\")\n        data = {\n            \"public-key\": encoded_public_key,\n            \"secret-key\": self.secret,\n            \"correlation-id\": self.correlation_id,\n        }\n        r = await self.parent_helper.request(\n            f\"https://{server}/register\", headers=headers, json=data, method=\"POST\"\n        )\n        if r is None:\n            continue\n        try:\n            msg = r.json().get(\"message\", \"\")\n            assert \"registration successful\" in msg\n        except Exception:\n            log.debug(f\"Failed to register with interactsh server {self.server}\")\n            continue\n        self.server = server\n        self.domain = f\"{guid}.{self.server}\"\n        break\n\n    if not self.server:\n        raise InteractshError(f\"Failed to register with an interactsh server\")\n\n    log.info(\n        f\"Successfully registered to interactsh server {self.server} with correlation_id {self.correlation_id} [{self.domain}]\"\n    )\n\n    if callable(callback):\n        self._poll_task = asyncio.create_task(self.poll_loop(callback))\n\n    return self.domain\n</code></pre>"},{"location":"dev/helpers/misc/","title":"Misc Helpers","text":"<p>These are miscellaneous helpers, used throughout BBOT and its modules for simple tasks such as parsing domains, ports, urls, etc.</p>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.add_get_params","title":"add_get_params","text":"<pre><code>add_get_params(url, params)\n</code></pre> <p>Add or update query parameters to the given URL.</p> <p>This function takes an existing URL and a dictionary of query parameters, updates or adds these parameters to the URL, and returns a new URL.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The original URL.</p> </li> <li> <code>params</code>               (<code>Dict[str, Any]</code>)           \u2013  
          <p>A dictionary containing the query parameters to be added or updated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ParseResult</code>          \u2013            <p>A named 6-tuple containing the components of the modified URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'bar': 2})\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=1&amp;bar=2', fragment='')\n</code></pre> <pre><code>&gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'foo': 2})\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=2', fragment='')\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def add_get_params(url, params):\n    \"\"\"\n    Add or update query parameters to the given URL.\n\n    This function takes an existing URL and a dictionary of query parameters,\n    updates or adds these parameters to the URL, and returns a new URL.\n\n    Args:\n        url (Union[str, ParseResult]): The original URL.\n        params (Dict[str, Any]): A dictionary containing the query parameters to be added or updated.\n\n    Returns:\n        ParseResult: A named 6-tuple containing the components of the modified URL.\n\n    Examples:\n        &gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'bar': 2})\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=1&amp;bar=2', fragment='')\n\n        &gt;&gt;&gt; add_get_params('https://www.evilcorp.com?foo=1', {'foo': 2})\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='foo=2', fragment='')\n    \"\"\"\n    parsed = parse_url(url)\n    old_params = dict(parse_qs(parsed.query))\n    old_params.update(params)\n    return parsed._replace(query=urlencode(old_params, doseq=True))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.as_completed","title":"as_completed  <code>async</code>","text":"<pre><code>as_completed(coros)\n</code></pre> <p>Async generator that yields completed Tasks as they are completed.</p> <p>Parameters:</p> <ul> <li> <code>coros</code>               (<code>iterable</code>)           \u2013            <p>An iterable of coroutine objects or asyncio Tasks.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>asyncio.Task: A Task object that has completed its execution.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def main():\n...     async for task in as_completed([coro1(), coro2(), coro3()]):\n...         result = task.result()\n...         print(f'Task completed with result: {result}')\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(main())\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def as_completed(coros):\n    \"\"\"\n    Async generator that yields completed Tasks as they are completed.\n\n    Args:\n        coros (iterable): An iterable of coroutine objects or asyncio Tasks.\n\n    Yields:\n        asyncio.Task: A Task object that has completed its execution.\n\n    Examples:\n        &gt;&gt;&gt; async def main():\n        ...     async for task in as_completed([coro1(), coro2(), coro3()]):\n        ...         result = task.result()\n        ...         
print(f'Task completed with result: {result}')\n\n        &gt;&gt;&gt; asyncio.run(main())\n    \"\"\"\n    tasks = {coro if isinstance(coro, asyncio.Task) else asyncio.create_task(coro): coro for coro in coros}\n    while tasks:\n        done, _ = await asyncio.wait(tasks.keys(), return_when=asyncio.FIRST_COMPLETED)\n        for task in done:\n            tasks.pop(task)\n            yield task\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.backup_file","title":"backup_file","text":"<pre><code>backup_file(filename, max_backups=10)\n</code></pre> <p>Renames a file by appending an iteration number as a backup. Recursively renames files up to a specified maximum number of backups.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str or Path</code>)           \u2013            <p>The file to backup.</p> </li> <li> <code>max_backups</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The maximum number of backups to keep. Defaults to 10.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>pathlib.Path: The new backup filepath.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; backup_file(\"/tmp/test.txt\")\nPosixPath(\"/tmp/test.0.txt\")\n&gt;&gt;&gt; backup_file(\"/tmp/test.0.txt\")\nPosixPath(\"/tmp/test.1.txt\")\n&gt;&gt;&gt; backup_file(\"/tmp/test.1.txt\")\nPosixPath(\"/tmp/test.2.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def backup_file(filename, max_backups=10):\n    \"\"\"\n    Renames a file by appending an iteration number as a backup. Recursively renames\n    files up to a specified maximum number of backups.\n\n    Args:\n        filename (str or pathlib.Path): The file to backup.\n        max_backups (int, optional): The maximum number of backups to keep. Defaults to 10.\n\n    Returns:\n        pathlib.Path: The new backup filepath.\n\n    Examples:\n        &gt;&gt;&gt; backup_file(\"/tmp/test.txt\")\n        PosixPath(\"/tmp/test.0.txt\")\n        &gt;&gt;&gt; backup_file(\"/tmp/test.0.txt\")\n        PosixPath(\"/tmp/test.1.txt\")\n        &gt;&gt;&gt; backup_file(\"/tmp/test.1.txt\")\n        PosixPath(\"/tmp/test.2.txt\")\n    \"\"\"\n    filename = Path(filename).resolve()\n    suffixes = [s.strip(\".\") for s in filename.suffixes]\n    iteration = 1\n    with suppress(Exception):\n        iteration = min(max_backups - 1, max(0, int(suffixes[0]))) + 1\n        suffixes = suffixes[1:]\n    stem = filename.stem.split(\".\")[0]\n    destination = filename.parent / f\"{stem}.{iteration}.{'.'.join(suffixes)}\"\n    if destination.exists() and iteration &lt; max_backups:\n        backup_file(destination)\n    if filename.exists():\n        filename.rename(destination)\n    return destination\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.best_http_status","title":"best_http_status","text":"<pre><code>best_http_status(code1, code2)\n</code></pre> <p>Determine the better HTTP status code between two given codes.</p> <p>The 'better' status code is considered based on typical usage and priority in HTTP communication. Lower codes are generally better than higher codes. Within the same class (e.g., 2xx), a lower code is better. 
Between different classes, the order of preference is 2xx &gt; 3xx &gt; 1xx &gt; 4xx &gt; 5xx.</p> <p>Parameters:</p> <ul> <li> <code>code1</code>               (<code>int</code>)           \u2013            <p>The first HTTP status code.</p> </li> <li> <code>code2</code>               (<code>int</code>)           \u2013            <p>The second HTTP status code.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The better HTTP status code between the two provided codes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; best_http_status(200, 404)\n200\n&gt;&gt;&gt; best_http_status(500, 400)\n400\n&gt;&gt;&gt; best_http_status(301, 302)\n301\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def best_http_status(code1, code2):\n    \"\"\"\n    Determine the better HTTP status code between two given codes.\n\n    The 'better' status code is considered based on typical usage and priority in HTTP communication.\n    Lower codes are generally better than higher codes. Within the same class (e.g., 2xx), a lower code is better.\n    Between different classes, the order of preference is 2xx &gt; 3xx &gt; 1xx &gt; 4xx &gt; 5xx.\n\n    Args:\n        code1 (int): The first HTTP status code.\n        code2 (int): The second HTTP status code.\n\n    Returns:\n        int: The better HTTP status code between the two provided codes.\n\n    Examples:\n        &gt;&gt;&gt; best_http_status(200, 404)\n        200\n        &gt;&gt;&gt; best_http_status(500, 400)\n        400\n        &gt;&gt;&gt; best_http_status(301, 302)\n        301\n    \"\"\"\n\n    # Classify the codes into their respective categories (1xx, 2xx, 3xx, 4xx, 5xx)\n    def classify_code(code):\n        return int(code) // 100\n\n    class1 = classify_code(code1)\n    class2 = classify_code(code2)\n\n    # Priority order for classes\n    priority_order = {2: 1, 3: 2, 1: 3, 4: 4, 5: 5}\n\n    # Compare based on class priority\n    p1 = priority_order.get(class1, 10)\n    p2 = priority_order.get(class2, 10)\n    if p1 != p2:\n        return code1 if p1 &lt; p2 else code2\n\n    # If in the same class, the lower code is better\n    return min(code1, code2)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.bytes_to_human","title":"bytes_to_human","text":"<pre><code>bytes_to_human(_bytes)\n</code></pre> <p>Convert a bytes size to a human-readable string.</p> <p>This function converts a numeric bytes value into a human-readable string format, complete with the appropriate unit symbol (B, KB, MB, GB, etc.).</p> <p>Parameters:</p> <ul> <li> <code>_bytes</code>               (<code>int</code>)           \u2013            <p>The number of bytes to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the number of bytes in a more readable format, rounded to two  decimal places.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; bytes_to_human(1234129384)\n'1.15GB'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def bytes_to_human(_bytes):\n    \"\"\"Convert a bytes size to a human-readable string.\n\n    This function converts a numeric bytes value into a human-readable string format, complete\n    with the appropriate unit symbol (B, KB, MB, GB, etc.).\n\n    Args:\n        _bytes (int): The number of bytes to convert.\n\n    Returns:\n        str: A string representing the number of bytes in a more readable format, rounded to two\n        
     decimal places.\n\n    Examples:\n        &gt;&gt;&gt; bytes_to_human(1234129384)\n        '1.15GB'\n    \"\"\"\n    sizes = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]\n    units = {}\n    for count, size in enumerate(sizes):\n        units[size] = pow(1024, count)\n    for size in sizes:\n        if abs(_bytes) &lt; 1024.0:\n            if size == sizes[0]:\n                _bytes = str(int(_bytes))\n            else:\n                _bytes = f\"{_bytes:.2f}\"\n            return f\"{_bytes}{size}\"\n        _bytes /= 1024\n    raise ValueError(f'Unable to convert \"{_bytes}\" to human filesize')\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.can_sudo_without_password","title":"can_sudo_without_password","text":"<pre><code>can_sudo_without_password()\n</code></pre> <p>Check if the current user has passwordless sudo access.</p> <p>This function checks whether the current user can use sudo without entering a password. It runs a command with sudo and checks the return code to determine this.</p> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the current user can use sudo without a password, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; can_sudo_without_password()\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def can_sudo_without_password():\n    \"\"\"Check if the current user has passwordless sudo access.\n\n    This function checks whether the current user can use sudo without entering a password.\n    It runs a command with sudo and checks the return code to determine this.\n\n    Returns:\n        bool: True if the current user can use sudo without a password, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; can_sudo_without_password()\n        True\n    \"\"\"\n    if os.geteuid() != 0:\n        env = dict(os.environ)\n        env[\"SUDO_ASKPASS\"] = \"/bin/false\"\n        try:\n            sp.run([\"sudo\", \"-K\"], stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True, env=env)\n            sp.run([\"sudo\", \"-An\", \"/bin/true\"], stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True, env=env)\n        except sp.CalledProcessError:\n            return False\n    return True\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cancel_tasks","title":"cancel_tasks  <code>async</code>","text":"<pre><code>cancel_tasks(tasks, ignore_errors=True)\n</code></pre> <p>Asynchronously cancels a list of asyncio tasks.</p> <p>Parameters:</p> <ul> <li> <code>tasks</code>               (<code>list[Task]</code>)           \u2013            <p>A list of asyncio Task objects to cancel.</p> </li> <li> <code>ignore_errors</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to ignore errors other than asyncio.CancelledError. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def main():\n...     task1 = asyncio.create_task(async_function1())\n...     task2 = asyncio.create_task(async_function2())\n...     
await cancel_tasks([task1, task2])\n...\n&gt;&gt;&gt; asyncio.run(main())\n</code></pre> Note <p>This function will not cancel the current task that it is called from.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def cancel_tasks(tasks, ignore_errors=True):\n    \"\"\"\n    Asynchronously cancels a list of asyncio tasks.\n\n    Args:\n        tasks (list[Task]): A list of asyncio Task objects to cancel.\n        ignore_errors (bool, optional): Whether to ignore errors other than asyncio.CancelledError. Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; async def main():\n        ...     task1 = asyncio.create_task(async_function1())\n        ...     task2 = asyncio.create_task(async_function2())\n        ...     await cancel_tasks([task1, task2])\n        ...\n        &gt;&gt;&gt; asyncio.run(main())\n\n    Note:\n        This function will not cancel the current task that it is called from.\n    \"\"\"\n    current_task = asyncio.current_task()\n    tasks = [t for t in tasks if t != current_task]\n    for task in tasks:\n        # log.debug(f\"Cancelling task: {task}\")\n        task.cancel()\n    if ignore_errors:\n        for task in tasks:\n            try:\n                await task\n            except BaseException as e:\n                if not isinstance(e, asyncio.CancelledError):\n                    import traceback\n\n                    log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cancel_tasks_sync","title":"cancel_tasks_sync","text":"<pre><code>cancel_tasks_sync(tasks)\n</code></pre> <p>Synchronously cancels a list of asyncio tasks.</p> <p>Parameters:</p> <ul> <li> <code>tasks</code>               (<code>list[Task]</code>)           \u2013            <p>A list of asyncio Task objects to cancel.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; loop = asyncio.get_event_loop()\n&gt;&gt;&gt; task1 = loop.create_task(some_async_function1())\n&gt;&gt;&gt; task2 = loop.create_task(some_async_function2())\n&gt;&gt;&gt; cancel_tasks_sync([task1, task2])\n</code></pre> Note <p>This function will not cancel the current task from which it is called.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cancel_tasks_sync(tasks):\n    \"\"\"\n    Synchronously cancels a list of asyncio tasks.\n\n    Args:\n        tasks (list[Task]): A list of asyncio Task objects to cancel.\n\n    Examples:\n        &gt;&gt;&gt; loop = asyncio.get_event_loop()\n        &gt;&gt;&gt; task1 = loop.create_task(some_async_function1())\n        &gt;&gt;&gt; task2 = loop.create_task(some_async_function2())\n        &gt;&gt;&gt; cancel_tasks_sync([task1, task2])\n\n    Note:\n        This function will not cancel the current task from which it is called.\n    \"\"\"\n    current_task = asyncio.current_task()\n    for task in tasks:\n        if task != current_task:\n            # log.debug(f\"Cancelling task: {task}\")\n            task.cancel()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.chain_lists","title":"chain_lists","text":"<pre><code>chain_lists(l, try_files=False, msg=None, remove_blank=True, validate=False, validate_chars='&lt;&gt;:\"/\\\\|?*)')\n</code></pre> <p>Chains together list elements, allowing for entries separated by commas.</p> <p>This function takes a list <code>l</code> and flattens it by splitting its entries on commas. 
It also allows you to optionally open entries as files and add their contents to the list.</p> <p>The order of entries is preserved, and deduplication is performed automatically.</p> <p>Parameters:</p> <ul> <li> <code>l</code>               (<code>list</code>)           \u2013            <p>The list of strings to chain together.</p> </li> <li> <code>try_files</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to try to open entries as files. Defaults to False.</p> </li> <li> <code>msg</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>An optional message to log when reading from a file. Defaults to None.</p> </li> <li> <code>remove_blank</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to remove blank entries from the list. Defaults to True.</p> </li> <li> <code>validate</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform validation for undesirable characters. Defaults to False.</p> </li> <li> <code>validate_chars</code>               (<code>str</code>, default:                   <code>'&lt;&gt;:\"/\\\\|?*)'</code> )           \u2013            <p>When performing validation, what additional set of characters to block (blocks non-printable ascii automatically). Defaults to '&lt;&gt;:\"/|?*)'</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>The list of chained elements.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string contains invalid characters, when enabled (off by default).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; chain_lists([\"a\", \"b,c,d\"])\n['a', 'b', 'c', 'd']\n</code></pre> <pre><code>&gt;&gt;&gt; chain_lists([\"a,file.txt\", \"c,d\"], try_files=True)\n['a', 'f_line1', 'f_line2', 'f_line3', 'c', 'd']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def chain_lists(\n    l,\n    try_files=False,\n    msg=None,\n    remove_blank=True,\n    validate=False,\n    validate_chars='&lt;&gt;:\"/\\\\|?*)',\n):\n    \"\"\"Chains together list elements, allowing for entries separated by commas.\n\n    This function takes a list `l` and flattens it by splitting its entries on commas.\n    It also allows you to optionally open entries as files and add their contents to the list.\n\n    The order of entries is preserved, and deduplication is performed automatically.\n\n    Args:\n        l (list): The list of strings to chain together.\n        try_files (bool, optional): Whether to try to open entries as files. Defaults to False.\n        msg (str, optional): An optional message to log when reading from a file. Defaults to None.\n        remove_blank (bool, optional): Whether to remove blank entries from the list. Defaults to True.\n        validate (bool, optional): Whether to perform validation for undesirable characters. Defaults to False.\n        validate_chars (str, optional): When performing validation, what additional set of characters to block (blocks non-printable ascii automatically). 
Defaults to '&lt;&gt;:\"/\\\\|?*)'\n\n    Returns:\n        list: The list of chained elements.\n\n    Raises:\n        ValueError: If the input string contains invalid characters, when enabled (off by default).\n\n    Examples:\n        &gt;&gt;&gt; chain_lists([\"a\", \"b,c,d\"])\n        ['a', 'b', 'c', 'd']\n\n        &gt;&gt;&gt; chain_lists([\"a,file.txt\", \"c,d\"], try_files=True)\n        ['a', 'f_line1', 'f_line2', 'f_line3', 'c', 'd']\n    \"\"\"\n    if isinstance(l, str):\n        l = [l]\n    final_list = dict()\n    for entry in l:\n        for s in split_regex.split(entry):\n            f = s.strip()\n            if validate:\n                if any((c in validate_chars) or (ord(c) &lt; 32 and c != \" \") for c in f):\n                    raise ValueError(f\"Invalid character in string: {f}\")\n            f_path = Path(f).resolve()\n            if try_files and f_path.is_file():\n                if msg is not None:\n                    new_msg = str(msg).format(filename=f_path)\n                    log.info(new_msg)\n                for line in str_or_file(f):\n                    final_list[line] = None\n            else:\n                final_list[f] = None\n\n    ret = list(final_list)\n    if remove_blank:\n        ret = [r for r in ret if r]\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.charset","title":"charset","text":"<pre><code>charset(p)\n</code></pre> <p>Determine the character set of the given string based on the types of characters it contains.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str</code>)           \u2013            <p>The string whose character set is to be determined.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>A bitmask representing the types of characters present in the string. 
- CHAR_LOWER = 1: Lowercase alphabets - CHAR_UPPER = 2: Uppercase alphabets - CHAR_DIGIT = 4: Digits - CHAR_SYMBOL = 8: Symbols/Special characters</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; charset('abc')\n1\n</code></pre> <pre><code>&gt;&gt;&gt; charset('abcABC')\n3\n</code></pre> <pre><code>&gt;&gt;&gt; charset('abc123')\n5\n</code></pre> <pre><code>&gt;&gt;&gt; charset('!abc123')\n13\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def charset(p):\n    \"\"\"\n    Determine the character set of the given string based on the types of characters it contains.\n\n    Args:\n        p (str): The string whose character set is to be determined.\n\n    Returns:\n        int: A bitmask representing the types of characters present in the string.\n            - CHAR_LOWER = 1: Lowercase alphabets\n            - CHAR_UPPER = 2: Uppercase alphabets\n            - CHAR_DIGIT = 4: Digits\n            - CHAR_SYMBOL = 8: Symbols/Special characters\n\n    Examples:\n        &gt;&gt;&gt; charset('abc')\n        1\n\n        &gt;&gt;&gt; charset('abcABC')\n        3\n\n        &gt;&gt;&gt; charset('abc123')\n        5\n\n        &gt;&gt;&gt; charset('!abc123')\n        13\n    \"\"\"\n    ret = 0\n    for c in p:\n        if c.islower():\n            ret |= CHAR_LOWER\n        elif c.isupper():\n            ret |= CHAR_UPPER\n        elif c.isnumeric():\n            ret |= CHAR_DIGIT\n        else:\n            ret |= CHAR_SYMBOL\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_dict","title":"clean_dict","text":"<pre><code>clean_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None)\n</code></pre> <p>Recursively clean unwanted keys from a dictionary. Useful for removing secrets from a config.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>dict</code>)           \u2013            <p>The input dictionary.</p> </li> <li> <code>*key_names</code>           \u2013            <p>Names of keys to remove.</p> </li> <li> <code>fuzzy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform fuzzy matching on keys.</p> </li> <li> <code>exclude_keys</code>               (<code>(list, None)</code>, default:                   <code>None</code> )           \u2013            <p>List of keys to be excluded from removal.</p> </li> <li> <code>_prev_key</code>               (<code>(str, None)</code>, default:                   <code>None</code> )           \u2013            <p>For internal recursive use; the previous key in the hierarchy.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary cleaned of the keys specified in key_names.</p> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None):\n    \"\"\"\n    Recursively clean unwanted keys from a dictionary.\n    Useful for removing secrets from a config.\n\n    Args:\n        d (dict): The input dictionary.\n        *key_names: Names of keys to remove.\n        fuzzy (bool): Whether to perform fuzzy matching on keys.\n        exclude_keys (list, None): List of keys to be excluded from removal.\n        _prev_key (str, None): For internal recursive use; the previous key in the hierarchy.\n\n    Returns:\n        dict: A dictionary cleaned of the keys specified in key_names.\n\n    \"\"\"\n    if exclude_keys is None:\n        
exclude_keys = []\n    if isinstance(exclude_keys, str):\n        exclude_keys = [exclude_keys]\n    d = copy.deepcopy(d)\n    if isinstance(d, dict):\n        for key, val in list(d.items()):\n            if key in key_names or (fuzzy and any(k in key for k in key_names)):\n                if _prev_key not in exclude_keys:\n                    d.pop(key)\n                    continue\n            d[key] = clean_dict(val, *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys)\n    return d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_dns_record","title":"clean_dns_record","text":"<pre><code>clean_dns_record(record)\n</code></pre> <p>Cleans and formats a given DNS record for further processing.</p> <p>This static method converts the DNS record to text format if it's not already a string. It also removes any trailing dots and converts the record to lowercase.</p> <p>Parameters:</p> <ul> <li> <code>record</code>               (<code>str or Rdata</code>)           \u2013            <p>The DNS record to clean.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The cleaned and formatted DNS record.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; clean_dns_record('www.evilcorp.com.')\n'www.evilcorp.com'\n</code></pre> <pre><code>&gt;&gt;&gt; from dns.rrset import from_text\n&gt;&gt;&gt; record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0]\n&gt;&gt;&gt; clean_dns_record(record)\n'1.2.3.4'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_dns_record(record):\n    \"\"\"\n    Cleans and formats a given DNS record for further processing.\n\n    This static method converts the DNS record to text format if it's not already a string.\n    It also removes any trailing dots and converts the record to lowercase.\n\n    Args:\n        record (str or dns.rdata.Rdata): The DNS record to clean.\n\n    Returns:\n        str: The cleaned and formatted DNS record.\n\n    Examples:\n        &gt;&gt;&gt; clean_dns_record('www.evilcorp.com.')\n        'www.evilcorp.com'\n\n        &gt;&gt;&gt; from dns.rrset import from_text\n        &gt;&gt;&gt; record = from_text('www.evilcorp.com', 3600, 'IN', 'A', '1.2.3.4')[0]\n        &gt;&gt;&gt; clean_dns_record(record)\n        '1.2.3.4'\n    \"\"\"\n    if not isinstance(record, str):\n        record = str(record.to_text())\n    return str(record).rstrip(\".\").lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.clean_old","title":"clean_old","text":"<pre><code>clean_old(d, keep=10, filter=lambda x: True, key=latest_mtime, reverse=True, raise_error=False)\n</code></pre> <p>Clean up old files and directories within a given directory based on various filtering and sorting options.</p> <p>This function removes the oldest files and directories in the provided directory 'd' that exceed a specified threshold ('keep'). The items to be deleted can be filtered using a lambda function 'filter', and they are sorted by a key function, defaulting to latest modification time.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to clean up.</p> </li> <li> <code>keep</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The number of items to keep. 
Ones beyond this count will be removed.</p> </li> <li> <code>filter</code>               (<code>Callable</code>, default:                   <code>lambda x: True</code> )           \u2013            <p>A lambda function for filtering which files or directories to consider.                Defaults to a lambda function that returns True for all.</p> </li> <li> <code>key</code>               (<code>Callable</code>, default:                   <code>latest_mtime</code> )           \u2013            <p>A function to sort the files and directories. Defaults to latest modification time.</p> </li> <li> <code>reverse</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to reverse the order of sorted items before removing. Defaults to True.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to raise an error if directory deletion fails. Defaults to False.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; clean_old(\"~/.bbot/scans\", filter=lambda x: x.is_dir() and scan_name_regex.match(x.name))\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def clean_old(d, keep=10, filter=lambda x: True, key=latest_mtime, reverse=True, raise_error=False):\n    \"\"\"Clean up old files and directories within a given directory based on various filtering and sorting options.\n\n    This function removes the oldest files and directories in the provided directory 'd' that exceed a specified\n    threshold ('keep'). The items to be deleted can be filtered using a lambda function 'filter', and they are\n    sorted by a key function, defaulting to latest modification time.\n\n    Args:\n        d (str or Path): The directory path to clean up.\n        keep (int): The number of items to keep. Ones beyond this count will be removed.\n        filter (Callable): A lambda function for filtering which files or directories to consider.\n                           Defaults to a lambda function that returns True for all.\n        key (Callable): A function to sort the files and directories. Defaults to latest modification time.\n        reverse (bool): Whether to reverse the order of sorted items before removing. Defaults to True.\n        raise_error (bool): Whether to raise an error if directory deletion fails. Defaults to False.\n\n    Examples:\n        &gt;&gt;&gt; clean_old(\"~/.bbot/scans\", filter=lambda x: x.is_dir() and scan_name_regex.match(x.name))\n    \"\"\"\n    d = Path(d)\n    if not d.is_dir():\n        return\n    paths = [x for x in d.iterdir() if filter(x)]\n    paths.sort(key=key, reverse=reverse)\n    for path in paths[keep:]:\n        try:\n            log.debug(f\"Removing {path}\")\n            rm_rf(path)\n        except Exception as e:\n            msg = f\"Failed to delete directory: {path}, {e}\"\n            if raise_error:\n                raise errors.DirectoryDeletionError()\n            log.warning(msg)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.closest_match","title":"closest_match","text":"<pre><code>closest_match(s, choices, n=1, cutoff=0.0)\n</code></pre> <p>Finds the closest matching strings from a list of choices based on a given string.</p> <p>This function uses the difflib library to find the closest matches to a given string <code>s</code> from a list of <code>choices</code>. 
It can return either the single best match or a list of the top <code>n</code> best matches.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string for which to find the closest match.</p> </li> <li> <code>choices</code>               (<code>list</code>)           \u2013            <p>A list of strings to compare against.</p> </li> <li> <code>n</code>               (<code>int</code>, default:                   <code>1</code> )           \u2013            <p>The number of best matches to return. Defaults to 1.</p> </li> <li> <code>cutoff</code>               (<code>float</code>, default:                   <code>0.0</code> )           \u2013            <p>A float value that defines the similarity threshold. Strings with similarity below this value are not considered. Defaults to 0.0.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>str or list: Either the closest matching string or a list of the <code>n</code> closest matching strings.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\"])\n'asd'\n&gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\", \"asdff\"], n=3)\n['asdff', 'asd', 'fds']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def closest_match(s, choices, n=1, cutoff=0.0):\n    \"\"\"Finds the closest matching strings from a list of choices based on a given string.\n\n    This function uses the difflib library to find the closest matches to a given string `s` from a list of `choices`.\n    It can return either the single best match or a list of the top `n` best matches.\n\n    Args:\n        s (str): The string for which to find the closest match.\n        choices (list): A list of strings to compare against.\n        n (int, optional): The number of best matches to return. Defaults to 1.\n        cutoff (float, optional): A float value that defines the similarity threshold. Strings with similarity below this value are not considered. 
Defaults to 0.0.\n\n    Returns:\n        str or list: Either the closest matching string or a list of the `n` closest matching strings.\n\n    Examples:\n        &gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\"])\n        'asd'\n        &gt;&gt;&gt; closest_match(\"asdf\", [\"asd\", \"fds\", \"asdff\"], n=3)\n        ['asdff', 'asd', 'fds']\n    \"\"\"\n    import difflib\n\n    matches = difflib.get_close_matches(s, choices, n=n, cutoff=cutoff)\n    if not choices or not matches:\n        return\n    if n == 1:\n        return matches[0]\n    return matches\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cloudcheck","title":"cloudcheck","text":"<pre><code>cloudcheck(ip)\n</code></pre> <p>Check whether an IP address belongs to a cloud provider and returns the provider name, type, and subnet.</p> <p>Parameters:</p> <ul> <li> <code>ip</code>               (<code>str</code>)           \u2013            <p>The IP address to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing provider name (str), provider type (str), and subnet (IPv4Network).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; cloudcheck(\"168.62.20.37\")\n('Azure', 'cloud', IPv4Network('168.62.0.0/19'))\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cloudcheck(ip):\n    \"\"\"\n    Check whether an IP address belongs to a cloud provider and returns the provider name, type, and subnet.\n\n    Args:\n        ip (str): The IP address to check.\n\n    Returns:\n        tuple: A tuple containing provider name (str), provider type (str), and subnet (IPv4Network).\n\n    Examples:\n        &gt;&gt;&gt; cloudcheck(\"168.62.20.37\")\n        ('Azure', 'cloud', IPv4Network('168.62.0.0/19'))\n    \"\"\"\n    import cloudcheck as _cloudcheck\n\n    return _cloudcheck.check(ip)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.cpu_architecture","title":"cpu_architecture","text":"<pre><code>cpu_architecture()\n</code></pre> <p>Return the CPU architecture of the current system.</p> <p>This function fetches and returns the architecture type of the CPU where the code is being executed. 
It maps common identifiers like \"x86_64\" to more general types like \"amd64\".</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the CPU architecture, such as \"amd64\", \"armv7\", or \"arm64\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; cpu_architecture()\n'amd64'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def cpu_architecture():\n    \"\"\"Return the CPU architecture of the current system.\n\n    This function fetches and returns the architecture type of the CPU where the code is being executed.\n    It maps common identifiers like \"x86_64\" to more general types like \"amd64\".\n\n    Returns:\n        str: A string representing the CPU architecture, such as \"amd64\", \"armv7\", or \"arm64\".\n\n    Examples:\n        &gt;&gt;&gt; cpu_architecture()\n        'amd64'\n    \"\"\"\n    import platform\n\n    uname = platform.uname()\n    arch = uname.machine.lower()\n    if arch.startswith(\"aarch\"):\n        return \"arm64\"\n    elif arch == \"x86_64\":\n        return \"amd64\"\n    return arch\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.delete_file","title":"delete_file","text":"<pre><code>delete_file(path)\n</code></pre> <p>Deletes a file at the given path.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to be deleted.</p> </li> </ul> Note <p>This function suppresses all exceptions to ensure that the program continues running even if the file could not be deleted.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; delete_file(\"/tmp/test/file1.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def delete_file(path):\n    \"\"\"Deletes a file at the given path.\n\n    Args:\n        path (str or Path): The path to the file to be deleted.\n\n    Note:\n        This function suppresses all exceptions to ensure that the program continues running even if the file could not be deleted.\n\n    Examples:\n        &gt;&gt;&gt; delete_file(\"/tmp/test/file1.txt\")\n    \"\"\"\n    with suppress(Exception):\n        Path(path).unlink(missing_ok=True)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.domain_parents","title":"domain_parents","text":"<pre><code>domain_parents(d, include_self=False)\n</code></pre> <p>Generate a list of parent domains for a given domain string.</p> <p>This function takes an input string <code>d</code> and generates a list of parent domains in decreasing order of specificity. If <code>include_self</code> is set to True, the list will also include the input domain if it is not a top-level domain.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string representing a domain or subdomain.</p> </li> <li> <code>include_self</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include the input domain itself. 
Defaults to False.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Parent domains of the input string in decreasing order of specificity.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(domain_parents(\"test.www.evilcorp.co.uk\"))\n[\"www.evilcorp.co.uk\", \"evilcorp.co.uk\"]\n</code></pre> Notes <ul> <li>Port, if present in input, is preserved in the output.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def domain_parents(d, include_self=False):\n    \"\"\"\n    Generate a list of parent domains for a given domain string.\n\n    This function takes an input string `d` and generates a list of parent domains in decreasing order of specificity.\n    If `include_self` is set to True, the list will also include the input domain if it is not a top-level domain.\n\n    Args:\n        d (str): The input string representing a domain or subdomain.\n        include_self (bool, optional): Whether to include the input domain itself. Defaults to False.\n\n    Yields:\n        str: Parent domains of the input string in decreasing order of specificity.\n\n    Examples:\n        &gt;&gt;&gt; list(domain_parents(\"test.www.evilcorp.co.uk\"))\n        [\"www.evilcorp.co.uk\", \"evilcorp.co.uk\"]\n\n    Notes:\n        - Port, if present in input, is preserved in the output.\n    \"\"\"\n\n    parent = str(d)\n    if include_self and not is_domain(parent):\n        yield parent\n    while 1:\n        parent = parent_domain(parent)\n        if is_subdomain(parent):\n            yield parent\n            continue\n        elif is_domain(parent):\n            yield parent\n        break\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.domain_stem","title":"domain_stem","text":"<pre><code>domain_stem(domain)\n</code></pre> <p>Returns an abbreviated representation of the hostname by removing the TLD (Top-Level Domain).</p> <p>Parameters:</p> <ul> <li> <code>domain</code>               (<code>str</code>)           \u2013            <p>The full domain name to be abbreviated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>An abbreviated domain string without the TLD.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; domain_stem(\"www.evilcorp.com\")\n\"www.evilcorp\"\n</code></pre> Notes <ul> <li>Utilizes the <code>tldextract</code> function for domain parsing.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def domain_stem(domain):\n    \"\"\"\n    Returns an abbreviated representation of the hostname by removing the TLD (Top-Level Domain).\n\n    Args:\n        domain (str): The full domain name to be abbreviated.\n\n    Returns:\n        str: An abbreviated domain string without the TLD.\n\n    Examples:\n        &gt;&gt;&gt; domain_stem(\"www.evilcorp.com\")\n        \"www.evilcorp\"\n\n    Notes:\n        - Utilizes the `tldextract` function for domain parsing.\n    \"\"\"\n    parsed = tldextract(str(domain))\n    return f\".\".join(parsed.subdomain.split(\".\") + parsed.domain.split(\".\")).strip(\".\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.execute_sync_or_async","title":"execute_sync_or_async  <code>async</code>","text":"<pre><code>execute_sync_or_async(callback, *args, **kwargs)\n</code></pre> <p>Execute a function or coroutine, handling either synchronous or asynchronous invocation.</p> <p>Parameters:</p> <ul> <li> <code>callback</code>               (<code>Union[Callable, Coroutine]</code>)    
       \u2013            <p>The function or coroutine to execute.</p> </li> <li> <code>*args</code>           \u2013            <p>Variable-length argument list to pass to the callback.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments to pass to the callback.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Any</code>          \u2013            <p>The return value from the executed function or coroutine.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def foo_async(x):\n...     return x + 1\n&gt;&gt;&gt; def foo_sync(x):\n...     return x + 1\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_async, 1))\n2\n</code></pre> <pre><code>&gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_sync, 1))\n2\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>async def execute_sync_or_async(callback, *args, **kwargs):\n    \"\"\"\n    Execute a function or coroutine, handling either synchronous or asynchronous invocation.\n\n    Args:\n        callback (Union[Callable, Coroutine]): The function or coroutine to execute.\n        *args: Variable-length argument list to pass to the callback.\n        **kwargs: Arbitrary keyword arguments to pass to the callback.\n\n    Returns:\n        Any: The return value from the executed function or coroutine.\n\n    Examples:\n        &gt;&gt;&gt; async def foo_async(x):\n        ...     return x + 1\n        &gt;&gt;&gt; def foo_sync(x):\n        ...     return x + 1\n\n        &gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_async, 1))\n        2\n\n        &gt;&gt;&gt; asyncio.run(execute_sync_or_async(foo_sync, 1))\n        2\n    \"\"\"\n    if is_async_function(callback):\n        return await callback(*args, **kwargs)\n    else:\n        return callback(*args, **kwargs)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_emails","title":"extract_emails","text":"<pre><code>extract_emails(s)\n</code></pre> <p>Extract email addresses from a body of text</p> <p>This function takes in a string and yields all email addresses found in it. The emails are converted to lower case before yielding. It utilizes regular expressions for email pattern matching.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string from which to extract email addresses.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Yields email addresses found in the input string, in lower case.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(extract_emails(\"Contact us at info@evilcorp.com and support@evilcorp.com\"))\n['info@evilcorp.com', 'support@evilcorp.com']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_emails(s):\n    \"\"\"\n    Extract email addresses from a body of text\n\n    This function takes in a string and yields all email addresses found in it.\n    The emails are converted to lower case before yielding. 
It utilizes\n    regular expressions for email pattern matching.\n\n    Args:\n        s (str): The input string from which to extract email addresses.\n\n    Yields:\n        str: Yields email addresses found in the input string, in lower case.\n\n    Examples:\n        &gt;&gt;&gt; list(extract_emails(\"Contact us at info@evilcorp.com and support@evilcorp.com\"))\n        ['info@evilcorp.com', 'support@evilcorp.com']\n    \"\"\"\n    for email in bbot_regexes.email_regex.findall(smart_decode(s)):\n        yield email.lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_host","title":"extract_host","text":"<pre><code>extract_host(s)\n</code></pre> <p>Attempts to find and extract the host portion of a string.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string from which to extract the host.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing three strings:    (hostname (None if not found), string_before_hostname, string_after_hostname).</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_host(\"evilcorp.com:80\")\n(\"evilcorp.com\", \"\", \":80\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"http://evilcorp.com:80/asdf.php?a=b\")\n(\"evilcorp.com\", \"http://\", \":80/asdf.php?a=b\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"bob@evilcorp.com\")\n(\"evilcorp.com\", \"bob@\", \"\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"[dead::beef]:22\")\n(\"dead::beef\", \"[\", \"]:22\")\n</code></pre> <pre><code>&gt;&gt;&gt; extract_host(\"ftp://username:password@my-ftp.com/my-file.csv\")\n(\n    \"my-ftp.com\",\n    \"ftp://username:password@\",\n    \"/my-file.csv\",\n)\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_host(s):\n    \"\"\"\n    Attempts to find and extract the host portion of a string.\n\n    Args:\n        s (str): The string from which to extract the host.\n\n    Returns:\n        tuple: A tuple containing three strings:\n               (hostname (None if not found), string_before_hostname, string_after_hostname).\n\n    Examples:\n        &gt;&gt;&gt; extract_host(\"evilcorp.com:80\")\n        (\"evilcorp.com\", \"\", \":80\")\n\n        &gt;&gt;&gt; extract_host(\"http://evilcorp.com:80/asdf.php?a=b\")\n        (\"evilcorp.com\", \"http://\", \":80/asdf.php?a=b\")\n\n        &gt;&gt;&gt; extract_host(\"bob@evilcorp.com\")\n        (\"evilcorp.com\", \"bob@\", \"\")\n\n        &gt;&gt;&gt; extract_host(\"[dead::beef]:22\")\n        (\"dead::beef\", \"[\", \"]:22\")\n\n        &gt;&gt;&gt; extract_host(\"ftp://username:password@my-ftp.com/my-file.csv\")\n        (\n            \"my-ftp.com\",\n            \"ftp://username:password@\",\n            \"/my-file.csv\",\n        )\n    \"\"\"\n    s = smart_decode(s)\n    match = bbot_regexes.extract_host_regex.search(s)\n\n    if match:\n        hostname = match.group(1)\n        before = s[: match.start(1)]\n        after = s[match.end(1) :]\n        host, port = split_host_port(hostname)\n        netloc = make_netloc(host, port)\n        if netloc != hostname:\n            # invalid host / port\n            return (None, s, \"\")\n        if host is not None:\n            if port is not None:\n                after = f\":{port}{after}\"\n            if is_ip(host, version=6) and hostname.startswith(\"[\"):\n                before = f\"{before}[\"\n                after = f\"]{after}\"\n 
           hostname = str(host)\n        return (hostname, before, after)\n\n    return (None, s, \"\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_params_json","title":"extract_params_json","text":"<pre><code>extract_params_json(json_data, compare_mode='getparam')\n</code></pre> <p>Extracts key-value pairs from a JSON object and returns them as a set of tuples. Used by the <code>paramminer_headers</code> module.</p> <p>Parameters:</p> <ul> <li> <code>json_data</code>               (<code>str</code>)           \u2013            <p>JSON-formatted string containing key-value pairs.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of tuples containing the keys and their corresponding values present in the JSON object.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_params_json('{\"a\": 1, \"b\": {\"c\": 2}}')\n{('a', 1), ('b', {'c': 2}), ('c', 2)}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_params_json(json_data, compare_mode=\"getparam\"):\n    \"\"\"\n    Extracts key-value pairs from a JSON object and returns them as a set of tuples. Used by the `paramminer_headers` module.\n\n    Args:\n        json_data (str): JSON-formatted string containing key-value pairs.\n\n    Returns:\n        set: A set of tuples containing the keys and their corresponding values present in the JSON object.\n\n    Raises:\n        Returns an empty set if JSONDecodeError occurs.\n\n    Examples:\n        &gt;&gt;&gt; extract_params_json('{\"a\": 1, \"b\": {\"c\": 2}}')\n        {('a', 1), ('b', {'c': 2}), ('c', 2)}\n    \"\"\"\n    try:\n        data = json.loads(json_data)\n    except json.JSONDecodeError:\n        return set()\n\n    key_value_pairs = set()\n    stack = [(data, \"\")]\n\n    while stack:\n        current_data, path = stack.pop()\n        if isinstance(current_data, dict):\n            for key, value in current_data.items():\n                full_key = f\"{path}.{key}\" if path else key\n                if isinstance(value, dict):\n                    stack.append((value, full_key))\n                elif isinstance(value, list):\n                    stack.append((value, full_key))\n                else:\n                    if validate_parameter(full_key, compare_mode):\n                        key_value_pairs.add((full_key, value))\n        elif isinstance(current_data, list):\n            for item in current_data:\n                if isinstance(item, (dict, list)):\n                    stack.append((item, path))\n    return key_value_pairs\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_params_xml","title":"extract_params_xml","text":"<pre><code>extract_params_xml(xml_data, compare_mode='getparam')\n</code></pre> <p>Extracts tags and their text values from an XML object and returns them as a set of tuples.</p> <p>Parameters:</p> <ul> <li> <code>xml_data</code>               (<code>str</code>)           \u2013            <p>XML-formatted string containing elements.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of tuples containing the tags and their corresponding text values present in the XML object.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_params_xml('&lt;root&gt;&lt;child1&gt;&lt;child2&gt;value&lt;/child2&gt;&lt;/child1&gt;&lt;/root&gt;')\n{('root', None), ('child1', None), ('child2', 'value')}\n</code></pre> Source code in 
<code>bbot/core/helpers/misc.py</code> <pre><code>def extract_params_xml(xml_data, compare_mode=\"getparam\"):\n    \"\"\"\n    Extracts tags and their text values from an XML object and returns them as a set of tuples.\n\n    Args:\n        xml_data (str): XML-formatted string containing elements.\n\n    Returns:\n        set: A set of tuples containing the tags and their corresponding text values present in the XML object.\n\n    Raises:\n        Returns an empty set if ParseError occurs.\n\n    Examples:\n        &gt;&gt;&gt; extract_params_xml('&lt;root&gt;&lt;child1&gt;&lt;child2&gt;value&lt;/child2&gt;&lt;/child1&gt;&lt;/root&gt;')\n        {('root', None), ('child1', None), ('child2', 'value')}\n    \"\"\"\n    import xml.etree.ElementTree as ET\n\n    try:\n        root = ET.fromstring(xml_data)\n    except ET.ParseError:\n        return set()\n\n    tag_value_pairs = set()\n    stack = [root]\n\n    while stack:\n        current_element = stack.pop()\n        if validate_parameter(current_element.tag, compare_mode):\n            tag_value_pairs.add((current_element.tag, current_element.text))\n        for child in current_element:\n            stack.append(child)\n    return tag_value_pairs\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.extract_words","title":"extract_words","text":"<pre><code>extract_words(data, acronyms=True, wordninja=True, model=None, max_length=100, word_regexes=None)\n</code></pre> <p>Intelligently extracts words from given data.</p> <p>This function uses regular expressions and optionally wordninja to extract words from a given text string. Thanks to wordninja it can handle concatenated words intelligently.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The data from which words are to be extracted.</p> </li> <li> <code>acronyms</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include acronyms. Defaults to True.</p> </li> <li> <code>wordninja</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to use the wordninja library to split concatenated words. Defaults to True.</p> </li> <li> <code>model</code>               (<code>object</code>, default:                   <code>None</code> )           \u2013            <p>A custom wordninja model for special types of data such as DNS names.</p> </li> <li> <code>max_length</code>               (<code>int</code>, default:                   <code>100</code> )           \u2013            <p>Maximum length for a word to be included. Defaults to 100.</p> </li> <li> <code>word_regexes</code>               (<code>list</code>, default:                   <code>None</code> )           \u2013            <p>A list of compiled regular expression objects for word extraction. 
Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of extracted words.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; extract_words('blacklanternsecurity')\n{'black', 'lantern', 'security', 'bls', 'blacklanternsecurity'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def extract_words(data, acronyms=True, wordninja=True, model=None, max_length=100, word_regexes=None):\n    \"\"\"Intelligently extracts words from given data.\n\n    This function uses regular expressions and optionally wordninja to extract words\n    from a given text string. Thanks to wordninja it can handle concatenated words intelligently.\n\n    Args:\n        data (str): The data from which words are to be extracted.\n        acronyms (bool, optional): Whether to include acronyms. Defaults to True.\n        wordninja (bool, optional): Whether to use the wordninja library to split concatenated words. Defaults to True.\n        model (object, optional): A custom wordninja model for special types of data such as DNS names.\n        max_length (int, optional): Maximum length for a word to be included. Defaults to 100.\n        word_regexes (list, optional): A list of compiled regular expression objects for word extraction. Defaults to None.\n\n    Returns:\n        set: A set of extracted words.\n\n    Examples:\n        &gt;&gt;&gt; extract_words('blacklanternsecurity')\n        {'black', 'lantern', 'security', 'bls', 'blacklanternsecurity'}\n    \"\"\"\n    import wordninja as _wordninja\n\n    if word_regexes is None:\n        word_regexes = bbot_regexes.word_regexes\n    words = set()\n    data = smart_decode(data)\n    for r in word_regexes:\n        for word in set(r.findall(data)):\n            # blacklanternsecurity\n            if len(word) &lt;= max_length:\n                words.add(word)\n\n    # blacklanternsecurity --&gt; ['black', 'lantern', 'security']\n    # max_slice_length = 3\n    for word in list(words):\n        if wordninja:\n            if model is None:\n                model = _wordninja\n            subwords = model.split(word)\n            for subword in subwords:\n                words.add(subword)\n        # this section generates compound words\n        # it is interesting but currently disabled the quality of its output doesn't quite justify its quantity\n        # blacklanternsecurity --&gt; ['black', 'lantern', 'security', 'blacklantern', 'lanternsecurity']\n        # for s, e in combinations(range(len(subwords) + 1), 2):\n        #    if e - s &lt;= max_slice_length:\n        #        subword_slice = \"\".join(subwords[s:e])\n        #        words.add(subword_slice)\n        # blacklanternsecurity --&gt; bls\n        if acronyms:\n            if len(subwords) &gt; 1:\n                words.add(\"\".join([c[0] for c in subwords if len(c) &gt; 0]))\n\n    return words\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.filesize","title":"filesize","text":"<pre><code>filesize(f)\n</code></pre> <p>Get the file size of a given file.</p> <p>This function takes a file path as an argument and returns its size in bytes. 
If the path does not point to a file, the function returns 0.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str or Path</code>)           \u2013            <p>The file path for which to get the size.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The size of the file in bytes, or 0 if the path does not point to a file.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filesize(\"/path/to/file.txt\")\n1024\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def filesize(f):\n    \"\"\"Get the file size of a given file.\n\n    This function takes a file path as an argument and returns its size in bytes. If the path\n    does not point to a file, the function returns 0.\n\n    Args:\n        f (str or Path): The file path for which to get the size.\n\n    Returns:\n        int: The size of the file in bytes, or 0 if the path does not point to a file.\n\n    Examples:\n        &gt;&gt;&gt; filesize(\"/path/to/file.txt\")\n        1024\n    \"\"\"\n    f = Path(f)\n    if f.is_file():\n        return f.stat().st_size\n    return 0\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.filter_dict","title":"filter_dict","text":"<pre><code>filter_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None)\n</code></pre> <p>Recursively filter a dictionary based on key names.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>dict</code>)           \u2013            <p>The input dictionary.</p> </li> <li> <code>*key_names</code>           \u2013            <p>Names of keys to filter for.</p> </li> <li> <code>fuzzy</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to perform fuzzy matching on keys.</p> </li> <li> <code>exclude_keys</code>               (<code>(list, None)</code>, default:                   <code>None</code> )           \u2013            <p>List of keys to be excluded from the final dict.</p> </li> <li> <code>_prev_key</code>               (<code>(str, None)</code>, default:                   <code>None</code> )           \u2013            <p>For internal recursive use; the previous key in the hierarchy.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>dict</code>          \u2013            <p>A dictionary containing only the keys specified in key_names.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": \"asdf\"}, \"key2\")\n{\"key2\": \"asdf\"}\n&gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": {\"key3\": \"asdf\"}}, \"key1\", \"key3\", exclude_keys=\"key2\")\n{'key1': 'test'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def filter_dict(d, *key_names, fuzzy=False, exclude_keys=None, _prev_key=None):\n    \"\"\"\n    Recursively filter a dictionary based on key names.\n\n    Args:\n        d (dict): The input dictionary.\n        *key_names: Names of keys to filter for.\n        fuzzy (bool): Whether to perform fuzzy matching on keys.\n        exclude_keys (list, None): List of keys to be excluded from the final dict.\n        _prev_key (str, None): For internal recursive use; the previous key in the hierarchy.\n\n    Returns:\n        dict: A dictionary containing only the keys specified in key_names.\n\n    Examples:\n        &gt;&gt;&gt; filter_dict({\"key1\": \"test\", \"key2\": \"asdf\"}, \"key2\")\n        {\"key2\": \"asdf\"}\n        &gt;&gt;&gt; filter_dict({\"key1\": 
\"test\", \"key2\": {\"key3\": \"asdf\"}}, \"key1\", \"key3\", exclude_keys=\"key2\")\n        {'key1': 'test'}\n    \"\"\"\n    if exclude_keys is None:\n        exclude_keys = []\n    if isinstance(exclude_keys, str):\n        exclude_keys = [exclude_keys]\n    ret = {}\n    if isinstance(d, dict):\n        for key in d:\n            if key in key_names or (fuzzy and any(k in key for k in key_names)):\n                if not any(k in exclude_keys for k in [key, _prev_key]):\n                    ret[key] = copy.deepcopy(d[key])\n            elif isinstance(d[key], list) or isinstance(d[key], dict):\n                child = filter_dict(d[key], *key_names, fuzzy=fuzzy, _prev_key=key, exclude_keys=exclude_keys)\n                if child:\n                    ret[key] = child\n    return ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.gen_numbers","title":"gen_numbers","text":"<pre><code>gen_numbers(n, padding=2)\n</code></pre> <p>Generates numbers with variable padding and returns them as a set of strings.</p> <p>Parameters:</p> <ul> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The upper limit of numbers to generate, exclusive.</p> </li> <li> <code>padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>The maximum number of digits to pad the numbers with. Defaults to 2.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of string representations of numbers with varying degrees of padding.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; gen_numbers(5)\n{'0', '00', '01', '02', '03', '04', '1', '2', '3', '4'}\n</code></pre> <pre><code>&gt;&gt;&gt; gen_numbers(3, padding=3)\n{'0', '00', '000', '001', '002', '01', '02', '1', '2'}\n</code></pre> <pre><code>&gt;&gt;&gt; gen_numbers(5, padding=1)\n{'0', '1', '2', '3', '4'}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def gen_numbers(n, padding=2):\n    \"\"\"Generates numbers with variable padding and returns them as a set of strings.\n\n    Args:\n        n (int): The upper limit of numbers to generate, exclusive.\n        padding (int, optional): The maximum number of digits to pad the numbers with. 
Defaults to 2.\n\n    Returns:\n        set: A set of string representations of numbers with varying degrees of padding.\n\n    Examples:\n        &gt;&gt;&gt; gen_numbers(5)\n        {'0', '00', '01', '02', '03', '04', '1', '2', '3', '4'}\n\n        &gt;&gt;&gt; gen_numbers(3, padding=3)\n        {'0', '00', '000', '001', '002', '01', '02', '1', '2'}\n\n        &gt;&gt;&gt; gen_numbers(5, padding=1)\n        {'0', '1', '2', '3', '4'}\n    \"\"\"\n    results = set()\n    for i in range(n):\n        for p in range(1, padding + 1):\n            results.add(str(i).zfill(p))\n    return results\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_closest_match","title":"get_closest_match","text":"<pre><code>get_closest_match(s, choices, msg=None)\n</code></pre> <p>Finds the closest match from a list of choices for a given string.</p> <p>This function is particularly useful for CLI applications where you want to validate flags or modules.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string for which to find the closest match.</p> </li> <li> <code>choices</code>               (<code>list</code>)           \u2013            <p>A list of strings to compare against.</p> </li> <li> <code>msg</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>Additional message to prepend in the warning message. Defaults to None.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_closest_match(\"some_module\", [\"some_mod\", \"some_other_mod\"], msg=\"module\")\n# Output: Could not find module \"some_module\". Did you mean \"some_mod\"?\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_closest_match(s, choices, msg=None):\n    \"\"\"Finds the closest match from a list of choices for a given string.\n\n    This function is particularly useful for CLI applications where you want to validate flags or modules.\n\n    Args:\n        s (str): The string for which to find the closest match.\n        choices (list): A list of strings to compare against.\n        msg (str, optional): Additional message to prepend in the warning message. Defaults to None.\n        loglevel (str, optional): The log level to use for the warning message. Defaults to \"HUGEWARNING\".\n        exitcode (int, optional): The exit code to use when exiting the program. Defaults to 2.\n\n    Examples:\n        &gt;&gt;&gt; get_closest_match(\"some_module\", [\"some_mod\", \"some_other_mod\"], msg=\"module\")\n        # Output: Could not find module \"some_module\". Did you mean \"some_mod\"?\n    \"\"\"\n    if msg is None:\n        msg = \"\"\n    else:\n        msg += \" \"\n    closest = closest_match(s, choices)\n    return f'Could not find {msg}\"{s}\". 
Did you mean \"{closest}\"?'\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_exception_chain","title":"get_exception_chain","text":"<pre><code>get_exception_chain(e)\n</code></pre> <p>Retrieves the full chain of exceptions leading to the given exception.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception for which to get the chain.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>list[BaseException]: List of exceptions in the chain, from the given exception back to the root cause.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except ValueError as e:\n...     exc_chain = get_exception_chain(e)\n...     for exc in exc_chain:\n...         print(exc)\nThis is a value error\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_exception_chain(e):\n    \"\"\"\n    Retrieves the full chain of exceptions leading to the given exception.\n\n    Args:\n        e (BaseException): The exception for which to get the chain.\n\n    Returns:\n        list[BaseException]: List of exceptions in the chain, from the given exception back to the root cause.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except ValueError as e:\n        ...     exc_chain = get_exception_chain(e)\n        ...     for exc in exc_chain:\n        ...         print(exc)\n        This is a value error\n    \"\"\"\n    exception_chain = []\n    current_exception = e\n    while current_exception is not None:\n        exception_chain.append(current_exception)\n        current_exception = getattr(current_exception, \"__context__\", None)\n    return exception_chain\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_file_extension","title":"get_file_extension","text":"<pre><code>get_file_extension(s)\n</code></pre> <p>Extracts the file extension from a given string representing a URL or file path.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string from which to extract the file extension.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The file extension, or an empty string if no extension is found.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_file_extension(\"https://evilcorp.com/api/test.php\")\n\"php\"\n&gt;&gt;&gt; get_file_extension(\"/etc/test.conf\")\n\"conf\"\n&gt;&gt;&gt; get_file_extension(\"/etc/passwd\")\n\"\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_file_extension(s):\n    \"\"\"\n    Extracts the file extension from a given string representing a URL or file path.\n\n    Args:\n        s (str): The string from which to extract the file extension.\n\n    Returns:\n        str: The file extension, or an empty string if no extension is found.\n\n    Examples:\n        &gt;&gt;&gt; get_file_extension(\"https://evilcorp.com/api/test.php\")\n        \"php\"\n        &gt;&gt;&gt; get_file_extension(\"/etc/test.conf\")\n        \"conf\"\n        &gt;&gt;&gt; get_file_extension(\"/etc/passwd\")\n        \"\"\n    \"\"\"\n    s = str(s).lower().strip()\n    rightmost_section = s.rsplit(\"/\", 1)[-1]\n    if \".\" in rightmost_section:\n        extension = rightmost_section.rsplit(\".\", 1)[-1]\n        return extension\n    return 
\"\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_get_params","title":"get_get_params","text":"<pre><code>get_get_params(url)\n</code></pre> <p>Extract the query parameters from the given URL as a dictionary.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL from which to extract query parameters.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Dict[str, List[str]]: A dictionary containing the query parameters and their values.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;bar=2')\n{'foo': ['1'], 'bar': ['2']}\n</code></pre> <pre><code>&gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;foo=2')\n{'foo': ['1', '2']}\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def get_get_params(url):\n    \"\"\"\n    Extract the query parameters from the given URL as a dictionary.\n\n    Args:\n        url (Union[str, ParseResult]): The URL from which to extract query parameters.\n\n    Returns:\n        Dict[str, List[str]]: A dictionary containing the query parameters and their values.\n\n    Examples:\n        &gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;bar=2')\n        {'foo': ['1'], 'bar': ['2']}\n\n        &gt;&gt;&gt; get_get_params('https://www.evilcorp.com?foo=1&amp;foo=2')\n        {'foo': ['1', '2']}\n    \"\"\"\n    parsed = parse_url(url)\n    return dict(parse_qs(parsed.query))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_keys_in_dot_syntax","title":"get_keys_in_dot_syntax","text":"<pre><code>get_keys_in_dot_syntax(config)\n</code></pre> <p>Retrieve all keys in an OmegaConf configuration in dot notation.</p> <p>This function converts an OmegaConf configuration into a list of keys represented in dot notation.</p> <p>Parameters:</p> <ul> <li> <code>config</code>               (<code>DictConfig</code>)           \u2013            <p>The OmegaConf configuration object.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>List[str]: A list of keys in dot notation.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; config = OmegaConf.create({\n...     \"web\": {\n...         \"test\": True\n...     },\n...     \"db\": {\n...         \"host\": \"localhost\",\n...         \"port\": 5432\n...     }\n... })\n&gt;&gt;&gt; get_keys_in_dot_syntax(config)\n['web.test', 'db.host', 'db.port']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_keys_in_dot_syntax(config):\n    \"\"\"Retrieve all keys in an OmegaConf configuration in dot notation.\n\n    This function converts an OmegaConf configuration into a list of keys\n    represented in dot notation.\n\n    Args:\n        config (DictConfig): The OmegaConf configuration object.\n\n    Returns:\n        List[str]: A list of keys in dot notation.\n\n    Examples:\n        &gt;&gt;&gt; config = OmegaConf.create({\n        ...     \"web\": {\n        ...         \"test\": True\n        ...     },\n        ...     \"db\": {\n        ...         \"host\": \"localhost\",\n        ...         \"port\": 5432\n        ...     }\n        ... 
})\n        &gt;&gt;&gt; get_keys_in_dot_syntax(config)\n        ['web.test', 'db.host', 'db.port']\n    \"\"\"\n    from omegaconf import OmegaConf\n\n    container = OmegaConf.to_container(config, resolve=True)\n    keys = []\n\n    def recursive_keys(d, parent_key=\"\"):\n        for k, v in d.items():\n            full_key = f\"{parent_key}.{k}\" if parent_key else k\n            if isinstance(v, dict):\n                recursive_keys(v, full_key)\n            else:\n                keys.append(full_key)\n\n    recursive_keys(container)\n    return keys\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_size","title":"get_size","text":"<pre><code>get_size(obj, max_depth=5, seen=None)\n</code></pre> <p>Roughly estimate the memory footprint of a Python object using recursion.</p> <p>Parameters:</p> <ul> <li> <code>obj</code>               (<code>any</code>)           \u2013            <p>The object whose size is to be determined.</p> </li> <li> <code>max_depth</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>Maximum depth to which nested objects will be inspected. Defaults to 5.</p> </li> <li> <code>seen</code>               (<code>set</code>, default:                   <code>None</code> )           \u2013            <p>Objects that have already been accounted for, to avoid loops.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>Approximate memory footprint of the object in bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; get_size(my_list)\n4200\n</code></pre> <pre><code>&gt;&gt;&gt; get_size(my_dict, max_depth=3)\n8400\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_size(obj, max_depth=5, seen=None):\n    \"\"\"\n    Roughly estimate the memory footprint of a Python object using recursion.\n\n    Parameters:\n        obj (any): The object whose size is to be determined.\n        max_depth (int, optional): Maximum depth to which nested objects will be inspected. 
Defaults to 5.\n        seen (set, optional): Objects that have already been accounted for, to avoid loops.\n\n    Returns:\n        int: Approximate memory footprint of the object in bytes.\n\n    Examples:\n        &gt;&gt;&gt; get_size(my_list)\n        4200\n\n        &gt;&gt;&gt; get_size(my_dict, max_depth=3)\n        8400\n    \"\"\"\n    from collections.abc import Mapping\n\n    # If seen is not provided, initialize an empty set\n    if seen is None:\n        seen = set()\n    # Get the id of the object\n    obj_id = id(obj)\n    # Decrease the maximum depth for the next recursion\n    new_max_depth = max_depth - 1\n    # If the object has already been seen or we've reached the maximum recursion depth, return 0\n    if obj_id in seen or new_max_depth &lt;= 0:\n        return 0\n    # Get the size of the object\n    size = sys.getsizeof(obj)\n    # Add the object's id to the set of seen objects\n    seen.add(obj_id)\n    # If the object has a __dict__ attribute, we want to measure its size\n    if hasattr(obj, \"__dict__\"):\n        # Iterate over the Method Resolution Order (MRO) of the class of the object\n        for cls in obj.__class__.__mro__:\n            # If the class's __dict__ contains a __dict__ key\n            if \"__dict__\" in cls.__dict__:\n                for k, v in obj.__dict__.items():\n                    size += get_size(k, new_max_depth, seen)\n                    size += get_size(v, new_max_depth, seen)\n                break\n    # If the object is a mapping (like a dictionary), we want to measure the size of its items\n    if isinstance(obj, Mapping):\n        with suppress(StopIteration):\n            k, v = next(iter(obj.items()))\n            size += (get_size(k, new_max_depth, seen) + get_size(v, new_max_depth, seen)) * len(obj)\n    # If the object is a container (like a list or tuple) but not a string or bytes-like object\n    elif isinstance(obj, (list, tuple, set)):\n        with suppress(StopIteration):\n            size += get_size(next(iter(obj)), new_max_depth, seen) * len(obj)\n    # If the object has __slots__, we want to measure the size of the attributes in __slots__\n    if hasattr(obj, \"__slots__\"):\n        size += sum(get_size(getattr(obj, s), new_max_depth, seen) for s in obj.__slots__ if hasattr(obj, s))\n    return size\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.get_traceback_details","title":"get_traceback_details","text":"<pre><code>get_traceback_details(e)\n</code></pre> <p>Retrieves detailed information from the traceback of an exception.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception for which to get traceback details.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing filename (str), line number (int), and function name (str) where the exception was raised.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except ValueError as e:\n...     filename, lineno, funcname = get_traceback_details(e)\n...     
print(f\"File: {filename}, Line: {lineno}, Function: {funcname}\")\nFile: &lt;stdin&gt;, Line: 2, Function: &lt;module&gt;\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def get_traceback_details(e):\n    \"\"\"\n    Retrieves detailed information from the traceback of an exception.\n\n    Args:\n        e (BaseException): The exception for which to get traceback details.\n\n    Returns:\n        tuple: A tuple containing filename (str), line number (int), and function name (str) where the exception was raised.\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except ValueError as e:\n        ...     filename, lineno, funcname = get_traceback_details(e)\n        ...     print(f\"File: {filename}, Line: {lineno}, Function: {funcname}\")\n        File: &lt;stdin&gt;, Line: 2, Function: &lt;module&gt;\n    \"\"\"\n    import traceback\n\n    tb = traceback.extract_tb(e.__traceback__)\n    last_frame = tb[-1]  # Get the last frame in the traceback (the one where the exception was raised)\n    filename = last_frame.filename\n    lineno = last_frame.lineno\n    funcname = last_frame.name\n    return filename, lineno, funcname\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.grouper","title":"grouper","text":"<pre><code>grouper(iterable, n)\n</code></pre> <p>Grouper groups an iterable into chunks of a given size.</p> <p>Parameters:</p> <ul> <li> <code>iterable</code>               (<code>iterable</code>)           \u2013            <p>The iterable to be chunked.</p> </li> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The size of each chunk.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>iterator</code>          \u2013            <p>An iterator that produces lists of elements from the original iterable, each of length <code>n</code> or less.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(grouper('ABCDEFG', 3))\n[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def grouper(iterable, n):\n    \"\"\"\n    Grouper groups an iterable into chunks of a given size.\n\n    Args:\n        iterable (iterable): The iterable to be chunked.\n        n (int): The size of each chunk.\n\n    Returns:\n        iterator: An iterator that produces lists of elements from the original iterable, each of length `n` or less.\n\n    Examples:\n        &gt;&gt;&gt; list(grouper('ABCDEFG', 3))\n        [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]\n    \"\"\"\n    from itertools import islice\n\n    iterable = iter(iterable)\n    return iter(lambda: list(islice(iterable, n)), [])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.hash_url","title":"hash_url","text":"<pre><code>hash_url(url)\n</code></pre> <p>Hashes a URL for the purpose of cleaning or collapsing similar URLs.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL to be hashed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The hash value of the cleaned URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com')\n-7448777882396416944\n</code></pre> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/1')\n-8101275613229735915\n</code></pre> <pre><code>&gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/2')\n-8101275613229735915\n</code></pre> 
Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def hash_url(url):\n    \"\"\"\n    Hashes a URL for the purpose of cleaning or collapsing similar URLs.\n\n    Args:\n        url (str): The URL to be hashed.\n\n    Returns:\n        int: The hash value of the cleaned URL.\n\n    Examples:\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com')\n        -7448777882396416944\n\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/1')\n        -8101275613229735915\n\n        &gt;&gt;&gt; hash_url('https://www.evilcorp.com/page/2')\n        -8101275613229735915\n    \"\"\"\n    parsed = parse_url(url)\n    parsed = parsed._replace(fragment=\"\", query=\"\")\n    to_hash = [parsed.netloc]\n    for segment in parsed.path.split(\"/\"):\n        hash_segment = []\n        hash_segment.append(charset(segment))\n        hash_segment.append(param_type(segment))\n        dot_split = segment.split(\".\")\n        if len(dot_split) &gt; 1:\n            hash_segment.append(dot_split[-1])\n        else:\n            hash_segment.append(\"\")\n        to_hash.append(tuple(hash_segment))\n    return hash(tuple(to_hash))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.human_timedelta","title":"human_timedelta","text":"<pre><code>human_timedelta(d)\n</code></pre> <p>Convert a TimeDelta object into a human-readable string.</p> <p>This function takes a datetime.timedelta object and converts it into a string format that is easier to read and understand.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>timedelta</code>)           \u2013            <p>The TimeDelta object to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representation of the TimeDelta object in human-readable form.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; from datetime import datetime\n&gt;&gt;&gt;\n&gt;&gt;&gt; start_time = datetime.now()\n&gt;&gt;&gt; end_time = datetime.now()\n&gt;&gt;&gt; elapsed_time = end_time - start_time\n&gt;&gt;&gt; human_timedelta(elapsed_time)\n'2 hours, 30 minutes, 15 seconds'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def human_timedelta(d):\n    \"\"\"Convert a TimeDelta object into a human-readable string.\n\n    This function takes a datetime.timedelta object and converts it into a string format that\n    is easier to read and understand.\n\n    Args:\n        d (datetime.timedelta): The TimeDelta object to convert.\n\n    Returns:\n        str: A string representation of the TimeDelta object in human-readable form.\n\n    Examples:\n        &gt;&gt;&gt; from datetime import datetime\n        &gt;&gt;&gt;\n        &gt;&gt;&gt; start_time = datetime.now()\n        &gt;&gt;&gt; end_time = datetime.now()\n        &gt;&gt;&gt; elapsed_time = end_time - start_time\n        &gt;&gt;&gt; human_timedelta(elapsed_time)\n        '2 hours, 30 minutes, 15 seconds'\n    \"\"\"\n    hours, remainder = divmod(d.seconds, 3600)\n    minutes, seconds = divmod(remainder, 60)\n    result = []\n    if hours:\n        result.append(f\"{hours:,} hour\" + (\"s\" if hours &gt; 1 else \"\"))\n    if minutes:\n        result.append(f\"{minutes:,} minute\" + (\"s\" if minutes &gt; 1 else \"\"))\n    if seconds:\n        result.append(f\"{seconds:,} second\" + (\"s\" if seconds &gt; 1 else \"\"))\n    ret = \", \".join(result)\n    if not ret:\n        ret = \"0 seconds\"\n    return 
ret\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.human_to_bytes","title":"human_to_bytes","text":"<pre><code>human_to_bytes(filesize)\n</code></pre> <p>Convert a human-readable file size string to its bytes equivalent.</p> <p>This function takes a human-readable file size string, such as \"2.5GB\", and converts it to its equivalent number of bytes.</p> <p>Parameters:</p> <ul> <li> <code>filesize</code>               (<code>str or int</code>)           \u2013            <p>The human-readable file size string or integer bytes value to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The number of bytes equivalent to the input human-readable file size.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string cannot be converted to bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; human_to_bytes(\"23.23gb\")\n24943022571\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def human_to_bytes(filesize):\n    \"\"\"Convert a human-readable file size string to its bytes equivalent.\n\n    This function takes a human-readable file size string, such as \"2.5GB\", and converts it\n    to its equivalent number of bytes.\n\n    Args:\n        filesize (str or int): The human-readable file size string or integer bytes value to convert.\n\n    Returns:\n        int: The number of bytes equivalent to the input human-readable file size.\n\n    Raises:\n        ValueError: If the input string cannot be converted to bytes.\n\n    Examples:\n        &gt;&gt;&gt; human_to_bytes(\"23.23gb\")\n        24943022571\n    \"\"\"\n    if isinstance(filesize, int):\n        return filesize\n    sizes = [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]\n    units = {}\n    for count, size in enumerate(sizes):\n        size_increment = pow(1024, count)\n        units[size] = size_increment\n        if len(size) == 2:\n            units[size[0]] = size_increment\n    match = filesize_regex.match(filesize)\n    try:\n        if match:\n            num, size = match.groups()\n            size = size.upper()\n            size_increment = units[size]\n            return int(float(num) * size_increment)\n    except KeyError:\n        pass\n    raise ValueError(f'Unable to convert filesize \"{filesize}\" to bytes')\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.in_exception_chain","title":"in_exception_chain","text":"<pre><code>in_exception_chain(e, exc_types)\n</code></pre> <p>Given an Exception and a list of Exception types, returns whether any of the specified types are contained anywhere in the Exception chain.</p> <p>Parameters:</p> <ul> <li> <code>e</code>               (<code>BaseException</code>)           \u2013            <p>The exception to check</p> </li> <li> <code>exc_types</code>               (<code>list[Exception]</code>)           \u2013            <p>Exception types to consider intentional cancellations. Default is KeyboardInterrupt</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>Whether the error is the result of an intentional cancellation</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; try:\n...     raise ValueError(\"This is a value error\")\n... except Exception as e:\n...     if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n...         
raise\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def in_exception_chain(e, exc_types):\n    \"\"\"\n    Given an Exception and a list of Exception types, returns whether any of the specified types are contained anywhere in the Exception chain.\n\n    Args:\n        e (BaseException): The exception to check\n        exc_types (list[Exception]): Exception types to consider intentional cancellations. Default is KeyboardInterrupt\n\n    Returns:\n        bool: Whether the error is the result of an intentional cancellaion\n\n    Examples:\n        &gt;&gt;&gt; try:\n        ...     raise ValueError(\"This is a value error\")\n        ... except Exception as e:\n        ...     if not in_exception_chain(e, (KeyboardInterrupt, asyncio.CancelledError)):\n        ...         raise\n    \"\"\"\n    return any([isinstance(_, exc_types) for _ in get_exception_chain(e)])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.integer_to_ordinal","title":"integer_to_ordinal","text":"<pre><code>integer_to_ordinal(n)\n</code></pre> <p>Convert an integer to its ordinal representation.</p> <p>Parameters:</p> <ul> <li> <code>n</code>               (<code>int</code>)           \u2013            <p>The integer to convert.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The ordinal representation of the integer.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; integer_to_ordinal(1)\n'1st'\n&gt;&gt;&gt; integer_to_ordinal(2)\n'2nd'\n&gt;&gt;&gt; integer_to_ordinal(3)\n'3rd'\n&gt;&gt;&gt; integer_to_ordinal(11)\n'11th'\n&gt;&gt;&gt; integer_to_ordinal(21)\n'21st'\n&gt;&gt;&gt; integer_to_ordinal(101)\n'101st'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def integer_to_ordinal(n):\n    \"\"\"\n    Convert an integer to its ordinal representation.\n\n    Args:\n        n (int): The integer to convert.\n\n    Returns:\n        str: The ordinal representation of the integer.\n\n    Examples:\n        &gt;&gt;&gt; integer_to_ordinal(1)\n        '1st'\n        &gt;&gt;&gt; integer_to_ordinal(2)\n        '2nd'\n        &gt;&gt;&gt; integer_to_ordinal(3)\n        '3rd'\n        &gt;&gt;&gt; integer_to_ordinal(11)\n        '11th'\n        &gt;&gt;&gt; integer_to_ordinal(21)\n        '21st'\n        &gt;&gt;&gt; integer_to_ordinal(101)\n        '101st'\n    \"\"\"\n    # Check the last digit\n    last_digit = n % 10\n    # Check the last two digits for special cases (11th, 12th, 13th)\n    last_two_digits = n % 100\n\n    if 10 &lt;= last_two_digits &lt;= 20:\n        suffix = \"th\"\n    else:\n        if last_digit == 1:\n            suffix = \"st\"\n        elif last_digit == 2:\n            suffix = \"nd\"\n        elif last_digit == 3:\n            suffix = \"rd\"\n        else:\n            suffix = \"th\"\n\n    return f\"{n}{suffix}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.ip_network_parents","title":"ip_network_parents","text":"<pre><code>ip_network_parents(i, include_self=False)\n</code></pre> <p>Generates all parent IP networks for a given IP address or network, optionally including the network itself.</p> <p>Parameters:</p> <ul> <li> <code>i</code>               (<code>str or IPv4Network / IPv6Network</code>)           \u2013            <p>The IP address or network to find parents for.</p> </li> <li> <code>include_self</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether 
to include the network itself in the result. Default is False.</p> </li> </ul> <p>Yields:</p> <ul> <li>           \u2013            <p>ipaddress.IPv4Network or ipaddress.IPv6Network: Parent IP networks in descending order of prefix length.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(ip_network_parents(\"192.168.1.1\"))\n[ipaddress.IPv4Network('192.168.1.0/31'), ipaddress.IPv4Network('192.168.1.0/30'), ... , ipaddress.IPv4Network('0.0.0.0/0')]\n</code></pre> Notes <ul> <li>Utilizes Python's built-in <code>ipaddress</code> module for network operations.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def ip_network_parents(i, include_self=False):\n    \"\"\"\n    Generates all parent IP networks for a given IP address or network, optionally including the network itself.\n\n    Args:\n        i (str or ipaddress.IPv4Network/ipaddress.IPv6Network): The IP address or network to find parents for.\n        include_self (bool, optional): Whether to include the network itself in the result. Default is False.\n\n    Yields:\n        ipaddress.IPv4Network or ipaddress.IPv6Network: Parent IP networks in descending order of prefix length.\n\n    Examples:\n        &gt;&gt;&gt; list(ip_network_parents(\"192.168.1.1\"))\n        [ipaddress.IPv4Network('192.168.1.0/31'), ipaddress.IPv4Network('192.168.1.0/30'), ... , ipaddress.IPv4Network('0.0.0.0/0')]\n\n    Notes:\n        - Utilizes Python's built-in `ipaddress` module for network operations.\n    \"\"\"\n    net = ipaddress.ip_network(i, strict=False)\n    for i in range(net.prefixlen - (0 if include_self else 1), -1, -1):\n        yield ipaddress.ip_network(f\"{net.network_address}/{i}\", strict=False)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_async_function","title":"is_async_function","text":"<pre><code>is_async_function(f)\n</code></pre> <p>Check if a given function is an asynchronous function.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>function</code>)           \u2013            <p>The function to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the function is asynchronous, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async def foo():\n...     pass\n&gt;&gt;&gt; is_async_function(foo)\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_async_function(f):\n    \"\"\"\n    Check if a given function is an asynchronous function.\n\n    Args:\n        f (function): The function to check.\n\n    Returns:\n        bool: True if the function is asynchronous, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; async def foo():\n        ...     
pass\n        &gt;&gt;&gt; is_async_function(foo)\n        True\n    \"\"\"\n    import inspect\n\n    return inspect.iscoroutinefunction(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_dns_name","title":"is_dns_name","text":"<pre><code>is_dns_name(d, include_local=True)\n</code></pre> <p>Determines if the given string is a valid DNS name.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The string to be checked.</p> </li> <li> <code>include_local</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Consider local hostnames to be valid (hostnames without periods)</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the string is a valid DNS name, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_dns_name('www.example.com')\nTrue\n&gt;&gt;&gt; is_dns_name('localhost')\nTrue\n&gt;&gt;&gt; is_dns_name('localhost', include_local=False)\nFalse\n&gt;&gt;&gt; is_dns_name('192.168.1.1')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_dns_name(d, include_local=True):\n    \"\"\"\n    Determines if the given string is a valid DNS name.\n\n    Args:\n        d (str): The string to be checked.\n        include_local (bool): Consider local hostnames to be valid (hostnames without periods)\n\n    Returns:\n        bool: True if the string is a valid DNS name, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_dns_name('www.example.com')\n        True\n        &gt;&gt;&gt; is_dns_name('localhost')\n        True\n        &gt;&gt;&gt; is_dns_name('localhost', include_local=False)\n        False\n        &gt;&gt;&gt; is_dns_name('192.168.1.1')\n        False\n    \"\"\"\n    if is_ip(d):\n        return False\n    d = smart_decode(d)\n    if include_local:\n        if bbot_regexes.hostname_regex.match(d):\n            return True\n    if bbot_regexes.dns_name_validation_regex.match(d):\n        return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_domain","title":"is_domain","text":"<pre><code>is_domain(d)\n</code></pre> <p>Check if the given input represents a domain without subdomains.</p> <p>This function takes an input string <code>d</code> and returns True if it represents a domain without any subdomains. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input is a domain without subdomains, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_domain(\"evilcorp.co.uk\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_domain(\"www.evilcorp.co.uk\")\nFalse\n</code></pre> Notes <ul> <li>Port, if present in input, is ignored.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_domain(d):\n    \"\"\"\n    Check if the given input represents a domain without subdomains.\n\n    This function takes an input string `d` and returns True if it represents a domain without any subdomains.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string containing the domain.\n\n    Returns:\n        bool: True if the input is a domain without subdomains, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_domain(\"evilcorp.co.uk\")\n        True\n\n        &gt;&gt;&gt; is_domain(\"www.evilcorp.co.uk\")\n        False\n\n    Notes:\n        - Port, if present in input, is ignored.\n    \"\"\"\n    d, _ = split_host_port(d)\n    if is_ip(d):\n        return False\n    extracted = tldextract(d)\n    if extracted.registered_domain:\n        if not extracted.subdomain:\n            return True\n    else:\n        return d.count(\".\") == 1\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_file","title":"is_file","text":"<pre><code>is_file(f)\n</code></pre> <p>Check if a path points to a file.</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str</code>)           \u2013            <p>Path to the file.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the path is a file, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_file(\"/etc/passwd\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_file(\"/nonexistent\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_file(f):\n    \"\"\"\n    Check if a path points to a file.\n\n    Parameters:\n        f (str): Path to the file.\n\n    Returns:\n        bool: True if the path is a file, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_file(\"/etc/passwd\")\n        True\n\n        &gt;&gt;&gt; is_file(\"/nonexistent\")\n        False\n    \"\"\"\n    with suppress(Exception):\n        return Path(f).is_file()\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ip","title":"is_ip","text":"<pre><code>is_ip(d, version=None, include_network=False)\n</code></pre> <p>Checks if the given string or object represents a valid IP address.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or IPvXAddress</code>)           \u2013            <p>The IP address to check.</p> </li> <li> <code>include_network</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include network types (IPv4Network or IPv6Network). Defaults to False.</p> </li> <li> <code>version</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The IP version to validate (4 or 6). 
Default is None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the string or object is a valid IP address, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ip('192.168.1.1')\nTrue\n&gt;&gt;&gt; is_ip('bad::c0de', version=6)\nTrue\n&gt;&gt;&gt; is_ip('bad::c0de', version=4)\nFalse\n&gt;&gt;&gt; is_ip('evilcorp.com')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ip(d, version=None, include_network=False):\n    \"\"\"\n    Checks if the given string or object represents a valid IP address.\n\n    Args:\n        d (str or ipaddress.IPvXAddress): The IP address to check.\n        include_network (bool, optional): Whether to include network types (IPv4Network or IPv6Network). Defaults to False.\n        version (int, optional): The IP version to validate (4 or 6). Default is None.\n\n    Returns:\n        bool: True if the string or object is a valid IP address, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ip('192.168.1.1')\n        True\n        &gt;&gt;&gt; is_ip('bad::c0de', version=6)\n        True\n        &gt;&gt;&gt; is_ip('bad::c0de', version=4)\n        False\n        &gt;&gt;&gt; is_ip('evilcorp.com')\n        False\n    \"\"\"\n    ip = None\n    try:\n        ip = ipaddress.ip_address(d)\n    except Exception:\n        if include_network:\n            try:\n                ip = ipaddress.ip_network(d, strict=False)\n            except Exception:\n                pass\n    if ip is not None and (version is None or ip.version == version):\n        return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ip_type","title":"is_ip_type","text":"<pre><code>is_ip_type(i, network=None)\n</code></pre> <p>Checks if the given object is an instance of an IPv4 or IPv6 type from the ipaddress module.</p> <p>Parameters:</p> <ul> <li> <code>i</code>               (<code>_BaseV4 or _BaseV6</code>)           \u2013            <p>The IP object to check.</p> </li> <li> <code>network</code>               (<code>bool</code>, default:                   <code>None</code> )           \u2013            <p>Whether to restrict the check to network types (IPv4Network or IPv6Network). Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the object is an instance of ipaddress._BaseV4 or ipaddress._BaseV6, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ip_type(ipaddress.IPv6Address('dead::beef'))\nTrue\n&gt;&gt;&gt; is_ip_type(ipaddress.IPv4Network('192.168.1.0/24'))\nTrue\n&gt;&gt;&gt; is_ip_type(\"192.168.1.0/24\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ip_type(i, network=None):\n    \"\"\"\n    Checks if the given object is an instance of an IPv4 or IPv6 type from the ipaddress module.\n\n    Args:\n        i (ipaddress._BaseV4 or ipaddress._BaseV6): The IP object to check.\n        network (bool, optional): Whether to restrict the check to network types (IPv4Network or IPv6Network). 
Defaults to False.\n\n    Returns:\n        bool: True if the object is an instance of ipaddress._BaseV4 or ipaddress._BaseV6, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ip_type(ipaddress.IPv6Address('dead::beef'))\n        True\n        &gt;&gt;&gt; is_ip_type(ipaddress.IPv4Network('192.168.1.0/24'))\n        True\n        &gt;&gt;&gt; is_ip_type(\"192.168.1.0/24\")\n        False\n    \"\"\"\n    if network is not None:\n        is_network = ipaddress._BaseNetwork in i.__class__.__mro__\n        if network:\n            return is_network\n        else:\n            return not is_network\n    return ipaddress._IPAddressBase in i.__class__.__mro__\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_port","title":"is_port","text":"<pre><code>is_port(p)\n</code></pre> <p>Checks if the given string represents a valid port number.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str or int</code>)           \u2013            <p>The port number to check.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the port number is valid, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_port('80')\nTrue\n&gt;&gt;&gt; is_port('70000')\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_port(p):\n    \"\"\"\n    Checks if the given string represents a valid port number.\n\n    Args:\n        p (str or int): The port number to check.\n\n    Returns:\n        bool: True if the port number is valid, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_port('80')\n        True\n        &gt;&gt;&gt; is_port('70000')\n        False\n    \"\"\"\n\n    p = str(p)\n    return p and p.isdigit() and 0 &lt;= int(p) &lt;= 65535\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_ptr","title":"is_ptr","text":"<pre><code>is_ptr(d)\n</code></pre> <p>Check if the given input represents a PTR record domain.</p> <p>This function takes an input string <code>d</code> and returns True if it matches the PTR record format. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a PTR record domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input matches PTR record format, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_ptr(\"wsc-11-22-33-44.evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_ptr(\"www2.evilcorp.com\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_ptr(d):\n    \"\"\"\n    Check if the given input represents a PTR record domain.\n\n    This function takes an input string `d` and returns True if it matches the PTR record format.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string potentially representing a PTR record domain.\n\n    Returns:\n        bool: True if the input matches PTR record format, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_ptr(\"wsc-11-22-33-44.evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_ptr(\"www2.evilcorp.com\")\n        False\n    \"\"\"\n    return bool(bbot_regexes.ptr_regex.search(str(d)))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_subdomain","title":"is_subdomain","text":"<pre><code>is_subdomain(d)\n</code></pre> <p>Check if the given input represents a subdomain.</p> <p>This function takes an input string <code>d</code> and returns True if it represents a subdomain. Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the domain or subdomain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input is a subdomain, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_subdomain(\"www.evilcorp.co.uk\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_subdomain(\"evilcorp.co.uk\")\nFalse\n</code></pre> Notes <ul> <li>Port, if present in input, is ignored.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_subdomain(d):\n    \"\"\"\n    Check if the given input represents a subdomain.\n\n    This function takes an input string `d` and returns True if it represents a subdomain.\n    Otherwise, it returns False.\n\n    Args:\n        d (str): The input string containing the domain or subdomain.\n\n    Returns:\n        bool: True if the input is a subdomain, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_subdomain(\"www.evilcorp.co.uk\")\n        True\n\n        &gt;&gt;&gt; is_subdomain(\"evilcorp.co.uk\")\n        False\n\n    Notes:\n        - Port, if present in input, is ignored.\n    \"\"\"\n    d, _ = split_host_port(d)\n    if is_ip(d):\n        return False\n    extracted = tldextract(d)\n    if extracted.registered_domain:\n        if extracted.subdomain:\n            return True\n    else:\n        return d.count(\".\") &gt; 1\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_uri","title":"is_uri","text":"<pre><code>is_uri(u, return_scheme=False)\n</code></pre> <p>Check if the given input represents a URI and optionally return its scheme.</p> <p>This function takes an input string <code>u</code> and returns True if it matches a URI format. 
When <code>return_scheme</code> is True, it returns the URI scheme instead of a boolean.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a URI.</p> </li> <li> <code>return_scheme</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to return the URI scheme. Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[bool, str]: True if the input matches a URI format; the URI scheme if <code>return_scheme</code> is True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_uri(\"http://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"evilcorp.com\")\nFalse\n</code></pre> <pre><code>&gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\", return_scheme=True)\n\"ftp\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_uri(u, return_scheme=False):\n    \"\"\"\n    Check if the given input represents a URI and optionally return its scheme.\n\n    This function takes an input string `u` and returns True if it matches a URI format.\n    When `return_scheme` is True, it returns the URI scheme instead of a boolean.\n\n    Args:\n        u (str): The input string potentially representing a URI.\n        return_scheme (bool, optional): Whether to return the URI scheme. Defaults to False.\n\n    Returns:\n        Union[bool, str]: True if the input matches a URI format; the URI scheme if `return_scheme` is True.\n\n    Examples:\n        &gt;&gt;&gt; is_uri(\"http://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_uri(\"evilcorp.com\")\n        False\n\n        &gt;&gt;&gt; is_uri(\"ftp://evilcorp.com\", return_scheme=True)\n        \"ftp\"\n    \"\"\"\n    match = uri_regex.match(u)\n    if return_scheme:\n        if match:\n            return match.groups()[0].lower()\n        return \"\"\n    return bool(match)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.is_url","title":"is_url","text":"<pre><code>is_url(u)\n</code></pre> <p>Check if the given input represents a valid URL.</p> <p>This function takes an input string <code>u</code> and returns True if it matches any of the predefined URL formats. 
Otherwise, it returns False.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string potentially representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the input matches a valid URL format, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; is_url(\"https://evilcorp.com\")\nTrue\n</code></pre> <pre><code>&gt;&gt;&gt; is_url(\"not-a-url\")\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def is_url(u):\n    \"\"\"\n    Check if the given input represents a valid URL.\n\n    This function takes an input string `u` and returns True if it matches any of the predefined URL formats.\n    Otherwise, it returns False.\n\n    Args:\n        u (str): The input string potentially representing a URL.\n\n    Returns:\n        bool: True if the input matches a valid URL format, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; is_url(\"https://evilcorp.com\")\n        True\n\n        &gt;&gt;&gt; is_url(\"not-a-url\")\n        False\n    \"\"\"\n    u = str(u)\n    for r in bbot_regexes.event_type_regexes[\"URL\"]:\n        if r.match(u):\n            return True\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.kill_children","title":"kill_children","text":"<pre><code>kill_children(parent_pid=None, sig=None)\n</code></pre> <p>Forgive me father for I have sinned</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def kill_children(parent_pid=None, sig=None):\n    \"\"\"\n    Forgive me father for I have sinned\n    \"\"\"\n    import psutil\n    import signal\n\n    if sig is None:\n        sig = signal.SIGTERM\n\n    try:\n        parent = psutil.Process(parent_pid)\n    except psutil.NoSuchProcess:\n        log.debug(f\"No such PID: {parent_pid}\")\n        return\n    log.debug(f\"Killing children of process ID {parent.pid}\")\n    children = parent.children(recursive=True)\n    for child in children:\n        log.debug(f\"Killing child with PID {child.pid}\")\n        if child.name != \"python\":\n            try:\n                child.send_signal(sig)\n            except psutil.NoSuchProcess:\n                log.debug(f\"No such PID: {child.pid}\")\n            except psutil.AccessDenied:\n                log.debug(f\"Error killing PID: {child.pid} - access denied\")\n    log.debug(f\"Finished killing children of process ID {parent.pid}\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.latest_mtime","title":"latest_mtime","text":"<pre><code>latest_mtime(d)\n</code></pre> <p>Get the latest modified time of any file or sub-directory in a given directory.</p> <p>This function takes a directory path as an argument and returns the latest modified time of any contained file or directory, recursively. 
It's useful for sorting directories by modified time for cleanup or other purposes.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to search for the latest modified time.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>float</code>          \u2013            <p>The latest modified time in Unix timestamp format.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; latest_mtime(\"~/.bbot/scans/mushy_susan\")\n1659016928.2848816\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def latest_mtime(d):\n    \"\"\"Get the latest modified time of any file or sub-directory in a given directory.\n\n    This function takes a directory path as an argument and returns the latest modified time\n    of any contained file or directory, recursively. It's useful for sorting directories by\n    modified time for cleanup or other purposes.\n\n    Args:\n        d (str or Path): The directory path to search for the latest modified time.\n\n    Returns:\n        float: The latest modified time in Unix timestamp format.\n\n    Examples:\n        &gt;&gt;&gt; latest_mtime(\"~/.bbot/scans/mushy_susan\")\n        1659016928.2848816\n    \"\"\"\n    d = Path(d).resolve()\n    mtimes = [d.lstat().st_mtime]\n    if d.is_dir():\n        to_list = d.glob(\"**/*\")\n    else:\n        to_list = [d]\n    for e in to_list:\n        mtimes.append(e.lstat().st_mtime)\n    return max(mtimes)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.list_files","title":"list_files","text":"<pre><code>list_files(directory, filter=lambda x: True)\n</code></pre> <p>Lists files in a given directory that meet a specified filter condition.</p> <p>Parameters:</p> <ul> <li> <code>directory</code>               (<code>str</code>)           \u2013            <p>The directory where to list files.</p> </li> <li> <code>filter</code>               (<code>callable</code>, default:                   <code>lambda x: True</code> )           \u2013            <p>A function to filter the files. Defaults to a lambda function that returns True for all files.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>Path</code>          \u2013            <p>A Path object for each file that meets the filter condition.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(list_files(\"/tmp/test\"))\n[Path('/tmp/test/file1.py'), Path('/tmp/test/file2.txt')]\n</code></pre> <pre><code>&gt;&gt;&gt; list(list_files(\"/tmp/test\"), filter=lambda f: f.suffix == \".py\")\n[Path('/tmp/test/file1.py')]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def list_files(directory, filter=lambda x: True):\n    \"\"\"Lists files in a given directory that meet a specified filter condition.\n\n    Args:\n        directory (str): The directory where to list files.\n        filter (callable, optional): A function to filter the files. 
Defaults to a lambda function that returns True for all files.\n\n    Yields:\n        Path: A Path object for each file that meets the filter condition.\n\n    Examples:\n        &gt;&gt;&gt; list(list_files(\"/tmp/test\"))\n        [Path('/tmp/test/file1.py'), Path('/tmp/test/file2.txt')]\n\n        &gt;&gt;&gt; list(list_files(\"/tmp/test\"), filter=lambda f: f.suffix == \".py\")\n        [Path('/tmp/test/file1.py')]\n    \"\"\"\n    directory = Path(directory).resolve()\n    if directory.is_dir():\n        for file in directory.iterdir():\n            if file.is_file() and filter(file):\n                yield file\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_date","title":"make_date","text":"<pre><code>make_date(d=None, microseconds=False)\n</code></pre> <p>Generates a string representation of the current date and time, with optional microsecond precision.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>datetime</code>, default:                   <code>None</code> )           \u2013            <p>A datetime object to convert. Defaults to the current date and time.</p> </li> <li> <code>microseconds</code>               (<code>bool</code>, default:                   <code>False</code> )           \u2013            <p>Whether to include microseconds. Defaults to False.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representation of the date and time, formatted as YYYYMMDD_HHMM_SS or YYYYMMDD_HHMM_SSFFFFFF if microseconds are included.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_date()\n\"20220707_1325_50\"\n&gt;&gt;&gt; make_date(microseconds=True)\n\"20220707_1330_35167617\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_date(d=None, microseconds=False):\n    \"\"\"\n    Generates a string representation of the current date and time, with optional microsecond precision.\n\n    Args:\n        d (datetime, optional): A datetime object to convert. Defaults to the current date and time.\n        microseconds (bool, optional): Whether to include microseconds. Defaults to False.\n\n    Returns:\n        str: A string representation of the date and time, formatted as YYYYMMDD_HHMM_SS or YYYYMMDD_HHMM_SSFFFFFF if microseconds are included.\n\n    Examples:\n        &gt;&gt;&gt; make_date()\n        \"20220707_1325_50\"\n        &gt;&gt;&gt; make_date(microseconds=True)\n        \"20220707_1330_35167617\"\n    \"\"\"\n    from datetime import datetime\n\n    f = \"%Y%m%d_%H%M_%S\"\n    if microseconds:\n        f += \"%f\"\n    if d is None:\n        d = datetime.now()\n    return d.strftime(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_ip_type","title":"make_ip_type","text":"<pre><code>make_ip_type(s)\n</code></pre> <p>Convert a string to its corresponding IP address or network type.</p> <p>This function attempts to convert the input string <code>s</code> into either an IPv4 or IPv6 address object, or an IPv4 or IPv6 network object. 
If none of these conversions are possible, the original string is returned.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string to be converted.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[IPv4Address, IPv6Address, IPv4Network, IPv6Network, str]: The converted object or original string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_ip_type(\"dead::beef\")\nIPv6Address('dead::beef')\n</code></pre> <pre><code>&gt;&gt;&gt; make_ip_type(\"192.168.1.0/24\")\nIPv4Network('192.168.1.0/24')\n</code></pre> <pre><code>&gt;&gt;&gt; make_ip_type(\"evilcorp.com\")\n'evilcorp.com'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_ip_type(s):\n    \"\"\"\n    Convert a string to its corresponding IP address or network type.\n\n    This function attempts to convert the input string `s` into either an IPv4 or IPv6 address object,\n    or an IPv4 or IPv6 network object. If none of these conversions are possible, the original string is returned.\n\n    Args:\n        s (str): The input string to be converted.\n\n    Returns:\n        Union[IPv4Address, IPv6Address, IPv4Network, IPv6Network, str]: The converted object or original string.\n\n    Examples:\n        &gt;&gt;&gt; make_ip_type(\"dead::beef\")\n        IPv6Address('dead::beef')\n\n        &gt;&gt;&gt; make_ip_type(\"192.168.1.0/24\")\n        IPv4Network('192.168.1.0/24')\n\n        &gt;&gt;&gt; make_ip_type(\"evilcorp.com\")\n        'evilcorp.com'\n    \"\"\"\n    if not s:\n        raise ValueError(f'Invalid hostname: \"{s}\"')\n    # IP address\n    with suppress(Exception):\n        return ipaddress.ip_address(s)\n    # IP network\n    with suppress(Exception):\n        return ipaddress.ip_network(s, strict=False)\n    return s\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_netloc","title":"make_netloc","text":"<pre><code>make_netloc(host, port=None)\n</code></pre> <p>Constructs a network location string from a given host and port.</p> <p>Parameters:</p> <ul> <li> <code>host</code>               (<code>str</code>)           \u2013            <p>The hostname or IP address.</p> </li> <li> <code>port</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The port number. If None, the port is omitted.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A network location string in the form 'host' or 'host:port'.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; make_netloc(\"192.168.1.1\", None)\n\"192.168.1.1\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"192.168.1.1\", 443)\n\"192.168.1.1:443\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"evilcorp.com\", 80)\n\"evilcorp.com:80\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"dead::beef\", None)\n\"[dead::beef]\"\n</code></pre> <pre><code>&gt;&gt;&gt; make_netloc(\"dead::beef\", 443)\n\"[dead::beef]:443\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_netloc(host, port=None):\n    \"\"\"Constructs a network location string from a given host and port.\n\n    Args:\n        host (str): The hostname or IP address.\n        port (int, optional): The port number. 
If None, the port is omitted.\n\n    Returns:\n        str: A network location string in the form 'host' or 'host:port'.\n\n    Examples:\n        &gt;&gt;&gt; make_netloc(\"192.168.1.1\", None)\n        \"192.168.1.1\"\n\n        &gt;&gt;&gt; make_netloc(\"192.168.1.1\", 443)\n        \"192.168.1.1:443\"\n\n        &gt;&gt;&gt; make_netloc(\"evilcorp.com\", 80)\n        \"evilcorp.com:80\"\n\n        &gt;&gt;&gt; make_netloc(\"dead::beef\", None)\n        \"[dead::beef]\"\n\n        &gt;&gt;&gt; make_netloc(\"dead::beef\", 443)\n        \"[dead::beef]:443\"\n    \"\"\"\n    if is_ip(host, version=6):\n        host = f\"[{host}]\"\n    if port is None:\n        return str(host)\n    return f\"{host}:{port}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.make_table","title":"make_table","text":"<pre><code>make_table(rows, header, **kwargs)\n</code></pre> <p>Generate a formatted table from the given rows and headers.</p> <p>This function uses the <code>tabulate</code> package to generate a table with formatting options. It can accept various input formats and table styles, which can be customized using optional arguments.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Positional arguments to be passed to <code>tabulate.tabulate</code>.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to customize table formatting. - tablefmt (str, optional): Table format. Default is 'grid'. - disable_numparse (bool, optional): Disable automatic number parsing. Default is True. - maxcolwidths (int, optional): Maximum column width. Default is 40.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the formatted table.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; print(make_table([[\"row1\", \"row1\"], [\"row2\", \"row2\"]], [\"header1\", \"header2\"]))\n+-----------+-----------+\n| header1   | header2   |\n+===========+===========+\n| row1      | row1      |\n+-----------+-----------+\n| row2      | row2      |\n+-----------+-----------+\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def make_table(rows, header, **kwargs):\n    \"\"\"Generate a formatted table from the given rows and headers.\n\n    This function uses the `tabulate` package to generate a table with formatting options.\n    It can accept various input formats and table styles, which can be customized using optional arguments.\n\n    Args:\n        *args: Positional arguments to be passed to `tabulate.tabulate`.\n        **kwargs: Keyword arguments to customize table formatting.\n            - tablefmt (str, optional): Table format. Default is 'grid'.\n            - disable_numparse (bool, optional): Disable automatic number parsing. Default is True.\n            - maxcolwidths (int, optional): Maximum column width. 
Default is 40.\n\n    Returns:\n        str: A string representing the formatted table.\n\n    Examples:\n        &gt;&gt;&gt; print(make_table([[\"row1\", \"row1\"], [\"row2\", \"row2\"]], [\"header1\", \"header2\"]))\n        +-----------+-----------+\n        | header1   | header2   |\n        +===========+===========+\n        | row1      | row1      |\n        +-----------+-----------+\n        | row2      | row2      |\n        +-----------+-----------+\n    \"\"\"\n    from tabulate import tabulate\n\n    # fix IndexError: list index out of range\n    if not rows:\n        rows = [[]]\n    tablefmt = os.environ.get(\"BBOT_TABLE_FORMAT\", None)\n    defaults = {\"tablefmt\": \"grid\", \"disable_numparse\": True, \"maxcolwidths\": None}\n    if tablefmt is None:\n        defaults.update({\"maxcolwidths\": 40})\n    else:\n        defaults.update({\"tablefmt\": tablefmt})\n    for k, v in defaults.items():\n        if k not in kwargs:\n            kwargs[k] = v\n    # don't wrap columns in markdown\n    if tablefmt in (\"github\", \"markdown\"):\n        kwargs.pop(\"maxcolwidths\")\n        # escape problematic markdown characters in rows\n\n        def markdown_escape(s):\n            return str(s).replace(\"|\", \"&amp;#124;\")\n\n        rows = [[markdown_escape(f) for f in row] for row in rows]\n        header = [markdown_escape(h) for h in header]\n    return tabulate(rows, header, **kwargs)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.memory_status","title":"memory_status","text":"<pre><code>memory_status()\n</code></pre> <p>Return statistics on system memory consumption.</p> <p>The function returns a <code>psutil</code> named tuple that contains statistics on system virtual memory usage, such as total memory, used memory, available memory, and more.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>psutil._pslinux.svmem: A named tuple representing various statistics about system virtual memory usage.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; mem = memory_status()\n&gt;&gt;&gt; mem.available\n13195399168\n</code></pre> <pre><code>&gt;&gt;&gt; mem = memory_status()\n&gt;&gt;&gt; mem.percent\n79.0\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def memory_status():\n    \"\"\"Return statistics on system memory consumption.\n\n    The function returns a `psutil` named tuple that contains statistics on\n    system virtual memory usage, such as total memory, used memory, available\n    memory, and more.\n\n    Returns:\n        psutil._pslinux.svmem: A named tuple representing various statistics\n            about system virtual memory usage.\n\n    Examples:\n        &gt;&gt;&gt; mem = memory_status()\n        &gt;&gt;&gt; mem.available\n        13195399168\n\n        &gt;&gt;&gt; mem = memory_status()\n        &gt;&gt;&gt; mem.percent\n        79.0\n    \"\"\"\n    import psutil\n\n    return psutil.virtual_memory()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.mkdir","title":"mkdir","text":"<pre><code>mkdir(path, check_writable=True, raise_error=True)\n</code></pre> <p>Creates a directory and optionally checks if it's writable.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The directory to create.</p> </li> <li> <code>check_writable</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to check if the directory is writable. 
Default is True.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to raise an error if the directory creation fails. Default is True.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the directory is successfully created (and writable, if check_writable=True); otherwise False.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>DirectoryCreationError</code>             \u2013            <p>Raised if the directory cannot be created and <code>raise_error=True</code>.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; mkdir(\"/tmp/new_dir\")\nTrue\n&gt;&gt;&gt; mkdir(\"/restricted_dir\", check_writable=False, raise_error=False)\nFalse\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def mkdir(path, check_writable=True, raise_error=True):\n    \"\"\"\n    Creates a directory and optionally checks if it's writable.\n\n    Args:\n        path (str or Path): The directory to create.\n        check_writable (bool, optional): Whether to check if the directory is writable. Default is True.\n        raise_error (bool, optional): Whether to raise an error if the directory creation fails. Default is True.\n\n    Returns:\n        bool: True if the directory is successfully created (and writable, if check_writable=True); otherwise False.\n\n    Raises:\n        DirectoryCreationError: Raised if the directory cannot be created and `raise_error=True`.\n\n    Examples:\n        &gt;&gt;&gt; mkdir(\"/tmp/new_dir\")\n        True\n        &gt;&gt;&gt; mkdir(\"/restricted_dir\", check_writable=False, raise_error=False)\n        False\n    \"\"\"\n    path = Path(path).resolve()\n    touchfile = path / f\".{rand_string()}\"\n    try:\n        path.mkdir(exist_ok=True, parents=True)\n        if check_writable:\n            touchfile.touch()\n        return True\n    except Exception as e:\n        if raise_error:\n            raise errors.DirectoryCreationError(f\"Failed to create directory at {path}: {e}\")\n    finally:\n        with suppress(Exception):\n            touchfile.unlink()\n    return False\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.os_platform","title":"os_platform","text":"<pre><code>os_platform()\n</code></pre> <p>Return the OS platform of the current system.</p> <p>This function fetches and returns the OS type where the code is being executed. 
It converts the platform identifier to lowercase.</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the OS platform, such as \"linux\", \"darwin\", or \"windows\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; os_platform()\n'linux'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def os_platform():\n    \"\"\"Return the OS platform of the current system.\n\n    This function fetches and returns the OS type where the code is being executed.\n    It converts the platform identifier to lowercase.\n\n    Returns:\n        str: A string representing the OS platform, such as \"linux\", \"darwin\", or \"windows\".\n\n    Examples:\n        &gt;&gt;&gt; os_platform()\n        'linux'\n    \"\"\"\n    import platform\n\n    return platform.system().lower()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.os_platform_friendly","title":"os_platform_friendly","text":"<pre><code>os_platform_friendly()\n</code></pre> <p>Return a human-friendly OS platform string, suitable for golang release binaries.</p> <p>This function fetches the OS platform and modifies it to a more human-readable format if necessary. Specifically, it changes \"darwin\" to \"macOS\".</p> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A string representing the human-friendly OS platform, such as \"macOS\", \"linux\", or \"windows\".</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; os_platform_friendly()\n'macOS'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def os_platform_friendly():\n    \"\"\"Return a human-friendly OS platform string, suitable for golang release binaries.\n\n    This function fetches the OS platform and modifies it to a more human-readable format if necessary.\n    Specifically, it changes \"darwin\" to \"macOS\".\n\n    Returns:\n        str: A string representing the human-friendly OS platform, such as \"macOS\", \"linux\", or \"windows\".\n\n    Examples:\n        &gt;&gt;&gt; os_platform_friendly()\n        'macOS'\n    \"\"\"\n    p = os_platform()\n    if p == \"darwin\":\n        return \"macOS\"\n    return p\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.param_type","title":"param_type","text":"<pre><code>param_type(p)\n</code></pre> <p>Evaluates the type of the given parameter.</p> <p>Parameters:</p> <ul> <li> <code>p</code>               (<code>str</code>)           \u2013            <p>The parameter whose type is to be evaluated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>An integer representing the type of parameter. 
- 1: Integer - 2: UUID - 3: Other</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; param_type('123')\n1\n</code></pre> <pre><code>&gt;&gt;&gt; param_type('550e8400-e29b-41d4-a716-446655440000')\n2\n</code></pre> <pre><code>&gt;&gt;&gt; param_type('abc')\n3\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def param_type(p):\n    \"\"\"\n    Evaluates the type of the given parameter.\n\n    Args:\n        p (str): The parameter whose type is to be evaluated.\n\n    Returns:\n        int: An integer representing the type of parameter.\n            - 1: Integer\n            - 2: UUID\n            - 3: Other\n\n    Examples:\n        &gt;&gt;&gt; param_type('123')\n        1\n\n        &gt;&gt;&gt; param_type('550e8400-e29b-41d4-a716-446655440000')\n        2\n\n        &gt;&gt;&gt; param_type('abc')\n        3\n    \"\"\"\n    try:\n        int(p)\n        return 1\n    except Exception:\n        with suppress(Exception):\n            uuid.UUID(p)\n            return 2\n    return 3\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parent_domain","title":"parent_domain","text":"<pre><code>parent_domain(d)\n</code></pre> <p>Retrieve the parent domain of a given subdomain string.</p> <p>This function takes an input string <code>d</code> representing a subdomain and returns its parent domain. If the input does not represent a subdomain, it returns the input as is.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string representing a subdomain or domain.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The parent domain of the subdomain, or the original input if it is not a subdomain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk\")\n\"internal.evilcorp.co.uk\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk:8080\")\n\"internal.evilcorp.co.uk:8080\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"www.evilcorp.co.uk\")\n\"evilcorp.co.uk\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_domain(\"evilcorp.co.uk\")\n\"evilcorp.co.uk\"\n</code></pre> Notes <ul> <li>Port, if present in input, is preserved in the output.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parent_domain(d):\n    \"\"\"\n    Retrieve the parent domain of a given subdomain string.\n\n    This function takes an input string `d` representing a subdomain and returns its parent domain.\n    If the input does not represent a subdomain, it returns the input as is.\n\n    Args:\n        d (str): The input string representing a subdomain or domain.\n\n    Returns:\n        str: The parent domain of the subdomain, or the original input if it is not a subdomain.\n\n    Examples:\n        &gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk\")\n        \"internal.evilcorp.co.uk\"\n\n        &gt;&gt;&gt; parent_domain(\"www.internal.evilcorp.co.uk:8080\")\n        \"internal.evilcorp.co.uk:8080\"\n\n        &gt;&gt;&gt; parent_domain(\"www.evilcorp.co.uk\")\n        \"evilcorp.co.uk\"\n\n        &gt;&gt;&gt; parent_domain(\"evilcorp.co.uk\")\n        \"evilcorp.co.uk\"\n\n    Notes:\n        - Port, if present in input, is preserved in the output.\n    \"\"\"\n    host, port = split_host_port(d)\n    if is_subdomain(d):\n        return make_netloc(\".\".join(str(host).split(\".\")[1:]), port)\n    return 
d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parent_url","title":"parent_url","text":"<pre><code>parent_url(u)\n</code></pre> <p>Retrieve the parent URL of a given URL.</p> <p>This function takes an input string <code>u</code> representing a URL and returns its parent URL. If the input URL does not have a parent (i.e., it's already the top-level), it returns None.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[str, None]: The parent URL of the input URL, or None if it has no parent.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parent_url(\"https://evilcorp.com/sub/path/\")\n\"https://evilcorp.com/sub/\"\n</code></pre> <pre><code>&gt;&gt;&gt; parent_url(\"https://evilcorp.com/\")\nNone\n</code></pre> Notes <ul> <li>Only the path component of the URL is modified.</li> <li>All other components like scheme, netloc, query, and fragment are preserved.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parent_url(u):\n    \"\"\"\n    Retrieve the parent URL of a given URL.\n\n    This function takes an input string `u` representing a URL and returns its parent URL.\n    If the input URL does not have a parent (i.e., it's already the top-level), it returns None.\n\n    Args:\n        u (str): The input string representing a URL.\n\n    Returns:\n        Union[str, None]: The parent URL of the input URL, or None if it has no parent.\n\n    Examples:\n        &gt;&gt;&gt; parent_url(\"https://evilcorp.com/sub/path/\")\n        \"https://evilcorp.com/sub/\"\n\n        &gt;&gt;&gt; parent_url(\"https://evilcorp.com/\")\n        None\n\n    Notes:\n        - Only the path component of the URL is modified.\n        - All other components like scheme, netloc, query, and fragment are preserved.\n    \"\"\"\n    parsed = urlparse(u)\n    path = Path(parsed.path)\n    if path.parent == path:\n        return None\n    else:\n        return urlunparse(parsed._replace(path=str(path.parent), query=\"\"))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parse_port_string","title":"parse_port_string","text":"<pre><code>parse_port_string(port_string)\n</code></pre> <p>Parses a string containing ports and port ranges into a list of individual ports.</p> <p>Parameters:</p> <ul> <li> <code>port_string</code>               (<code>str</code>)           \u2013            <p>The string containing individual ports and port ranges separated by commas.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of individual ports parsed from the input string.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the input string contains invalid ports or port ranges.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parse_port_string(\"22,80,1000-1002\")\n[22, 80, 1000, 1001, 1002]\n</code></pre> <pre><code>&gt;&gt;&gt; parse_port_string(\"1-2,3-5\")\n[1, 2, 3, 4, 5]\n</code></pre> <pre><code>&gt;&gt;&gt; parse_port_string(\"invalid\")\nValueError: Invalid port or port range: invalid\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def parse_port_string(port_string):\n    \"\"\"\n    Parses a string containing ports and port ranges into a list of individual ports.\n\n    Args:\n        port_string (str): The string containing individual 
ports and port ranges separated by commas.\n\n    Returns:\n        list: A list of individual ports parsed from the input string.\n\n    Raises:\n        ValueError: If the input string contains invalid ports or port ranges.\n\n    Examples:\n        &gt;&gt;&gt; parse_port_string(\"22,80,1000-1002\")\n        [22, 80, 1000, 1001, 1002]\n\n        &gt;&gt;&gt; parse_port_string(\"1-2,3-5\")\n        [1, 2, 3, 4, 5]\n\n        &gt;&gt;&gt; parse_port_string(\"invalid\")\n        ValueError: Invalid port or port range: invalid\n    \"\"\"\n    elements = str(port_string).split(\",\")\n    ports = []\n\n    for element in elements:\n        if element.isdigit():\n            port = int(element)\n            if 1 &lt;= port &lt;= 65535:\n                ports.append(port)\n            else:\n                raise ValueError(f\"Invalid port: {element}\")\n        elif \"-\" in element:\n            range_parts = element.split(\"-\")\n            if len(range_parts) != 2 or not all(part.isdigit() for part in range_parts):\n                raise ValueError(f\"Invalid port or port range: {element}\")\n            start, end = map(int, range_parts)\n            if not (1 &lt;= start &lt; end &lt;= 65535):\n                raise ValueError(f\"Invalid port range: {element}\")\n            ports.extend(range(start, end + 1))\n        else:\n            raise ValueError(f\"Invalid port or port range: {element}\")\n\n    return ports\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.parse_url","title":"parse_url","text":"<pre><code>parse_url(url)\n</code></pre> <p>Parse the given URL string or ParseResult object and return a ParseResult.</p> <p>This function checks if the input is already a ParseResult object. If it is, it returns the object as-is. Otherwise, it parses the given URL string using <code>urlparse</code>.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL string or ParseResult object to be parsed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ParseResult</code>          \u2013            <p>A named 6-tuple that contains the components of a URL.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; parse_url('https://www.evilcorp.com')\nParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='', fragment='')\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def parse_url(url):\n    \"\"\"\n    Parse the given URL string or ParseResult object and return a ParseResult.\n\n    This function checks if the input is already a ParseResult object. If it is,\n    it returns the object as-is. 
Otherwise, it parses the given URL string using\n    `urlparse`.\n\n    Args:\n        url (Union[str, ParseResult]): The URL string or ParseResult object to be parsed.\n\n    Returns:\n        ParseResult: A named 6-tuple that contains the components of a URL.\n\n    Examples:\n        &gt;&gt;&gt; parse_url('https://www.evilcorp.com')\n        ParseResult(scheme='https', netloc='www.evilcorp.com', path='', params='', query='', fragment='')\n    \"\"\"\n    if isinstance(url, ParseResult):\n        return url\n    return urlparse(url)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rand_string","title":"rand_string","text":"<pre><code>rand_string(length=10, digits=True)\n</code></pre> <p>Generates a random string of specified length.</p> <p>Parameters:</p> <ul> <li> <code>length</code>               (<code>int</code>, default:                   <code>10</code> )           \u2013            <p>The length of the random string. Defaults to 10.</p> </li> <li> <code>digits</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include digits in the string. Defaults to True.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A random string of the specified length.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rand_string()\n'c4hp4i9jzx'\n&gt;&gt;&gt; rand_string(20)\n'ap4rsdtg5iw7ey7y3oa5'\n&gt;&gt;&gt; rand_string(30, digits=False)\n'xdmyxtglqfzqktngkesyulwbfrihva'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rand_string(length=10, digits=True):\n    \"\"\"\n    Generates a random string of specified length.\n\n    Args:\n        length (int, optional): The length of the random string. Defaults to 10.\n        digits (bool, optional): Whether to include digits in the string. Defaults to True.\n\n    Returns:\n        str: A random string of the specified length.\n\n    Examples:\n        &gt;&gt;&gt; rand_string()\n        'c4hp4i9jzx'\n        &gt;&gt;&gt; rand_string(20)\n        'ap4rsdtg5iw7ey7y3oa5'\n        &gt;&gt;&gt; rand_string(30, digits=False)\n        'xdmyxtglqfzqktngkesyulwbfrihva'\n    \"\"\"\n    pool = rand_pool\n    if digits:\n        pool = rand_pool_digits\n    return \"\".join([random.choice(pool) for _ in range(int(length))])\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.read_file","title":"read_file","text":"<pre><code>read_file(filename)\n</code></pre> <p>Reads a file line by line and yields each line without line breaks.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to read.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>A line from the file without the trailing line break.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; for line in read_file(\"/tmp/file.txt\"):\n...     print(line)\nfile_line1\nfile_line2\nfile_line3\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def read_file(filename):\n    \"\"\"Reads a file line by line and yields each line without line breaks.\n\n    Args:\n        filename (str or Path): The path to the file to read.\n\n    Yields:\n        str: A line from the file without the trailing line break.\n\n    Examples:\n        &gt;&gt;&gt; for line in read_file(\"/tmp/file.txt\"):\n        ...     
print(line)\n        file_line1\n        file_line2\n        file_line3\n    \"\"\"\n    with open(filename, errors=\"ignore\") as f:\n        for line in f:\n            yield line.rstrip(\"\\r\\n\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.recursive_decode","title":"recursive_decode","text":"<pre><code>recursive_decode(data, max_depth=5)\n</code></pre> <p>Recursively decodes doubly or triply-encoded strings to their original form.</p> <p>Supports both URL-encoding and backslash-escapes (including unicode)</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The data to decode.</p> </li> <li> <code>max_depth</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>Maximum recursion depth for decoding. Defaults to 5.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The decoded string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; recursive_decode(\"Hello%20world%21\")\n\"Hello world!\"\n&gt;&gt;&gt; recursive_decode(\"Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442\")\n\"Hello \u041f\u0440\u0438\u0432\u0435\u0442\"\n&gt;&gt;&gt; recursive_decode(\"%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021\")\n\" \u041f\u0440\u0438\u0432\u0435\u0442!\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def recursive_decode(data, max_depth=5):\n    \"\"\"\n    Recursively decodes doubly or triply-encoded strings to their original form.\n\n    Supports both URL-encoding and backslash-escapes (including unicode)\n\n    Args:\n        data (str): The data to decode.\n        max_depth (int, optional): Maximum recursion depth for decoding. 
Defaults to 5.\n\n    Returns:\n        str: The decoded string.\n\n    Examples:\n        &gt;&gt;&gt; recursive_decode(\"Hello%20world%21\")\n        \"Hello world!\"\n        &gt;&gt;&gt; recursive_decode(\"Hello%20%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442\")\n        \"Hello \u041f\u0440\u0438\u0432\u0435\u0442\"\n        &gt;&gt;&gt; recursive_dcode(\"%5Cu0020%5Cu041f%5Cu0440%5Cu0438%5Cu0432%5Cu0435%5Cu0442%5Cu0021\")\n        \" \u041f\u0440\u0438\u0432\u0435\u0442!\"\n    \"\"\"\n    import codecs\n\n    # Decode newline and tab escapes\n    data = backslash_regex.sub(\n        lambda match: {\"n\": \"\\n\", \"t\": \"\\t\", \"r\": \"\\r\", \"b\": \"\\b\", \"v\": \"\\v\"}.get(match.group(\"char\")), data\n    )\n    data = smart_decode(data)\n    if max_depth == 0:\n        return data\n    # Decode URL encoding\n    data = unquote(data, errors=\"ignore\")\n    # Decode Unicode escapes\n    with suppress(UnicodeEncodeError):\n        data = ensure_utf8_compliant(codecs.decode(data, \"unicode_escape\", errors=\"ignore\"))\n    # Check if there's still URL-encoded or Unicode-escaped content\n    if encoded_regex.search(data):\n        # If yes, continue decoding\n        return recursive_decode(data, max_depth=max_depth - 1)\n    return data\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rm_at_exit","title":"rm_at_exit","text":"<pre><code>rm_at_exit(path)\n</code></pre> <p>Registers a file to be automatically deleted when the program exits.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str or Path</code>)           \u2013            <p>The path to the file to be deleted upon program exit.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rm_at_exit(\"/tmp/test/file1.txt\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rm_at_exit(path):\n    \"\"\"Registers a file to be automatically deleted when the program exits.\n\n    Args:\n        path (str or Path): The path to the file to be deleted upon program exit.\n\n    Examples:\n        &gt;&gt;&gt; rm_at_exit(\"/tmp/test/file1.txt\")\n    \"\"\"\n    import atexit\n\n    atexit.register(delete_file, path)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.rm_rf","title":"rm_rf","text":"<pre><code>rm_rf(f)\n</code></pre> <p>Recursively delete a directory</p> <p>Parameters:</p> <ul> <li> <code>f</code>               (<code>str or Path</code>)           \u2013            <p>The directory path to delete.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; rm_rf(\"/tmp/httpx98323849\")\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def rm_rf(f):\n    \"\"\"Recursively delete a directory\n\n    Args:\n        f (str or Path): The directory path to delete.\n\n    Examples:\n        &gt;&gt;&gt; rm_rf(\"/tmp/httpx98323849\")\n    \"\"\"\n    import shutil\n\n    shutil.rmtree(f)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.safe_format","title":"safe_format","text":"<pre><code>safe_format(s, **kwargs)\n</code></pre> <p>Format string while ignoring unused keys (prevents KeyError)</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def safe_format(s, **kwargs):\n    \"\"\"\n    Format string while ignoring unused keys (prevents KeyError)\n    \"\"\"\n    return 
s.format_map(SafeDict(kwargs))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_dict_by_key","title":"search_dict_by_key","text":"<pre><code>search_dict_by_key(key, d)\n</code></pre> <p>Search a nested dictionary or list of dictionaries by a key and yield all matching values.</p> <p>Parameters:</p> <ul> <li> <code>key</code>               (<code>str</code>)           \u2013            <p>The key to search for.</p> </li> <li> <code>d</code>               (<code>Union[dict, list]</code>)           \u2013            <p>The dictionary or list of dictionaries to search.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>Any</code>          \u2013            <p>Yields all values that match the provided key.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; d = {'a': 1, 'b': {'c': 2, 'a': 3}, 'd': [{'a': 4}, {'e': 5}]}\n&gt;&gt;&gt; list(search_dict_by_key('a', d))\n[1, 3, 4]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_dict_by_key(key, d):\n    \"\"\"Search a nested dictionary or list of dictionaries by a key and yield all matching values.\n\n    Args:\n        key (str): The key to search for.\n        d (Union[dict, list]): The dictionary or list of dictionaries to search.\n\n    Yields:\n        Any: Yields all values that match the provided key.\n\n    Examples:\n        &gt;&gt;&gt; d = {'a': 1, 'b': {'c': 2, 'a': 3}, 'd': [{'a': 4}, {'e': 5}]}\n        &gt;&gt;&gt; list(search_dict_by_key('a', d))\n        [1, 3, 4]\n    \"\"\"\n    if isinstance(d, dict):\n        if key in d:\n            yield d[key]\n        for k, v in d.items():\n            yield from search_dict_by_key(key, v)\n    elif isinstance(d, list):\n        for v in d:\n            yield from search_dict_by_key(key, v)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_dict_values","title":"search_dict_values","text":"<pre><code>search_dict_values(d, *regexes)\n</code></pre> <p>Recursively search a dictionary's values based on provided regex patterns.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>Union[dict, list, str]</code>)           \u2013            <p>The dictionary, list, or string to search.</p> </li> <li> <code>*regexes</code>           \u2013            <p>Arbitrary number of compiled regex patterns.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Generator</code>          \u2013            <p>Yields matching values based on the provided regex patterns.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; dict_to_search = {\n...     \"key1\": {\n...         \"key2\": [\n...             {\n...                 \"key3\": \"A URL: https://www.evilcorp.com\"\n...             }\n...         ]\n...     }\n... }\n&gt;&gt;&gt; url_regexes = re.compile(r'https?://[^\\s&lt;&gt;\"]+|www\\.[^\\s&lt;&gt;\"]+')\n&gt;&gt;&gt; list(search_dict_values(dict_to_search, url_regexes))\n[\"https://www.evilcorp.com\"]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_dict_values(d, *regexes):\n    \"\"\"Recursively search a dictionary's values based on provided regex patterns.\n\n    Args:\n        d (Union[dict, list, str]): The dictionary, list, or string to search.\n        *regexes: Arbitrary number of compiled regex patterns.\n\n    Returns:\n        Generator: Yields matching values based on the provided regex patterns.\n\n    Examples:\n        &gt;&gt;&gt; dict_to_search = {\n        ...     \"key1\": {\n        ...         \"key2\": [\n        ...            
 {\n        ...                 \"key3\": \"A URL: https://www.evilcorp.com\"\n        ...             }\n        ...         ]\n        ...     }\n        ... }\n        &gt;&gt;&gt; url_regexes = re.compile(r'https?://[^\\\\s&lt;&gt;\"]+|www\\\\.[^\\\\s&lt;&gt;\"]+')\n        &gt;&gt;&gt; list(search_dict_values(dict_to_search, url_regexes))\n        [\"https://www.evilcorp.com\"]\n    \"\"\"\n\n    results = set()\n    if isinstance(d, str):\n        for r in regexes:\n            for match in r.finditer(d):\n                result = match.group()\n                h = hash(result)\n                if h not in results:\n                    results.add(h)\n                    yield result\n    elif isinstance(d, dict):\n        for _, v in d.items():\n            yield from search_dict_values(v, *regexes)\n    elif isinstance(d, list):\n        for v in d:\n            yield from search_dict_values(v, *regexes)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.search_format_dict","title":"search_format_dict","text":"<pre><code>search_format_dict(d, **kwargs)\n</code></pre> <p>Recursively format string values in a dictionary or list using the provided keyword arguments.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>Union[dict, list, str]</code>)           \u2013            <p>The dictionary, list, or string to format.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments used for string formatting.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Union[dict, list, str]: The formatted dictionary, list, or string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; search_format_dict({\"test\": \"#{name} is awesome\"}, name=\"keanu\")\n{\"test\": \"keanu is awesome\"}\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def search_format_dict(d, **kwargs):\n    \"\"\"Recursively format string values in a dictionary or list using the provided keyword arguments.\n\n    Args:\n        d (Union[dict, list, str]): The dictionary, list, or string to format.\n        **kwargs: Arbitrary keyword arguments used for string formatting.\n\n    Returns:\n        Union[dict, list, str]: The formatted dictionary, list, or string.\n\n    Examples:\n        &gt;&gt;&gt; search_format_dict({\"test\": \"#{name} is awesome\"}, name=\"keanu\")\n        {\"test\": \"keanu is awesome\"}\n    \"\"\"\n    if isinstance(d, dict):\n        return {k: search_format_dict(v, **kwargs) for k, v in d.items()}\n    elif isinstance(d, list):\n        return [search_format_dict(v, **kwargs) for v in d]\n    elif isinstance(d, str):\n        for find, replace in kwargs.items():\n            find = \"#{\" + str(find) + \"}\"\n            d = d.replace(find, replace)\n    return d\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.sha1","title":"sha1","text":"<pre><code>sha1(data)\n</code></pre> <p>Computes the SHA-1 hash of the given data.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or dict</code>)           \u2013            <p>The data to hash. 
If a dictionary, it is first converted to a JSON string with sorted keys.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>hashlib.Hash: SHA-1 hash object of the input data.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; sha1(\"asdf\").hexdigest()\n'3da541559918a808c2402bba5012f6c60b27661c'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def sha1(data):\n    \"\"\"\n    Computes the SHA-1 hash of the given data.\n\n    Args:\n        data (str or dict): The data to hash. If a dictionary, it is first converted to a JSON string with sorted keys.\n\n    Returns:\n        hashlib.Hash: SHA-1 hash object of the input data.\n\n    Examples:\n        &gt;&gt;&gt; sha1(\"asdf\").hexdigest()\n        '3da541559918a808c2402bba5012f6c60b27661c'\n    \"\"\"\n    from hashlib import sha1 as hashlib_sha1\n\n    if isinstance(data, dict):\n        data = json.dumps(data, sort_keys=True)\n    return hashlib_sha1(smart_encode(data))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_decode","title":"smart_decode","text":"<pre><code>smart_decode(data)\n</code></pre> <p>Decodes the input data to a UTF-8 string, silently ignoring errors.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or bytes</code>)           \u2013            <p>The data to decode.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The decoded string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; smart_decode(b\"asdf\")\n\"asdf\"\n&gt;&gt;&gt; smart_decode(\"asdf\")\n\"asdf\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_decode(data):\n    \"\"\"\n    Decodes the input data to a UTF-8 string, silently ignoring errors.\n\n    Args:\n        data (str or bytes): The data to decode.\n\n    Returns:\n        str: The decoded string.\n\n    Examples:\n        &gt;&gt;&gt; smart_decode(b\"asdf\")\n        \"asdf\"\n        &gt;&gt;&gt; smart_decode(\"asdf\")\n        \"asdf\"\n    \"\"\"\n    if isinstance(data, bytes):\n        return data.decode(\"utf-8\", errors=\"ignore\")\n    else:\n        return str(data)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_decode_punycode","title":"smart_decode_punycode","text":"<pre><code>smart_decode_punycode(text: str) -&gt; str\n</code></pre> <p>xn--eckwd4c7c.xn--zckzah --&gt; \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_decode_punycode(text: str) -&gt; str:\n    \"\"\"\n    xn--eckwd4c7c.xn--zckzah --&gt; \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8\n    \"\"\"\n    import idna\n\n    host, before, after = extract_host(text)\n    if host is None:\n        return text\n\n    try:\n        host = idna.decode(host)\n    except UnicodeError:\n        pass  # If decoding fails, leave the host as it is\n\n    return f\"{before}{host}{after}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_encode","title":"smart_encode","text":"<pre><code>smart_encode(data)\n</code></pre> <p>Encodes the input data to bytes using UTF-8 encoding, silently ignoring errors.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str or bytes</code>)           \u2013            <p>The data to encode.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bytes</code>          \u2013            <p>The encoded bytes.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; 
smart_encode(\"asdf\")\nb\"asdf\"\n&gt;&gt;&gt; smart_encode(b\"asdf\")\nb\"asdf\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_encode(data):\n    \"\"\"\n    Encodes the input data to bytes using UTF-8 encoding, silently ignoring errors.\n\n    Args:\n        data (str or bytes): The data to encode.\n\n    Returns:\n        bytes: The encoded bytes.\n\n    Examples:\n        &gt;&gt;&gt; smart_encode(\"asdf\")\n        b\"asdf\"\n        &gt;&gt;&gt; smart_encode(b\"asdf\")\n        b\"asdf\"\n    \"\"\"\n    if isinstance(data, bytes):\n        return data\n    return str(data).encode(\"utf-8\", errors=\"ignore\")\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.smart_encode_punycode","title":"smart_encode_punycode","text":"<pre><code>smart_encode_punycode(text: str) -&gt; str\n</code></pre> <p>\u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8 --&gt; xn--eckwd4c7c.xn--zckzah</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def smart_encode_punycode(text: str) -&gt; str:\n    \"\"\"\n    \u30c9\u30e1\u30a4\u30f3.\u30c6\u30b9\u30c8 --&gt; xn--eckwd4c7c.xn--zckzah\n    \"\"\"\n    import idna\n\n    host, before, after = extract_host(text)\n    if host is None:\n        return text\n\n    try:\n        host = idna.encode(host).decode(errors=\"ignore\")\n    except UnicodeError:\n        pass  # If encoding fails, leave the host as it is\n\n    return f\"{before}{host}{after}\"\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_domain","title":"split_domain","text":"<pre><code>split_domain(hostname)\n</code></pre> <p>Splits the hostname into its subdomain and registered domain components.</p> <p>Parameters:</p> <ul> <li> <code>hostname</code>               (<code>str</code>)           \u2013            <p>The full hostname to be split.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing the subdomain and registered domain.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_domain(\"www.internal.evilcorp.co.uk\")\n(\"www.internal\", \"evilcorp.co.uk\")\n</code></pre> Notes <ul> <li>Utilizes the <code>tldextract</code> function to first break down the hostname.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_domain(hostname):\n    \"\"\"\n    Splits the hostname into its subdomain and registered domain components.\n\n    Args:\n        hostname (str): The full hostname to be split.\n\n    Returns:\n        tuple: A tuple containing the subdomain and registered domain.\n\n    Examples:\n        &gt;&gt;&gt; split_domain(\"www.internal.evilcorp.co.uk\")\n        (\"www.internal\", \"evilcorp.co.uk\")\n\n    Notes:\n        - Utilizes the `tldextract` function to first break down the hostname.\n    \"\"\"\n    if is_ip(hostname):\n        return (\"\", hostname)\n    parsed = tldextract(hostname)\n    subdomain = parsed.subdomain\n    domain = parsed.registered_domain\n    if not domain:\n        split = hostname.split(\".\")\n        subdomain = \".\".join(split[:-2])\n        domain = \".\".join(split[-2:])\n    return (subdomain, domain)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_host_port","title":"split_host_port","text":"<pre><code>split_host_port(d)\n</code></pre> <p>Parse a string containing a host and port into a tuple.</p> <p>This function takes an input string <code>d</code> and returns a tuple containing the host and port. 
The host is converted to its appropriate IP address type if possible. The port is inferred based on the scheme if not provided.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The input string containing the host and possibly the port.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Tuple[Union[IPv4Address, IPv6Address, str], Optional[int]]: Tuple containing the host and port.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_host_port(\"evilcorp.com:443\")\n(\"evilcorp.com\", 443)\n</code></pre> <pre><code>&gt;&gt;&gt; split_host_port(\"192.168.1.1:443\")\n(IPv4Address('192.168.1.1'), 443)\n</code></pre> <pre><code>&gt;&gt;&gt; split_host_port(\"[dead::beef]:443\")\n(IPv6Address('dead::beef'), 443)\n</code></pre> Notes <ul> <li>If port is not provided, it is inferred based on the scheme:<ul> <li>For \"https\" and \"wss\", port 443 is used.</li> <li>For \"http\" and \"ws\", port 80 is used.</li> </ul> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_host_port(d):\n    \"\"\"\n    Parse a string containing a host and port into a tuple.\n\n    This function takes an input string `d` and returns a tuple containing the host and port.\n    The host is converted to its appropriate IP address type if possible. The port is inferred\n    based on the scheme if not provided.\n\n    Args:\n        d (str): The input string containing the host and possibly the port.\n\n    Returns:\n        Tuple[Union[IPv4Address, IPv6Address, str], Optional[int]]: Tuple containing the host and port.\n\n    Examples:\n        &gt;&gt;&gt; split_host_port(\"evilcorp.com:443\")\n        (\"evilcorp.com\", 443)\n\n        &gt;&gt;&gt; split_host_port(\"192.168.1.1:443\")\n        (IPv4Address('192.168.1.1'), 443)\n\n        &gt;&gt;&gt; split_host_port(\"[dead::beef]:443\")\n        (IPv6Address('dead::beef'), 443)\n\n    Notes:\n        - If port is not provided, it is inferred based on the scheme:\n            - For \"https\" and \"wss\", port 443 is used.\n            - For \"http\" and \"ws\", port 80 is used.\n    \"\"\"\n    d = str(d)\n    host = None\n    port = None\n    scheme = None\n    if is_ip(d):\n        return make_ip_type(d), port\n\n    match = bbot_regexes.split_host_port_regex.match(d)\n    if match is None:\n        raise ValueError(f'split_port() failed to parse \"{d}\"')\n    scheme = match.group(\"scheme\")\n    netloc = match.group(\"netloc\")\n    if netloc is None:\n        raise ValueError(f'split_port() failed to parse \"{d}\"')\n\n    match = bbot_regexes.extract_open_port_regex.match(netloc)\n    if match is None:\n        raise ValueError(f'split_port() failed to parse netloc \"{netloc}\" (original value: {d})')\n\n    host = match.group(2)\n    if host is None:\n        host = match.group(1)\n    if host is None:\n        raise ValueError(f'split_port() failed to locate host in netloc \"{netloc}\" (original value: {d})')\n\n    port = match.group(3)\n    if port is None and scheme is not None:\n        scheme = scheme.lower()\n        if scheme in (\"https\", \"wss\"):\n            port = 443\n        elif scheme in (\"http\", \"ws\"):\n            port = 80\n    elif port is not None:\n        with suppress(ValueError):\n            port = int(port)\n\n    return make_ip_type(host), port\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.split_list","title":"split_list","text":"<pre><code>split_list(alist, 
wanted_parts=2)\n</code></pre> <p>Splits a list into a specified number of approximately equal parts.</p> <p>Parameters:</p> <ul> <li> <code>alist</code>               (<code>list</code>)           \u2013            <p>The list to be split.</p> </li> <li> <code>wanted_parts</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>The number of parts to split the list into.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A list of lists, each containing a portion of the original list.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; split_list([1, 2, 3, 4, 5])\n[[1, 2], [3, 4, 5]]\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def split_list(alist, wanted_parts=2):\n    \"\"\"\n    Splits a list into a specified number of approximately equal parts.\n\n    Args:\n        alist (list): The list to be split.\n        wanted_parts (int): The number of parts to split the list into.\n\n    Returns:\n        list: A list of lists, each containing a portion of the original list.\n\n    Examples:\n        &gt;&gt;&gt; split_list([1, 2, 3, 4, 5])\n        [[1, 2], [3, 4, 5]]\n    \"\"\"\n    length = len(alist)\n    return [alist[i * length // wanted_parts : (i + 1) * length // wanted_parts] for i in range(wanted_parts)]\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.str_or_file","title":"str_or_file","text":"<pre><code>str_or_file(s)\n</code></pre> <p>Reads a string or file and yields its content line-by-line.</p> <p>This function tries to open the given string <code>s</code> as a file and yields its lines. If it fails to open <code>s</code> as a file, it treats <code>s</code> as a regular string and yields it as is.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The string or file path to read.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>str</code>          \u2013            <p>Either lines from the file or the original string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; list(str_or_file(\"file.txt\"))\n['file_line1', 'file_line2', 'file_line3']\n&gt;&gt;&gt; list(str_or_file(\"not_a_file\"))\n['not_a_file']\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def str_or_file(s):\n    \"\"\"Reads a string or file and yields its content line-by-line.\n\n    This function tries to open the given string `s` as a file and yields its lines.\n    If it fails to open `s` as a file, it treats `s` as a regular string and yields it as is.\n\n    Args:\n        s (str): The string or file path to read.\n\n    Yields:\n        str: Either lines from the file or the original string.\n\n    Examples:\n        &gt;&gt;&gt; list(str_or_file(\"file.txt\"))\n        ['file_line1', 'file_line2', 'file_line3']\n        &gt;&gt;&gt; list(str_or_file(\"not_a_file\"))\n        ['not_a_file']\n    \"\"\"\n    try:\n        with open(s, errors=\"ignore\") as f:\n            for line in f:\n                yield line.rstrip(\"\\r\\n\")\n    except OSError:\n        yield s\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.subdomain_depth","title":"subdomain_depth","text":"<pre><code>subdomain_depth(d)\n</code></pre> <p>Calculate the depth of subdomains within a given domain name.</p> <p>Parameters:</p> <ul> <li> <code>d</code>               (<code>str</code>)           \u2013            <p>The domain name to analyze.</p> </li> </ul> 
<p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The depth of the subdomain. For example, a hostname \"5.4.3.2.1.evilcorp.com\"</p> </li> <li>           \u2013            <p>has a subdomain depth of 5.</p> </li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def subdomain_depth(d):\n    \"\"\"\n    Calculate the depth of subdomains within a given domain name.\n\n    Args:\n        d (str): The domain name to analyze.\n\n    Returns:\n        int: The depth of the subdomain. For example, a hostname \"5.4.3.2.1.evilcorp.com\"\n        has a subdomain depth of 5.\n    \"\"\"\n    subdomain, domain = split_domain(d)\n    if not subdomain:\n        return 0\n    return subdomain.count(\".\") + 1\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.swap_status","title":"swap_status","text":"<pre><code>swap_status()\n</code></pre> <p>Return statistics on swap memory consumption.</p> <p>The function returns a <code>psutil</code> named tuple that contains statistics on system swap memory usage, such as total swap, used swap, free swap, and more.</p> <p>Returns:</p> <ul> <li>           \u2013            <p>psutil._common.sswap: A named tuple representing various statistics about system swap memory usage.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; swap = swap_status()\n&gt;&gt;&gt; swap.total\n4294967296\n</code></pre> <pre><code>&gt;&gt;&gt; swap = swap_status()\n&gt;&gt;&gt; swap.used\n2097152\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def swap_status():\n    \"\"\"Return statistics on swap memory consumption.\n\n    The function returns a `psutil` named tuple that contains statistics on\n    system swap memory usage, such as total swap, used swap, free swap, and more.\n\n    Returns:\n        psutil._common.sswap: A named tuple representing various statistics\n            about system swap memory usage.\n\n    Examples:\n        &gt;&gt;&gt; swap = swap_status()\n        &gt;&gt;&gt; swap.total\n        4294967296\n\n        &gt;&gt;&gt; swap = swap_status()\n        &gt;&gt;&gt; swap.used\n        2097152\n    \"\"\"\n    import psutil\n\n    return psutil.swap_memory()\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.tagify","title":"tagify","text":"<pre><code>tagify(s, delimiter=None, maxlen=None)\n</code></pre> <p>Sanitize a string into a tag-friendly format.</p> <p>Converts a given string to lowercase and replaces all characters not matching [a-z0-9] with hyphens. Optionally truncates the result to 'maxlen' characters.</p> <p>Parameters:</p> <ul> <li> <code>s</code>               (<code>str</code>)           \u2013            <p>The input string to sanitize.</p> </li> <li> <code>maxlen</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum length for the tag. Defaults to None.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>A sanitized, tag-friendly string.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; tagify(\"HTTP Web Title\")\n'http-web-title'\n&gt;&gt;&gt; tagify(\"HTTP Web Title\", maxlen=8)\n'http-web'\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def tagify(s, delimiter=None, maxlen=None):\n    \"\"\"Sanitize a string into a tag-friendly format.\n\n    Converts a given string to lowercase and replaces all characters not matching\n    [a-z0-9] with hyphens. 
Optionally truncates the result to 'maxlen' characters.\n\n    Args:\n        s (str): The input string to sanitize.\n        maxlen (int, optional): The maximum length for the tag. Defaults to None.\n\n    Returns:\n        str: A sanitized, tag-friendly string.\n\n    Examples:\n        &gt;&gt;&gt; tagify(\"HTTP Web Title\")\n        'http-web-title'\n        &gt;&gt;&gt; tagify(\"HTTP Web Title\", maxlen=8)\n        'http-web'\n    \"\"\"\n    if delimiter is None:\n        delimiter = \"-\"\n    ret = str(s).lower()\n    return tag_filter_regex.sub(delimiter, ret)[:maxlen].strip(delimiter)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.tldextract","title":"tldextract","text":"<pre><code>tldextract(data)\n</code></pre> <p>Extracts the subdomain, domain, and suffix from a URL string.</p> <p>Parameters:</p> <ul> <li> <code>data</code>               (<code>str</code>)           \u2013            <p>The URL string to be processed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>ExtractResult</code>          \u2013            <p>A named tuple containing the subdomain, domain, and suffix.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; tldextract(\"www.evilcorp.co.uk\")\nExtractResult(subdomain='www', domain='evilcorp', suffix='co.uk')\n</code></pre> Notes <ul> <li>Utilizes <code>smart_decode</code> to preprocess the data.</li> <li>Makes use of the <code>tldextract</code> library for extraction.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def tldextract(data):\n    \"\"\"\n    Extracts the subdomain, domain, and suffix from a URL string.\n\n    Args:\n        data (str): The URL string to be processed.\n\n    Returns:\n        ExtractResult: A named tuple containing the subdomain, domain, and suffix.\n\n    Examples:\n        &gt;&gt;&gt; tldextract(\"www.evilcorp.co.uk\")\n        ExtractResult(subdomain='www', domain='evilcorp', suffix='co.uk')\n\n    Notes:\n        - Utilizes `smart_decode` to preprocess the data.\n        - Makes use of the `tldextract` library for extraction.\n    \"\"\"\n    import tldextract as _tldextract\n\n    return _tldextract.extract(smart_decode(data))\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.top_tcp_ports","title":"top_tcp_ports","text":"<pre><code>top_tcp_ports(n, as_string=False)\n</code></pre> <p>Returns the top n TCP ports as evaluated by nmap</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def top_tcp_ports(n, as_string=False):\n    \"\"\"\n    Returns the top *n* TCP ports as evaluated by nmap\n    \"\"\"\n    top_ports_file = Path(__file__).parent.parent.parent / \"wordlists\" / \"top_open_ports_nmap.txt\"\n\n    global top_ports_cache\n    if top_ports_cache is None:\n        # Read the open ports from the file\n        with open(top_ports_file, \"r\") as f:\n            top_ports_cache = [int(line.strip()) for line in f]\n\n        # If n is greater than the length of the ports list, add remaining ports from range(1, 65536)\n        unique_ports = set(top_ports_cache)\n        top_ports_cache.extend([port for port in range(1, 65536) if port not in unique_ports])\n\n    top_ports = top_ports_cache[:n]\n    if as_string:\n        return \",\".join([str(s) for s in top_ports])\n    return top_ports\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.truncate_filename","title":"truncate_filename","text":"<pre><code>truncate_filename(file_path, max_length=255)\n</code></pre> <p>Truncate the filename while preserving the 
file extension to ensure the total path length does not exceed the maximum length.</p> <p>Parameters:</p> <ul> <li> <code>file_path</code>               (<code>str</code>)           \u2013            <p>The original file path.</p> </li> <li> <code>max_length</code>               (<code>int</code>, default:                   <code>255</code> )           \u2013            <p>The maximum allowed length for the total path. Default is 255.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>pathlib.Path: A new Path object with the truncated filename.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>ValueError</code>             \u2013            <p>If the directory path is too long to accommodate any filename within the limit.</p> </li> </ul> Example <p>truncate_filename('/path/to/example_long_filename.txt', 20) PosixPath('/path/to/example.txt')</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def truncate_filename(file_path, max_length=255):\n    \"\"\"\n    Truncate the filename while preserving the file extension to ensure the total path length does not exceed the maximum length.\n\n    Args:\n        file_path (str): The original file path.\n        max_length (int): The maximum allowed length for the total path. Default is 255.\n\n    Returns:\n        pathlib.Path: A new Path object with the truncated filename.\n\n    Raises:\n        ValueError: If the directory path is too long to accommodate any filename within the limit.\n\n    Example:\n        &gt;&gt;&gt; truncate_filename('/path/to/example_long_filename.txt', 20)\n        PosixPath('/path/to/example.txt')\n    \"\"\"\n    p = Path(file_path)\n    directory, stem, suffix = p.parent, p.stem, p.suffix\n\n    max_filename_length = max_length - len(str(directory)) - len(suffix) - 1  # 1 for the '/' separator\n\n    if max_filename_length &lt;= 0:\n        raise ValueError(\"The directory path is too long to accommodate any filename within the limit.\")\n\n    if len(stem) &gt; max_filename_length:\n        truncated_stem = stem[:max_filename_length]\n    else:\n        truncated_stem = stem\n\n    new_path = directory / (truncated_stem + suffix)\n    return new_path\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.url_depth","title":"url_depth","text":"<pre><code>url_depth(url)\n</code></pre> <p>Calculate the depth of the given URL based on its path components.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>Union[str, ParseResult]</code>)           \u2013            <p>The URL whose depth is to be calculated.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>int</code>          \u2013            <p>The depth of the URL, based on its path components.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo/bar/')\n2\n</code></pre> <pre><code>&gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo//bar/baz/')\n3\n</code></pre> Source code in <code>bbot/core/helpers/url.py</code> <pre><code>def url_depth(url):\n    \"\"\"\n    Calculate the depth of the given URL based on its path components.\n\n    Args:\n        url (Union[str, ParseResult]): The URL whose depth is to be calculated.\n\n    Returns:\n        int: The depth of the URL, based on its path components.\n\n    Examples:\n        &gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo/bar/')\n        2\n\n        &gt;&gt;&gt; url_depth('https://www.evilcorp.com/foo//bar/baz/')\n        3\n    \"\"\"\n    parsed = parse_url(url)\n    parsed = 
parsed._replace(path=double_slash_regex.sub(\"/\", parsed.path))\n    split_path = str(parsed.path).strip(\"/\").split(\"/\")\n    split_path = [e for e in split_path if e]\n    return len(split_path)\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.url_parents","title":"url_parents","text":"<pre><code>url_parents(u)\n</code></pre> <p>Generate a list of parent URLs for a given URL string.</p> <p>This function takes an input string <code>u</code> representing a URL and generates a list of its parent URLs in decreasing order of specificity.</p> <p>Parameters:</p> <ul> <li> <code>u</code>               (<code>str</code>)           \u2013            <p>The input string representing a URL.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>List[str]: A list of parent URLs of the input URL in decreasing order of specificity.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; url_parents(\"http://www.evilcorp.co.uk/admin/tools/cmd.php\")\n[\"http://www.evilcorp.co.uk/admin/tools/\", \"http://www.evilcorp.co.uk/admin/\", \"http://www.evilcorp.co.uk/\"]\n</code></pre> Notes <ul> <li>The list is generated by continuously calling <code>parent_url</code> until it returns None.</li> <li>All components of the URL except for the path are preserved.</li> </ul> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def url_parents(u):\n    \"\"\"\n    Generate a list of parent URLs for a given URL string.\n\n    This function takes an input string `u` representing a URL and generates a list of its parent URLs in decreasing order of specificity.\n\n    Args:\n        u (str): The input string representing a URL.\n\n    Returns:\n        List[str]: A list of parent URLs of the input URL in decreasing order of specificity.\n\n    Examples:\n        &gt;&gt;&gt; url_parents(\"http://www.evilcorp.co.uk/admin/tools/cmd.php\")\n        [\"http://www.evilcorp.co.uk/admin/tools/\", \"http://www.evilcorp.co.uk/admin/\", \"http://www.evilcorp.co.uk/\"]\n\n    Notes:\n        - The list is generated by continuously calling `parent_url` until it returns None.\n        - All components of the URL except for the path are preserved.\n    \"\"\"\n    parent_list = []\n    while 1:\n        parent = parent_url(u)\n        if parent == None:\n            return parent_list\n        elif parent not in parent_list:\n            parent_list.append(parent)\n            u = parent\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.verify_sudo_password","title":"verify_sudo_password","text":"<pre><code>verify_sudo_password(sudo_pass)\n</code></pre> <p>Verify if the given sudo password is correct.</p> <p>This function checks whether the sudo password provided is valid for the current user. 
It runs a command with sudo, feeding in the password via stdin, and checks the return code.</p> <p>Parameters:</p> <ul> <li> <code>sudo_pass</code>               (<code>str</code>)           \u2013            <p>The sudo password to verify.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>bool</code>          \u2013            <p>True if the sudo password is correct, False otherwise.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; verify_sudo_password(\"mysecretpassword\")\nTrue\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def verify_sudo_password(sudo_pass):\n    \"\"\"Verify if the given sudo password is correct.\n\n    This function checks whether the sudo password provided is valid for the current user.\n    It runs a command with sudo, feeding in the password via stdin, and checks the return code.\n\n    Args:\n        sudo_pass (str): The sudo password to verify.\n\n    Returns:\n        bool: True if the sudo password is correct, False otherwise.\n\n    Examples:\n        &gt;&gt;&gt; verify_sudo_password(\"mysecretpassword\")\n        True\n    \"\"\"\n    try:\n        sp.run(\n            [\"sudo\", \"-S\", \"-k\", \"true\"],\n            input=smart_encode(sudo_pass),\n            stderr=sp.DEVNULL,\n            stdout=sp.DEVNULL,\n            check=True,\n        )\n    except sp.CalledProcessError:\n        return False\n    return True\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.weighted_shuffle","title":"weighted_shuffle","text":"<pre><code>weighted_shuffle(items, weights)\n</code></pre> <p>Shuffles a list of items based on their corresponding weights.</p> <p>Parameters:</p> <ul> <li> <code>items</code>               (<code>list</code>)           \u2013            <p>The list of items to shuffle.</p> </li> <li> <code>weights</code>               (<code>list</code>)           \u2013            <p>The list of weights corresponding to each item.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>list</code>          \u2013            <p>A new list containing the shuffled items.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; items = ['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weights = [0.4, 0.5, 0.1]\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['banana', 'apple', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['apple', 'banana', 'cherry']\n&gt;&gt;&gt; weighted_shuffle(items, weights)\n['banana', 'apple', 'cherry']\n</code></pre> Note <p>The sum of all weights does not have to be 1. 
They will be normalized internally.</p> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def weighted_shuffle(items, weights):\n    \"\"\"\n    Shuffles a list of items based on their corresponding weights.\n\n    Args:\n        items (list): The list of items to shuffle.\n        weights (list): The list of weights corresponding to each item.\n\n    Returns:\n        list: A new list containing the shuffled items.\n\n    Examples:\n        &gt;&gt;&gt; items = ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weights = [0.4, 0.5, 0.1]\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['banana', 'apple', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['apple', 'banana', 'cherry']\n        &gt;&gt;&gt; weighted_shuffle(items, weights)\n        ['banana', 'apple', 'cherry']\n\n    Note:\n        The sum of all weights does not have to be 1. They will be normalized internally.\n    \"\"\"\n    # Create a list of tuples where each tuple is (item, weight)\n    pool = list(zip(items, weights))\n\n    shuffled_items = []\n\n    # While there are still items to be chosen...\n    while pool:\n        # Normalize weights\n        total = sum(weight for item, weight in pool)\n        weights = [weight / total for item, weight in pool]\n\n        # Choose an index based on weight\n        chosen_index = random.choices(range(len(pool)), weights=weights, k=1)[0]\n\n        # Add the chosen item to the shuffled list\n        chosen_item, chosen_weight = pool.pop(chosen_index)\n        shuffled_items.append(chosen_item)\n\n    return shuffled_items\n</code></pre>"},{"location":"dev/helpers/misc/#bbot.core.helpers.misc.which","title":"which","text":"<pre><code>which(*executables)\n</code></pre> <p>Finds the full path of the first available executable from a list of executables.</p> <p>Parameters:</p> <ul> <li> <code>*executables</code>               (<code>str</code>, default:                   <code>()</code> )           \u2013            <p>One or more executable names to search for.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The full path of the first available executable, or None if none are found.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; which(\"python\", \"python3\")\n\"/usr/bin/python\"\n</code></pre> Source code in <code>bbot/core/helpers/misc.py</code> <pre><code>def which(*executables):\n    \"\"\"Finds the full path of the first available executable from a list of executables.\n\n    Args:\n        *executables (str): One or more executable names to search for.\n\n    Returns:\n        str: The full path of the first available executable, or None if none are found.\n\n    Examples:\n        &gt;&gt;&gt; which(\"python\", \"python3\")\n        \"/usr/bin/python\"\n    \"\"\"\n    import shutil\n\n    for e in executables:\n        location = shutil.which(e)\n        if location:\n            return location\n</code></pre>"},{"location":"dev/helpers/web/","title":"Web","text":"<p>These are helpers for making various web requests.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper","title":"WebHelper","text":"<p>               Bases: <code>EngineClient</code></p> Source code in 
<code>bbot/core/helpers/web/web.py</code> <pre><code>class WebHelper(EngineClient):\n\n    SERVER_CLASS = HTTPEngine\n    ERROR_CLASS = WebError\n\n    \"\"\"\n    Main utility class for managing HTTP operations in BBOT. It serves as a wrapper around the BBOTAsyncClient,\n    which itself is a subclass of httpx.AsyncClient. The class provides functionalities to make HTTP requests,\n    download files, and handle cached wordlists.\n\n    Attributes:\n        parent_helper (object): The parent helper object containing scan configurations.\n        http_debug (bool): Flag to indicate whether HTTP debugging is enabled.\n        ssl_verify (bool): Flag to indicate whether SSL verification is enabled.\n        web_client (BBOTAsyncClient): An instance of BBOTAsyncClient for making HTTP requests.\n        client_only_options (tuple): A tuple of options only applicable to the web client.\n\n    Examples:\n        Basic web request:\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        Download file:\n        &gt;&gt;&gt; filename = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\")\n\n        Download wordlist (cached for 30 days by default):\n        &gt;&gt;&gt; filename = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n    \"\"\"\n\n    def __init__(self, parent_helper):\n        self.parent_helper = parent_helper\n        self.preset = self.parent_helper.preset\n        self.config = self.preset.config\n        self.web_config = self.config.get(\"web\", {})\n        self.web_spider_depth = self.web_config.get(\"spider_depth\", 1)\n        self.web_spider_distance = self.web_config.get(\"spider_distance\", 0)\n        self.web_clients = {}\n        self.target = self.preset.target\n        self.ssl_verify = self.config.get(\"ssl_verify\", False)\n        engine_debug = self.config.get(\"engine\", {}).get(\"debug\", False)\n        super().__init__(\n            server_kwargs={\"config\": self.config, \"target\": self.parent_helper.preset.target.minimal},\n            debug=engine_debug,\n        )\n\n    def AsyncClient(self, *args, **kwargs):\n        # cache by retries to prevent unwanted accumulation of clients\n        # (they are not garbage-collected)\n        retries = kwargs.get(\"retries\", 1)\n        try:\n            return self.web_clients[retries]\n        except KeyError:\n            from .client import BBOTAsyncClient\n\n            client = BBOTAsyncClient.from_config(self.config, self.target, *args, persist_cookies=False, **kwargs)\n            self.web_clients[client.retries] = client\n            return client\n\n    async def request(self, *args, **kwargs):\n        \"\"\"\n        Asynchronous function for making HTTP requests, intended to be the most basic web request function\n        used widely across BBOT and within this helper class. Handles various exceptions and timeouts\n        that might occur during the request.\n\n        This function automatically respects the scan's global timeout, proxy, headers, etc.\n        Headers you specify will be merged with the scan's. Your arguments take ultimate precedence,\n        meaning you can override the scan's values if you want.\n\n        Args:\n            url (str): The URL to send the request to.\n            method (str, optional): The HTTP method to use for the request. 
Defaults to 'GET'.\n            headers (dict, optional): Dictionary of HTTP headers to send with the request.\n            params (dict, optional): Dictionary, list of tuples, or bytes to send in the query string.\n            cookies (dict, optional): Dictionary or CookieJar object containing cookies.\n            json (Any, optional): A JSON serializable Python object to send in the body.\n            data (dict, optional): Dictionary, list of tuples, or bytes to send in the body.\n            files (dict, optional): Dictionary of 'name': file-like-objects for multipart encoding upload.\n            auth (tuple, optional): Auth tuple to enable Basic/Digest/Custom HTTP auth.\n            timeout (float, optional): The maximum time to wait for the request to complete.\n            proxies (dict, optional): Dictionary mapping protocol schemes to proxy URLs.\n            allow_redirects (bool, optional): Enables or disables redirection. Defaults to None.\n            stream (bool, optional): Enables or disables response streaming.\n            raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.\n            client (httpx.AsyncClient, optional): A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.\n            cache_for (int, optional): Time in seconds to cache the request. Not used currently. Defaults to None.\n\n        Raises:\n            httpx.TimeoutException: If the request times out.\n            httpx.ConnectError: If the connection fails.\n            httpx.RequestError: For other request-related errors.\n\n        Returns:\n            httpx.Response or None: The HTTP response object returned by the httpx library.\n\n        Examples:\n            &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n            &gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n\n        Note:\n            If the web request fails, it will return None unless `raise_error` is `True`.\n        \"\"\"\n        raise_error = kwargs.get(\"raise_error\", False)\n        result = await self.run_and_return(\"request\", *args, **kwargs)\n        if isinstance(result, dict) and \"_request_error\" in result:\n            if raise_error:\n                error_msg = result[\"_request_error\"]\n                response = result[\"_response\"]\n                error = self.ERROR_CLASS(error_msg)\n                error.response = response\n                raise error\n        return result\n\n    async def request_batch(self, urls, *args, **kwargs):\n        \"\"\"\n        Given a list of URLs, request them in parallel and yield responses as they come in.\n\n        Args:\n            urls (list[str]): List of URLs to visit\n            *args: Positional arguments to pass through to httpx\n            **kwargs: Keyword arguments to pass through to httpx\n\n        Examples:\n            &gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n            &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n            &gt;&gt;&gt;         self.hugesuccess(response)\n        \"\"\"\n        agen = self.run_and_yield(\"request_batch\", urls, *args, **kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    
async def request_custom_batch(self, urls_and_kwargs):\n        \"\"\"\n        Make web requests in parallel with custom options for each request. Yield responses as they come in.\n\n        Similar to `request_batch` except it allows individual arguments for each URL.\n\n        Args:\n            urls_and_kwargs (list[tuple]): List of tuples in the format: (url, kwargs, custom_tracker)\n                where custom_tracker is an optional value for your own internal use. You may use it to\n                help correlate requests, etc.\n\n        Examples:\n            &gt;&gt;&gt; urls_and_kwargs = [\n            &gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n            &gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n            &gt;&gt;&gt; ]\n            &gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n            &gt;&gt;&gt;     urls_and_kwargs\n            &gt;&gt;&gt; ):\n            &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n            &gt;&gt;&gt;         self.hugesuccess(response)\n        \"\"\"\n        agen = self.run_and_yield(\"request_custom_batch\", urls_and_kwargs)\n        while 1:\n            try:\n                yield await agen.__anext__()\n            except (StopAsyncIteration, GeneratorExit):\n                await agen.aclose()\n                break\n\n    async def download(self, url, **kwargs):\n        \"\"\"\n        Asynchronous function for downloading files from a given URL. Supports caching with an optional\n        time period in hours via the \"cache_hrs\" keyword argument. In case of successful download,\n        returns the full path of the saved filename. If the download fails, returns None.\n\n        Args:\n            url (str): The URL of the file to download.\n            filename (str, optional): The filename to save the downloaded file as.\n                If not provided, will generate based on URL.\n            max_size (str or int): Maximum filesize as a string (\"5MB\") or integer in bytes.\n            cache_hrs (float, optional): The number of hours to cache the downloaded file.\n                A negative value disables caching. Defaults to -1.\n            method (str, optional): The HTTP method to use for the request, defaults to 'GET'.\n            raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. 
Defaults to False.\n            **kwargs: Additional keyword arguments to pass to the httpx request.\n\n        Returns:\n            Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.\n\n        Examples:\n            &gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n        \"\"\"\n        success = False\n        raise_error = kwargs.get(\"raise_error\", False)\n        filename = kwargs.pop(\"filename\", self.parent_helper.cache_filename(url))\n        filename = truncate_filename(Path(filename).resolve())\n        kwargs[\"filename\"] = filename\n        max_size = kwargs.pop(\"max_size\", None)\n        if max_size is not None:\n            max_size = self.parent_helper.human_to_bytes(max_size)\n            kwargs[\"max_size\"] = max_size\n        cache_hrs = float(kwargs.pop(\"cache_hrs\", -1))\n        if cache_hrs &gt; 0 and self.parent_helper.is_cached(url):\n            log.debug(f\"{url} is cached at {self.parent_helper.cache_filename(url)}\")\n            success = True\n        else:\n            result = await self.run_and_return(\"download\", url, **kwargs)\n            if isinstance(result, dict) and \"_download_error\" in result:\n                if raise_error:\n                    error_msg = result[\"_download_error\"]\n                    response = result[\"_response\"]\n                    error = self.ERROR_CLASS(error_msg)\n                    error.response = response\n                    raise error\n            elif result:\n                success = True\n\n        if success:\n            return filename\n\n    async def wordlist(self, path, lines=None, **kwargs):\n        \"\"\"\n        Asynchronous function for retrieving wordlists, either from a local path or a URL.\n        Allows for optional line-based truncation and caching. 
Returns the full path of the wordlist\n        file or a truncated version of it.\n\n        Args:\n            path (str): The local or remote path of the wordlist.\n            lines (int, optional): Number of lines to read from the wordlist.\n                If specified, will return a truncated wordlist with this many lines.\n            cache_hrs (float, optional): Number of hours to cache the downloaded wordlist.\n                Defaults to 720 hours (30 days) for remote wordlists.\n            **kwargs: Additional keyword arguments to pass to the 'download' function for remote wordlists.\n\n        Returns:\n            Path: The full path of the wordlist (or its truncated version) as a Path object.\n\n        Raises:\n            WordlistError: If the path is invalid or the wordlist could not be retrieved or found.\n\n        Examples:\n            Fetching full wordlist\n            &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n\n            Fetching and truncating to the first 100 lines\n            &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n        \"\"\"\n        if not path:\n            raise WordlistError(f\"Invalid wordlist: {path}\")\n        if not \"cache_hrs\" in kwargs:\n            kwargs[\"cache_hrs\"] = 720\n        if self.parent_helper.is_url(path):\n            filename = await self.download(str(path), **kwargs)\n            if filename is None:\n                raise WordlistError(f\"Unable to retrieve wordlist from {path}\")\n        else:\n            filename = Path(path).resolve()\n            if not filename.is_file():\n                raise WordlistError(f\"Unable to find wordlist at {path}\")\n\n        if lines is None:\n            return filename\n        else:\n            lines = int(lines)\n            with open(filename) as f:\n                read_lines = f.readlines()\n            cache_key = f\"{filename}:{lines}\"\n            truncated_filename = self.parent_helper.cache_filename(cache_key)\n            with open(truncated_filename, \"w\") as f:\n                for line in read_lines[:lines]:\n                    f.write(line)\n            return truncated_filename\n\n    async def curl(self, *args, **kwargs):\n        \"\"\"\n        An asynchronous function that runs a cURL command with specified arguments and options.\n\n        This function constructs and executes a cURL command based on the provided parameters.\n        It offers support for various cURL options such as headers, post data, and cookies.\n\n        Args:\n            *args: Variable length argument list for positional arguments. Unused in this function.\n            url (str): The URL for the cURL request. Mandatory.\n            raw_path (bool, optional): If True, activates '--path-as-is' in cURL. Defaults to False.\n            headers (dict, optional): A dictionary of HTTP headers to include in the request.\n            ignore_bbot_global_settings (bool, optional): If True, ignores the global settings of BBOT. 
Defaults to False.\n            post_data (dict, optional): A dictionary containing data to be sent in the request body.\n            method (str, optional): The HTTP method to use for the request (e.g., 'GET', 'POST').\n            cookies (dict, optional): A dictionary of cookies to include in the request.\n            path_override (str, optional): Overrides the request-target to use in the HTTP request line.\n            head_mode (bool, optional): If True, includes '-I' to fetch headers only. Defaults to None.\n            raw_body (str, optional): Raw string to be sent in the body of the request.\n            **kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n        Returns:\n            str: The output of the cURL command.\n\n        Raises:\n            CurlError: If 'url' is not supplied.\n\n        Examples:\n            &gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n            &gt;&gt;&gt; print(output)\n        \"\"\"\n        url = kwargs.get(\"url\", \"\")\n\n        if not url:\n            raise CurlError(\"No URL supplied to CURL helper\")\n\n        curl_command = [\"curl\", url, \"-s\"]\n\n        raw_path = kwargs.get(\"raw_path\", False)\n        if raw_path:\n            curl_command.append(\"--path-as-is\")\n\n        # respect global ssl verify settings\n        if self.ssl_verify is not True:\n            curl_command.append(\"-k\")\n\n        headers = kwargs.get(\"headers\", {})\n\n        ignore_bbot_global_settings = kwargs.get(\"ignore_bbot_global_settings\", False)\n\n        if ignore_bbot_global_settings:\n            log.debug(\"ignore_bbot_global_settings enabled. Global settings will not be applied\")\n        else:\n            http_timeout = self.parent_helper.web_config.get(\"http_timeout\", 20)\n            user_agent = self.parent_helper.web_config.get(\"user_agent\", \"BBOT\")\n\n            if \"User-Agent\" not in headers:\n                headers[\"User-Agent\"] = user_agent\n\n            # only add custom headers if the URL is in-scope\n            if self.parent_helper.preset.in_scope(url):\n                for hk, hv in self.web_config.get(\"http_headers\", {}).items():\n                    headers[hk] = hv\n\n            # add the timeout\n            if not \"timeout\" in kwargs:\n                timeout = http_timeout\n\n            curl_command.append(\"-m\")\n            curl_command.append(str(timeout))\n\n        for k, v in headers.items():\n            if isinstance(v, list):\n                for x in v:\n                    curl_command.append(\"-H\")\n                    curl_command.append(f\"{k}: {x}\")\n\n            else:\n                curl_command.append(\"-H\")\n                curl_command.append(f\"{k}: {v}\")\n\n        post_data = kwargs.get(\"post_data\", {})\n        if len(post_data.items()) &gt; 0:\n            curl_command.append(\"-d\")\n            post_data_str = \"\"\n            for k, v in post_data.items():\n                post_data_str += f\"&amp;{k}={v}\"\n            curl_command.append(post_data_str.lstrip(\"&amp;\"))\n\n        method = kwargs.get(\"method\", \"\")\n        if method:\n            curl_command.append(\"-X\")\n            curl_command.append(method)\n\n        cookies = kwargs.get(\"cookies\", \"\")\n        if cookies:\n            curl_command.append(\"-b\")\n            cookies_str = \"\"\n            for k, v in cookies.items():\n                cookies_str += f\"{k}={v}; \"\n            
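# the accumulated \"name=value; \" pairs become a single cookie-string argument to curl's -b flag\n            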
curl_command.append(f'{cookies_str.rstrip(\" \")}')\n\n        path_override = kwargs.get(\"path_override\", None)\n        if path_override:\n            curl_command.append(\"--request-target\")\n            curl_command.append(f\"{path_override}\")\n\n        head_mode = kwargs.get(\"head_mode\", None)\n        if head_mode:\n            curl_command.append(\"-I\")\n\n        raw_body = kwargs.get(\"raw_body\", None)\n        if raw_body:\n            curl_command.append(\"-d\")\n            curl_command.append(raw_body)\n\n        output = (await self.parent_helper.run(curl_command)).stdout\n        return output\n\n    def beautifulsoup(\n        self,\n        markup,\n        features=\"html.parser\",\n        builder=None,\n        parse_only=None,\n        from_encoding=None,\n        exclude_encodings=None,\n        element_classes=None,\n        **kwargs,\n    ):\n        \"\"\"\n        Naviate, Search, Modify, Parse, or PrettyPrint HTML Content.\n        More information at https://beautiful-soup-4.readthedocs.io/en/latest/\n\n        Args:\n            markup: A string or a file-like object representing markup to be parsed.\n            features: Desirable features of the parser to be used.\n                This may be the name of a specific parser (\"lxml\",\n                \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be\n                the type of markup to be used (\"html\", \"html5\", \"xml\").\n                Defaults to 'html.parser'.\n            builder: A TreeBuilder subclass to instantiate (or instance to use)\n                instead of looking one up based on `features`.\n            parse_only: A SoupStrainer. Only parts of the document\n                matching the SoupStrainer will be considered.\n            from_encoding: A string indicating the encoding of the\n                document to be parsed.\n            exclude_encodings = A list of strings indicating\n                encodings known to be wrong.\n            element_classes = A dictionary mapping BeautifulSoup\n                classes like Tag and NavigableString, to other classes you'd\n                like to be instantiated instead as the parse tree is\n                built.\n            **kwargs = For backwards compatibility purposes.\n\n        Returns:\n            soup: An instance of the BeautifulSoup class\n\n        Todo:\n            - Write tests for this function\n\n        Examples:\n            &gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\n            Perform an html parse of the 'markup' argument and return a soup instance\n\n            &gt;&gt;&gt; email_type = soup.find(type=\"email\")\n            Searches the soup instance for all occurances of the passed in argument\n        \"\"\"\n        try:\n            soup = BeautifulSoup(\n                markup, features, builder, parse_only, from_encoding, exclude_encodings, element_classes, **kwargs\n            )\n            return soup\n        except Exception as e:\n            log.debug(f\"Error parsing beautifulsoup: {e}\")\n            return False\n\n    def response_to_json(self, response):\n        \"\"\"\n        Convert web response to JSON object, similar to the output of `httpx -irr -json`\n        \"\"\"\n\n        if response is None:\n            return\n\n        import mmh3\n        from datetime import datetime\n        from hashlib import md5, sha256\n        from bbot.core.helpers.misc import tagify, urlparse, split_host_port, smart_decode\n\n        
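# pull the original request apart so host, port and scheme can be reported alongside the body and header hashes\n        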
request = response.request\n        url = str(request.url)\n        parsed_url = urlparse(url)\n        netloc = parsed_url.netloc\n        scheme = parsed_url.scheme.lower()\n        host, port = split_host_port(f\"{scheme}://{netloc}\")\n\n        raw_headers = \"\\r\\n\".join([f\"{k}: {v}\" for k, v in response.headers.items()])\n        raw_headers_encoded = raw_headers.encode()\n\n        headers = {}\n        for k, v in response.headers.items():\n            k = tagify(k, delimiter=\"_\")\n            headers[k] = v\n\n        j = {\n            \"timestamp\": datetime.now().isoformat(),\n            \"hash\": {\n                \"body_md5\": md5(response.content).hexdigest(),\n                \"body_mmh3\": mmh3.hash(response.content),\n                \"body_sha256\": sha256(response.content).hexdigest(),\n                # \"body_simhash\": \"TODO\",\n                \"header_md5\": md5(raw_headers_encoded).hexdigest(),\n                \"header_mmh3\": mmh3.hash(raw_headers_encoded),\n                \"header_sha256\": sha256(raw_headers_encoded).hexdigest(),\n                # \"header_simhash\": \"TODO\",\n            },\n            \"header\": headers,\n            \"body\": smart_decode(response.content),\n            \"content_type\": headers.get(\"content_type\", \"\").split(\";\")[0].strip(),\n            \"url\": url,\n            \"host\": str(host),\n            \"port\": port,\n            \"scheme\": scheme,\n            \"method\": response.request.method,\n            \"path\": parsed_url.path,\n            \"raw_header\": raw_headers,\n            \"status_code\": response.status_code,\n        }\n\n        return j\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.ERROR_CLASS","title":"ERROR_CLASS  <code>class-attribute</code> <code>instance-attribute</code>","text":"<pre><code>ERROR_CLASS = WebError\n</code></pre> <p>Main utility class for managing HTTP operations in BBOT. It serves as a wrapper around the BBOTAsyncClient, which itself is a subclass of httpx.AsyncClient. 
The class provides functionalities to make HTTP requests, download files, and handle cached wordlists.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>               (<code>object</code>)           \u2013            <p>The parent helper object containing scan configurations.</p> </li> <li> <code>http_debug</code>               (<code>bool</code>)           \u2013            <p>Flag to indicate whether HTTP debugging is enabled.</p> </li> <li> <code>ssl_verify</code>               (<code>bool</code>)           \u2013            <p>Flag to indicate whether SSL verification is enabled.</p> </li> <li> <code>web_client</code>               (<code>BBOTAsyncClient</code>)           \u2013            <p>An instance of BBOTAsyncClient for making HTTP requests.</p> </li> <li> <code>client_only_options</code>               (<code>tuple</code>)           \u2013            <p>A tuple of options only applicable to the web client.</p> </li> </ul> <p>Examples:</p> <p>Basic web request:</p> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre> <p>Download file:</p> <pre><code>&gt;&gt;&gt; filename = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\")\n</code></pre> <p>Download wordlist (cached for 30 days by default):</p> <pre><code>&gt;&gt;&gt; filename = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.beautifulsoup","title":"beautifulsoup","text":"<pre><code>beautifulsoup(markup, features='html.parser', builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, element_classes=None, **kwargs)\n</code></pre> <p>Naviate, Search, Modify, Parse, or PrettyPrint HTML Content. More information at https://beautiful-soup-4.readthedocs.io/en/latest/</p> <p>Parameters:</p> <ul> <li> <code>markup</code>           \u2013            <p>A string or a file-like object representing markup to be parsed.</p> </li> <li> <code>features</code>           \u2013            <p>Desirable features of the parser to be used. This may be the name of a specific parser (\"lxml\", \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be the type of markup to be used (\"html\", \"html5\", \"xml\"). Defaults to 'html.parser'.</p> </li> <li> <code>builder</code>           \u2013            <p>A TreeBuilder subclass to instantiate (or instance to use) instead of looking one up based on <code>features</code>.</p> </li> <li> <code>parse_only</code>           \u2013            <p>A SoupStrainer. 
Only parts of the document matching the SoupStrainer will be considered.</p> </li> <li> <code>from_encoding</code>           \u2013            <p>A string indicating the encoding of the document to be parsed.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>soup</code>          \u2013            <p>An instance of the BeautifulSoup class</p> </li> </ul> Todo <ul> <li>Write tests for this function</li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\nPerform an html parse of the 'markup' argument and return a soup instance\n</code></pre> <pre><code>&gt;&gt;&gt; email_type = soup.find(type=\"email\")\nSearches the soup instance for all occurances of the passed in argument\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>def beautifulsoup(\n    self,\n    markup,\n    features=\"html.parser\",\n    builder=None,\n    parse_only=None,\n    from_encoding=None,\n    exclude_encodings=None,\n    element_classes=None,\n    **kwargs,\n):\n    \"\"\"\n    Naviate, Search, Modify, Parse, or PrettyPrint HTML Content.\n    More information at https://beautiful-soup-4.readthedocs.io/en/latest/\n\n    Args:\n        markup: A string or a file-like object representing markup to be parsed.\n        features: Desirable features of the parser to be used.\n            This may be the name of a specific parser (\"lxml\",\n            \"lxml-xml\", \"html.parser\", or \"html5lib\") or it may be\n            the type of markup to be used (\"html\", \"html5\", \"xml\").\n            Defaults to 'html.parser'.\n        builder: A TreeBuilder subclass to instantiate (or instance to use)\n            instead of looking one up based on `features`.\n        parse_only: A SoupStrainer. Only parts of the document\n            matching the SoupStrainer will be considered.\n        from_encoding: A string indicating the encoding of the\n            document to be parsed.\n        exclude_encodings = A list of strings indicating\n            encodings known to be wrong.\n        element_classes = A dictionary mapping BeautifulSoup\n            classes like Tag and NavigableString, to other classes you'd\n            like to be instantiated instead as the parse tree is\n            built.\n        **kwargs = For backwards compatibility purposes.\n\n    Returns:\n        soup: An instance of the BeautifulSoup class\n\n    Todo:\n        - Write tests for this function\n\n    Examples:\n        &gt;&gt;&gt; soup = self.helpers.beautifulsoup(event.data[\"body\"], \"html.parser\")\n        Perform an html parse of the 'markup' argument and return a soup instance\n\n        &gt;&gt;&gt; email_type = soup.find(type=\"email\")\n        Searches the soup instance for all occurances of the passed in argument\n    \"\"\"\n    try:\n        soup = BeautifulSoup(\n            markup, features, builder, parse_only, from_encoding, exclude_encodings, element_classes, **kwargs\n        )\n        return soup\n    except Exception as e:\n        log.debug(f\"Error parsing beautifulsoup: {e}\")\n        return False\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.curl","title":"curl  <code>async</code>","text":"<pre><code>curl(*args, **kwargs)\n</code></pre> <p>An asynchronous function that runs a cURL command with specified arguments and options.</p> <p>This function constructs and executes a cURL command based on the provided parameters. 
It offers support for various cURL options such as headers, post data, and cookies.</p> <p>Parameters:</p> <ul> <li> <code>*args</code>           \u2013            <p>Variable length argument list for positional arguments. Unused in this function.</p> </li> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL for the cURL request. Mandatory.</p> </li> <li> <code>raw_path</code>               (<code>bool</code>)           \u2013            <p>If True, activates '--path-as-is' in cURL. Defaults to False.</p> </li> <li> <code>headers</code>               (<code>dict</code>)           \u2013            <p>A dictionary of HTTP headers to include in the request.</p> </li> <li> <code>ignore_bbot_global_settings</code>               (<code>bool</code>)           \u2013            <p>If True, ignores the global settings of BBOT. Defaults to False.</p> </li> <li> <code>post_data</code>               (<code>dict</code>)           \u2013            <p>A dictionary containing data to be sent in the request body.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request (e.g., 'GET', 'POST').</p> </li> <li> <code>cookies</code>               (<code>dict</code>)           \u2013            <p>A dictionary of cookies to include in the request.</p> </li> <li> <code>path_override</code>               (<code>str</code>)           \u2013            <p>Overrides the request-target to use in the HTTP request line.</p> </li> <li> <code>head_mode</code>               (<code>bool</code>)           \u2013            <p>If True, includes '-I' to fetch headers only. Defaults to None.</p> </li> <li> <code>raw_body</code>               (<code>str</code>)           \u2013            <p>Raw string to be sent in the body of the request.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Arbitrary keyword arguments that will be forwarded to the HTTP request function.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>str</code>          \u2013            <p>The output of the cURL command.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>CurlError</code>             \u2013            <p>If 'url' is not supplied.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n&gt;&gt;&gt; print(output)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def curl(self, *args, **kwargs):\n    \"\"\"\n    An asynchronous function that runs a cURL command with specified arguments and options.\n\n    This function constructs and executes a cURL command based on the provided parameters.\n    It offers support for various cURL options such as headers, post data, and cookies.\n\n    Args:\n        *args: Variable length argument list for positional arguments. Unused in this function.\n        url (str): The URL for the cURL request. Mandatory.\n        raw_path (bool, optional): If True, activates '--path-as-is' in cURL. Defaults to False.\n        headers (dict, optional): A dictionary of HTTP headers to include in the request.\n        ignore_bbot_global_settings (bool, optional): If True, ignores the global settings of BBOT. 
Defaults to False.\n        post_data (dict, optional): A dictionary containing data to be sent in the request body.\n        method (str, optional): The HTTP method to use for the request (e.g., 'GET', 'POST').\n        cookies (dict, optional): A dictionary of cookies to include in the request.\n        path_override (str, optional): Overrides the request-target to use in the HTTP request line.\n        head_mode (bool, optional): If True, includes '-I' to fetch headers only. Defaults to None.\n        raw_body (str, optional): Raw string to be sent in the body of the request.\n        **kwargs: Arbitrary keyword arguments that will be forwarded to the HTTP request function.\n\n    Returns:\n        str: The output of the cURL command.\n\n    Raises:\n        CurlError: If 'url' is not supplied.\n\n    Examples:\n        &gt;&gt;&gt; output = await curl(url=\"https://example.com\", headers={\"X-Header\": \"Wat\"})\n        &gt;&gt;&gt; print(output)\n    \"\"\"\n    url = kwargs.get(\"url\", \"\")\n\n    if not url:\n        raise CurlError(\"No URL supplied to CURL helper\")\n\n    curl_command = [\"curl\", url, \"-s\"]\n\n    raw_path = kwargs.get(\"raw_path\", False)\n    if raw_path:\n        curl_command.append(\"--path-as-is\")\n\n    # respect global ssl verify settings\n    if self.ssl_verify is not True:\n        curl_command.append(\"-k\")\n\n    headers = kwargs.get(\"headers\", {})\n\n    ignore_bbot_global_settings = kwargs.get(\"ignore_bbot_global_settings\", False)\n\n    if ignore_bbot_global_settings:\n        log.debug(\"ignore_bbot_global_settings enabled. Global settings will not be applied\")\n    else:\n        http_timeout = self.parent_helper.web_config.get(\"http_timeout\", 20)\n        user_agent = self.parent_helper.web_config.get(\"user_agent\", \"BBOT\")\n\n        if \"User-Agent\" not in headers:\n            headers[\"User-Agent\"] = user_agent\n\n        # only add custom headers if the URL is in-scope\n        if self.parent_helper.preset.in_scope(url):\n            for hk, hv in self.web_config.get(\"http_headers\", {}).items():\n                headers[hk] = hv\n\n        # add the timeout\n        if not \"timeout\" in kwargs:\n            timeout = http_timeout\n\n        curl_command.append(\"-m\")\n        curl_command.append(str(timeout))\n\n    for k, v in headers.items():\n        if isinstance(v, list):\n            for x in v:\n                curl_command.append(\"-H\")\n                curl_command.append(f\"{k}: {x}\")\n\n        else:\n            curl_command.append(\"-H\")\n            curl_command.append(f\"{k}: {v}\")\n\n    post_data = kwargs.get(\"post_data\", {})\n    if len(post_data.items()) &gt; 0:\n        curl_command.append(\"-d\")\n        post_data_str = \"\"\n        for k, v in post_data.items():\n            post_data_str += f\"&amp;{k}={v}\"\n        curl_command.append(post_data_str.lstrip(\"&amp;\"))\n\n    method = kwargs.get(\"method\", \"\")\n    if method:\n        curl_command.append(\"-X\")\n        curl_command.append(method)\n\n    cookies = kwargs.get(\"cookies\", \"\")\n    if cookies:\n        curl_command.append(\"-b\")\n        cookies_str = \"\"\n        for k, v in cookies.items():\n            cookies_str += f\"{k}={v}; \"\n        curl_command.append(f'{cookies_str.rstrip(\" \")}')\n\n    path_override = kwargs.get(\"path_override\", None)\n    if path_override:\n        curl_command.append(\"--request-target\")\n        curl_command.append(f\"{path_override}\")\n\n    head_mode = 
kwargs.get(\"head_mode\", None)\n    if head_mode:\n        curl_command.append(\"-I\")\n\n    raw_body = kwargs.get(\"raw_body\", None)\n    if raw_body:\n        curl_command.append(\"-d\")\n        curl_command.append(raw_body)\n\n    output = (await self.parent_helper.run(curl_command)).stdout\n    return output\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.download","title":"download  <code>async</code>","text":"<pre><code>download(url, **kwargs)\n</code></pre> <p>Asynchronous function for downloading files from a given URL. Supports caching with an optional time period in hours via the \"cache_hrs\" keyword argument. In case of successful download, returns the full path of the saved filename. If the download fails, returns None.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL of the file to download.</p> </li> <li> <code>filename</code>               (<code>str</code>)           \u2013            <p>The filename to save the downloaded file as. If not provided, will generate based on URL.</p> </li> <li> <code>max_size</code>               (<code>str or int</code>)           \u2013            <p>Maximum filesize as a string (\"5MB\") or integer in bytes.</p> </li> <li> <code>cache_hrs</code>               (<code>float</code>)           \u2013            <p>The number of hours to cache the downloaded file. A negative value disables caching. Defaults to -1.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request, defaults to 'GET'.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to pass to the httpx request.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def download(self, url, **kwargs):\n    \"\"\"\n    Asynchronous function for downloading files from a given URL. Supports caching with an optional\n    time period in hours via the \"cache_hrs\" keyword argument. In case of successful download,\n    returns the full path of the saved filename. If the download fails, returns None.\n\n    Args:\n        url (str): The URL of the file to download.\n        filename (str, optional): The filename to save the downloaded file as.\n            If not provided, will generate based on URL.\n        max_size (str or int): Maximum filesize as a string (\"5MB\") or integer in bytes.\n        cache_hrs (float, optional): The number of hours to cache the downloaded file.\n            A negative value disables caching. Defaults to -1.\n        method (str, optional): The HTTP method to use for the request, defaults to 'GET'.\n        raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. 
Defaults to False.\n        **kwargs: Additional keyword arguments to pass to the httpx request.\n\n    Returns:\n        Path or None: The full path of the downloaded file as a Path object if successful, otherwise None.\n\n    Examples:\n        &gt;&gt;&gt; filepath = await self.helpers.download(\"https://www.evilcorp.com/passwords.docx\", cache_hrs=24)\n    \"\"\"\n    success = False\n    raise_error = kwargs.get(\"raise_error\", False)\n    filename = kwargs.pop(\"filename\", self.parent_helper.cache_filename(url))\n    filename = truncate_filename(Path(filename).resolve())\n    kwargs[\"filename\"] = filename\n    max_size = kwargs.pop(\"max_size\", None)\n    if max_size is not None:\n        max_size = self.parent_helper.human_to_bytes(max_size)\n        kwargs[\"max_size\"] = max_size\n    cache_hrs = float(kwargs.pop(\"cache_hrs\", -1))\n    if cache_hrs &gt; 0 and self.parent_helper.is_cached(url):\n        log.debug(f\"{url} is cached at {self.parent_helper.cache_filename(url)}\")\n        success = True\n    else:\n        result = await self.run_and_return(\"download\", url, **kwargs)\n        if isinstance(result, dict) and \"_download_error\" in result:\n            if raise_error:\n                error_msg = result[\"_download_error\"]\n                response = result[\"_response\"]\n                error = self.ERROR_CLASS(error_msg)\n                error.response = response\n                raise error\n        elif result:\n            success = True\n\n    if success:\n        return filename\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request","title":"request  <code>async</code>","text":"<pre><code>request(*args, **kwargs)\n</code></pre> <p>Asynchronous function for making HTTP requests, intended to be the most basic web request function used widely across BBOT and within this helper class. Handles various exceptions and timeouts that might occur during the request.</p> <p>This function automatically respects the scan's global timeout, proxy, headers, etc. Headers you specify will be merged with the scan's. Your arguments take ultimate precedence, meaning you can override the scan's values if you want.</p> <p>Parameters:</p> <ul> <li> <code>url</code>               (<code>str</code>)           \u2013            <p>The URL to send the request to.</p> </li> <li> <code>method</code>               (<code>str</code>)           \u2013            <p>The HTTP method to use for the request. 
Defaults to 'GET'.</p> </li> <li> <code>headers</code>               (<code>dict</code>)           \u2013            <p>Dictionary of HTTP headers to send with the request.</p> </li> <li> <code>params</code>               (<code>dict</code>)           \u2013            <p>Dictionary, list of tuples, or bytes to send in the query string.</p> </li> <li> <code>cookies</code>               (<code>dict</code>)           \u2013            <p>Dictionary or CookieJar object containing cookies.</p> </li> <li> <code>json</code>               (<code>Any</code>)           \u2013            <p>A JSON serializable Python object to send in the body.</p> </li> <li> <code>data</code>               (<code>dict</code>)           \u2013            <p>Dictionary, list of tuples, or bytes to send in the body.</p> </li> <li> <code>files</code>               (<code>dict</code>)           \u2013            <p>Dictionary of 'name': file-like-objects for multipart encoding upload.</p> </li> <li> <code>auth</code>               (<code>tuple</code>)           \u2013            <p>Auth tuple to enable Basic/Digest/Custom HTTP auth.</p> </li> <li> <code>timeout</code>               (<code>float</code>)           \u2013            <p>The maximum time to wait for the request to complete.</p> </li> <li> <code>proxies</code>               (<code>dict</code>)           \u2013            <p>Dictionary mapping protocol schemes to proxy URLs.</p> </li> <li> <code>allow_redirects</code>               (<code>bool</code>)           \u2013            <p>Enables or disables redirection. Defaults to None.</p> </li> <li> <code>stream</code>               (<code>bool</code>)           \u2013            <p>Enables or disables response streaming.</p> </li> <li> <code>raise_error</code>               (<code>bool</code>)           \u2013            <p>Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.</p> </li> <li> <code>client</code>               (<code>AsyncClient</code>)           \u2013            <p>A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.</p> </li> <li> <code>cache_for</code>               (<code>int</code>)           \u2013            <p>Time in seconds to cache the request. Not used currently. Defaults to None.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>TimeoutException</code>             \u2013            <p>If the request times out.</p> </li> <li> <code>ConnectError</code>             \u2013            <p>If the connection fails.</p> </li> <li> <code>RequestError</code>             \u2013            <p>For other request-related errors.</p> </li> </ul> <p>Returns:</p> <ul> <li>           \u2013            <p>httpx.Response or None: The HTTP response object returned by the httpx library.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n</code></pre> <pre><code>&gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n</code></pre> Note <p>If the web request fails, it will return None unless <code>raise_error</code> is <code>True</code>.</p> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request(self, *args, **kwargs):\n    \"\"\"\n    Asynchronous function for making HTTP requests, intended to be the most basic web request function\n    used widely across BBOT and within this helper class. 
Handles various exceptions and timeouts\n    that might occur during the request.\n\n    This function automatically respects the scan's global timeout, proxy, headers, etc.\n    Headers you specify will be merged with the scan's. Your arguments take ultimate precedence,\n    meaning you can override the scan's values if you want.\n\n    Args:\n        url (str): The URL to send the request to.\n        method (str, optional): The HTTP method to use for the request. Defaults to 'GET'.\n        headers (dict, optional): Dictionary of HTTP headers to send with the request.\n        params (dict, optional): Dictionary, list of tuples, or bytes to send in the query string.\n        cookies (dict, optional): Dictionary or CookieJar object containing cookies.\n        json (Any, optional): A JSON serializable Python object to send in the body.\n        data (dict, optional): Dictionary, list of tuples, or bytes to send in the body.\n        files (dict, optional): Dictionary of 'name': file-like-objects for multipart encoding upload.\n        auth (tuple, optional): Auth tuple to enable Basic/Digest/Custom HTTP auth.\n        timeout (float, optional): The maximum time to wait for the request to complete.\n        proxies (dict, optional): Dictionary mapping protocol schemes to proxy URLs.\n        allow_redirects (bool, optional): Enables or disables redirection. Defaults to None.\n        stream (bool, optional): Enables or disables response streaming.\n        raise_error (bool, optional): Whether to raise exceptions for HTTP connect, timeout errors. Defaults to False.\n        client (httpx.AsyncClient, optional): A specific httpx.AsyncClient to use for the request. Defaults to self.web_client.\n        cache_for (int, optional): Time in seconds to cache the request. Not used currently. 
Defaults to None.\n\n    Raises:\n        httpx.TimeoutException: If the request times out.\n        httpx.ConnectError: If the connection fails.\n        httpx.RequestError: For other request-related errors.\n\n    Returns:\n        httpx.Response or None: The HTTP response object returned by the httpx library.\n\n    Examples:\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://www.evilcorp.com\")\n\n        &gt;&gt;&gt; response = await self.helpers.request(\"https://api.evilcorp.com/\", method=\"POST\", data=\"stuff\")\n\n    Note:\n        If the web request fails, it will return None unless `raise_error` is `True`.\n    \"\"\"\n    raise_error = kwargs.get(\"raise_error\", False)\n    result = await self.run_and_return(\"request\", *args, **kwargs)\n    if isinstance(result, dict) and \"_request_error\" in result:\n        if raise_error:\n            error_msg = result[\"_request_error\"]\n            response = result[\"_response\"]\n            error = self.ERROR_CLASS(error_msg)\n            error.response = response\n            raise error\n    return result\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request_batch","title":"request_batch  <code>async</code>","text":"<pre><code>request_batch(urls, *args, **kwargs)\n</code></pre> <p>Given a list of URLs, request them in parallel and yield responses as they come in.</p> <p>Parameters:</p> <ul> <li> <code>urls</code>               (<code>list[str]</code>)           \u2013            <p>List of URLs to visit</p> </li> <li> <code>*args</code>           \u2013            <p>Positional arguments to pass through to httpx</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Keyword arguments to pass through to httpx</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n&gt;&gt;&gt;     if response is not None and response.status_code == 200:\n&gt;&gt;&gt;         self.hugesuccess(response)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request_batch(self, urls, *args, **kwargs):\n    \"\"\"\n    Given a list of URLs, request them in parallel and yield responses as they come in.\n\n    Args:\n        urls (list[str]): List of URLs to visit\n        *args: Positional arguments to pass through to httpx\n        **kwargs: Keyword arguments to pass through to httpx\n\n    Examples:\n        &gt;&gt;&gt; async for url, response in self.helpers.request_batch(urls, headers={\"X-Test\": \"Test\"}):\n        &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n        &gt;&gt;&gt;         self.hugesuccess(response)\n    \"\"\"\n    agen = self.run_and_yield(\"request_batch\", urls, *args, **kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.request_custom_batch","title":"request_custom_batch  <code>async</code>","text":"<pre><code>request_custom_batch(urls_and_kwargs)\n</code></pre> <p>Make web requests in parallel with custom options for each request. 
Yield responses as they come in.</p> <p>Similar to <code>request_batch</code> except it allows individual arguments for each URL.</p> <p>Parameters:</p> <ul> <li> <code>urls_and_kwargs</code>               (<code>list[tuple]</code>)           \u2013            <p>List of tuples in the format: (url, kwargs, custom_tracker) where custom_tracker is an optional value for your own internal use. You may use it to help correlate requests, etc.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; urls_and_kwargs = [\n&gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n&gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n&gt;&gt;&gt; ]\n&gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n&gt;&gt;&gt;     urls_and_kwargs\n&gt;&gt;&gt; ):\n&gt;&gt;&gt;     if response is not None and response.status_code == 200:\n&gt;&gt;&gt;         self.hugesuccess(response)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def request_custom_batch(self, urls_and_kwargs):\n    \"\"\"\n    Make web requests in parallel with custom options for each request. Yield responses as they come in.\n\n    Similar to `request_batch` except it allows individual arguments for each URL.\n\n    Args:\n        urls_and_kwargs (list[tuple]): List of tuples in the format: (url, kwargs, custom_tracker)\n            where custom_tracker is an optional value for your own internal use. You may use it to\n            help correlate requests, etc.\n\n    Examples:\n        &gt;&gt;&gt; urls_and_kwargs = [\n        &gt;&gt;&gt;     (\"http://evilcorp.com/1\", {\"method\": \"GET\"}, \"request-1\"),\n        &gt;&gt;&gt;     (\"http://evilcorp.com/2\", {\"method\": \"POST\"}, \"request-2\"),\n        &gt;&gt;&gt; ]\n        &gt;&gt;&gt; async for url, kwargs, custom_tracker, response in self.helpers.request_custom_batch(\n        &gt;&gt;&gt;     urls_and_kwargs\n        &gt;&gt;&gt; ):\n        &gt;&gt;&gt;     if response is not None and response.status_code == 200:\n        &gt;&gt;&gt;         self.hugesuccess(response)\n    \"\"\"\n    agen = self.run_and_yield(\"request_custom_batch\", urls_and_kwargs)\n    while 1:\n        try:\n            yield await agen.__anext__()\n        except (StopAsyncIteration, GeneratorExit):\n            await agen.aclose()\n            break\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.response_to_json","title":"response_to_json","text":"<pre><code>response_to_json(response)\n</code></pre> <p>Convert web response to JSON object, similar to the output of <code>httpx -irr -json</code></p> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>def response_to_json(self, response):\n    \"\"\"\n    Convert web response to JSON object, similar to the output of `httpx -irr -json`\n    \"\"\"\n\n    if response is None:\n        return\n\n    import mmh3\n    from datetime import datetime\n    from hashlib import md5, sha256\n    from bbot.core.helpers.misc import tagify, urlparse, split_host_port, smart_decode\n\n    request = response.request\n    url = str(request.url)\n    parsed_url = urlparse(url)\n    netloc = parsed_url.netloc\n    scheme = parsed_url.scheme.lower()\n    host, port = split_host_port(f\"{scheme}://{netloc}\")\n\n    raw_headers = \"\\r\\n\".join([f\"{k}: {v}\" for k, v in response.headers.items()])\n    raw_headers_encoded = raw_headers.encode()\n\n    headers = {}\n    for k, v in 
response.headers.items():\n        k = tagify(k, delimiter=\"_\")\n        headers[k] = v\n\n    j = {\n        \"timestamp\": datetime.now().isoformat(),\n        \"hash\": {\n            \"body_md5\": md5(response.content).hexdigest(),\n            \"body_mmh3\": mmh3.hash(response.content),\n            \"body_sha256\": sha256(response.content).hexdigest(),\n            # \"body_simhash\": \"TODO\",\n            \"header_md5\": md5(raw_headers_encoded).hexdigest(),\n            \"header_mmh3\": mmh3.hash(raw_headers_encoded),\n            \"header_sha256\": sha256(raw_headers_encoded).hexdigest(),\n            # \"header_simhash\": \"TODO\",\n        },\n        \"header\": headers,\n        \"body\": smart_decode(response.content),\n        \"content_type\": headers.get(\"content_type\", \"\").split(\";\")[0].strip(),\n        \"url\": url,\n        \"host\": str(host),\n        \"port\": port,\n        \"scheme\": scheme,\n        \"method\": response.request.method,\n        \"path\": parsed_url.path,\n        \"raw_header\": raw_headers,\n        \"status_code\": response.status_code,\n    }\n\n    return j\n</code></pre>"},{"location":"dev/helpers/web/#bbot.core.helpers.web.WebHelper.wordlist","title":"wordlist  <code>async</code>","text":"<pre><code>wordlist(path, lines=None, **kwargs)\n</code></pre> <p>Asynchronous function for retrieving wordlists, either from a local path or a URL. Allows for optional line-based truncation and caching. Returns the full path of the wordlist file or a truncated version of it.</p> <p>Parameters:</p> <ul> <li> <code>path</code>               (<code>str</code>)           \u2013            <p>The local or remote path of the wordlist.</p> </li> <li> <code>lines</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>Number of lines to read from the wordlist. If specified, will return a truncated wordlist with this many lines.</p> </li> <li> <code>cache_hrs</code>               (<code>float</code>)           \u2013            <p>Number of hours to cache the downloaded wordlist. Defaults to 720 hours (30 days) for remote wordlists.</p> </li> <li> <code>**kwargs</code>           \u2013            <p>Additional keyword arguments to pass to the 'download' function for remote wordlists.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>Path</code>          \u2013            <p>The full path of the wordlist (or its truncated version) as a Path object.</p> </li> </ul> <p>Raises:</p> <ul> <li> <code>WordlistError</code>             \u2013            <p>If the path is invalid or the wordlist could not be retrieved or found.</p> </li> </ul> <p>Examples:</p> <p>Fetching full wordlist</p> <pre><code>&gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n</code></pre> <p>Fetching and truncating to the first 100 lines</p> <pre><code>&gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n</code></pre> Source code in <code>bbot/core/helpers/web/web.py</code> <pre><code>async def wordlist(self, path, lines=None, **kwargs):\n    \"\"\"\n    Asynchronous function for retrieving wordlists, either from a local path or a URL.\n    Allows for optional line-based truncation and caching. 
Returns the full path of the wordlist\n    file or a truncated version of it.\n\n    Args:\n        path (str): The local or remote path of the wordlist.\n        lines (int, optional): Number of lines to read from the wordlist.\n            If specified, will return a truncated wordlist with this many lines.\n        cache_hrs (float, optional): Number of hours to cache the downloaded wordlist.\n            Defaults to 720 hours (30 days) for remote wordlists.\n        **kwargs: Additional keyword arguments to pass to the 'download' function for remote wordlists.\n\n    Returns:\n        Path: The full path of the wordlist (or its truncated version) as a Path object.\n\n    Raises:\n        WordlistError: If the path is invalid or the wordlist could not be retrieved or found.\n\n    Examples:\n        Fetching full wordlist\n        &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"https://www.evilcorp.com/wordlist.txt\")\n\n        Fetching and truncating to the first 100 lines\n        &gt;&gt;&gt; wordlist_path = await self.helpers.wordlist(\"/root/rockyou.txt\", lines=100)\n    \"\"\"\n    if not path:\n        raise WordlistError(f\"Invalid wordlist: {path}\")\n    if not \"cache_hrs\" in kwargs:\n        kwargs[\"cache_hrs\"] = 720\n    if self.parent_helper.is_url(path):\n        filename = await self.download(str(path), **kwargs)\n        if filename is None:\n            raise WordlistError(f\"Unable to retrieve wordlist from {path}\")\n    else:\n        filename = Path(path).resolve()\n        if not filename.is_file():\n            raise WordlistError(f\"Unable to find wordlist at {path}\")\n\n    if lines is None:\n        return filename\n    else:\n        lines = int(lines)\n        with open(filename) as f:\n            read_lines = f.readlines()\n        cache_key = f\"{filename}:{lines}\"\n        truncated_filename = self.parent_helper.cache_filename(cache_key)\n        with open(truncated_filename, \"w\") as f:\n            for line in read_lines[:lines]:\n                f.write(line)\n        return truncated_filename\n</code></pre>"},{"location":"dev/helpers/wordcloud/","title":"Word Cloud","text":"<p>These are helpers related to BBOT's Word Cloud, a mechanism for storing target-specific keywords that are useful for custom wordlists, etc.</p> <p>Note that these helpers can be invoked directly from <code>self.helpers</code>, e.g.:</p> <pre><code>self.helpers.word_cloud\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.DNSMutator","title":"DNSMutator","text":"<p>               Bases: <code>Mutator</code></p> <p>DNS-specific mutator used by the <code>dnsbrute_mutations</code> module to generate target-specific subdomain mutations.</p> <p>This class extends the Mutator base class to add DNS-specific logic for generating subdomain mutations based on input words. 
It utilizes custom word extraction patterns and a wordninja model trained on DNS-specific data.</p> <p>Examples:</p> <pre><code>&gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n&gt;&gt;&gt; s.start_without_generator()\n&gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n[\n    \"word\",\n    \"word-test\",\n    \"word1\",\n    \"wordtest\",\n    \"www-word\",\n    \"wwwword\"\n]\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class DNSMutator(Mutator):\n    \"\"\"\n    DNS-specific mutator used by the `dnsbrute_mutations` module to generate target-specific subdomain mutations.\n\n    This class extends the Mutator base class to add DNS-specific logic for generating\n    subdomain mutations based on input words. It utilizes custom word extraction patterns\n    and a wordninja model trained on DNS-specific data.\n\n    Examples:\n        &gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n        &gt;&gt;&gt; s.start_without_generator()\n        &gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n        [\n            \"word\",\n            \"word-test\",\n            \"word1\",\n            \"wordtest\",\n            \"www-word\",\n            \"wwwword\"\n        ]\n    \"\"\"\n\n    extract_word_regexes = [\n        re.compile(r, re.I)\n        for r in [\n            r\"[a-z]+\",\n            r\"[a-z_-]+\",\n            r\"[a-z0-9]+\",\n            r\"[a-z0-9_-]+\",\n        ]\n    ]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        wordlist_dir = Path(__file__).parent.parent.parent / \"wordlists\"\n        wordninja_dns_wordlist = wordlist_dir / \"wordninja_dns.txt.gz\"\n        self.model = wordninja.LanguageModel(wordninja_dns_wordlist)\n\n    def mutations(self, words, max_mutations=None):\n        if isinstance(words, str):\n            words = [words]\n        new_words = set()\n        for word in words:\n            for e in extract_words(word, acronyms=False, model=self.model, word_regexes=self.extract_word_regexes):\n                new_words.add(e)\n        return super().mutations(new_words, max_mutations=max_mutations)\n\n    def add_word(self, word):\n        spans = set()\n        mutations = set()\n        for r in self.extract_word_regexes:\n            for match in r.finditer(word):\n                span = match.span()\n                if span not in spans:\n                    spans.add(span)\n        for start, end in spans:\n            match_str = word[start:end]\n            # skip digits\n            if match_str.isdigit():\n                continue\n            before = word[:start]\n            after = word[end:]\n            basic_mutation = (before, None, after)\n            mutations.add(basic_mutation)\n            match_str_split = self.model.split(match_str)\n            if len(match_str_split) &gt; 1:\n                for i, s in enumerate(match_str_split):\n                    if s.isdigit():\n                        continue\n                    split_before = \"\".join(match_str_split[:i])\n                    split_after = \"\".join(match_str_split[i + 1 :])\n                    wordninja_mutation = (before + split_before, None, split_after + after)\n                    mutations.add(wordninja_mutation)\n        for m in mutations:\n            self._add_mutation(m)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.Mutator","title":"Mutator","text":"<p>       
        Bases: <code>dict</code></p> <p>Base class for generating mutations from a list of words. It accumulates words and produces mutations from them.</p> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class Mutator(dict):\n    \"\"\"\n    Base class for generating mutations from a list of words.\n    It accumulates words and produces mutations from them.\n    \"\"\"\n\n    def mutations(self, words, max_mutations=None):\n        mutations = self.top_mutations(max_mutations)\n        ret = set()\n        if isinstance(words, str):\n            words = [words]\n        for word in words:\n            for m in self.mutate(word, mutations=mutations):\n                ret.add(\"\".join(m))\n        return ret\n\n    def mutate(self, word, max_mutations=None, mutations=None):\n        if mutations is None:\n            mutations = self.top_mutations(max_mutations)\n        for mutation, count in mutations.items():\n            ret = []\n            for s in mutation:\n                if s is not None:\n                    ret.append(s)\n                else:\n                    ret.append(word)\n            yield ret\n\n    def top_mutations(self, n=None):\n        if n is not None:\n            return dict(sorted(self.items(), key=lambda x: x[-1], reverse=True)[:n])\n        else:\n            return dict(self)\n\n    def _add_mutation(self, mutation):\n        if None not in mutation:\n            return\n        mutation = tuple([m for m in mutation if m != \"\"])\n        try:\n            self[mutation] += 1\n        except KeyError:\n            self[mutation] = 1\n\n    def add_word(self, word):\n        pass\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud","title":"WordCloud","text":"<p>               Bases: <code>dict</code></p> <p>WordCloud is a specialized dictionary-like class for storing and aggregating words extracted from various data sources such as DNS names and URLs. The class is intended to facilitate the generation of target-specific wordlists and mutations.</p> <p>The WordCloud class can be accessed and manipulated like a standard Python dictionary. 
It also offers additional methods for generating mutations based on the words it contains.</p> <p>Attributes:</p> <ul> <li> <code>parent_helper</code>           \u2013            <p>The parent helper object that provides necessary utilities.</p> </li> <li> <code>devops_mutations</code>           \u2013            <p>A set containing common devops-related mutations, loaded from a file.</p> </li> <li> <code>dns_mutator</code>           \u2013            <p>An instance of the DNSMutator class for generating DNS-based mutations.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n&gt;&gt;&gt; s.start_without_generator()\n&gt;&gt;&gt; print(s.helpers.word_cloud)\n{\n    \"evilcorp\": 2,\n    \"ec\": 2,\n    \"www1\": 1,\n    \"evil\": 2,\n    \"www\": 2,\n    \"w1\": 1,\n    \"corp\": 2,\n    \"1\": 1,\n    \"wt\": 1,\n    \"test\": 1,\n    \"www-test\": 1\n}\n</code></pre> <pre><code>&gt;&gt;&gt; s.helpers.word_cloud.mutations([\"word\"], cloud=True, numbers=0, devops=False, letters=False)\n[\n    [\n        \"1\",\n        \"word\"\n    ],\n    [\n        \"corp\",\n        \"word\"\n    ],\n    [\n        \"ec\",\n        \"word\"\n    ],\n    [\n        \"evil\",\n        \"word\"\n    ],\n    ...\n]\n</code></pre> <pre><code>&gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n[\n    \"word\",\n    \"word-test\",\n    \"word1\",\n    \"wordtest\",\n    \"www-word\",\n    \"wwwword\"\n]\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>class WordCloud(dict):\n    \"\"\"\n    WordCloud is a specialized dictionary-like class for storing and aggregating\n    words extracted from various data sources such as DNS names and URLs. The class\n    is intended to facilitate the generation of target-specific wordlists and mutations.\n\n    The WordCloud class can be accessed and manipulated like a standard Python dictionary.\n    It also offers additional methods for generating mutations based on the words it contains.\n\n    Attributes:\n        parent_helper: The parent helper object that provides necessary utilities.\n        devops_mutations: A set containing common devops-related mutations, loaded from a file.\n        dns_mutator: An instance of the DNSMutator class for generating DNS-based mutations.\n\n    Examples:\n        &gt;&gt;&gt; s = Scanner(\"www1.evilcorp.com\", \"www-test.evilcorp.com\")\n        &gt;&gt;&gt; s.start_without_generator()\n        &gt;&gt;&gt; print(s.helpers.word_cloud)\n        {\n            \"evilcorp\": 2,\n            \"ec\": 2,\n            \"www1\": 1,\n            \"evil\": 2,\n            \"www\": 2,\n            \"w1\": 1,\n            \"corp\": 2,\n            \"1\": 1,\n            \"wt\": 1,\n            \"test\": 1,\n            \"www-test\": 1\n        }\n\n        &gt;&gt;&gt; s.helpers.word_cloud.mutations([\"word\"], cloud=True, numbers=0, devops=False, letters=False)\n        [\n            [\n                \"1\",\n                \"word\"\n            ],\n            [\n                \"corp\",\n                \"word\"\n            ],\n            [\n                \"ec\",\n                \"word\"\n            ],\n            [\n                \"evil\",\n                \"word\"\n            ],\n            ...\n        ]\n\n        &gt;&gt;&gt; s.helpers.word_cloud.dns_mutator.mutations(\"word\")\n        [\n            \"word\",\n            \"word-test\",\n            \"word1\",\n            \"wordtest\",\n            
\"www-word\",\n            \"wwwword\"\n        ]\n    \"\"\"\n\n    def __init__(self, parent_helper, *args, **kwargs):\n        self.parent_helper = parent_helper\n\n        devops_filename = self.parent_helper.wordlist_dir / \"devops_mutations.txt\"\n        self.devops_mutations = set(self.parent_helper.read_file(devops_filename))\n\n        self.dns_mutator = DNSMutator()\n\n        super().__init__(*args, **kwargs)\n\n    def mutations(\n        self, words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True\n    ):\n        \"\"\"\n        Generate various mutations for the given list of words based on different criteria.\n\n        Yields tuples of strings which can be joined on the desired delimiter, e.g. \"-\" or \"_\".\n\n        Args:\n            words (Union[str, Iterable[str]]): A single word or list of words to mutate.\n            devops (bool): Whether to include devops-related mutations.\n            cloud (bool): Whether to include mutations from the word cloud.\n            letters (bool): Whether to include letter-based mutations.\n            numbers (int): The maximum numeric mutations to include.\n            number_padding (int): Padding for numeric mutations.\n            substitute_numbers (bool): Whether to substitute numbers in mutations.\n\n        Yields:\n            tuple: A tuple containing each of the mutation segments.\n        \"\"\"\n        if isinstance(words, str):\n            words = (words,)\n        results = set()\n        for word in words:\n            h = hash(word)\n            if not h in results:\n                results.add(h)\n                yield (word,)\n        if numbers &gt; 0:\n            if substitute_numbers:\n                for word in words:\n                    for number_mutation in self.get_number_mutations(word, n=numbers, padding=number_padding):\n                        h = hash(number_mutation)\n                        if not h in results:\n                            results.add(h)\n                            yield (number_mutation,)\n        for word in words:\n            for modifier in self.modifiers(\n                devops=devops, cloud=cloud, letters=letters, numbers=numbers, number_padding=number_padding\n            ):\n                a = (word, modifier)\n                b = (modifier, word)\n                for _ in (a, b):\n                    h = hash(_)\n                    if h not in results:\n                        results.add(h)\n                        yield _\n\n    def modifiers(self, devops=True, cloud=True, letters=True, numbers=5, number_padding=2):\n        modifiers = set()\n        if devops:\n            modifiers.update(self.devops_mutations)\n        if cloud:\n            modifiers.update(set(self))\n        if letters:\n            modifiers.update(set(string.ascii_lowercase))\n        if numbers &gt; 0:\n            modifiers.update(self.parent_helper.gen_numbers(numbers, number_padding))\n        return modifiers\n\n    def absorb_event(self, event):\n        \"\"\"\n        Absorbs an event from a BBOT scan into the word cloud.\n\n        This method updates the word cloud by extracting words from the given event. 
It aims to avoid including PTR\n        (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.\n\n        Args:\n            event (Event): The event object containing the words to be absorbed into the word cloud.\n        \"\"\"\n        for word in event.words:\n            self.add_word(word)\n        if event.scope_distance == 0 and event.type.startswith(\"DNS_NAME\"):\n            subdomain = tldextract(event.data).subdomain\n            if subdomain and not self.parent_helper.is_ptr(subdomain):\n                for s in subdomain.split(\".\"):\n                    self.dns_mutator.add_word(s)\n\n    def absorb_word(self, word, wordninja=True):\n        \"\"\"\n        Absorbs a word into the word cloud after splitting it using a word extraction algorithm.\n\n        This method splits the input word into smaller meaningful words using word extraction, and then adds each\n        of them to the word cloud. The splitting is done using a predefined algorithm in the parent helper.\n\n        Args:\n            word (str): The word to be split and absorbed into the word cloud.\n            wordninja (bool, optional): If True, word extraction is enabled. Defaults to True.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n            &gt;&gt;&gt; print(self.helpers.word_cloud)\n            {\n                \"blacklantern\": 1,\n                \"black\": 1,\n                \"bl\": 1,\n                \"lantern\": 1\n            }\n        \"\"\"\n        for w in self.parent_helper.extract_words(word, wordninja=wordninja):\n            self.add_word(w)\n\n    def add_word(self, word, lowercase=True):\n        \"\"\"\n        Adds a word to the word cloud.\n\n        This method updates the word cloud by adding a given word. If the word already exists in the cloud,\n        its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.\n\n        Args:\n            word (str): The word to be added to the word cloud.\n            lowercase (bool, optional): If True, the word will be converted to lowercase before adding. Defaults to True.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n            &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n            &gt;&gt;&gt; print(self.helpers.word_cloud)\n            {'example': 2}\n        \"\"\"\n        if lowercase:\n            word = word.lower()\n        try:\n            self[word] += 1\n        except KeyError:\n            self[word] = 1\n\n    def get_number_mutations(self, base, n=5, padding=2):\n        \"\"\"\n        Generates mutations of a base string by modifying the numerical parts or appending numbers.\n\n        This method detects existing numbers in the base string and tries incrementing and decrementing them within a\n        specified range. It also appends numbers at the end or after each word to generate more mutations.\n\n        Args:\n            base (str): The base string to generate mutations from.\n            n (int, optional): The range of numbers to use for incrementing/decrementing. Defaults to 5.\n            padding (int, optional): Zero-pad numbers up to this length. 
Defaults to 2.\n\n        Returns:\n            set: A set of mutated strings based on the base input.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n            {\n                \"www0-test\",\n                \"www1-test\",\n                \"www2-test\",\n                \"www2-test0\",\n                \"www2-test00\",\n                \"www2-test01\",\n                \"www2-test1\",\n                \"www3-test\",\n                \"www4-test\"\n            }\n        \"\"\"\n        results = set()\n\n        # detects numbers and increments/decrements them\n        # e.g. for \"base2_p013\", we would try:\n        # - \"base0_p013\" through \"base12_p013\"\n        # - \"base2_p003\" through \"base2_p023\"\n        # limited to three iterations for sanity's sake\n        for match in list(self.parent_helper.regexes.num_regex.finditer(base))[-3:]:\n            span = match.span()\n            before = base[: span[0]]\n            after = base[span[-1] :]\n            number = base[span[0] : span[-1]]\n            numlen = len(number)\n            maxnum = min(int(\"9\" * numlen), int(number) + n)\n            minnum = max(0, int(number) - n)\n            for i in range(minnum, maxnum + 1):\n                filled_num = str(i).zfill(numlen)\n                results.add(f\"{before}{filled_num}{after}\")\n                if not number.startswith(\"0\"):\n                    results.add(f\"{before}{i}{after}\")\n\n        # appends numbers after each word\n        # e.g., for \"base_www\", we would try:\n        # - \"base1_www\", \"base2_www\", etc.\n        # - \"base_www1\", \"base_www2\", etc.\n        # limited to three iterations for sanity's sake\n        number_suffixes = self.parent_helper.gen_numbers(n, padding)\n        for match in list(self.parent_helper.regexes.word_regex.finditer(base))[-3:]:\n            span = match.span()\n            for suffix in number_suffixes:\n                before = base[: span[-1]]\n                after = base[span[-1] :]\n                # skip if there's already a number\n                if len(after) &gt; 1 and not after[0].isdigit():\n                    results.add(f\"{before}{suffix}{after}\")\n        # basic cases so we don't miss anything\n        for s in number_suffixes:\n            results.add(f\"{base}{s}\")\n            results.add(base)\n\n        return results\n\n    def truncate(self, limit):\n        \"\"\"\n        Truncates the word cloud dictionary to retain only the top `limit` entries based on their occurrence frequencies.\n\n        Args:\n            limit (int): The maximum number of entries to retain in the word cloud.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n            &gt;&gt;&gt; self.helpers.word_cloud\n            {'cherry': 8, 'apple': 5}\n        \"\"\"\n        new_self = dict(self.json(limit=limit))\n        self.clear()\n        self.update(new_self)\n\n    def json(self, limit=None):\n        \"\"\"\n        Returns the word cloud as a sorted OrderedDict, optionally truncated to the top `limit` entries.\n\n        Args:\n            limit (int, optional): The maximum number of entries to include in the returned OrderedDict. 
If None, all entries are included.\n\n        Returns:\n            OrderedDict: A dictionary sorted by word frequencies, potentially truncated to the top `limit` entries.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\n            OrderedDict([('cherry', 8), ('apple', 5)])\n        \"\"\"\n        cloud_sorted = sorted(self.items(), key=lambda x: x[-1], reverse=True)\n        if limit is not None:\n            cloud_sorted = cloud_sorted[:limit]\n        return OrderedDict(cloud_sorted)\n\n    @property\n    def default_filename(self):\n        return self.parent_helper.preset.scan.home / f\"wordcloud.tsv\"\n\n    def save(self, filename=None, limit=None):\n        \"\"\"\n        Saves the word cloud to a file. The cloud can optionally be truncated to the top `limit` entries.\n\n        Args:\n            filename (str, optional): The path to the file where the word cloud will be saved. If None, uses a default filename.\n            limit (int, optional): The maximum number of entries to save to the file. If None, all entries are saved.\n\n        Returns:\n            tuple: A tuple containing a boolean indicating success or failure, and the resolved filename.\n\n        Examples:\n            &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n            &gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n            (True, Path('word_cloud.txt'))\n        \"\"\"\n        if filename is None:\n            filename = self.default_filename\n        else:\n            filename = Path(filename).resolve()\n        try:\n            if not self.parent_helper.mkdir(filename.parent):\n                log.error(f\"Failure creating or error writing to {filename.parent} when saving word cloud\")\n                return\n            if len(self) &gt; 0:\n                log.debug(f\"Saving word cloud to {filename}\")\n                with open(str(filename), mode=\"w\", newline=\"\") as f:\n                    c = csv.writer(f, delimiter=\"\\t\")\n                    for word, count in self.json(limit).items():\n                        c.writerow([count, word])\n                log.debug(f\"Saved word cloud ({len(self):,} words) to {filename}\")\n                return True, filename\n            else:\n                log.debug(f\"No words to save\")\n        except Exception as e:\n            import traceback\n\n            log.warning(f\"Failed to save word cloud to {filename}: {e}\")\n            log.trace(traceback.format_exc())\n        return False, filename\n\n    def load(self, filename=None):\n        \"\"\"\n        Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line\n        or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.\n\n        Args:\n            filename (str, optional): The path to the file from which to load the word cloud. 
If None, uses a default filename.\n        \"\"\"\n        if filename is None:\n            wordcloud_path = self.default_filename\n        else:\n            wordcloud_path = Path(filename).resolve()\n        log.verbose(f\"Loading word cloud from {wordcloud_path}\")\n        try:\n            with open(str(wordcloud_path), newline=\"\") as f:\n                c = csv.reader(f, delimiter=\"\\t\")\n                for row in c:\n                    if len(row) == 1:\n                        self.add_word(row[0])\n                    elif len(row) == 2:\n                        with suppress(Exception):\n                            count, word = row\n                            count = int(count)\n                            self[word] = count\n            if len(self) &gt; 0:\n                log.success(f\"Loaded word cloud ({len(self):,} words) from {wordcloud_path}\")\n        except Exception as e:\n            import traceback\n\n            log_fn = log.debug\n            if filename is not None:\n                log_fn = log.warning\n            log_fn(f\"Failed to load word cloud from {wordcloud_path}: {e}\")\n            if filename is not None:\n                log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.absorb_event","title":"absorb_event","text":"<pre><code>absorb_event(event)\n</code></pre> <p>Absorbs an event from a BBOT scan into the word cloud.</p> <p>This method updates the word cloud by extracting words from the given event. It aims to avoid including PTR (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.</p> <p>Parameters:</p> <ul> <li> <code>event</code>               (<code>Event</code>)           \u2013            <p>The event object containing the words to be absorbed into the word cloud.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def absorb_event(self, event):\n    \"\"\"\n    Absorbs an event from a BBOT scan into the word cloud.\n\n    This method updates the word cloud by extracting words from the given event. It aims to avoid including PTR\n    (Pointer) records, as they tend to produce unhelpful mutations in the word cloud.\n\n    Args:\n        event (Event): The event object containing the words to be absorbed into the word cloud.\n    \"\"\"\n    for word in event.words:\n        self.add_word(word)\n    if event.scope_distance == 0 and event.type.startswith(\"DNS_NAME\"):\n        subdomain = tldextract(event.data).subdomain\n        if subdomain and not self.parent_helper.is_ptr(subdomain):\n            for s in subdomain.split(\".\"):\n                self.dns_mutator.add_word(s)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.absorb_word","title":"absorb_word","text":"<pre><code>absorb_word(word, wordninja=True)\n</code></pre> <p>Absorbs a word into the word cloud after splitting it using a word extraction algorithm.</p> <p>This method splits the input word into smaller meaningful words using word extraction, and then adds each of them to the word cloud. 
The splitting is done using a predefined algorithm in the parent helper.</p> <p>Parameters:</p> <ul> <li> <code>word</code>               (<code>str</code>)           \u2013            <p>The word to be split and absorbed into the word cloud.</p> </li> <li> <code>wordninja</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, word extraction is enabled. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n&gt;&gt;&gt; print(self.helpers.word_cloud)\n{\n    \"blacklantern\": 1,\n    \"black\": 1,\n    \"bl\": 1,\n    \"lantern\": 1\n}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def absorb_word(self, word, wordninja=True):\n    \"\"\"\n    Absorbs a word into the word cloud after splitting it using a word extraction algorithm.\n\n    This method splits the input word into smaller meaningful words using word extraction, and then adds each\n    of them to the word cloud. The splitting is done using a predefined algorithm in the parent helper.\n\n    Args:\n        word (str): The word to be split and absorbed into the word cloud.\n        wordninja (bool, optional): If True, word extraction is enabled. Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.absorb_word(\"blacklantern\")\n        &gt;&gt;&gt; print(self.helpers.word_cloud)\n        {\n            \"blacklantern\": 1,\n            \"black\": 1,\n            \"bl\": 1,\n            \"lantern\": 1\n        }\n    \"\"\"\n    for w in self.parent_helper.extract_words(word, wordninja=wordninja):\n        self.add_word(w)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.add_word","title":"add_word","text":"<pre><code>add_word(word, lowercase=True)\n</code></pre> <p>Adds a word to the word cloud.</p> <p>This method updates the word cloud by adding a given word. If the word already exists in the cloud, its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.</p> <p>Parameters:</p> <ul> <li> <code>word</code>               (<code>str</code>)           \u2013            <p>The word to be added to the word cloud.</p> </li> <li> <code>lowercase</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>If True, the word will be converted to lowercase before adding. Defaults to True.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n&gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n&gt;&gt;&gt; print(self.helpers.word_cloud)\n{'example': 2}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def add_word(self, word, lowercase=True):\n    \"\"\"\n    Adds a word to the word cloud.\n\n    This method updates the word cloud by adding a given word. If the word already exists in the cloud,\n    its frequency count is incremented by 1. Optionally, the word can be converted to lowercase before adding.\n\n    Args:\n        word (str): The word to be added to the word cloud.\n        lowercase (bool, optional): If True, the word will be converted to lowercase before adding. 
Defaults to True.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"Example\")\n        &gt;&gt;&gt; self.helpers.word_cloud.add_word(\"example\")\n        &gt;&gt;&gt; print(self.helpers.word_cloud)\n        {'example': 2}\n    \"\"\"\n    if lowercase:\n        word = word.lower()\n    try:\n        self[word] += 1\n    except KeyError:\n        self[word] = 1\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.get_number_mutations","title":"get_number_mutations","text":"<pre><code>get_number_mutations(base, n=5, padding=2)\n</code></pre> <p>Generates mutations of a base string by modifying the numerical parts or appending numbers.</p> <p>This method detects existing numbers in the base string and tries incrementing and decrementing them within a specified range. It also appends numbers at the end or after each word to generate more mutations.</p> <p>Parameters:</p> <ul> <li> <code>base</code>               (<code>str</code>)           \u2013            <p>The base string to generate mutations from.</p> </li> <li> <code>n</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>The range of numbers to use for incrementing/decrementing. Defaults to 5.</p> </li> <li> <code>padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>Zero-pad numbers up to this length. Defaults to 2.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>set</code>          \u2013            <p>A set of mutated strings based on the base input.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n{\n    \"www0-test\",\n    \"www1-test\",\n    \"www2-test\",\n    \"www2-test0\",\n    \"www2-test00\",\n    \"www2-test01\",\n    \"www2-test1\",\n    \"www3-test\",\n    \"www4-test\"\n}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def get_number_mutations(self, base, n=5, padding=2):\n    \"\"\"\n    Generates mutations of a base string by modifying the numerical parts or appending numbers.\n\n    This method detects existing numbers in the base string and tries incrementing and decrementing them within a\n    specified range. It also appends numbers at the end or after each word to generate more mutations.\n\n    Args:\n        base (str): The base string to generate mutations from.\n        n (int, optional): The range of numbers to use for incrementing/decrementing. Defaults to 5.\n        padding (int, optional): Zero-pad numbers up to this length. Defaults to 2.\n\n    Returns:\n        set: A set of mutated strings based on the base input.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.get_number_mutations(\"www2-test\", n=2)\n        {\n            \"www0-test\",\n            \"www1-test\",\n            \"www2-test\",\n            \"www2-test0\",\n            \"www2-test00\",\n            \"www2-test01\",\n            \"www2-test1\",\n            \"www3-test\",\n            \"www4-test\"\n        }\n    \"\"\"\n    results = set()\n\n    # detects numbers and increments/decrements them\n    # e.g. 
for \"base2_p013\", we would try:\n    # - \"base0_p013\" through \"base12_p013\"\n    # - \"base2_p003\" through \"base2_p023\"\n    # limited to three iterations for sanity's sake\n    for match in list(self.parent_helper.regexes.num_regex.finditer(base))[-3:]:\n        span = match.span()\n        before = base[: span[0]]\n        after = base[span[-1] :]\n        number = base[span[0] : span[-1]]\n        numlen = len(number)\n        maxnum = min(int(\"9\" * numlen), int(number) + n)\n        minnum = max(0, int(number) - n)\n        for i in range(minnum, maxnum + 1):\n            filled_num = str(i).zfill(numlen)\n            results.add(f\"{before}{filled_num}{after}\")\n            if not number.startswith(\"0\"):\n                results.add(f\"{before}{i}{after}\")\n\n    # appends numbers after each word\n    # e.g., for \"base_www\", we would try:\n    # - \"base1_www\", \"base2_www\", etc.\n    # - \"base_www1\", \"base_www2\", etc.\n    # limited to three iterations for sanity's sake\n    number_suffixes = self.parent_helper.gen_numbers(n, padding)\n    for match in list(self.parent_helper.regexes.word_regex.finditer(base))[-3:]:\n        span = match.span()\n        for suffix in number_suffixes:\n            before = base[: span[-1]]\n            after = base[span[-1] :]\n            # skip if there's already a number\n            if len(after) &gt; 1 and not after[0].isdigit():\n                results.add(f\"{before}{suffix}{after}\")\n    # basic cases so we don't miss anything\n    for s in number_suffixes:\n        results.add(f\"{base}{s}\")\n        results.add(base)\n\n    return results\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.json","title":"json","text":"<pre><code>json(limit=None)\n</code></pre> <p>Returns the word cloud as a sorted OrderedDict, optionally truncated to the top <code>limit</code> entries.</p> <p>Parameters:</p> <ul> <li> <code>limit</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum number of entries to include in the returned OrderedDict. If None, all entries are included.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>OrderedDict</code>          \u2013            <p>A dictionary sorted by word frequencies, potentially truncated to the top <code>limit</code> entries.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\nOrderedDict([('cherry', 8), ('apple', 5)])\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def json(self, limit=None):\n    \"\"\"\n    Returns the word cloud as a sorted OrderedDict, optionally truncated to the top `limit` entries.\n\n    Args:\n        limit (int, optional): The maximum number of entries to include in the returned OrderedDict. 
If None, all entries are included.\n\n    Returns:\n        OrderedDict: A dictionary sorted by word frequencies, potentially truncated to the top `limit` entries.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.json(limit=2)\n        OrderedDict([('cherry', 8), ('apple', 5)])\n    \"\"\"\n    cloud_sorted = sorted(self.items(), key=lambda x: x[-1], reverse=True)\n    if limit is not None:\n        cloud_sorted = cloud_sorted[:limit]\n    return OrderedDict(cloud_sorted)\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.load","title":"load","text":"<pre><code>load(filename=None)\n</code></pre> <p>Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>The path to the file from which to load the word cloud. If None, uses a default filename.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def load(self, filename=None):\n    \"\"\"\n    Loads a word cloud from a file. The file can be either a standard wordlist with one entry per line\n    or a .tsv (tab-separated) file where the first row is the count and the second row is the associated entry.\n\n    Args:\n        filename (str, optional): The path to the file from which to load the word cloud. If None, uses a default filename.\n    \"\"\"\n    if filename is None:\n        wordcloud_path = self.default_filename\n    else:\n        wordcloud_path = Path(filename).resolve()\n    log.verbose(f\"Loading word cloud from {wordcloud_path}\")\n    try:\n        with open(str(wordcloud_path), newline=\"\") as f:\n            c = csv.reader(f, delimiter=\"\\t\")\n            for row in c:\n                if len(row) == 1:\n                    self.add_word(row[0])\n                elif len(row) == 2:\n                    with suppress(Exception):\n                        count, word = row\n                        count = int(count)\n                        self[word] = count\n        if len(self) &gt; 0:\n            log.success(f\"Loaded word cloud ({len(self):,} words) from {wordcloud_path}\")\n    except Exception as e:\n        import traceback\n\n        log_fn = log.debug\n        if filename is not None:\n            log_fn = log.warning\n        log_fn(f\"Failed to load word cloud from {wordcloud_path}: {e}\")\n        if filename is not None:\n            log.trace(traceback.format_exc())\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.mutations","title":"mutations","text":"<pre><code>mutations(words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True)\n</code></pre> <p>Generate various mutations for the given list of words based on different criteria.</p> <p>Yields tuples of strings which can be joined on the desired delimiter, e.g. 
\"-\" or \"_\".</p> <p>Parameters:</p> <ul> <li> <code>words</code>               (<code>Union[str, Iterable[str]]</code>)           \u2013            <p>A single word or list of words to mutate.</p> </li> <li> <code>devops</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include devops-related mutations.</p> </li> <li> <code>cloud</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include mutations from the word cloud.</p> </li> <li> <code>letters</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to include letter-based mutations.</p> </li> <li> <code>numbers</code>               (<code>int</code>, default:                   <code>5</code> )           \u2013            <p>The maximum numeric mutations to include.</p> </li> <li> <code>number_padding</code>               (<code>int</code>, default:                   <code>2</code> )           \u2013            <p>Padding for numeric mutations.</p> </li> <li> <code>substitute_numbers</code>               (<code>bool</code>, default:                   <code>True</code> )           \u2013            <p>Whether to substitute numbers in mutations.</p> </li> </ul> <p>Yields:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing each of the mutation segments.</p> </li> </ul> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def mutations(\n    self, words, devops=True, cloud=True, letters=True, numbers=5, number_padding=2, substitute_numbers=True\n):\n    \"\"\"\n    Generate various mutations for the given list of words based on different criteria.\n\n    Yields tuples of strings which can be joined on the desired delimiter, e.g. 
\"-\" or \"_\".\n\n    Args:\n        words (Union[str, Iterable[str]]): A single word or list of words to mutate.\n        devops (bool): Whether to include devops-related mutations.\n        cloud (bool): Whether to include mutations from the word cloud.\n        letters (bool): Whether to include letter-based mutations.\n        numbers (int): The maximum numeric mutations to include.\n        number_padding (int): Padding for numeric mutations.\n        substitute_numbers (bool): Whether to substitute numbers in mutations.\n\n    Yields:\n        tuple: A tuple containing each of the mutation segments.\n    \"\"\"\n    if isinstance(words, str):\n        words = (words,)\n    results = set()\n    for word in words:\n        h = hash(word)\n        if not h in results:\n            results.add(h)\n            yield (word,)\n    if numbers &gt; 0:\n        if substitute_numbers:\n            for word in words:\n                for number_mutation in self.get_number_mutations(word, n=numbers, padding=number_padding):\n                    h = hash(number_mutation)\n                    if not h in results:\n                        results.add(h)\n                        yield (number_mutation,)\n    for word in words:\n        for modifier in self.modifiers(\n            devops=devops, cloud=cloud, letters=letters, numbers=numbers, number_padding=number_padding\n        ):\n            a = (word, modifier)\n            b = (modifier, word)\n            for _ in (a, b):\n                h = hash(_)\n                if h not in results:\n                    results.add(h)\n                    yield _\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.save","title":"save","text":"<pre><code>save(filename=None, limit=None)\n</code></pre> <p>Saves the word cloud to a file. The cloud can optionally be truncated to the top <code>limit</code> entries.</p> <p>Parameters:</p> <ul> <li> <code>filename</code>               (<code>str</code>, default:                   <code>None</code> )           \u2013            <p>The path to the file where the word cloud will be saved. If None, uses a default filename.</p> </li> <li> <code>limit</code>               (<code>int</code>, default:                   <code>None</code> )           \u2013            <p>The maximum number of entries to save to the file. If None, all entries are saved.</p> </li> </ul> <p>Returns:</p> <ul> <li> <code>tuple</code>          \u2013            <p>A tuple containing a boolean indicating success or failure, and the resolved filename.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n(True, Path('word_cloud.txt'))\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def save(self, filename=None, limit=None):\n    \"\"\"\n    Saves the word cloud to a file. The cloud can optionally be truncated to the top `limit` entries.\n\n    Args:\n        filename (str, optional): The path to the file where the word cloud will be saved. If None, uses a default filename.\n        limit (int, optional): The maximum number of entries to save to the file. 
If None, all entries are saved.\n\n    Returns:\n        tuple: A tuple containing a boolean indicating success or failure, and the resolved filename.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.save(filename=\"word_cloud.txt\", limit=2)\n        (True, Path('word_cloud.txt'))\n    \"\"\"\n    if filename is None:\n        filename = self.default_filename\n    else:\n        filename = Path(filename).resolve()\n    try:\n        if not self.parent_helper.mkdir(filename.parent):\n            log.error(f\"Failure creating or error writing to {filename.parent} when saving word cloud\")\n            return\n        if len(self) &gt; 0:\n            log.debug(f\"Saving word cloud to {filename}\")\n            with open(str(filename), mode=\"w\", newline=\"\") as f:\n                c = csv.writer(f, delimiter=\"\\t\")\n                for word, count in self.json(limit).items():\n                    c.writerow([count, word])\n            log.debug(f\"Saved word cloud ({len(self):,} words) to {filename}\")\n            return True, filename\n        else:\n            log.debug(f\"No words to save\")\n    except Exception as e:\n        import traceback\n\n        log.warning(f\"Failed to save word cloud to {filename}: {e}\")\n        log.trace(traceback.format_exc())\n    return False, filename\n</code></pre>"},{"location":"dev/helpers/wordcloud/#bbot.core.helpers.wordcloud.WordCloud.truncate","title":"truncate","text":"<pre><code>truncate(limit)\n</code></pre> <p>Truncates the word cloud dictionary to retain only the top <code>limit</code> entries based on their occurrence frequencies.</p> <p>Parameters:</p> <ul> <li> <code>limit</code>               (<code>int</code>)           \u2013            <p>The maximum number of entries to retain in the word cloud.</p> </li> </ul> <p>Examples:</p> <pre><code>&gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n&gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n&gt;&gt;&gt; self.helpers.word_cloud\n{'cherry': 8, 'apple': 5}\n</code></pre> Source code in <code>bbot/core/helpers/wordcloud.py</code> <pre><code>def truncate(self, limit):\n    \"\"\"\n    Truncates the word cloud dictionary to retain only the top `limit` entries based on their occurrence frequencies.\n\n    Args:\n        limit (int): The maximum number of entries to retain in the word cloud.\n\n    Examples:\n        &gt;&gt;&gt; self.helpers.word_cloud.update({\"apple\": 5, \"banana\": 2, \"cherry\": 8})\n        &gt;&gt;&gt; self.helpers.word_cloud.truncate(2)\n        &gt;&gt;&gt; self.helpers.word_cloud\n        {'cherry': 8, 'apple': 5}\n    \"\"\"\n    new_self = dict(self.json(limit=limit))\n    self.clear()\n    self.update(new_self)\n</code></pre>"},{"location":"modules/custom_yara_rules/","title":"Custom Yara Rules","text":""},{"location":"modules/custom_yara_rules/#overview","title":"Overview","text":"<p>Through the <code>excavate</code> internal module, BBOT supports searching through HTTP response data using custom YARA rules. </p> <p>This feature can be utilized with the command line option <code>--custom-yara-rules</code> or <code>-cy</code>, followed by a file containing the YARA rules.</p> <p>Example:</p> <pre><code>bbot -m httpx --custom-yara-rules=test.yara -t http://example.com/\n</code></pre> <p>Where <code>test.yara</code> is a file on the filesystem. 
The file can contain multiple YARA rules, separated by lines.</p> <p>YARA rules can be quite simple, the simplest example being a single string search:</p> <pre><code>rule find_string {\n    strings:\n        $str1 = \"AAAABBBB\"\n\n    condition:\n        $str1\n}\n</code></pre> <p>To look for multiple strings, and match if any of them were to hit:</p> <pre><code>rule find_string {\n    strings:\n        $str1 = \"AAAABBBB\"\n        $str2 = \"CCCCDDDD\"\n\n    condition:\n        any of them\n}\n</code></pre> <p>One of the most important capabilities is the use of regexes within the rule, as shown in the following example.</p> <pre><code>rule find_AAAABBBB_regex {\n    strings:\n        $regex = /A{1,4}B{1,4}/\n\n    condition:\n        $regex\n}\n</code></pre> <p>Note: YARA uses its own regex engine that is not a 1:1 match with Python regexes. This means many existing regexes will have to be modified before they will work with YARA. The good news is that YARA's regex engine is FAST, immensely faster than Python's!</p> <p>Further discussion of the art of writing complex YARA rules goes far beyond the scope of this documentation. A good place to start learning more is the official YARA documentation.</p> <p>The YARA engine provides plenty of room to make highly complex signatures possible, with various conditional operators available. Multiple signatures can be linked together to create sophisticated detection rules that can identify a wide range of specific content. This flexibility allows the crafting of efficient rules for detecting security vulnerabilities, leveraging logical operators, regular expressions, and other powerful features. Additionally, YARA's modular structure supports easy updates and maintenance of signature sets.</p>"},{"location":"modules/custom_yara_rules/#custom-options","title":"Custom options","text":"<p>BBOT supports the use of a few custom <code>meta</code> attributes within YARA rules, which will alter the behavior of the rule and the post-processing of the results.</p>"},{"location":"modules/custom_yara_rules/#description","title":"description","text":"<p>The description of the rule. Will end up in the description of any produced events if defined.</p> <p>Example with no description provided:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [find_string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com\"} excavate\n</code></pre> <p>Example with the description added:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [AAAABBBB] with description: [contains our test string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com\"}     excavate\n</code></pre> <p>That FINDING was produced with the following signature:</p> <pre><code>rule AAAABBBB {\n\n    meta:\n        description = \"contains our test string\"\n    strings:\n        $str1 = \"AAAABBBB\"\n    condition:\n        $str1\n}\n</code></pre>"},{"location":"modules/custom_yara_rules/#tags","title":"tags","text":"<p>Tags specified with this option will be passed on to any resulting emitted events. 
Tags are provided as a comma-separated string, as shown below:</p> <p>Let's expand on the previous example:</p> <pre><code>rule AAAABBBB {\n\n    meta:\n        description = \"contains our test string\"\n        tags = \"tag1,tag2,tag3\"\n    strings:\n        $str1 = \"AAAABBBB\"\n    condition:\n        $str1\n}\n</code></pre> <p>Now, the BBOT FINDING includes these custom tags, as shown in the following output:</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [AAAABBBB] with description: [contains our test string] Matched via identifier [str1]\", \"host\": \"example.com\", \"url\": \"http://example.com/\"} excavate   (tag1, tag2, tag3)\n</code></pre>"},{"location":"modules/custom_yara_rules/#emit_match","title":"emit_match","text":"<p>When set to True, the contents returned from a successful extraction via a YARA regex will be included in the FINDING event which is emitted.</p> <p>Consider the following example YARA rule:</p> <pre><code>rule SubstackLink\n{\n    meta:\n        description = \"contains a Substack link\"\n        emit_match = true\n    strings:\n        $substack_link = /https?:\\/\\/[a-zA-Z0-9.-]+\\.substack\\.com/\n    condition:\n        $substack_link\n}\n</code></pre> <p>When run against the Black Lantern Security homepage with the following BBOT command:</p> <pre><code>bbot -m httpx --custom-yara-rules=substack.yara -t http://www.blacklanternsecurity.com/\n</code></pre> <p>We get the following result. Note that the finding now contains the actual link that was identified with the regex.</p> <pre><code>[FINDING] {\"description\": \"Custom Yara Rule [SubstackLink] with description: [contains a Substack link] Matched via identifier [substack_link] and extracted [https://blacklanternsecurity.substack.com]\", \"host\": \"www.blacklanternsecurity.com\", \"url\": \"https://www.blacklanternsecurity.com/\"}    excavate\n</code></pre>"},{"location":"modules/internal_modules/","title":"List of Modules","text":""},{"location":"modules/internal_modules/#what-are-internal-modules","title":"What are internal modules?","text":"<p>Internal modules are just like regular modules, except that they run all the time. They do not have to be explicitly enabled. They can, however, be explicitly disabled if needed.</p> <p>Turning them off is simple: each has a root-level config option which can be set to False to disable it:</p> <pre><code># Infer certain events from others, e.g. IPs from IP ranges, DNS_NAMEs from URLs, etc.\nspeculate: True\n# Passively search event data for URLs, hostnames, emails, etc.\nexcavate: True\n# Summarize activity at the end of a scan\naggregate: True\n# DNS resolution\ndnsresolve: True\n# Cloud provider tagging\ncloudcheck: True\n</code></pre>
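<p>The same switches can also be flipped for a single scan from the command line. The following is a minimal sketch, assuming BBOT's <code>-c</code>/<code>--config</code> option accepts <code>key=value</code> overrides for these root-level settings (the keys are the same ones shown in the config snippet above):</p> <pre><code># hypothetical one-off scan with speculate and excavate turned off\nbbot -t example.com -c speculate=false excavate=false\n</code></pre>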
<p>These modules execute core functionality that is normally essential for a typical BBOT scan. Let's take a quick look at each one's functionality:</p>"},{"location":"modules/internal_modules/#aggregate","title":"aggregate","text":"<p>Summarize statistics at the end of a scan. Disable if you don't want to see this table.</p>"},{"location":"modules/internal_modules/#cloud","title":"cloud","text":"<p>The cloud module looks at events and tries to determine if they are associated with a cloud provider and tags them as such, and can also identify certain cloud resources.</p>"},{"location":"modules/internal_modules/#dns","title":"dns","text":"<p>The DNS internal module controls the basic DNS resolution that BBOT performs, and all of the supporting machinery like wildcard detection, etc.</p>"},{"location":"modules/internal_modules/#excavate","title":"excavate","text":"<p>The excavate internal module is designed to passively extract valuable information from HTTP response data. It primarily uses YARA regexes to extract information, with various events being produced from the post-processing of the YARA results.</p> <p>Here is a summary of the data it produces:</p>"},{"location":"modules/internal_modules/#urls","title":"URLs","text":"<p>By extracting URLs from all visited pages, this is actually already half of a web-spider. The other half is recursion, which is baked into BBOT from the ground up. Therefore, protections are in place by default in the form of <code>web_spider_distance</code> and <code>web_spider_depth</code> settings. These settings govern restrictions to URLs recursively harvested from HTTP responses, preventing endless runaway scans. However, in the right situation the controlled use of a web-spider is extremely powerful.</p>
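<p>To deliberately loosen those guardrails for a small, controlled crawl, the settings can be raised at scan time. This is a sketch only, assuming the setting names quoted above are accepted as config overrides; check your BBOT version's default config for the exact keys:</p> <pre><code># hypothetical: let the spider follow links up to two hops away and four levels deep\nbbot -t example.com -m httpx -c web_spider_distance=2 web_spider_depth=4\n</code></pre>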
"},{"location":"modules/internal_modules/#parameter-extraction","title":"Parameter Extraction","text":"<p>The parameter extraction functionality identifies and extracts key web parameters from HTTP responses, and produces <code>WEB_PARAMETER</code> events. This includes parameters found in GET and POST requests, HTML forms, and jQuery requests. Currently, these are only used by the <code>hunt</code> module, and by the <code>paramminer</code> modules, to a limited degree. However, future functionality will make extensive use of these events.</p>"},{"location":"modules/internal_modules/#email-extraction","title":"Email Extraction","text":"<p>Detects email addresses within HTTP_RESPONSE data.</p>"},{"location":"modules/internal_modules/#error-detection","title":"Error Detection","text":"<p>Scans for verbose error messages in HTTP responses and raw text data. By identifying specific error signatures from various programming languages and frameworks, this feature helps uncover misconfigurations, debugging information, and potential vulnerabilities. This insight is invaluable for identifying weak points or anomalies in web applications.</p>"},{"location":"modules/internal_modules/#content-security-policy-csp-extraction","title":"Content Security Policy (CSP) Extraction","text":"<p>The CSP extraction capability focuses on extracting domains from Content-Security-Policy headers. By analyzing these headers, BBOT can identify additional domains which can get fed back into the scan.</p>"},{"location":"modules/internal_modules/#serialization-detection","title":"Serialization Detection","text":"<p>Serialized objects are a common source of serious security vulnerabilities. Excavate aims to detect those used in Java, .NET, and PHP applications.</p>"},{"location":"modules/internal_modules/#functionality-detection","title":"Functionality Detection","text":"<p>Looks for specific web functionalities such as file upload fields and WSDL URLs. By identifying these elements, BBOT can pinpoint areas of the application that may require further scrutiny for security vulnerabilities.</p>"},{"location":"modules/internal_modules/#non-http-scheme-detection","title":"Non-HTTP Scheme Detection","text":"<p>The non-HTTP scheme detection capability extracts URLs with non-HTTP schemes, such as ftp, mailto, and javascript. By identifying these URLs, BBOT can uncover additional vectors for attack or information leakage.</p>"},{"location":"modules/internal_modules/#custom-yara-rules","title":"Custom Yara Rules","text":"<p>Excavate supports the use of custom YARA rules, which will be added to the other rules before the scan starts. For more info, view this.</p>"},{"location":"modules/internal_modules/#speculate","title":"speculate","text":"<p>Speculate is all about inferring one data type from another, particularly when certain tools like port scanners are not enabled. This is essential functionality for most BBOT scans, allowing for the discovery of web resources when starting with a DNS-only target list without a port scanner. It bridges gaps in the data, providing a more comprehensive view of the target by leveraging existing information.</p> <ul> <li>IP_RANGE: Converts an IP range into individual IP addresses and emits them as IP_ADDRESS events.</li> <li>DNS_NAME: Generates parent domains from DNS names.</li> <li>URL and URL_UNVERIFIED: Infers open TCP ports from URLs and speculates on sub-directory URLs.</li> <li>General URL Speculation: Emits URL_UNVERIFIED events for URLs not already in the event's history.</li> <li>IP_ADDRESS / DNS_NAME: Infers open TCP ports if active port scanning is not enabled.</li> <li>ORG_STUB: Derives organization stubs from TLDs, social stubs, or Azure tenant names and emits them as ORG_STUB events.</li> <li>USERNAME: Converts usernames to email addresses if they validate as such.</li> </ul>"},{"location":"modules/list_of_modules/","title":"List of Modules","text":"Module Type Needs API Key Description Flags Consumed Events Produced Events Author Created Date ajaxpro scan No Check for potentially vulnerable Ajaxpro instances active, safe, web-thorough HTTP_RESPONSE, URL FINDING, VULNERABILITY @liquidsec 2024-01-18 baddns scan No Check hosts for domain/subdomain takeovers active, baddns, cloud-enum, safe, subdomain-hijack, web-basic DNS_NAME, DNS_NAME_UNRESOLVED FINDING, VULNERABILITY @liquidsec 2024-01-18 baddns_direct scan No Check for unusual subdomain / service takeover edge cases that require direct detection active, baddns, cloud-enum, safe, subdomain-enum STORAGE_BUCKET, URL FINDING, VULNERABILITY @liquidsec 2024-01-29 baddns_zone scan No Check hosts for DNS zone transfers and NSEC walks active, baddns, cloud-enum, safe, subdomain-enum DNS_NAME FINDING, VULNERABILITY @liquidsec 2024-01-29 badsecrets scan No Library for detecting known or weak secrets across many web frameworks active, safe, web-basic HTTP_RESPONSE FINDING, TECHNOLOGY, VULNERABILITY @liquidsec 2022-11-19 bucket_amazon scan No Check for S3 buckets related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bucket_azure scan No Check for Azure storage blobs related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bucket_digitalocean scan No Check for DigitalOcean spaces related to target active, cloud-enum, safe, slow, web-thorough DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 
2022-11-08 bucket_firebase scan No Check for open Firebase databases related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2023-03-20 bucket_google scan No Check for Google object storage related to target active, cloud-enum, safe, web-basic DNS_NAME, STORAGE_BUCKET FINDING, STORAGE_BUCKET @TheTechromancer 2022-11-04 bypass403 scan No Check 403 pages for common bypasses active, aggressive, web-thorough URL FINDING @liquidsec 2022-07-05 dastardly scan No Lightweight web application security scanner active, aggressive, deadly, slow, web-thorough HTTP_RESPONSE FINDING, VULNERABILITY @domwhewell-sage 2023-12-11 dnsbrute scan No Brute-force subdomains with massdns + static wordlist active, aggressive, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-04-24 dnsbrute_mutations scan No Brute-force subdomains with massdns + target-specific mutations active, aggressive, slow, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-04-25 dnscommonsrv scan No Check for common SRV records active, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-05-15 dotnetnuke scan No Scan for critical DotNetNuke (DNN) vulnerabilities active, aggressive, web-thorough HTTP_RESPONSE TECHNOLOGY, VULNERABILITY @liquidsec 2023-11-21 ffuf scan No A fast web fuzzer written in Go active, aggressive, deadly URL URL_UNVERIFIED @liquidsec 2022-04-10 ffuf_shortnames scan No Use ffuf in combination IIS shortnames active, aggressive, iis-shortnames, web-thorough URL_HINT URL_UNVERIFIED @liquidsec 2022-07-05 filedownload scan No Download common filetypes such as PDF, DOCX, PPTX, etc. active, safe, web-basic HTTP_RESPONSE, URL_UNVERIFIED FILESYSTEM @TheTechromancer 2023-10-11 fingerprintx scan No Fingerprint exposed services like RDP, SSH, MySQL, etc. active, safe, service-enum, slow OPEN_TCP_PORT PROTOCOL @TheTechromancer 2023-01-30 generic_ssrf scan No Check for generic SSRFs active, aggressive, web-thorough URL VULNERABILITY @liquidsec 2022-07-30 git scan No Check for exposed .git repositories active, code-enum, safe, web-basic URL FINDING @TheTechromancer 2023-05-30 gitlab scan No Detect GitLab instances and query them for repositories active, code-enum, safe HTTP_RESPONSE, SOCIAL, TECHNOLOGY CODE_REPOSITORY, FINDING, SOCIAL, TECHNOLOGY @TheTechromancer 2024-03-11 gowitness scan No Take screenshots of webpages active, safe, web-screenshots SOCIAL, URL TECHNOLOGY, URL, URL_UNVERIFIED, WEBSCREENSHOT @TheTechromancer 2022-07-08 host_header scan No Try common HTTP Host header spoofing techniques active, aggressive, web-thorough HTTP_RESPONSE FINDING @liquidsec 2022-07-27 httpx scan No Visit webpages. 
Many other modules rely on httpx active, cloud-enum, safe, social-enum, subdomain-enum, web-basic OPEN_TCP_PORT, URL, URL_UNVERIFIED HTTP_RESPONSE, URL @TheTechromancer 2022-07-08 hunt scan No Watch for commonly-exploitable HTTP parameters active, safe, web-thorough WEB_PARAMETER FINDING @liquidsec 2022-07-20 iis_shortnames scan No Check for IIS shortname vulnerability active, iis-shortnames, safe, web-basic URL URL_HINT @liquidsec 2022-04-15 newsletters scan No Searches for Newsletter Submission Entry Fields on Websites active, safe HTTP_RESPONSE FINDING @stryker2k2 2024-02-02 ntlm scan No Watch for HTTP endpoints that support NTLM authentication active, safe, web-basic HTTP_RESPONSE, URL DNS_NAME, FINDING @liquidsec 2022-07-25 nuclei scan No Fast and customisable vulnerability scanner active, aggressive, deadly URL FINDING, TECHNOLOGY, VULNERABILITY @TheTechromancer 2022-03-12 oauth scan No Enumerate OAUTH and OpenID Connect services active, affiliates, cloud-enum, safe, subdomain-enum, web-basic DNS_NAME, URL_UNVERIFIED DNS_NAME @TheTechromancer 2023-07-12 paramminer_cookies scan No Smart brute-force to check for common HTTP cookie parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER FINDING, WEB_PARAMETER @liquidsec 2022-06-27 paramminer_getparams scan No Use smart brute-force to check for common HTTP GET parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER FINDING, WEB_PARAMETER @liquidsec 2022-06-28 paramminer_headers scan No Use smart brute-force to check for common HTTP header parameters active, aggressive, slow, web-paramminer HTTP_RESPONSE, WEB_PARAMETER WEB_PARAMETER @liquidsec 2022-04-15 portscan scan No Port scan with masscan. By default, scans top 100 ports. active, portscan, safe DNS_NAME, IP_ADDRESS, IP_RANGE OPEN_TCP_PORT @TheTechromancer 2024-05-15 robots scan No Look for and parse robots.txt active, safe, web-basic URL URL_UNVERIFIED @liquidsec 2023-02-01 secretsdb scan No Detect common secrets with secrets-patterns-db active, safe, web-basic HTTP_RESPONSE FINDING @TheTechromancer 2023-03-17 securitytxt scan No Check for security.txt content active, cloud-enum, safe, subdomain-enum, web-basic DNS_NAME EMAIL_ADDRESS, URL_UNVERIFIED @colin-stubbs 2024-05-26 smuggler scan No Check for HTTP smuggling active, aggressive, slow, web-thorough URL FINDING @liquidsec 2022-07-06 sslcert scan No Visit open ports and retrieve SSL certificates active, affiliates, email-enum, safe, subdomain-enum, web-basic OPEN_TCP_PORT DNS_NAME, EMAIL_ADDRESS @TheTechromancer 2022-03-30 telerik scan No Scan for critical Telerik vulnerabilities active, aggressive, web-thorough HTTP_RESPONSE, URL FINDING, VULNERABILITY @liquidsec 2022-04-10 url_manipulation scan No Attempt to identify URL parsing/routing based vulnerabilities active, aggressive, web-thorough URL FINDING @liquidsec 2022-09-27 vhost scan No Fuzz for virtual hosts active, aggressive, deadly, slow URL DNS_NAME, VHOST @liquidsec 2022-05-02 wafw00f scan No Web Application Firewall Fingerprinting Tool active, aggressive URL WAF @liquidsec 2023-02-15 wappalyzer scan No Extract technologies from web responses active, safe, web-basic HTTP_RESPONSE TECHNOLOGY @liquidsec 2022-04-15 wpscan scan No Wordpress security scanner. Highly recommended to use an API key for better results. 
active, aggressive HTTP_RESPONSE, TECHNOLOGY FINDING, TECHNOLOGY, URL_UNVERIFIED, VULNERABILITY @domwhewell-sage 2024-05-29 affiliates scan No Summarize affiliate domains at the end of a scan affiliates, passive, report, safe * @TheTechromancer 2022-07-25 anubisdb scan No Query jldc.me's database for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-10-04 apkpure scan No Download android applications from apkpure.com code-enum, passive, safe MOBILE_APP FILESYSTEM @domwhewell-sage 2024-10-11 asn scan No Query ripe and bgpview.io for ASNs passive, report, safe, subdomain-enum IP_ADDRESS ASN @TheTechromancer 2022-07-25 azure_realm scan No Retrieves the \"AuthURL\" from login.microsoftonline.com/getuserrealm affiliates, cloud-enum, passive, safe, subdomain-enum, web-basic DNS_NAME URL_UNVERIFIED @TheTechromancer 2023-07-12 azure_tenant scan No Query Azure for tenant sister domains affiliates, cloud-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-07-04 bevigil scan Yes Retrieve OSINT data from mobile applications using BeVigil passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @alt-glitch 2022-10-26 binaryedge scan Yes Query the BinaryEdge API passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-17 bucket_file_enum scan No Works in conjunction with the filedownload module to download files from open storage buckets. Currently supported cloud providers: AWS, DigitalOcean cloud-enum, passive, safe STORAGE_BUCKET URL_UNVERIFIED @TheTechromancer 2023-11-14 bufferoverrun scan Yes Query BufferOverrun's TLS API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2024-10-23 builtwith scan Yes Query Builtwith.com for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-23 c99 scan Yes Query the C99 API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-08 censys scan Yes Query the Censys API passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-04 certspotter scan No Query Certspotter's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-28 chaos scan Yes Query ProjectDiscovery's Chaos API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-14 code_repository scan No Look for code repository links in webpages code-enum, passive, safe URL_UNVERIFIED CODE_REPOSITORY @domwhewell-sage 2024-05-15 columbus scan No Query the Columbus Project API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-06-01 credshed scan Yes Send queries to your own credshed server to check for known credentials of your targets passive, safe DNS_NAME EMAIL_ADDRESS, HASHED_PASSWORD, PASSWORD, USERNAME @SpamFaux 2023-10-12 crt scan No Query crt.sh (certificate transparency) for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-05-13 dehashed scan Yes Execute queries against dehashed.com for exposed credentials email-enum, passive, safe DNS_NAME EMAIL_ADDRESS, HASHED_PASSWORD, PASSWORD, USERNAME @SpamFaux 2023-10-12 digitorus scan No Query certificatedetails.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-25 dnsbimi scan No Check DNS_NAME's for BIMI records to find image and certificate hosting URL's cloud-enum, passive, safe, subdomain-enum DNS_NAME RAW_DNS_RECORD, URL_UNVERIFIED @colin-stubbs 2024-11-15 dnscaa scan No Check for CAA 
records email-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME, EMAIL_ADDRESS, URL_UNVERIFIED @colin-stubbs 2024-05-26 dnsdumpster scan No Query dnsdumpster for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-03-12 docker_pull scan No Download images from a docker repository code-enum, passive, safe, slow CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-03-24 dockerhub scan No Search for docker repositories of discovered orgs/usernames code-enum, passive, safe ORG_STUB, SOCIAL CODE_REPOSITORY, SOCIAL, URL_UNVERIFIED @domwhewell-sage 2024-03-12 emailformat scan No Query email-format.com for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-07-11 extractous scan No Module to extract data from files passive, safe FILESYSTEM RAW_TEXT @domwhewell-sage 2024-06-03 fullhunt scan Yes Query the fullhunt.io API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 git_clone scan No Clone code github repositories code-enum, passive, safe, slow CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-03-08 github_codesearch scan Yes Query Github's API for code containing the target domain name code-enum, passive, safe, subdomain-enum DNS_NAME CODE_REPOSITORY, URL_UNVERIFIED @domwhewell-sage 2023-12-14 github_org scan No Query Github's API for organization and member repositories code-enum, passive, safe, subdomain-enum ORG_STUB, SOCIAL CODE_REPOSITORY @domwhewell-sage 2023-12-14 github_workflows scan No Download a github repositories workflow logs and workflow artifacts code-enum, passive, safe CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-04-29 google_playstore scan No Search for android applications on play.google.com code-enum, passive, safe CODE_REPOSITORY, ORG_STUB MOBILE_APP @domwhewell-sage 2024-10-08 hackertarget scan No Query the hackertarget.com API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-28 hunterio scan Yes Query hunter.io for emails email-enum, passive, safe, subdomain-enum DNS_NAME DNS_NAME, EMAIL_ADDRESS, URL_UNVERIFIED @TheTechromancer 2022-04-25 internetdb scan No Query Shodan's InternetDB for open ports, hostnames, technologies, and vulnerabilities passive, portscan, safe, subdomain-enum DNS_NAME, IP_ADDRESS DNS_NAME, FINDING, OPEN_TCP_PORT, TECHNOLOGY, VULNERABILITY @TheTechromancer 2023-12-22 ip2location scan Yes Query IP2location.io's API for geolocation information. 
passive, safe IP_ADDRESS GEOLOCATION @TheTechromancer 2023-09-12 ipneighbor scan No Look beside IPs in their surrounding subnet aggressive, passive, subdomain-enum IP_ADDRESS IP_ADDRESS @TheTechromancer 2022-06-08 ipstack scan Yes Query IPStack's GeoIP API passive, safe IP_ADDRESS GEOLOCATION @tycoonslive 2022-11-26 jadx scan No Decompile APKs and XAPKs using JADX passive, safe FILESYSTEM FILESYSTEM @domwhewell-sage 2024-11-04 leakix scan No Query leakix.net for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-11 myssl scan No Query myssl.com's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-10 otx scan No Query otx.alienvault.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 passivetotal scan Yes Query the PassiveTotal API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-08 pgp scan No Query common PGP servers for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-08-10 postman scan No Query Postman's API for related workspaces, collections, requests and download them code-enum, passive, safe, subdomain-enum ORG_STUB, SOCIAL CODE_REPOSITORY @domwhewell-sage 2024-09-07 postman_download scan No Download workspaces, collections, requests from Postman code-enum, passive, safe, subdomain-enum CODE_REPOSITORY FILESYSTEM @domwhewell-sage 2024-09-07 rapiddns scan No Query rapiddns.io for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-24 securitytrails scan Yes Query the SecurityTrails API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-03 shodan_dns scan Yes Query Shodan for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-03 sitedossier scan No Query sitedossier.com for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-08-04 skymem scan No Query skymem.info for email addresses email-enum, passive, safe DNS_NAME EMAIL_ADDRESS @TheTechromancer 2022-07-11 social scan No Look for social media links in webpages passive, safe, social-enum URL_UNVERIFIED SOCIAL @TheTechromancer 2023-03-28 subdomaincenter scan No Query subdomain.center's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2023-07-26 subdomainradar scan Yes Query the Subdomain API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-07-08 trickest scan Yes Query Trickest's API for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @amiremami 2024-07-27 trufflehog scan No TruffleHog is a tool for finding credentials code-enum, passive, safe CODE_REPOSITORY, FILESYSTEM FINDING, VULNERABILITY @domwhewell-sage 2024-03-12 urlscan scan No Query urlscan.io for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @TheTechromancer 2022-06-09 viewdns scan No Query viewdns.info's reverse whois for related domains affiliates, passive, safe DNS_NAME DNS_NAME @TheTechromancer 2022-07-04 virustotal scan Yes Query VirusTotal's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-25 wayback scan No Query archive.org's API for subdomains passive, safe, subdomain-enum DNS_NAME DNS_NAME, URL_UNVERIFIED @liquidsec 2022-04-01 zoomeye scan Yes Query ZoomEye's API for subdomains affiliates, passive, safe, subdomain-enum DNS_NAME DNS_NAME @TheTechromancer 2022-08-03 asset_inventory 
output No Merge hosts, open ports, technologies, findings, etc. into a single asset inventory CSV DNS_NAME, FINDING, HTTP_RESPONSE, IP_ADDRESS, OPEN_TCP_PORT, TECHNOLOGY, URL, VULNERABILITY, WAF IP_ADDRESS, OPEN_TCP_PORT @liquidsec 2022-09-30 csv output No Output to CSV * @TheTechromancer 2022-04-07 discord output No Message a Discord channel when certain events are encountered * @TheTechromancer 2023-08-14 emails output No Output any email addresses found belonging to the target domain email-enum EMAIL_ADDRESS @domwhewell-sage 2023-12-23 http output No Send every event to a custom URL via a web request * @TheTechromancer 2022-04-13 json output No Output to Newline-Delimited JSON (NDJSON) * @TheTechromancer 2022-04-07 neo4j output No Output to Neo4j * @TheTechromancer 2022-04-07 postgres output No Output scan data to a PostgreSQL database * python output No Output via Python API * @TheTechromancer 2022-09-13 slack output No Message a Slack channel when certain events are encountered * @TheTechromancer 2023-08-14 splunk output No Send every event to a splunk instance through HTTP Event Collector * @w0Tx 2024-02-17 sqlite output No Output scan data to a SQLite database * stdout output No Output to text * subdomains output No Output only resolved, in-scope subdomains subdomain-enum DNS_NAME, DNS_NAME_UNRESOLVED @TheTechromancer 2023-07-31 teams output No Message a Teams channel when certain events are encountered * @TheTechromancer 2023-08-14 txt output No Output to text * web_report output No Create a markdown report with web assets FINDING, TECHNOLOGY, URL, VHOST, VULNERABILITY @liquidsec 2023-02-08 websocket output No Output to websockets * @TheTechromancer 2022-04-15 cloudcheck internal No Tag events by cloud provider, identify cloud resources like storage buckets * dnsresolve internal No * aggregate internal No Summarize statistics at the end of a scan passive, safe @TheTechromancer 2022-07-25 excavate internal No Passively extract juicy tidbits from scan data passive HTTP_RESPONSE, RAW_TEXT URL_UNVERIFIED, WEB_PARAMETER @liquidsec 2022-06-27 speculate internal No Derive certain event types from others by common sense passive AZURE_TENANT, DNS_NAME, DNS_NAME_UNRESOLVED, HTTP_RESPONSE, IP_ADDRESS, IP_RANGE, SOCIAL, STORAGE_BUCKET, URL, URL_UNVERIFIED, USERNAME DNS_NAME, FINDING, IP_ADDRESS, OPEN_TCP_PORT, ORG_STUB @liquidsec 2022-05-03 <p>For a list of module config options, see Module Options.</p>"},{"location":"modules/nuclei/","title":"Nuclei","text":""},{"location":"modules/nuclei/#overview","title":"Overview","text":"<p>BBOT integrates with Nuclei, an open-source web vulnerability scanner by Project Discovery. This is one of the ways BBOT makes it possible to go from a single target domain/IP all the way to confirmed vulnerabilities, in one scan. </p> <p></p> <ul> <li>The BBOT Nuclei module ingests [URL] events and emits events of type [VULNERABILITY] or [FINDING]</li> <li>Vulnerabilities will inherit their severity from the Nuclei templates</li> <li>Nuclei templates of severity INFO will be emitted as [FINDING] events</li> </ul>"},{"location":"modules/nuclei/#default-behavior","title":"Default Behavior","text":"<ul> <li>By default, only \"directory URLs\" (URLs ending in a slash) will be scanned, but ALL templates will be used (BE CAREFUL!)</li> <li>Because it's so aggressive, Nuclei is considered a deadly module. 
This means you need to use the flag --allow-deadly to turn it on.</li> </ul>"},{"location":"modules/nuclei/#specifying-custom-templates","title":"Specifying custom templates","text":"<p>You can specify individual Nuclei templates by setting <code>modules.nuclei.templates</code> to a comma-separated list of their file paths:</p> <pre><code>bbot -m nuclei -c modules.nuclei.templates=http/takeovers/airee-takeover.yaml,http/takeovers/cargo-takeover.yaml\n</code></pre> <p>...or via the config:</p> <pre><code>modules:\n  nuclei:\n    templates: http/takeovers/airee-takeover.yaml,http/takeovers/cargo-takeover.yaml\n</code></pre>"},{"location":"modules/nuclei/#configuration-and-options","title":"Configuration and Options","text":"<p>The Nuclei module has many configuration options:</p> Config Option Type Description Default modules.nuclei.batch_size int Number of targets to send to Nuclei per batch (default 200) 200 modules.nuclei.budget int Used in budget mode to set the number of requests which will be allotted to the nuclei scan 1 modules.nuclei.concurrency int maximum number of templates to be executed in parallel (default 25) 25 modules.nuclei.directory_only bool Filter out 'file' URL event (default True) True modules.nuclei.etags str tags to exclude from the scan modules.nuclei.mode str manual | technology | severe | budget. Technology: Only activate based on technology events that match nuclei tags (nuclei -as mode). Manual (DEFAULT): Fully manual settings. Severe: Only critical and high severity templates without intrusive. Budget: Limit Nuclei to a specified number of HTTP requests manual modules.nuclei.ratelimit int maximum number of requests to send per second (default 150) 150 modules.nuclei.retries int number of times to retry a failed request (default 0) 0 modules.nuclei.severity str Filter based on severity field available in the template. modules.nuclei.silent bool Don't display nuclei's banner or status messages False modules.nuclei.tags str execute a subset of templates that contain the provided tags modules.nuclei.templates str template or template directory paths to include in the scan modules.nuclei.version str nuclei version 3.3.5 <p>Most of these you probably will NOT want to change. In particular, we advise against changing the version of Nuclei, as it's possible the latest version won't work right with BBOT.</p> <p>We also do not recommend disabling directory_only mode, as this will cause Nuclei to process every URL. Because BBOT is recursive, this can get very out-of-hand very quickly, depending on which other modules are in use.</p>"},{"location":"modules/nuclei/#modes","title":"Modes","text":"<p>The Nuclei module's modes are mainly there to help you limit the number of templates you are scanning with, to make your scans quicker. </p>"},{"location":"modules/nuclei/#manual","title":"Manual","text":"<p>This is the default setting, and will use all templates. However, if you're looking to do something particular, you might pair this with some of the pass-through options described below.</p>"},{"location":"modules/nuclei/#severe","title":"Severe","text":"<p>severe mode uses only high/critical severity templates. It also excludes the intrusive tag. This is intended to be a shortcut for times when you need to rapidly identify high-severity vulnerabilities but can't afford the full scan. 
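</p> <p>For example, a quick high-severity-only pass might look like this (a minimal sketch; <code>evilcorp.com</code> is a placeholder target):</p> <pre><code># only run high/critical, non-intrusive Nuclei templates\nbbot -t evilcorp.com -m nuclei --allow-deadly -c modules.nuclei.mode=severe\n</code></pre> <p>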
Because most templates are INFO, LOW, or MEDIUM, your scan will finish much faster.</p>"},{"location":"modules/nuclei/#technology","title":"Technology","text":"<p>This is equivalent to the Nuclei '-as' scan option. It only uses templates that match detected technologies, using wappalyzer-based signatures. This can be a nice way to run a light-weight scan that still has a chance to find some good vulnerabilities.</p>"},{"location":"modules/nuclei/#budget","title":"Budget","text":"<p>Budget mode is unique to BBOT.</p> <p>For larger scans with thousands of targets, doing a FULL Nuclei scan (1000s of requests) for each is not realistic. As an alternative to the other modes, you can take advantage of Nuclei's \"collapsible\" template feature. </p> <p>For only the cost of one (or more) \"extra\" request(s) per host, it can activate several hundred templates. These are templates which happen to look at a BaseUrl, and typically look for a specific string or other attribute. Nuclei is smart about reusing the request data when it can, and we can use this to our advantage. </p> <p>The budget parameter is the number of extra requests per host you are willing to send to \"feed\" Nuclei templates (defaults to 1). This is useful for those times when vulnerability scanning isn't the main focus, but you still want to look for easy wins.</p> <p>Of course, there is a rapidly diminishing return when you set the value to more than a handful. Eventually, each additional point of budget activates only one additional template. However, in the 1-10 range there is a lot of value. This graphic should give you a rough visual idea of this concept.</p> <p></p>"},{"location":"modules/nuclei/#nuclei-pass-through-options","title":"Nuclei pass-through options","text":"<p>Most of the rest of the options are usually passed straight through to Nuclei when it's executed. You can do things like set specific tags to include (or exclude with etags), exactly as you would with Nuclei directly. You can also limit the templates with severity.</p> <p>The ratelimit and concurrency settings default to the same values that Nuclei uses. These are relatively sane settings, but if you are in a sensitive environment it can certainly help to turn them down.</p> <p>templates allows you to set your own templates directory. This can be very useful if you have your own custom templates that you want to use with BBOT.</p>"},{"location":"modules/nuclei/#example-commands","title":"Example Commands","text":"<pre><code># Scan a SINGLE target with a basic port scan and web modules\nbbot -f web-basic -m portscan nuclei --allow-deadly -t app.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets\nbbot -f web-basic -m portscan nuclei --allow-deadly -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets while performing subdomain enumeration\nbbot -f subdomain-enum web-basic -m portscan nuclei --allow-deadly -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre> <pre><code># Scanning MULTIPLE targets on a BUDGET\nbbot -f subdomain-enum web-basic -m portscan nuclei --allow-deadly -c modules.nuclei.mode=budget -t app1.evilcorp.com app2.evilcorp.com app3.evilcorp.com\n</code></pre>"},{"location":"scanning/","title":"Scanning Overview","text":""},{"location":"scanning/#scan-names","title":"Scan Names","text":"<p>Every BBOT scan gets a random, mildly-entertaining name like <code>demonic_jimmy</code>. 
Output for that scan, including scan stats and any web screenshots, is saved to a folder by that name in <code>~/.bbot/scans</code>. The most recent 20 scans are kept, and older ones are removed.</p> <p>If you don't want a random name, you can change it with <code>-n</code>. You can also change the location of BBOT's output with <code>-o</code>:</p> <pre><code># save everything to the folder \"my_scan\" in the current directory\nbbot -t evilcorp.com -f subdomain-enum -m gowitness -n my_scan -o .\n</code></pre> <p>If you reuse a scan name, BBOT will automatically append to your previous output files.</p>"},{"location":"scanning/#targets-t","title":"Targets (<code>-t</code>)","text":"<p>Targets declare what's in-scope, and seed a scan with initial data. BBOT accepts an unlimited number of targets. They can be any of the following:</p> <ul> <li><code>DNS_NAME</code> (<code>evilcorp.com</code>)</li> <li><code>IP_ADDRESS</code> (<code>1.2.3.4</code>)</li> <li><code>IP_RANGE</code> (<code>1.2.3.0/24</code>)</li> <li><code>OPEN_TCP_PORT</code> (<code>192.168.0.1:80</code>)</li> <li><code>URL</code> (<code>https://www.evilcorp.com</code>)</li> </ul> <p>Note that BBOT only discriminates down to the host level. This means, for example, if you specify a URL <code>https://www.evilcorp.com</code> as the target, the scan will be seeded with that URL, but the scope of the scan will be the entire host, <code>www.evilcorp.com</code>. Other ports/URLs on that same host may also be scanned.</p> <p>You can specify targets directly on the command line, load them from files, or both! For example:</p> <pre><code>$ cat targets.txt\n4.3.2.1\n10.0.0.2:80\n1.2.3.0/24\nevilcorp.com\nevilcorp.co.uk\nhttps://www.evilcorp.co.uk\n\n# load targets from a file and from the command-line\n$ bbot -t targets.txt fsociety.com 5.6.7.0/24 -m nmap\n</code></pre> <p>On start, BBOT automatically converts Targets into Events.</p>"},{"location":"scanning/#modules-m","title":"Modules (<code>-m</code>)","text":"<p>To see a full list of modules and their descriptions, use <code>bbot -l</code> or see List of Modules.</p> <p>Modules are the part of BBOT that does the work -- port scanning, subdomain brute-forcing, API querying, etc. Modules consume Events (<code>IP_ADDRESS</code>, <code>DNS_NAME</code>, etc.) from each other, process the data in a useful way, then emit the results as new events. You can enable individual modules with <code>-m</code>.</p> <pre><code># Enable modules: nmap, sslcert, and httpx\nbbot -t www.evilcorp.com -m nmap sslcert httpx\n</code></pre>"},{"location":"scanning/#types-of-modules","title":"Types of Modules","text":"<p>Modules fall into three categories:</p> <ul> <li>Scan Modules:<ul> <li>These make up the majority of modules. Examples are <code>nmap</code>, <code>sslcert</code>, <code>httpx</code>, etc. Enable with <code>-m</code>.</li> </ul> </li> <li>Output Modules:<ul> <li>These output scan data to different formats/destinations. <code>txt</code>, <code>json</code>, and <code>csv</code> are enabled by default. Enable others with <code>-om</code>. (See: Output)</li> </ul> </li> <li>Internal Modules:<ul> <li>These modules perform essential, common-sense tasks. They are always enabled, unless explicitly disabled via the config (e.g. <code>-c speculate=false</code>).<ul> <li><code>aggregate</code>: Summarizes results at the end of a scan</li> <li><code>excavate</code>: Extracts useful data such as subdomains from webpages, etc.</li> <li><code>speculate</code>: Intelligently infers new events, e.g. 
<code>OPEN_TCP_PORT</code> from <code>URL</code> or <code>IP_ADDRESS</code> from <code>IP_RANGE</code>.</li> </ul> </li> </ul> <p>For details on the inner workings of modules, see Creating a Module.</p>"},{"location":"scanning/#flags-f","title":"Flags (<code>-f</code>)","text":"<p>Flags are how BBOT categorizes its modules. In a way, you can think of them as groups. Flags let you enable a bunch of similar modules at the same time without having to specify them each individually. For example, <code>-f subdomain-enum</code> would enable every module with the <code>subdomain-enum</code> flag.</p> <pre><code># list all subdomain-enum modules\nbbot -f subdomain-enum -l\n</code></pre>"},{"location":"scanning/#filtering-modules","title":"Filtering Modules","text":"<p>Modules can be easily enabled/disabled based on their flags:</p> <ul> <li><code>-f</code> Enable these flags (e.g. <code>-f subdomain-enum</code>)</li> <li><code>-rf</code> Require modules to have this flag (e.g. <code>-rf safe</code>)</li> <li><code>-ef</code> Exclude these flags (e.g. <code>-ef slow</code>)</li> <li><code>-em</code> Exclude these individual modules (e.g. <code>-em ipneighbor</code>)</li> <li><code>-lf</code> List all available flags</li> </ul> <p>Every module is either <code>safe</code> or <code>aggressive</code>, and either <code>active</code> or <code>passive</code>. These can be useful for filtering. For example, if you wanted to enable all the <code>safe</code> modules, but exclude active ones, you could do:</p> <pre><code># Enable safe modules but exclude active ones\nbbot -t evilcorp.com -f safe -ef active\n</code></pre> <p>This is equivalent to requiring the passive flag:</p> <pre><code># Enable safe modules but only if they're also passive\nbbot -t evilcorp.com -f safe -rf passive\n</code></pre> <p>A single module can have multiple flags. For example, the <code>securitytrails</code> module is <code>passive</code>, <code>safe</code>, <code>subdomain-enum</code>. 
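</p> <p>Flag and module filters can also be combined in a single command. As a minimal sketch (<code>evilcorp.com</code> is a placeholder target):</p> <pre><code># enable subdomain-enum modules, but exclude slow ones and the ipneighbor module\nbbot -t evilcorp.com -f subdomain-enum -ef slow -em ipneighbor\n</code></pre> <p>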
Below is a full list of flags and their associated modules.</p>"},{"location":"scanning/#list-of-flags","title":"List of Flags","text":"Flag # Modules Description Modules safe 91 Non-intrusive, safe to run affiliates, aggregate, ajaxpro, anubisdb, apkpure, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnscaa, dnscommonsrv, dnsdumpster, docker_pull, dockerhub, emailformat, extractous, filedownload, fingerprintx, fullhunt, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, gowitness, hackertarget, httpx, hunt, hunterio, iis_shortnames, internetdb, ip2location, ipstack, jadx, leakix, myssl, newsletters, ntlm, oauth, otx, passivetotal, pgp, portscan, postman, postman_download, rapiddns, robots, secretsdb, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, viewdns, virustotal, wappalyzer, wayback, zoomeye passive 66 Never connects to target systems affiliates, aggregate, anubisdb, apkpure, asn, azure_realm, azure_tenant, bevigil, binaryedge, bucket_file_enum, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnscaa, dnsdumpster, docker_pull, dockerhub, emailformat, excavate, extractous, fullhunt, git_clone, github_codesearch, github_org, github_workflows, google_playstore, hackertarget, hunterio, internetdb, ip2location, ipneighbor, ipstack, jadx, leakix, myssl, otx, passivetotal, pgp, postman, postman_download, rapiddns, securitytrails, shodan_dns, sitedossier, skymem, social, speculate, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, viewdns, virustotal, wayback, zoomeye subdomain-enum 52 Enumerates subdomains anubisdb, asn, azure_realm, azure_tenant, baddns_direct, baddns_zone, bevigil, binaryedge, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, sslcert, subdomaincenter, subdomainradar, subdomains, trickest, urlscan, virustotal, wayback, zoomeye active 47 Makes active connections to target systems ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dnsbrute, dnsbrute_mutations, dnscommonsrv, dotnetnuke, ffuf, ffuf_shortnames, filedownload, fingerprintx, generic_ssrf, git, gitlab, gowitness, host_header, httpx, hunt, iis_shortnames, newsletters, ntlm, nuclei, oauth, paramminer_cookies, paramminer_getparams, paramminer_headers, portscan, robots, secretsdb, securitytxt, smuggler, sslcert, telerik, url_manipulation, vhost, wafw00f, wappalyzer, wpscan aggressive 20 Generates a large amount of network traffic bypass403, dastardly, dnsbrute, dnsbrute_mutations, dotnetnuke, ffuf, ffuf_shortnames, generic_ssrf, host_header, ipneighbor, nuclei, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, telerik, url_manipulation, vhost, wafw00f, 
wpscan web-basic 18 Basic, non-intrusive web scan functionality azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, sslcert, wappalyzer cloud-enum 15 Enumerates cloud resources azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, dnsbimi, httpx, oauth, securitytxt code-enum 14 Find public code repositories and search them for secrets etc. apkpure, code_repository, docker_pull, dockerhub, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, postman, postman_download, trufflehog web-thorough 12 More advanced web scanning functionality ajaxpro, bucket_digitalocean, bypass403, dastardly, dotnetnuke, ffuf_shortnames, generic_ssrf, host_header, hunt, smuggler, telerik, url_manipulation slow 11 May take a long time to complete bucket_digitalocean, dastardly, dnsbrute_mutations, docker_pull, fingerprintx, git_clone, paramminer_cookies, paramminer_getparams, paramminer_headers, smuggler, vhost affiliates 9 Discovers affiliated hostnames/domains affiliates, azure_realm, azure_tenant, builtwith, oauth, sslcert, trickest, viewdns, zoomeye email-enum 8 Enumerates email addresses dehashed, dnscaa, emailformat, emails, hunterio, pgp, skymem, sslcert deadly 4 Highly aggressive dastardly, ffuf, nuclei, vhost baddns 3 Runs all modules from the DNS auditing tool BadDNS baddns, baddns_direct, baddns_zone web-paramminer 3 Discovers HTTP parameters through brute-force paramminer_cookies, paramminer_getparams, paramminer_headers iis-shortnames 2 Scans for IIS Shortname vulnerability ffuf_shortnames, iis_shortnames portscan 2 Discovers open ports internetdb, portscan report 2 Generates a report at the end of the scan affiliates, asn social-enum 2 Enumerates social media httpx, social service-enum 1 Identifies protocols running on open ports fingerprintx subdomain-hijack 1 Detects hijackable subdomains baddns web-screenshots 1 Takes screenshots of web pages gowitness"},{"location":"scanning/#dependencies","title":"Dependencies","text":"<p>BBOT modules have external dependencies ranging from OS packages (<code>openssl</code>) to binaries (<code>nmap</code>) to Python libraries (<code>wappalyzer</code>). When a module is enabled, installation of its dependencies happens at runtime with Ansible. BBOT provides several command-line flags to control how dependencies are installed.</p> <ul> <li><code>--no-deps</code> - Don't install module dependencies</li> <li><code>--force-deps</code> - Force install all module dependencies</li> <li><code>--retry-deps</code> - Try again to install failed module dependencies</li> <li><code>--ignore-failed-deps</code> - Run modules even if they have failed dependencies</li> <li><code>--install-all-deps</code> - Install dependencies for all modules (useful if you are provisioning a pentest system and want to install everything ahead of time)</li> </ul> <p>For details on how Ansible playbooks are attached to BBOT modules, see How to Write a Module.</p>"},{"location":"scanning/#scope","title":"Scope","text":"<p>For pentesters and bug bounty hunters, staying in scope is extremely important. BBOT takes this seriously, meaning that active modules (e.g. <code>nuclei</code>) will only touch in-scope resources.</p> <p>By default, scope is whatever you specify with <code>-t</code>. This includes child subdomains. 
For example, if you specify <code>-t evilcorp.com</code>, all its subdomains (<code>www.evilcorp.com</code>, <code>mail.evilcorp.com</code>, etc.) also become in-scope.</p>"},{"location":"scanning/#scope-distance","title":"Scope Distance","text":"<p>Since BBOT is recursive, it would quickly resort to scanning the entire internet without some kind of restraining mechanism. To solve this problem, every event discovered by BBOT is assigned a Scope Distance. Scope distance represents how far out from the main scope that data was discovered.</p> <p>For example, if your target is <code>evilcorp.com</code>, <code>www.evilcorp.com</code> would have a scope distance of <code>0</code> (i.e. in-scope). If BBOT discovers that <code>www.evilcorp.com</code> resolves to <code>1.2.3.4</code>, <code>1.2.3.4</code> is one hop away, which means it would have a scope distance of <code>1</code>. If <code>1.2.3.4</code> has a PTR record that points to <code>ecorp.blob.core.windows.net</code>, <code>ecorp.blob.core.windows.net</code> is two hops away, so its scope distance is <code>2</code>.</p> <p>Scope distance continues to increase the further out you get. Most modules (e.g. <code>nuclei</code> and <code>nmap</code>) only consume in-scope events. Certain other passive modules such as <code>asn</code> accept out to distance <code>1</code>. By default, DNS resolution happens out to a distance of <code>2</code>. Upon its discovery, any event that's determined to be in-scope (e.g. <code>www.evilcorp.com</code>) immediately becomes distance <code>0</code>, and the cycle starts over.</p>"},{"location":"scanning/#displaying-out-of-scope-events","title":"Displaying Out-of-scope Events","text":"<p>By default, BBOT only displays in-scope events (with a few exceptions such as <code>STORAGE_BUCKET</code>s). If you want to see more, you must increase the config value of <code>scope.report_distance</code>:</p> <pre><code># display out-of-scope events up to one hop away from the main scope\nbbot -t evilcorp.com -f subdomain-enum -c scope.report_distance=1\n</code></pre>"},{"location":"scanning/#strict-scope","title":"Strict Scope","text":"<p>If you want to scan only that specific target hostname and none of its children, you can specify <code>--strict-scope</code>.</p> <p>Note that <code>--strict-scope</code> only applies to targets and whitelists, but not blacklists. This means that if you put <code>internal.evilcorp.com</code> in your blacklist, you can be sure none of its subdomains will be scanned, even when using <code>--strict-scope</code>.</p>"},{"location":"scanning/#whitelists-and-blacklists","title":"Whitelists and Blacklists","text":"<p>BBOT allows precise control over scope with whitelists and blacklists. These both use the same syntax as <code>--target</code>, meaning they accept the same event types, and you can specify an unlimited number of them, via a file, the CLI, or both.</p>"},{"location":"scanning/#whitelists","title":"Whitelists","text":"<p><code>--whitelist</code> enables you to override what's in scope. For example, if you want to run nuclei against <code>evilcorp.com</code>, but stay only inside their corporate IP range of <code>1.2.3.0/24</code>, you can accomplish this like so:</p> <pre><code># Seed scan with evilcorp.com, but restrict scope to 1.2.3.0/24\nbbot -t evilcorp.com --whitelist 1.2.3.0/24 -f subdomain-enum -m nmap nuclei --allow-deadly\n</code></pre>"},{"location":"scanning/#blacklists","title":"Blacklists","text":"<p><code>--blacklist</code> takes ultimate precedence. 
Anything in the blacklist is completely excluded from the scan, even if it's in the whitelist.</p> <pre><code># Scan evilcorp.com, but exclude internal.evilcorp.com and its children\nbbot -t evilcorp.com --blacklist internal.evilcorp.com -f subdomain-enum -m nmap nuclei --allow-deadly\n</code></pre>"},{"location":"scanning/#blacklist-by-regex","title":"Blacklist by Regex","text":"<p>Blacklists also accept regex patterns. These regexes are checked against the full URL, including the host and path.</p> <p>To specify a regex, prefix the pattern with <code>RE:</code>. For example, to exclude all events containing \"signout\", you could do:</p> <pre><code>bbot -t evilcorp.com --blacklist \"RE:signout\"\n</code></pre> <p>Note that this would blacklist both of the following events:</p> <ul> <li><code>[URL]       http://evilcorp.com/signout.aspx</code></li> <li><code>[DNS_NAME]  signout.evilcorp.com</code></li> </ul> <p>If you only want to blacklist the URL, you could narrow the regex like so:</p> <pre><code>bbot -t evilcorp.com --blacklist 'RE:signout\\.aspx$'\n</code></pre> <p>Similar to targets and whitelists, blacklists can be specified in your preset. The <code>spider</code> preset makes use of this to prevent the spider from following logout links:</p> spider.yml<pre><code>description: Recursive web spider\n\nmodules:\n  - httpx\n\nblacklist:\n  # Prevent spider from invalidating sessions by logging out\n  - \"RE:/.*(sign|log)[_-]?out\"\n\nconfig:\n  web:\n    # how many links to follow in a row\n    spider_distance: 2\n    # don't follow links whose directory depth is higher than 4\n    spider_depth: 4\n    # maximum number of links to follow per page\n    spider_links_per_page: 25\n</code></pre>"},{"location":"scanning/#dns-wildcards","title":"DNS Wildcards","text":"<p>BBOT has robust wildcard detection built-in. It can reliably detect wildcard domains, and will tag them accordingly:</p> <pre><code>[DNS_NAME]      github.io   TARGET  (a-record, a-wildcard-domain, aaaa-wildcard-domain, wildcard-domain)\n                                               ^^^^^^^^^^^^^^^^^  ^^^^^^^^^^^^^^^^^^^^  ^^^^^^^^^^^^^^^\n</code></pre> <p>Wildcard hosts are collapsed into a single host beginning with <code>_wildcard</code>:</p> <pre><code>[DNS_NAME]      _wildcard.github.io     TARGET  (a-record, a-wildcard, a-wildcard-domain, aaaa-record, aaaa-wildcard, aaaa-wildcard-domain, wildcard, wildcard-domain)\n                ^^^^^^^^^\n</code></pre> <p>If you don't want this, you can disable wildcard detection on a per-domain basis in the config:</p> ~/.bbot/config/bbot.yml<pre><code>dns:\n  wildcard_ignore:\n    - evilcorp.com\n    - evilcorp.co.uk\n</code></pre> <p>There are certain edge cases (such as with dynamic DNS rules) where BBOT's wildcard detection fails. 
In these cases, you can try increasing the number of wildcard checks in the config:</p> ~/.bbot/config/bbot.yml<pre><code># default == 10\ndns:\n  wildcard_tests: 20\n</code></pre> <p>If that doesn't work you can consider blacklisting the offending domain.</p>"},{"location":"scanning/advanced/","title":"Advanced","text":"<p>Below you can find some advanced uses of BBOT.</p>"},{"location":"scanning/advanced/#bbot-as-a-python-library","title":"BBOT as a Python library","text":""},{"location":"scanning/advanced/#synchronous","title":"Synchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nif __name__ == \"__main__\":\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    for event in scan.start():\n        print(event)\n</code></pre>"},{"location":"scanning/advanced/#asynchronous","title":"Asynchronous","text":"<pre><code>from bbot.scanner import Scanner\n\nasync def main():\n    scan = Scanner(\"evilcorp.com\", presets=[\"subdomain-enum\"])\n    async for event in scan.async_start():\n        print(event.json())\n\nif __name__ == \"__main__\":\n    import asyncio\n    asyncio.run(main())\n</code></pre>"},{"location":"scanning/advanced/#command-line-help","title":"Command-Line Help","text":"<pre><code>usage: bbot [-h] [-t TARGET [TARGET ...]] [-w WHITELIST [WHITELIST ...]]\n               [-b BLACKLIST [BLACKLIST ...]] [--strict-scope]\n               [-p [PRESET ...]] [-c [CONFIG ...]] [-lp]\n               [-m MODULE [MODULE ...]] [-l] [-lmo] [-em MODULE [MODULE ...]]\n               [-f FLAG [FLAG ...]] [-lf] [-rf FLAG [FLAG ...]]\n               [-ef FLAG [FLAG ...]] [--allow-deadly] [-n SCAN_NAME] [-v] [-d]\n               [-s] [--force] [-y] [--fast-mode] [--dry-run]\n               [--current-preset] [--current-preset-full] [-o DIR]\n               [-om MODULE [MODULE ...]] [--json] [--brief]\n               [--event-types EVENT_TYPES [EVENT_TYPES ...]]\n               [--no-deps | --force-deps | --retry-deps | --ignore-failed-deps | --install-all-deps]\n               [--version] [--proxy HTTP_PROXY]\n               [-H CUSTOM_HEADERS [CUSTOM_HEADERS ...]]\n               [--custom-yara-rules CUSTOM_YARA_RULES]\n\nBighuge BLS OSINT Tool\n\noptions:\n  -h, --help            show this help message and exit\n\nTarget:\n  -t TARGET [TARGET ...], --targets TARGET [TARGET ...]\n                        Targets to seed the scan\n  -w WHITELIST [WHITELIST ...], --whitelist WHITELIST [WHITELIST ...]\n                        What's considered in-scope (by default it's the same as --targets)\n  -b BLACKLIST [BLACKLIST ...], --blacklist BLACKLIST [BLACKLIST ...]\n                        Don't touch these things\n  --strict-scope        Don't consider subdomains of target/whitelist to be in-scope\n\nPresets:\n  -p [PRESET ...], --preset [PRESET ...]\n                        Enable BBOT preset(s)\n  -c [CONFIG ...], --config [CONFIG ...]\n                        Custom config options in key=value format: e.g. 'modules.shodan.api_key=1234'\n  -lp, --list-presets   List available presets.\n\nModules:\n  -m MODULE [MODULE ...], --modules MODULE [MODULE ...]\n                        Modules to enable. 
Choices: affiliates,ajaxpro,anubisdb,apkpure,asn,azure_realm,azure_tenant,baddns,baddns_direct,baddns_zone,badsecrets,bevigil,binaryedge,bucket_amazon,bucket_azure,bucket_digitalocean,bucket_file_enum,bucket_firebase,bucket_google,bufferoverrun,builtwith,bypass403,c99,censys,certspotter,chaos,code_repository,columbus,credshed,crt,dastardly,dehashed,digitorus,dnsbimi,dnsbrute,dnsbrute_mutations,dnscaa,dnscommonsrv,dnsdumpster,docker_pull,dockerhub,dotnetnuke,emailformat,extractous,ffuf,ffuf_shortnames,filedownload,fingerprintx,fullhunt,generic_ssrf,git,git_clone,github_codesearch,github_org,github_workflows,gitlab,google_playstore,gowitness,hackertarget,host_header,httpx,hunt,hunterio,iis_shortnames,internetdb,ip2location,ipneighbor,ipstack,jadx,leakix,myssl,newsletters,ntlm,nuclei,oauth,otx,paramminer_cookies,paramminer_getparams,paramminer_headers,passivetotal,pgp,portscan,postman,postman_download,rapiddns,robots,secretsdb,securitytrails,securitytxt,shodan_dns,sitedossier,skymem,smuggler,social,sslcert,subdomaincenter,subdomainradar,telerik,trickest,trufflehog,url_manipulation,urlscan,vhost,viewdns,virustotal,wafw00f,wappalyzer,wayback,wpscan,zoomeye\n  -l, --list-modules    List available modules.\n  -lmo, --list-module-options\n                        Show all module config options\n  -em MODULE [MODULE ...], --exclude-modules MODULE [MODULE ...]\n                        Exclude these modules.\n  -f FLAG [FLAG ...], --flags FLAG [FLAG ...]\n                        Enable modules by flag. Choices: active,affiliates,aggressive,baddns,cloud-enum,code-enum,deadly,email-enum,iis-shortnames,passive,portscan,report,safe,service-enum,slow,social-enum,subdomain-enum,subdomain-hijack,web-basic,web-paramminer,web-screenshots,web-thorough\n  -lf, --list-flags     List available flags.\n  -rf FLAG [FLAG ...], --require-flags FLAG [FLAG ...]\n                        Only enable modules with these flags (e.g. -rf passive)\n  -ef FLAG [FLAG ...], --exclude-flags FLAG [FLAG ...]\n                        Disable modules with these flags. (e.g. -ef aggressive)\n  --allow-deadly        Enable the use of highly aggressive modules\n\nScan:\n  -n SCAN_NAME, --name SCAN_NAME\n                        Name of scan (default: random)\n  -v, --verbose         Be more verbose\n  -d, --debug           Enable debugging\n  -s, --silent          Be quiet\n  --force               Run scan even in the case of condition violations or failed module setups\n  -y, --yes             Skip scan confirmation prompt\n  --fast-mode           Scan only the provided targets as fast as possible, with no extra discovery\n  --dry-run             Abort before executing scan\n  --current-preset      Show the current preset in YAML format\n  --current-preset-full\n                        Show the current preset in its full form, including defaults\n\nOutput:\n  -o DIR, --output-dir DIR\n                        Directory to output scan results\n  -om MODULE [MODULE ...], --output-modules MODULE [MODULE ...]\n                        Output module(s). 
Choices: asset_inventory,csv,discord,emails,http,json,neo4j,postgres,python,slack,splunk,sqlite,stdout,subdomains,teams,txt,web_report,websocket\n  --json, -j            Output scan data in JSON format\n  --brief, -br          Output only the data itself\n  --event-types EVENT_TYPES [EVENT_TYPES ...]\n                        Choose which event types to display\n\nModule dependencies:\n  Control how modules install their dependencies\n\n  --no-deps             Don't install module dependencies\n  --force-deps          Force install all module dependencies\n  --retry-deps          Try again to install failed module dependencies\n  --ignore-failed-deps  Run modules even if they have failed dependencies\n  --install-all-deps    Install dependencies for all modules\n\nMisc:\n  --version             show BBOT version and exit\n  --proxy HTTP_PROXY    Use this proxy for all HTTP requests\n  -H CUSTOM_HEADERS [CUSTOM_HEADERS ...], --custom-headers CUSTOM_HEADERS [CUSTOM_HEADERS ...]\n                        List of custom headers as key value pairs (header=value).\n  --custom-yara-rules CUSTOM_YARA_RULES, -cy CUSTOM_YARA_RULES\n                        Add custom yara rules to excavate\n\nEXAMPLES\n\n    Subdomains:\n        bbot -t evilcorp.com -p subdomain-enum\n\n    Subdomains (passive only):\n        bbot -t evilcorp.com -p subdomain-enum -rf passive\n\n    Subdomains + port scan + web screenshots:\n        bbot -t evilcorp.com -p subdomain-enum -m portscan gowitness -n my_scan -o .\n\n    Subdomains + basic web scan:\n        bbot -t evilcorp.com -p subdomain-enum web-basic\n\n    Web spider:\n        bbot -t www.evilcorp.com -p spider -c web.spider_distance=2 web.spider_depth=2\n\n    Everything everywhere all at once:\n        bbot -t evilcorp.com -p kitchen-sink\n\n    List modules:\n        bbot -l\n\n    List presets:\n        bbot -lp\n\n    List flags:\n        bbot -lf\n</code></pre>"},{"location":"scanning/configuration/","title":"Configuration Overview","text":"<p>Normally, Presets are used to configure a scan. However, there may be cases where you want to change BBOT's global defaults so a certain option is always set, even if it's not specified in a preset.</p> <p>BBOT has a YAML config at <code>~/.config/bbot/bbot.yml</code>. This is the first config that BBOT loads, so it's a good place to put default settings like <code>http_proxy</code>, <code>max_threads</code>, or <code>http_user_agent</code>. You can also put any module settings here, including API keys.</p> <p>For a list of all possible config options, see:</p> <ul> <li>Global Options</li> <li>Module Options</li> </ul> <p>For examples of common config changes, see Tips and Tricks.</p>"},{"location":"scanning/configuration/#configuration-files","title":"Configuration Files","text":"<p>BBOT loads its config from the following files, in this order (last one loaded == highest priority):</p> <ul> <li><code>~/.config/bbot/bbot.yml</code>  &lt;-- Global BBOT config</li> <li>presets (<code>-p</code>)             &lt;-- Presets are good for scan-specific settings</li> <li>command line (<code>-c</code>)        &lt;-- CLI overrides everything</li> </ul> <p><code>bbot.yml</code> will be automatically created for you when you first run BBOT.</p>"},{"location":"scanning/configuration/#yaml-config-vs-command-line","title":"YAML Config vs Command Line","text":"<p>You can specify config options either via the command line or the config. 
For example, if you want to proxy your BBOT scan through a local proxy like Burp Suite, you could either do:</p> <pre><code># send BBOT traffic through an HTTP proxy\nbbot -t evilcorp.com -c http_proxy=http://127.0.0.1:8080\n</code></pre> <p>Or, in <code>~/.config/bbot/config.yml</code>:</p> ~/.bbot/config/bbot.yml<pre><code>http_proxy: http://127.0.0.1:8080\n</code></pre> <p>These two are equivalent.</p> <p>Config options specified via the command-line take precedence over all others. You can give BBOT a custom config file with <code>-c myconf.yml</code>, or individual arguments like this: <code>-c modules.shodan_dns.api_key=deadbeef</code>. To display the full and current BBOT config, including any command-line arguments, use <code>bbot -c</code>.</p> <p>Note that placing the following in <code>bbot.yml</code>: ~/.bbot/config/bbot.yml<pre><code>modules:\n  shodan_dns:\n    api_key: deadbeef\n</code></pre> Is the same as: <pre><code>bbot -c modules.shodan_dns.api_key=deadbeef\n</code></pre></p>"},{"location":"scanning/configuration/#global-config-options","title":"Global Config Options","text":"<p>Below is a full list of the config options supported, along with their defaults.</p> defaults.yml<pre><code>### BASIC OPTIONS ###\n\n# BBOT working directory\nhome: ~/.bbot\n# How many scan results to keep before cleaning up the older ones\nkeep_scans: 20\n# Interval for displaying status messages\nstatus_frequency: 15\n# Include the raw data of files (i.e. PDFs, web screenshots) as base64 in the event\nfile_blobs: false\n# Include the raw data of directories (i.e. git repos) as tar.gz base64 in the event\nfolder_blobs: false\n\n### SCOPE ###\n\nscope:\n  # strict scope means only exact DNS names are considered in-scope\n  # subdomains are not included unless they are explicitly provided in the target list\n  strict: false\n  # Filter by scope distance which events are displayed in the output\n  # 0 == show only in-scope events (affiliates are always shown)\n  # 1 == show all events up to distance-1 (1 hop from target)\n  report_distance: 0\n  # How far out from the main scope to search\n  # Do not change this setting unless you know what you're doing\n  search_distance: 0\n\n### DNS ###\n\ndns:\n  # Completely disable DNS resolution (careful if you have IP whitelists/blacklists, consider using minimal=true instead)\n  disable: false\n  # Speed up scan by not creating any new DNS events, and only resolving A and AAAA records\n  minimal: false\n  # How many instances of the dns module to run concurrently\n  threads: 25\n  # How many concurrent DNS resolvers to use when brute-forcing\n  # (under the hood this is passed through directly to massdns -s)\n  brute_threads: 1000\n  # nameservers to use for DNS brute-forcing\n  # default is updated weekly and contains ~10K high-quality public servers\n  brute_nameservers: https://raw.githubusercontent.com/blacklanternsecurity/public-dns-servers/master/nameservers.txt\n  # How far away from the main target to explore via DNS resolution (independent of scope.search_distance)\n  # This is safe to change\n  search_distance: 1\n  # Limit how many DNS records can be followed in a row (stop malicious/runaway DNS records)\n  runaway_limit: 5\n  # DNS query timeout\n  timeout: 5\n  # How many times to retry DNS queries\n  retries: 1\n  # Completely disable BBOT's DNS wildcard detection\n  wildcard_disable: False\n  # Disable BBOT's DNS wildcard detection for select domains\n  wildcard_ignore: []\n  # How many sanity checks to make when verifying wildcard DNS\n  # 
Increase this value if BBOT's wildcard detection isn't working\n  wildcard_tests: 10\n  # Skip DNS requests for a certain domain and rdtype after encountering this many timeouts or SERVFAILs\n  # This helps prevent faulty DNS servers from hanging up the scan\n  abort_threshold: 50\n  # Don't show PTR records containing IP addresses\n  filter_ptrs: true\n  # Enable/disable debug messages for DNS queries\n  debug: false\n  # For performance reasons, always skip these DNS queries\n  # Microsoft's DNS infrastructure is misconfigured so that certain queries to mail.protection.outlook.com always time out\n  omit_queries:\n    - SRV:mail.protection.outlook.com\n    - CNAME:mail.protection.outlook.com\n    - TXT:mail.protection.outlook.com\n\n### WEB ###\n\nweb:\n  # HTTP proxy\n  http_proxy: \n  # Web user-agent\n  user_agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.97\n  # Set the maximum number of HTTP links that can be followed in a row (0 == no spidering allowed)\n  spider_distance: 0\n  # Set the maximum directory depth for the web spider\n  spider_depth: 1\n  # Set the maximum number of links that can be followed per page\n  spider_links_per_page: 25\n  # HTTP timeout (for Python requests; API calls, etc.)\n  http_timeout: 10\n  # HTTP timeout (for httpx)\n  httpx_timeout: 5\n  # Custom HTTP headers (e.g. cookies, etc.)\n  # in the format { \"Header-Key\": \"header_value\" }\n  # These are attached to all in-scope HTTP requests\n  # Note that some modules (e.g. github) may end up sending these to out-of-scope resources\n  http_headers: {}\n  # HTTP retries (for Python requests; API calls, etc.)\n  http_retries: 1\n  # HTTP retries (for httpx)\n  httpx_retries: 1\n  # Enable/disable debug messages for web requests/responses\n  debug: false\n  # Maximum number of HTTP redirects to follow\n  http_max_redirects: 5\n  # Whether to verify SSL certificates\n  ssl_verify: false\n\n### ENGINE ###\n\nengine:\n  debug: false\n\n# Tool dependencies\ndeps:\n  ffuf:\n    version: \"2.1.0\"\n\n### ADVANCED OPTIONS ###\n\n# Load BBOT modules from these custom paths\nmodule_dirs: []\n\n# Infer certain events from others, e.g. 
IPs from IP ranges, DNS_NAMEs from URLs, etc.\nspeculate: True\n# Passively search event data for URLs, hostnames, emails, etc.\nexcavate: True\n# Summarize activity at the end of a scan\naggregate: True\n# DNS resolution, wildcard detection, etc.\ndnsresolve: True\n# Cloud provider tagging\ncloudcheck: True\n\n# How to handle installation of module dependencies\n# Choices are:\n#  - abort_on_failure (default) - if a module dependency fails to install, abort the scan\n#  - retry_failed - try again to install failed dependencies\n#  - ignore_failed - run the scan regardless of what happens with dependency installation\n#  - disable - completely disable BBOT's dependency system (you are responsible for installing tools, pip packages, etc.)\ndeps_behavior: abort_on_failure\n\n# Strip querystring from URLs by default\nurl_querystring_remove: True\n# When query string is retained, by default collapse parameter values down to a single value per parameter\nurl_querystring_collapse: True\n\n# Completely ignore URLs with these extensions\nurl_extension_blacklist:\n  # images\n  - png\n  - jpg\n  - bmp\n  - ico\n  - jpeg\n  - gif\n  - svg\n  - webp\n  # web/fonts\n  - css\n  - woff\n  - woff2\n  - ttf\n  - eot\n  - sass\n  - scss\n  # audio\n  - mp3\n  - m4a\n  - wav\n  - flac\n  # video\n  - mp4\n  - mkv\n  - avi\n  - wmv\n  - mov\n  - flv\n  - webm\n# Distribute URLs with these extensions only to httpx (these are omitted from output)\nurl_extension_httpx_only:\n  - js\n# Don't output these types of events (they are still distributed to modules)\nomit_event_types:\n  - HTTP_RESPONSE\n  - RAW_TEXT\n  - URL_UNVERIFIED\n  - DNS_NAME_UNRESOLVED\n  - FILESYSTEM\n  - WEB_PARAMETER\n  - RAW_DNS_RECORD\n  # - IP_ADDRESS\n\n# Custom interactsh server settings\ninteractsh_server: null\ninteractsh_token: null\ninteractsh_disable: false\n</code></pre>"},{"location":"scanning/configuration/#module-config-options","title":"Module Config Options","text":"<p>Many modules accept their own configuration options. These options have the ability to change their behavior. For example, the <code>portscan</code> module accepts options for <code>ports</code>, <code>rate</code>, etc. Below is a list of all possible module config options.</p> Config Option Type Description Default modules.baddns.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns.enabled_submodules list A list of submodules to enable. Empty list (default) enables CNAME, TXT and MX Only [] modules.baddns.only_high_confidence bool Do not emit low-confidence or generic detections False modules.baddns_direct.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns_zone.custom_nameservers list Force BadDNS to use a list of custom nameservers [] modules.baddns_zone.only_high_confidence bool Do not emit low-confidence or generic detections False modules.badsecrets.custom_secrets NoneType Include custom secrets loaded from a local file None modules.bucket_amazon.permutations bool Whether to try permutations False modules.bucket_azure.permutations bool Whether to try permutations False modules.bucket_digitalocean.permutations bool Whether to try permutations False modules.bucket_firebase.permutations bool Whether to try permutations False modules.bucket_google.permutations bool Whether to try permutations False modules.dnsbrute.max_depth int How many subdomains deep to brute force, i.e. 
5.4.3.2.1.evilcorp.com 5 modules.dnsbrute.wordlist str Subdomain wordlist URL https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/subdomains-top1million-5000.txt modules.dnsbrute_mutations.max_mutations int Maximum number of target-specific mutations to try per subdomain 100 modules.dnscommonsrv.max_depth int The maximum subdomain depth to brute-force SRV records 2 modules.ffuf.extensions str Optionally include a list of extensions to extend the keyword with (comma separated) modules.ffuf.lines int take only the first N lines from the wordlist when finding directories 5000 modules.ffuf.max_depth int the maximum directory depth to attempt to solve 0 modules.ffuf.wordlist str Specify wordlist to use when finding directories https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/Web-Content/raft-small-directories.txt modules.ffuf_shortnames.extensions str Optionally include a list of extensions to extend the keyword with (comma separated) modules.ffuf_shortnames.find_common_prefixes bool Attempt to automatically detect common prefixes and make additional ffuf runs against them False modules.ffuf_shortnames.find_delimiters bool Attempt to detect common delimiters and make additional ffuf runs against them True modules.ffuf_shortnames.ignore_redirects bool Explicitly ignore redirects (301,302) True modules.ffuf_shortnames.lines int take only the first N lines from the wordlist when finding directories 1000000 modules.ffuf_shortnames.max_depth int the maximum directory depth to attempt to solve 1 modules.ffuf_shortnames.version str ffuf version 2.0.0 modules.ffuf_shortnames.wordlist str Specify wordlist to use when finding directories modules.ffuf_shortnames.wordlist_extensions str Specify wordlist to use when making extension lists modules.filedownload.base_64_encoded_file str Stream the bytes of a file and encode them in base 64 for event data. false modules.filedownload.extensions list File extensions to download ['bak', 'bash', 'bashrc', 'cfg', 'conf', 'crt', 'csv', 'db', 'dll', 'doc', 'docx', 'exe', 'ica', 'indd', 'ini', 'jar', 'key', 'log', 'markdown', 'md', 'msi', 'odg', 'odp', 'ods', 'odt', 'pdf', 'pem', 'pps', 'ppsx', 'ppt', 'pptx', 'ps1', 'pub', 'raw', 'rdp', 'sh', 'sql', 'sqlite', 'swp', 'sxw', 'tar.gz', 'tar', 'txt', 'vbs', 'war', 'wpd', 'xls', 'xlsx', 'xml', 'yaml', 'yml', 'zip'] modules.filedownload.max_filesize str Cancel download if filesize is greater than this size 10MB modules.fingerprintx.skip_common_web bool Skip common web ports such as 80, 443, 8080, 8443, etc. True modules.fingerprintx.version str fingerprintx version 1.1.4 modules.gitlab.api_key str Gitlab access token modules.gowitness.idle_timeout int Skip the current gowitness batch if it stalls for longer than this many seconds 1800 modules.gowitness.output_path str Where to save screenshots modules.gowitness.resolution_x int Screenshot resolution x 1440 modules.gowitness.resolution_y int Screenshot resolution y 900 modules.gowitness.social bool Whether to screenshot social media webpages False modules.gowitness.threads int How many gowitness threads to spawn (default is number of CPUs x 2) 0 modules.gowitness.timeout int Preflight check timeout 10 modules.gowitness.version str Gowitness version 2.4.2 modules.httpx.in_scope_only bool Only visit web reparents that are in scope. 
True modules.httpx.max_response_size int Max response size in bytes 5242880 modules.httpx.probe_all_ips bool Probe all the ips associated with same host False modules.httpx.store_responses bool Save raw HTTP responses to scan folder False modules.httpx.threads int Number of httpx threads to use 50 modules.httpx.version str httpx version 1.2.5 modules.iis_shortnames.detect_only bool Only detect the vulnerability and do not run the shortname scanner True modules.iis_shortnames.max_node_count int Limit how many nodes to attempt to resolve on any given recursion branch 50 modules.ntlm.try_all bool Try every NTLM endpoint False modules.nuclei.batch_size int Number of targets to send to Nuclei per batch (default 200) 200 modules.nuclei.budget int Used in budget mode to set the number of requests which will be allotted to the nuclei scan 1 modules.nuclei.concurrency int maximum number of templates to be executed in parallel (default 25) 25 modules.nuclei.directory_only bool Filter out 'file' URL event (default True) True modules.nuclei.etags str tags to exclude from the scan modules.nuclei.mode str manual | technology | severe | budget. Technology: Only activate based on technology events that match nuclei tags (nuclei -as mode). Manual (DEFAULT): Fully manual settings. Severe: Only critical and high severity templates without intrusive. Budget: Limit Nuclei to a specified number of HTTP requests manual modules.nuclei.ratelimit int maximum number of requests to send per second (default 150) 150 modules.nuclei.retries int number of times to retry a failed request (default 0) 0 modules.nuclei.severity str Filter based on severity field available in the template. modules.nuclei.silent bool Don't display nuclei's banner or status messages False modules.nuclei.tags str execute a subset of templates that contain the provided tags modules.nuclei.templates str template or template directory paths to include in the scan modules.nuclei.version str nuclei version 3.3.5 modules.oauth.try_all bool Check for OAUTH/IODC on every subdomain and URL. False modules.paramminer_cookies.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_cookies.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_cookies.wordlist str Define the wordlist to be used to derive cookies modules.paramminer_getparams.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_getparams.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_getparams.wordlist str Define the wordlist to be used to derive headers modules.paramminer_headers.recycle_words bool Attempt to use words found during the scan on all other endpoints False modules.paramminer_headers.skip_boring_words bool Remove commonly uninteresting words from the wordlist True modules.paramminer_headers.wordlist str Define the wordlist to be used to derive headers modules.portscan.adapter str Manually specify a network interface, such as \"eth0\" or \"tun0\". If not specified, the first network interface found with a default gateway will be used. modules.portscan.adapter_ip str Send packets using this IP address. Not needed unless masscan's autodetection fails modules.portscan.adapter_mac str Send packets using this as the source MAC address. 
Not needed unless masscan's autodetection fails modules.portscan.ping_first bool Only portscan hosts that reply to pings False modules.portscan.ping_only bool Ping sweep only, no portscan False modules.portscan.ports str Ports to scan modules.portscan.rate int Rate in packets per second 300 modules.portscan.router_mac str Send packets to this MAC address as the destination. Not needed unless masscan's autodetection fails modules.portscan.top_ports int Top ports to scan (default 100) (to override, specify 'ports') 100 modules.portscan.wait int Seconds to wait for replies after scan is complete 5 modules.robots.include_allow bool Include 'Allow' Entries True modules.robots.include_disallow bool Include 'Disallow' Entries True modules.robots.include_sitemap bool Include 'sitemap' entries False modules.secretsdb.min_confidence int Only use signatures with this confidence score or higher 99 modules.secretsdb.signatures str File path or URL to YAML signatures https://raw.githubusercontent.com/blacklanternsecurity/secrets-patterns-db/master/db/rules-stable.yml modules.securitytxt.emails bool emit EMAIL_ADDRESS events True modules.securitytxt.urls bool emit URL_UNVERIFIED events True modules.sslcert.skip_non_ssl bool Don't try common non-SSL ports True modules.sslcert.timeout float Socket connect timeout in seconds 5.0 modules.telerik.exploit_RAU_crypto bool Attempt to confirm any RAU AXD detections are vulnerable False modules.url_manipulation.allow_redirects bool Allowing redirects will sometimes create false positives. Disallowing will sometimes create false negatives. Allowed by default. True modules.vhost.force_basehost str Use a custom base host (e.g. evilcorp.com) instead of the default behavior of using the current URL modules.vhost.lines int take only the first N lines from the wordlist when finding directories 5000 modules.vhost.wordlist str Wordlist containing subdomains https://raw.githubusercontent.com/danielmiessler/SecLists/master/Discovery/DNS/subdomains-top1million-5000.txt modules.wafw00f.generic_detect bool When no specific WAF detections are made, try to perform a generic detect True modules.wpscan.api_key str WPScan API Key modules.wpscan.connection_timeout int The connection timeout in seconds (default 2) 2 modules.wpscan.disable_tls_checks bool Disables the SSL/TLS certificate verification (Default True) True modules.wpscan.enumerate str Enumeration Process see wpscan help documentation (default: vp,vt,cb,dbe) vp,vt,cb,dbe modules.wpscan.force bool Do not check if the target is running WordPress or returns a 403 False modules.wpscan.request_timeout int The request timeout in seconds (default 5) 5 modules.wpscan.threads int How many wpscan threads to spawn (default is 5) 5 modules.anubisdb.limit int Limit the number of subdomains returned per query (increasing this may slow the scan due to garbage results from this API) 1000 modules.apkpure.output_folder str Folder to download apk's to modules.bevigil.api_key str BeVigil OSINT API Key modules.bevigil.urls bool Emit URLs in addition to DNS_NAMEs False modules.binaryedge.api_key str BinaryEdge API key modules.binaryedge.max_records int Limit results to help prevent exceeding API quota 1000 modules.bucket_file_enum.file_limit int Limit the number of files downloaded per bucket 50 modules.bufferoverrun.api_key str BufferOverrun API key modules.bufferoverrun.commercial bool Use commercial API False modules.builtwith.api_key str Builtwith API key modules.builtwith.redirects bool Also look up inbound and outbound redirects True 
modules.c99.api_key str c99.nl API key modules.censys.api_key str Censys.io API Key in the format of 'key:secret' modules.censys.max_pages int Maximum number of pages to fetch (100 results per page) 5 modules.chaos.api_key str Chaos API key modules.credshed.credshed_url str URL of credshed server modules.credshed.password str Credshed password modules.credshed.username str Credshed username modules.dehashed.api_key str DeHashed API Key modules.dehashed.username str Email Address associated with your API key modules.dnsbimi.emit_raw_dns_records bool Emit RAW_DNS_RECORD events False modules.dnsbimi.emit_urls bool Emit URL_UNVERIFIED events True modules.dnsbimi.selectors str CSV list of BIMI selectors to check default,email,mail,bimi modules.dnscaa.dns_names bool emit DNS_NAME events True modules.dnscaa.emails bool emit EMAIL_ADDRESS events True modules.dnscaa.in_scope_only bool Only check in-scope domains True modules.dnscaa.urls bool emit URL_UNVERIFIED events True modules.docker_pull.all_tags bool Download all tags from each registry (Default False) False modules.docker_pull.output_folder str Folder to download docker repositories to modules.extractous.extensions list File extensions to parse ['bak', 'bash', 'bashrc', 'conf', 'cfg', 'crt', 'csv', 'db', 'sqlite', 'doc', 'docx', 'ica', 'indd', 'ini', 'key', 'pub', 'log', 'markdown', 'md', 'odg', 'odp', 'ods', 'odt', 'pdf', 'pem', 'pps', 'ppsx', 'ppt', 'pptx', 'ps1', 'rdp', 'sh', 'sql', 'swp', 'sxw', 'txt', 'vbs', 'wpd', 'xls', 'xlsx', 'xml', 'yml', 'yaml'] modules.fullhunt.api_key str FullHunt API Key modules.git_clone.api_key str Github token modules.git_clone.output_folder str Folder to clone repositories to modules.github_codesearch.api_key str Github token modules.github_codesearch.limit int Limit code search to this many results 100 modules.github_org.api_key str Github token modules.github_org.include_member_repos bool Also enumerate organization members' repositories False modules.github_org.include_members bool Enumerate organization members True modules.github_workflows.api_key str Github token modules.github_workflows.num_logs int For each workflow fetch the last N successful runs logs (max 100) 1 modules.hunterio.api_key str Hunter.IO API key modules.internetdb.show_open_ports bool Display OPEN_TCP_PORT events in output, even if they didn't lead to an interesting discovery False modules.ip2location.api_key str IP2location.io API Key modules.ip2location.lang str Translation information(ISO639-1). The translation is only applicable for continent, country, region and city name. modules.ipneighbor.num_bits int Netmask size (in CIDR notation) to check. 
Default is 4 bits (16 hosts) 4 modules.ipstack.api_key str IPStack GeoIP API Key modules.jadx.threads int Maximum jadx threads for extracting apk's, default: 4 4 modules.leakix.api_key str LeakIX API Key modules.passivetotal.api_key str PassiveTotal API Key in the format of 'username:api_key' modules.pgp.search_urls list PGP key servers to search <code>['https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&amp;op=vindex&amp;search=&lt;query&gt;', 'http://the.earth.li:11371/pks/lookup?fingerprint=on&amp;op=vindex&amp;search=&lt;query&gt;', 'https://pgpkeys.eu/pks/lookup?search=&lt;query&gt;&amp;op=index', 'https://pgp.mit.edu/pks/lookup?search=&lt;query&gt;&amp;op=index']</code> modules.postman_download.api_key str Postman API Key modules.postman_download.output_folder str Folder to download postman workspaces to modules.securitytrails.api_key str SecurityTrails API key modules.shodan_dns.api_key str Shodan API key modules.subdomainradar.api_key str SubDomainRadar.io API key modules.subdomainradar.group str The enumeration group to use. Choose from fast, medium, deep fast modules.subdomainradar.timeout int Timeout in seconds 120 modules.trickest.api_key str Trickest API key modules.trufflehog.concurrency int Number of concurrent workers 8 modules.trufflehog.config str File path or URL to YAML trufflehog config modules.trufflehog.deleted_forks bool Scan for deleted github forks. WARNING: This is SLOW. For a smaller repository, this process can take 20 minutes. For a larger repository, it could take hours. False modules.trufflehog.only_verified bool Only report credentials that have been verified True modules.trufflehog.version str trufflehog version 3.83.7 modules.urlscan.urls bool Emit URLs in addition to DNS_NAMEs False modules.virustotal.api_key str VirusTotal API Key modules.wayback.garbage_threshold int Dedupe similar urls if they are in a group of this size or higher (lower values == less garbage data) 10 modules.wayback.urls bool emit URLs in addition to DNS_NAMEs False modules.zoomeye.api_key str ZoomEye API key modules.zoomeye.include_related bool Include domains which may be related to the target False modules.zoomeye.max_pages int How many pages of results to fetch 20 modules.asset_inventory.output_file str Set a custom output file modules.asset_inventory.recheck bool When use_previous=True, don't retain past details like open ports or findings. Instead, allow them to be rediscovered by the new scan False modules.asset_inventory.summary_netmask int Subnet mask to use when summarizing IP addresses at end of scan 16 modules.asset_inventory.use_previous bool <code>Emit previous asset inventory as new events (use in conjunction with -n &lt;old_scan_name&gt;)</code> False modules.csv.output_file str Output to CSV file modules.discord.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.discord.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.discord.webhook_url str Discord webhook URL modules.emails.output_file str Output to file modules.http.bearer str Authorization Bearer token modules.http.method str HTTP method POST modules.http.password str Password (basic auth) modules.http.siem_friendly bool Format JSON in a SIEM-friendly way for ingestion into Elastic, Splunk, etc. 
False modules.http.timeout int HTTP timeout 10 modules.http.url str Web URL modules.http.username str Username (basic auth) modules.json.output_file str Output to file modules.json.siem_friendly bool Output JSON in a SIEM-friendly format for ingestion into Elastic, Splunk, etc. False modules.neo4j.password str Neo4j password bbotislife modules.neo4j.uri str Neo4j server + port bolt://localhost:7687 modules.neo4j.username str Neo4j username neo4j modules.postgres.database str The database name to connect to bbot modules.postgres.host str The server running Postgres localhost modules.postgres.password str The password to connect to Postgres bbotislife modules.postgres.port int The port to connect to Postgres 5432 modules.postgres.username str The username to connect to Postgres postgres modules.slack.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.slack.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.slack.webhook_url str Discord webhook URL modules.splunk.hectoken str HEC Token modules.splunk.index str Index to send data to modules.splunk.source str Source path to be added to the metadata modules.splunk.timeout int HTTP timeout 10 modules.splunk.url str Web URL modules.sqlite.database str The path to the sqlite database file modules.stdout.accept_dupes bool Whether to show duplicate events, default True True modules.stdout.event_fields list Which event fields to display [] modules.stdout.event_types list Which events to display, default all event types [] modules.stdout.format str Which text format to display, choices: text,json text modules.stdout.in_scope_only bool Whether to only show in-scope events False modules.subdomains.include_unresolved bool Include unresolved subdomains in output False modules.subdomains.output_file str Output to file modules.teams.event_types list Types of events to send ['VULNERABILITY', 'FINDING'] modules.teams.min_severity str Only allow VULNERABILITY events of this severity or higher LOW modules.teams.webhook_url str Teams webhook URL modules.txt.output_file str Output to file modules.web_report.css_theme_file str CSS theme URL for HTML output https://cdnjs.cloudflare.com/ajax/libs/github-markdown-css/5.1.0/github-markdown.min.css modules.web_report.output_file str Output to file modules.websocket.preserve_graph bool Preserve full chains of events in the graph (prevents orphans) True modules.websocket.token str Authorization Bearer token modules.websocket.url str Web URL modules.excavate.custom_yara_rules str Include custom Yara rules modules.excavate.retain_querystring bool Keep the querystring intact on emitted WEB_PARAMETERS False modules.excavate.yara_max_match_data int Sets the maximum amount of text that can extracted from a YARA regex 2000 modules.speculate.essential_only bool Only enable essential speculate features (no extra discovery) False modules.speculate.max_hosts int Max number of IP_RANGE hosts to convert into IP_ADDRESS events 65536 modules.speculate.ports str The set of ports to speculate on 80,443"},{"location":"scanning/events/","title":"Events","text":"<p>An Event is a piece of data discovered by BBOT. Examples include <code>IP_ADDRESS</code>, <code>DNS_NAME</code>, <code>EMAIL_ADDRESS</code>, <code>URL</code>, etc. When you run a BBOT scan, events are constantly being exchanged between modules. 
They are also output to the console:</p> <pre><code>[DNS_NAME]      www.evilcorp.com    sslcert         (distance-0, in-scope, resolved, subdomain, a-record)\n ^^^^^^^^       ^^^^^^^^^^^^^^^^    ^^^^^^^          ^^^^^^^^^^\nevent type      event data          source module    tags\n</code></pre>"},{"location":"scanning/events/#event-attributes","title":"Event Attributes","text":"<p>Each BBOT event has the following attributes. Not all of these attributes are visible in the terminal output. However, they are always saved in <code>output.json</code> in the scan output folder. If you want to see them on the terminal, you can use <code>--json</code>.</p> <ul> <li><code>.type</code>: the event type (e.g. <code>DNS_NAME</code>, <code>IP_ADDRESS</code>, <code>OPEN_TCP_PORT</code>, etc.)</li> <li><code>.id</code>: an identifier representing the event type + a SHA1 hash of its data (note: multiple events can have the same <code>.id</code>)</li> <li><code>.uuid</code>: a universally unique identifier for the event (e.g. <code>DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13</code>)</li> <li><code>.scope_description</code>: describes the scope of the event (e.g. <code>in-scope</code>, <code>affiliate</code>, <code>distance-2</code>)</li> <li><code>.data</code>: the actual discovered data (for some events like <code>DNS_NAME</code> or <code>IP_ADDRESS</code>, this is a string. For other more complex events like <code>HTTP_RESPONSE</code>, it's a dictionary)</li> <li><code>.host</code>: the hostname or IP address (e.g. <code>evilcorp.com</code> or <code>1.2.3.4</code>)</li> <li><code>.port</code>: the port number (e.g. <code>80</code>, <code>443</code>)</li> <li><code>.netloc</code>: the network location, including both the hostname and port (e.g. <code>www.evilcorp.com:443</code>)</li> <li><code>.resolved_hosts</code>: a list of all resolved hosts for the event (<code>A</code>, <code>AAAA</code>, and <code>CNAME</code> records)</li> <li><code>.dns_children</code>: a dictionary of all DNS records for the event (typically only present on <code>DNS_NAME</code>)</li> <li><code>.web_spider_distance</code>: a count of how many URL links have been followed in a row to get to this event</li> <li><code>.scope_distance</code>: a count of how many hops it is from the main scope (0 == in-scope)</li> <li><code>.scan</code>: the ID of the scan that produced the event</li> <li><code>.timestamp</code>: the date/time when the event was discovered</li> <li><code>.parent</code>: the ID of the parent event that led to the discovery of this event</li> <li><code>.parent_uuid</code>: the universally unique identifier for the parent event</li> <li><code>.tags</code>: a list of tags describing the event (e.g. <code>mx-record</code>, <code>http-title</code>, etc.)</li> <li><code>.module</code>: the module that discovered the event</li> <li><code>.module_sequence</code>: the recent sequence of modules that were executed to discover the event (including omitted events)</li> <li><code>.discovery_context</code>: a description of the context in which the event was discovered</li> <li><code>.discovery_path</code>: a list of every discovery context leading to this event</li> <li><code>.parent_chain</code>: a list of every event UUID leading to the discovery of this event (corresponds exactly to <code>.discovery_path</code>)</li> </ul> <p>These attributes allow us to construct a visual graph of events (e.g. in Neo4j) and query/filter/grep them more easily. 
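</p> <p>For example (a quick sketch, assuming the default <code>output.json</code> in the scan folder and the <code>jq</code> tool), you could filter events on these attributes directly:</p> <pre><code># list in-scope DNS_NAMEs that have an A record, using the .type, .scope_distance, and .tags attributes\njq -r 'select(.type==\"DNS_NAME\" and .scope_distance==0 and (.tags | index(\"a-record\"))) | .data' output.json\n</code></pre> <p>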
Here is what a typical event looks like in JSON format:</p> <pre><code>{\n  \"type\": \"DNS_NAME\",\n  \"id\": \"DNS_NAME:33bc005c2bdfea4d73e07db733bd11861cf6520e\",\n  \"uuid\": \"DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13\",\n  \"scope_description\": \"in-scope\",\n  \"data\": \"link.evilcorp.com\",\n  \"host\": \"link.evilcorp.com\",\n  \"resolved_hosts\": [\n    \"184.31.52.65\",\n    \"2600:1402:b800:d82::700\",\n    \"2600:1402:b800:d87::700\",\n    \"link.evilcorp.com.edgekey.net\"\n  ],\n  \"dns_children\": {\n    \"A\": [\n      \"184.31.52.65\"\n    ],\n    \"AAAA\": [\n      \"2600:1402:b800:d82::700\",\n      \"2600:1402:b800:d87::700\"\n    ],\n    \"CNAME\": [\n      \"link.evilcorp.com.edgekey.net\"\n    ]\n  },\n  \"web_spider_distance\": 0,\n  \"scope_distance\": 0,\n  \"scan\": \"SCAN:b6ef48bc036bc8d001595ae5061846a7e6beadb6\",\n  \"timestamp\": \"2024-10-18T15:40:13.716880+00:00\",\n  \"parent\": \"DNS_NAME:94c92b7eaed431b37ae2a757fec4e678cc3bd213\",\n  \"parent_uuid\": \"DNS_NAME:c737dffa-d4f0-4b6e-a72d-cc8c05bd892e\",\n  \"tags\": [\n    \"subdomain\",\n    \"a-record\",\n    \"cdn-akamai\",\n    \"in-scope\",\n    \"cname-record\",\n    \"aaaa-record\"\n  ],\n  \"module\": \"speculate\",\n  \"module_sequence\": \"speculate-&gt;speculate\",\n  \"discovery_context\": \"speculated parent DNS_NAME: link.evilcorp.com\",\n  \"discovery_path\": [\n    \"Scan insidious_frederick seeded with DNS_NAME: evilcorp.com\",\n    \"TXT record for evilcorp.com contains IP_ADDRESS: 149.72.247.52\",\n    \"PTR record for 149.72.247.52 contains DNS_NAME: o1.ptr2410.link.evilcorp.com\",\n    \"speculated parent DNS_NAME: ptr2410.link.evilcorp.com\",\n    \"speculated parent DNS_NAME: link.evilcorp.com\"\n  ],\n  \"parent_chain\": [\n    \"DNS_NAME:34c657a3-0bfa-457e-9e6e-0f22f04b8da5\",\n    \"IP_ADDRESS:efc0fb3b-1b42-44da-916e-83db2360e10e\",\n    \"DNS_NAME:c737dffa-d4f0-4b6e-a72d-cc8c05bd892e\",\n    \"DNS_NAME_UNRESOLVED:722a3473-30c6-40f1-90aa-908d47105d5a\",\n    \"DNS_NAME:6c96d512-090a-47f0-82e4-6860e46aac13\"\n  ]\n}\n</code></pre> <p>For a more detailed description of BBOT events, see Developer Documentation - Event.</p> <p>Below is a full list of event types along with which modules produce/consume them.</p>"},{"location":"scanning/events/#list-of-event-types","title":"List of Event Types","text":"Event Type # Consuming Modules # Producing Modules Consuming Modules Producing Modules * 17 0 affiliates, cloudcheck, csv, discord, dnsresolve, http, json, neo4j, postgres, python, slack, splunk, sqlite, stdout, teams, txt, websocket ASN 0 1 asn AZURE_TENANT 1 0 speculate CODE_REPOSITORY 6 6 docker_pull, git_clone, github_workflows, google_playstore, postman_download, trufflehog code_repository, dockerhub, github_codesearch, github_org, gitlab, postman DNS_NAME 59 43 anubisdb, asset_inventory, azure_realm, azure_tenant, baddns, baddns_zone, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, credshed, crt, dehashed, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, emailformat, fullhunt, github_codesearch, hackertarget, hunterio, internetdb, leakix, myssl, oauth, otx, passivetotal, pgp, portscan, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, speculate, subdomaincenter, subdomainradar, subdomains, trickest, urlscan, viewdns, virustotal, wayback, zoomeye anubisdb, azure_tenant, bevigil, binaryedge, 
bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, hackertarget, hunterio, internetdb, leakix, myssl, ntlm, oauth, otx, passivetotal, rapiddns, securitytrails, shodan_dns, sitedossier, speculate, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, vhost, viewdns, virustotal, wayback, zoomeye DNS_NAME_UNRESOLVED 3 0 baddns, speculate, subdomains EMAIL_ADDRESS 1 9 emails credshed, dehashed, dnscaa, emailformat, hunterio, pgp, securitytxt, skymem, sslcert FILESYSTEM 3 7 extractous, jadx, trufflehog apkpure, docker_pull, filedownload, git_clone, github_workflows, jadx, postman_download FINDING 2 29 asset_inventory, web_report ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, git, gitlab, host_header, hunt, internetdb, newsletters, ntlm, nuclei, paramminer_cookies, paramminer_getparams, secretsdb, smuggler, speculate, telerik, trufflehog, url_manipulation, wpscan GEOLOCATION 0 2 ip2location, ipstack HASHED_PASSWORD 0 2 credshed, dehashed HTTP_RESPONSE 19 1 ajaxpro, asset_inventory, badsecrets, dastardly, dotnetnuke, excavate, filedownload, gitlab, host_header, newsletters, ntlm, paramminer_cookies, paramminer_getparams, paramminer_headers, secretsdb, speculate, telerik, wappalyzer, wpscan httpx IP_ADDRESS 8 3 asn, asset_inventory, internetdb, ip2location, ipneighbor, ipstack, portscan, speculate asset_inventory, ipneighbor, speculate IP_RANGE 2 0 portscan, speculate MOBILE_APP 1 1 apkpure google_playstore OPEN_TCP_PORT 4 4 asset_inventory, fingerprintx, httpx, sslcert asset_inventory, internetdb, portscan, speculate ORG_STUB 4 1 dockerhub, github_org, google_playstore, postman speculate PASSWORD 0 2 credshed, dehashed PROTOCOL 0 1 fingerprintx RAW_DNS_RECORD 0 1 dnsbimi RAW_TEXT 1 1 excavate extractous SOCIAL 6 3 dockerhub, github_org, gitlab, gowitness, postman, speculate dockerhub, gitlab, social STORAGE_BUCKET 8 5 baddns_direct, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, speculate bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google TECHNOLOGY 4 8 asset_inventory, gitlab, web_report, wpscan badsecrets, dotnetnuke, gitlab, gowitness, internetdb, nuclei, wappalyzer, wpscan URL 20 2 ajaxpro, asset_inventory, baddns_direct, bypass403, ffuf, generic_ssrf, git, gowitness, httpx, iis_shortnames, ntlm, nuclei, robots, smuggler, speculate, telerik, url_manipulation, vhost, wafw00f, web_report gowitness, httpx URL_HINT 1 1 ffuf_shortnames iis_shortnames URL_UNVERIFIED 6 17 code_repository, filedownload, httpx, oauth, social, speculate azure_realm, bevigil, bucket_file_enum, dnsbimi, dnscaa, dockerhub, excavate, ffuf, ffuf_shortnames, github_codesearch, gowitness, hunterio, robots, securitytxt, urlscan, wayback, wpscan USERNAME 1 2 speculate credshed, dehashed VHOST 1 1 web_report vhost VULNERABILITY 2 13 asset_inventory, web_report ajaxpro, baddns, baddns_direct, baddns_zone, badsecrets, dastardly, dotnetnuke, generic_ssrf, internetdb, nuclei, telerik, trufflehog, wpscan WAF 1 1 asset_inventory wafw00f WEBSCREENSHOT 0 1 gowitness WEB_PARAMETER 4 4 hunt, paramminer_cookies, paramminer_getparams, paramminer_headers excavate, paramminer_cookies, paramminer_getparams, paramminer_headers"},{"location":"scanning/events/#findings-vs-vulnerabilities","title":"Findings Vs. 
Vulnerabilities","text":"<p>BBOT has a sharp distinction between Findings and Vulnerabilities:</p> <p>VULNERABILITY</p> <ul> <li>There's a higher standard for what is allowed to be a vulnerability. They should be considered confirmed and actionable - no additional confirmation required</li> <li>They are always assigned a severity. The possible severities are: LOW, MEDIUM, HIGH, or CRITICAL</li> </ul> <p>FINDING</p> <ul> <li>Findings can range anywhere from \"slightly interesting behavior\" to \"likely, but unconfirmed vulnerability\"</li> <li>Are often false positives</li> </ul> <p>By making this separation, actionable vulnerabilities can be identified quickly in the midst of a large scan</p>"},{"location":"scanning/output/","title":"Output","text":"<p>By default, BBOT saves its output in TXT, JSON, and CSV formats. The filenames are logged at the end of each scan: </p> <p>Every BBOT scan gets a unique and mildly-entertaining name like <code>demonic_jimmy</code>. Output for that scan, including scan stats and any web screenshots, etc., are saved to a folder by that name in <code>~/.bbot/scans</code>. The most recent 20 scans are kept, and older ones are removed. You can change the location of BBOT's output with <code>--output</code>, and you can also pick a custom scan name with <code>--name</code>.</p> <p>If you reuse a scan name, it will append to its original output files and leverage the previous.</p>"},{"location":"scanning/output/#output-modules","title":"Output Modules","text":"<p>Multiple simultaneous output formats are possible because of output modules. Output modules are similar to normal modules except they are enabled with <code>-om</code>.</p>"},{"location":"scanning/output/#stdout","title":"STDOUT","text":"<p>The <code>stdout</code> output module is what you see when you execute BBOT in the terminal. By default it looks the same as the <code>txt</code> module, but it has options you can customize. 
You can filter by event type, choose the data format (<code>text</code>, <code>json</code>), and which fields you want to see:</p> Config Option Type Description Default modules.stdout.accept_dupes bool Whether to show duplicate events, default True True modules.stdout.event_fields list Which event fields to display [] modules.stdout.event_types list Which events to display, default all event types [] modules.stdout.format str Which text format to display, choices: text,json text modules.stdout.in_scope_only bool Whether to only show in-scope events False"},{"location":"scanning/output/#txt","title":"TXT","text":"<p><code>txt</code> output is tab-delimited, so it's easy to grep:</p> <pre><code># grep out only the DNS_NAMEs\ncat ~/.bbot/scans/extreme_johnny/output.txt | grep '[DNS_NAME]' | cut -f2\nevilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#csv","title":"CSV","text":"<p>The <code>csv</code> output module produces a CSV like this:</p> Event type Event data IP Address Source Module Scope Distance Event Tags DNS_NAME evilcorp.com 1.2.3.4 TARGET 0 a-record,cdn-github,distance-0,domain,in-scope,mx-record,ns-record,resolved,soa-record,target,txt-record DNS_NAME www.evilcorp.com 2.3.4.5 certspotter 0 a-record,aaaa-record,cdn-github,cname-record,distance-0,in-scope,resolved,subdomain URL http://www.evilcorp.com 2.3.4.5 httpx 0 a-record,aaaa-record,cdn-github,cname-record,distance-0,in-scope,resolved,subdomain DNS_NAME admin.evilcorp.com 5.6.7.8 otx 0 a-record,aaaa-record,cloud-azure,cname-record,distance-0,in-scope,resolved,subdomain"},{"location":"scanning/output/#json","title":"JSON","text":"<p>If you manually enable the <code>json</code> output module, it will go to stdout:</p> <pre><code>bbot -t evilcorp.com -om json | jq\n</code></pre> <p>You will then see events like this:</p> <pre><code>{\n  \"type\": \"IP_ADDRESS\",\n  \"id\": \"IP_ADDRESS:13cd09c2adf0860a582240229cd7ad1dccdb5eb1\",\n  \"data\": \"1.2.3.4\",\n  \"scope_distance\": 1,\n  \"scan\": \"SCAN:64c0e076516ae7aa6502fd99489693d0d5ec26cc\",\n  \"timestamp\": 1688518967.740472,\n  \"resolved_hosts\": [\"1.2.3.4\"],\n  \"parent\": \"DNS_NAME:2da045542abbf86723f22383d04eb453e573723c\",\n  \"tags\": [\"distance-1\", \"ipv4\", \"internal\"],\n  \"module\": \"A\",\n  \"module_sequence\": \"A\"\n}\n</code></pre> <p>You can filter on the JSON output with <code>jq</code>:</p> <pre><code># pull out only the .data attribute of every DNS_NAME\n$ jq -r 'select(.type==\"DNS_NAME\") | .data' ~/.bbot/scans/extreme_johnny/output.json\nevilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#discord-slack-teams","title":"Discord / Slack / Teams","text":"<p>BBOT supports output via webhooks to <code>discord</code>, <code>slack</code>, and <code>teams</code>. 
To use them, you must specify a webhook URL either in the config:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      webhook_url: https://discord.com/api/webhooks/1234/deadbeef\n</code></pre> <p>...or on the command line: <pre><code>bbot -t evilcorp.com -om discord -c modules.discord.webhook_url=https://discord.com/api/webhooks/1234/deadbeef\n</code></pre></p> <p>By default, only <code>VULNERABILITY</code> and <code>FINDING</code> events are sent, but this can be customized by setting <code>event_types</code> in the config like so:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      event_types:\n        - VULNERABILITY\n        - FINDING\n        - STORAGE_BUCKET\n</code></pre> <p>...or on the command line: <pre><code>bbot -t evilcorp.com -om discord -c modules.discord.event_types=[\"STORAGE_BUCKET\",\"FINDING\",\"VULNERABILITY\"]\n</code></pre></p> <p>You can also filter on the severity of <code>VULNERABILITY</code> events by setting <code>min_severity</code>:</p> discord_preset.yml<pre><code>config:\n  modules:\n    discord:\n      min_severity: HIGH\n</code></pre>"},{"location":"scanning/output/#http","title":"HTTP","text":"<p>The <code>http</code> output module sends events in JSON format to a desired HTTP endpoint.</p> <pre><code># POST scan results to localhost\nbbot -t evilcorp.com -om http -c modules.http.url=http://localhost:8000\n</code></pre> <p>You can customize the HTTP method if needed. Authentication is also supported:</p> http_preset.yml<pre><code>config:\n  modules:\n    http:\n      url: https://localhost:8000\n      method: PUT\n      # Authorization: Bearer\n      bearer: &lt;bearer_token&gt;\n      # OR\n      username: bob\n      password: P@ssw0rd\n</code></pre>"},{"location":"scanning/output/#elasticsearch","title":"Elasticsearch","text":"<p>When outputting to Elastic, use the <code>http</code> output module with the following settings (replace <code>&lt;your_index&gt;</code> with your desired index, e.g. 
<code>bbot</code>):</p> <pre><code># send scan results directly to elasticsearch\nbbot -t evilcorp.com -om http -c \\\n  modules.http.url=http://localhost:8000/&lt;your_index&gt;/_doc \\\n  modules.http.siem_friendly=true \\\n  modules.http.username=elastic \\\n  modules.http.password=changeme\n</code></pre> <p>Alternatively, via a preset:</p> elastic_preset.yml<pre><code>config:\n  modules:\n    http:\n      url: http://localhost:8000/&lt;your_index&gt;/_doc\n      siem_friendly: true\n      username: elastic\n      password: changeme\n</code></pre>"},{"location":"scanning/output/#splunk","title":"Splunk","text":"<p>The <code>splunk</code> output module sends events in JSON format to a desired splunk instance via HEC.</p> <p>You can customize this output with the following config options:</p> splunk_preset.yml<pre><code>config:\n  modules:\n    splunk:\n      # The full URL with the URI `/services/collector/event`\n      url: https://localhost:8088/services/collector/event\n      # Generated from splunk webui\n      hectoken: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n      # Defaults to `main` if not set\n      index: my-specific-index\n      # Defaults to `bbot` if not set\n      source: /my/source.json\n</code></pre>"},{"location":"scanning/output/#asset-inventory","title":"Asset Inventory","text":"<p>The <code>asset_inventory</code> module produces a CSV like this:</p> Host Provider IP(s) Status Open Ports evilcorp.com cdn-github 1.2.3.4 Active 80,443 www.evilcorp.com cdn-github 2.3.4.5 Active 22,80,443 admin.evilcorp.com cloud-azure 5.6.7.8 N/A"},{"location":"scanning/output/#sqlite","title":"SQLite","text":"<p>The <code>sqlite</code> output module produces a SQLite database containing all events, scans, and targets. By default, it will be saved in the scan directory as <code>output.sqlite</code>.</p> <pre><code># specifying a custom database path\nbbot -t evilcorp.com -om sqlite -c modules.sqlite.database=/tmp/bbot.sqlite\n</code></pre>"},{"location":"scanning/output/#postgres","title":"Postgres","text":"<p>The <code>postgres</code> output module allows you to ingest events, scans, and targets into a Postgres database. By default, it will connect to the server on <code>localhost</code> with a username of <code>postgres</code> and password of <code>bbotislife</code>. 
You can change this behavior in the config.</p> <pre><code># specifying an alternate database\nbbot -t evilcorp.com -om postgres -c modules.postgres.database=custom_bbot_db\n</code></pre> postgres_preset.yml<pre><code>config:\n  modules:\n    postgres:\n      host: psq.fsociety.local\n      database: custom_bbot_db\n      port: 5432\n      username: postgres\n      password: bbotislife\n</code></pre>"},{"location":"scanning/output/#subdomains","title":"Subdomains","text":"<p>The <code>subdomains</code> output module produces a simple text file containing only in-scope and resolved subdomains:</p> subdomains.txt<pre><code>evilcorp.com\nwww.evilcorp.com\nmail.evilcorp.com\nportal.evilcorp.com\n</code></pre>"},{"location":"scanning/output/#neo4j","title":"Neo4j","text":"<p>Neo4j is the funnest (and prettiest) way to view and interact with BBOT data.</p> <p></p> <ul> <li>You can get Neo4j up and running with a single docker command:</li> </ul> <pre><code># start Neo4j in the background with docker\ndocker run -d -p 7687:7687 -p 7474:7474 -v \"$(pwd)/neo4j/:/data/\" -e NEO4J_AUTH=neo4j/bbotislife neo4j\n</code></pre> <ul> <li>After that, run bbot with <code>-om neo4j</code></li> </ul> <pre><code>bbot -f subdomain-enum -t evilcorp.com -om neo4j\n</code></pre> <ul> <li>Log in at http://localhost:7474 with <code>neo4j</code> / <code>bbotislife</code></li> </ul>"},{"location":"scanning/output/#cypher-queries-and-tips","title":"Cypher Queries and Tips","text":"<p>Neo4j uses the Cypher Query Language as its graph query language. Cypher uses common clauses to craft relational queries and present the desired data in multiple formats. </p> <p>Cypher queries can be broken down into three required pieces: selection, filter, and presentation. The selection piece identifies what data will be searched against - 90% of the time the \"MATCH\" clause will be enough, but there are means to read from CSV or JSON data files. In all of these examples the \"MATCH\" clause will be used. The filter piece helps to focus on the required data and uses the \"WHERE\" clause to accomplish this (most basic operators can be used). Finally, the presentation section identifies how the data should be presented back to the querier. While Neo4j is a graph database, it can be used in a traditional table view.</p> <p>A simple query to grab every URL event with \".com\" in the BBOT data field would look like this: <code>MATCH (u:URL) WHERE u.data contains \".com\" RETURN u</code></p> <p>In this query the following can be identified: - Within the MATCH statement \"u\" is a variable and can be any value needed by the user, while the \"URL\" label is a direct relationship to the BBOT event type. - The WHERE statement allows the query to filter on any of the BBOT event properties like data, tag, or even the label itself.  
- The RETURN statement is a general presentation of the whole URL event but this can be narrowed down to present any of the specific properties of the BBOT event (<code>RETURN u.data, u.tags</code>).</p> <p>The following are a few recommended queries to get started with:</p> <pre><code>// Get all \"in-scope\" DNS Nodes and return just data and tags properties\nMATCH (n:DNS_NAME)\nWHERE \"in-scope\" IN n.tags\nRETURN n.data, n.tags\n</code></pre> <pre><code>// Get the count of labels/BBOT events in the Neo4j Database\nMATCH (n)\nRETURN labels(n), count(n)\n</code></pre> <pre><code>// Get a graph of open ports associated with each domain\nMATCH z = ((n:DNS_NAME) --&gt; (p:OPEN_TCP_PORT))\nRETURN z\n</code></pre> <pre><code>// Get all domains and IP addresses with open TCP ports\nMATCH (n) --&gt; (p:OPEN_TCP_PORT)\nWHERE \"in-scope\" in n.tags and (n:DNS_NAME or n:IP_ADDRESS)\nWITH *, TAIL(SPLIT(p.data, ':')) AS port\nRETURN n.data, collect(distinct port)\n</code></pre> <pre><code>// Clear the database\nMATCH (n) DETACH DELETE n\n</code></pre> <p>This is not an exhaustive list of clauses, filters, or other means to use cypher and should be considered a starting point. To build more advanced queries consider reading Neo4j's Cypher documentation. </p> <p>Additional note: these sample queries are dependent on the existence of the data in the target neo4j database. </p>"},{"location":"scanning/presets/","title":"Presets","text":"<p>Once you start customizing BBOT, your commands can start to get really long. Presets let you put all your scan settings in a single file:</p> <pre><code>bbot -p ./my_preset.yml\n</code></pre> <p>A Preset is a YAML file that can include scan targets, modules, and config options like API keys.</p> <p>A typical preset looks like this:</p> subdomain-enum.yml<pre><code>description: Enumerate subdomains via APIs, brute-force\n\nflags:\n  - subdomain-enum\n\noutput_modules:\n  - subdomains\n</code></pre>"},{"location":"scanning/presets/#how-to-use-presets-p","title":"How to use Presets (<code>-p</code>)","text":"<p>BBOT has a ready-made collection of presets for common tasks like subdomain enumeration and web spidering. They live in <code>~/.bbot/presets</code>.</p> <p>To list them, you can do:</p> <pre><code># list available presets\nbbot -lp\n</code></pre> <p>Enable them with <code>-p</code>:</p> <pre><code># do a subdomain enumeration \nbbot -t evilcorp.com -p subdomain-enum\n\n# multiple presets - subdomain enumeration + web spider\nbbot -t evilcorp.com -p subdomain-enum spider\n\n# start with a preset but only enable modules that have the 'passive' flag\nbbot -t evilcorp.com -p subdomain-enum -rf passive\n\n# preset + manual config override\nbbot -t www.evilcorp.com -p spider -c web.spider_distance=10\n</code></pre> <p>You can build on the default presets, or create your own. 
Here's an example of a custom preset that builds on <code>subdomain-enum</code>:</p> my_subdomains.yml<pre><code>description: Do a subdomain enumeration + basic web scan + nuclei\n\ntarget:\n  - evilcorp.com\n\ninclude:\n  # include these default presets\n  - subdomain-enum\n  - web-basic\n\nmodules:\n  # enable nuclei in addition to the other modules\n  - nuclei\n\nconfig:\n  # global config options\n  web:\n    http_proxy: http://127.0.0.1:8080\n  # module config options\n  modules:\n    # api keys\n    securitytrails:\n      api_key: 21a270d5f59c9b05813a72bb41707266\n    virustotal:\n      # multiple API keys are allowed\n      api_key:\n        - 4f41243847da693a4f356c0486114bc6\n        - 5bc6ed268ab6488270e496d3183a1a27\n</code></pre> <p>To execute your custom preset, you do:</p> <pre><code>bbot -p ./my_subdomains.yml\n</code></pre>"},{"location":"scanning/presets/#preset-load-order","title":"Preset Load Order","text":"<p>When you enable multiple presets, the order matters. In the case of a conflict, the last preset will always win. This means, for example, if you have a custom preset called <code>my_spider</code> that sets <code>web.spider_distance</code> to 1:</p> my_spider.yml<pre><code>config:\n  web:\n    spider_distance: 1\n</code></pre> <p>...and you enable it alongside the default <code>spider</code> preset in this order:</p> <pre><code>bbot -t evilcorp.com -p ./my_spider.yml spider\n</code></pre> <p>...the value of <code>web.spider_distance</code> will be overridden by <code>spider</code>. To ensure this doesn't happen, you would want to switch the order of the presets:</p> <pre><code>bbot -t evilcorp.com -p spider ./my_spider.yml\n</code></pre>"},{"location":"scanning/presets/#validating-presets","title":"Validating Presets","text":"<p>To make sure BBOT is configured the way you expect, you can always use <code>--current-preset</code> to show the final version of the config that will be used when BBOT executes:</p> <pre><code># verify the preset is what you want\nbbot -p ./mypreset.yml --current-preset\n</code></pre>"},{"location":"scanning/presets/#advanced-usage","title":"Advanced Usage","text":"<p>BBOT Presets support advanced features like environment variable substitution and custom conditions.</p>"},{"location":"scanning/presets/#custom-modules","title":"Custom Modules","text":"<p>If you want to use a custom BBOT <code>.py</code> module, you can either move it into <code>bbot/modules</code> where BBOT is installed, or add its parent folder to <code>module_dirs</code> like so:</p> custom_modules.yml<pre><code># load extra BBOT modules from this location\nmodule_dirs:\n  - /home/user/custom_modules\n</code></pre>"},{"location":"scanning/presets/#environment-variables","title":"Environment Variables","text":"<p>You can insert environment variables into your preset like this: <code>${env:&lt;variable&gt;}</code>:</p> my_nuclei.yml<pre><code>description: Do a nuclei scan\n\ntarget:\n  - evilcorp.com\n\nmodules:\n  - nuclei\n\nconfig:\n  modules:\n    nuclei:\n      # allow the nuclei templates to be specified at runtime via an environment variable\n      tags: ${env:NUCLEI_TAGS}\n</code></pre> <pre><code>NUCLEI_TAGS=apache,nginx bbot -p ./my_nuclei.yml\n</code></pre>"},{"location":"scanning/presets/#conditions","title":"Conditions","text":"<p>Sometimes, you might need to add custom logic to a preset. BBOT supports this via <code>conditions</code>. 
The <code>conditions</code> attribute allows you to specify a list of custom conditions that will be evaluated before the scan starts. This is useful for performing last-minute sanity checks, or changing the behavior of the scan based on custom criteria.</p> my_preset.yml<pre><code>description: Abort if nuclei templates aren't specified\n\nmodules:\n  - nuclei\n\nconditions:\n  - |\n    {% if not config.modules.nuclei.templates %}\n      {{ abort(\"Don't forget to set your templates!\") }}\n    {% endif %}\n</code></pre> my_preset.yml<pre><code>description: Enable ffuf but only when the web spider isn't also enabled\n\nmodules:\n  - ffuf\n\nconditions:\n  - |\n    {% if config.web.spider_distance &gt; 0 and config.web.spider_depth &gt; 0 %}\n      {{ warn(\"Disabling ffuf because the web spider is enabled\") }}\n      {{ preset.exclude_module(\"ffuf\") }}\n    {% endif %}\n</code></pre> <p>Conditions use Jinja, which means they can contain Python code. They run inside a sandboxed environment which has access to the following variables:</p> <ul> <li><code>preset</code> - the current preset object</li> <li><code>config</code> - the current config (an alias for <code>preset.config</code>)</li> <li><code>warn(message)</code> - display a custom warning message to the user</li> <li><code>abort(message)</code> - abort the scan with an optional message</li> </ul> <p>If you aren't able to accomplish what you want with conditions, or if you need access to a new variable/function, please let us know on Github.</p>"},{"location":"scanning/presets_list/","title":"List of Presets","text":"<p>Below is a list of every default BBOT preset, including its YAML.</p>"},{"location":"scanning/presets_list/#baddns-thorough","title":"baddns-thorough","text":"<p>Run all baddns modules and submodules.</p> <code>baddns-thorough.yml</code> ~/.bbot/presets/baddns-thorough.yml<pre><code>description: Run all baddns modules and submodules.\n\n\nmodules:\n  - baddns\n  - baddns_zone\n  - baddns_direct\n\nconfig:\n  modules:\n    baddns:\n      enabled_submodules: [CNAME,references,MX,NS,TXT]\n</code></pre> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#cloud-enum","title":"cloud-enum","text":"<p>Enumerate cloud resources such as storage buckets, etc.</p> <code>cloud-enum.yml</code> ~/.bbot/presets/cloud-enum.yml<pre><code>description: Enumerate cloud resources such as storage buckets, etc.\n\ninclude:\n  - subdomain-enum\n\nflags:\n  - cloud-enum\n</code></pre> <p>Modules: 59</p>"},{"location":"scanning/presets_list/#code-enum","title":"code-enum","text":"<p>Enumerate Git repositories, Docker images, etc.</p> <code>code-enum.yml</code> ~/.bbot/presets/code-enum.yml<pre><code>description: Enumerate Git repositories, Docker images, etc.\n\nflags:\n  - code-enum\n</code></pre> <p>Modules: 16</p>"},{"location":"scanning/presets_list/#dirbust-heavy","title":"dirbust-heavy","text":"<p>Recursive web directory brute-force (aggressive)</p> <code>dirbust-heavy.yml</code> ~/.bbot/presets/web/dirbust-heavy.yml<pre><code>description: Recursive web directory brute-force (aggressive)\n\ninclude:\n  - spider\n\nflags:\n  - iis-shortnames\n\nmodules:\n  - ffuf\n  - wayback\n\nconfig:\n  modules:\n    iis_shortnames:\n      # we exploit the shortnames vulnerability to produce URL_HINTs which are consumed by ffuf_shortnames\n      detect_only: False\n    ffuf:\n      depth: 3\n      lines: 5000\n      extensions:\n        - php\n        - asp\n        - aspx\n        - ashx\n        - asmx\n        - jsp\n        - jspx\n        - 
cfm\n        - zip\n        - conf\n        - config\n        - xml\n        - json\n        - yml\n        - yaml\n    # emit URLs from wayback\n    wayback:\n      urls: True\n</code></pre> <p>Category: web</p> <p>Modules: 5</p>"},{"location":"scanning/presets_list/#dirbust-light","title":"dirbust-light","text":"<p>Basic web directory brute-force (surface-level directories only)</p> <code>dirbust-light.yml</code> ~/.bbot/presets/web/dirbust-light.yml<pre><code>description: Basic web directory brute-force (surface-level directories only)\n\ninclude:\n  - iis-shortnames\n\nmodules:\n  - ffuf\n\nconfig:\n  modules:\n    ffuf:\n      # wordlist size = 1000\n      lines: 1000\n</code></pre> <p>Category: web</p> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#dotnet-audit","title":"dotnet-audit","text":"<p>Comprehensive scan for all IIS/.NET specific modules and module settings</p> <code>dotnet-audit.yml</code> ~/.bbot/presets/web/dotnet-audit.yml<pre><code>description: Comprehensive scan for all IIS/.NET specific modules and module settings\n\n\ninclude:\n  - iis-shortnames\n\nmodules:\n  - httpx\n  - badsecrets\n  - ffuf_shortnames\n  - ffuf\n  - telerik\n  - ajaxpro\n  - dotnetnuke\n\nconfig:\n  modules:\n    ffuf:\n      extensions: asp,aspx,ashx,asmx,ascx\n    telerik:\n      exploit_RAU_crypto: True\n</code></pre> <p>Category: web</p> <p>Modules: 8</p>"},{"location":"scanning/presets_list/#email-enum","title":"email-enum","text":"<p>Enumerate email addresses from APIs, web crawling, etc.</p> <code>email-enum.yml</code> ~/.bbot/presets/email-enum.yml<pre><code>description: Enumerate email addresses from APIs, web crawling, etc.\n\nflags:\n  - email-enum\n\noutput_modules:\n  - emails\n</code></pre> <p>Modules: 7</p>"},{"location":"scanning/presets_list/#fast","title":"fast","text":"<p>Scan only the provided targets as fast as possible - no extra discovery</p> <code>fast.yml</code> ~/.bbot/presets/fast.yml<pre><code>description: Scan only the provided targets as fast as possible - no extra discovery\n\nexclude_modules:\n  - excavate\n\nconfig:\n  # only scan the exact targets specified\n  scope:\n    strict: true\n  # speed up dns resolution by doing A/AAAA only - not MX/NS/SRV/etc\n  dns:\n    minimal: true\n  # essential speculation only\n  modules:\n    speculate:\n      essential_only: true\n</code></pre> <p>Modules: 0</p>"},{"location":"scanning/presets_list/#iis-shortnames","title":"iis-shortnames","text":"<p>Recursively enumerate IIS shortnames</p> <code>iis-shortnames.yml</code> ~/.bbot/presets/web/iis-shortnames.yml<pre><code>description: Recursively enumerate IIS shortnames\n\nflags:\n  - iis-shortnames\n\nconfig:\n  modules:\n    iis_shortnames:\n      # exploit the vulnerability\n      detect_only: false\n</code></pre> <p>Category: web</p> <p>Modules: 3</p>"},{"location":"scanning/presets_list/#kitchen-sink","title":"kitchen-sink","text":"<p>Everything everywhere all at once</p> <code>kitchen-sink.yml</code> ~/.bbot/presets/kitchen-sink.yml<pre><code>description: Everything everywhere all at once\n\ninclude:\n  - subdomain-enum\n  - cloud-enum\n  - code-enum\n  - email-enum\n  - spider\n  - web-basic\n  - paramminer\n  - dirbust-light\n  - web-screenshots\n  - baddns-thorough\n\nconfig:\n  modules:\n    baddns:\n      enable_references: True\n</code></pre> <p>Modules: 86</p>"},{"location":"scanning/presets_list/#paramminer","title":"paramminer","text":"<p>Discover new web parameters via brute-force</p> <code>paramminer.yml</code> 
~/.bbot/presets/web/paramminer.yml<pre><code>description: Discover new web parameters via brute-force\n\nflags:\n  - web-paramminer\n\nmodules:\n  - httpx\n\nconfig:\n  web:\n    spider_distance: 1\n    spider_depth: 4\n</code></pre> <p>Category: web</p> <p>Modules: 4</p>"},{"location":"scanning/presets_list/#spider","title":"spider","text":"<p>Recursive web spider</p> <code>spider.yml</code> ~/.bbot/presets/spider.yml<pre><code>description: Recursive web spider\n\nmodules:\n  - httpx\n\nblacklist:\n  # Prevent spider from invalidating sessions by logging out\n  - \"RE:/.*(sign|log)[_-]?out\"\n\nconfig:\n  web:\n    # how many links to follow in a row\n    spider_distance: 2\n    # don't follow links whose directory depth is higher than 4\n    spider_depth: 4\n    # maximum number of links to follow per page\n    spider_links_per_page: 25\n</code></pre> <p>Modules: 1</p>"},{"location":"scanning/presets_list/#subdomain-enum","title":"subdomain-enum","text":"<p>Enumerate subdomains via APIs, brute-force</p> <code>subdomain-enum.yml</code> ~/.bbot/presets/subdomain-enum.yml<pre><code>description: Enumerate subdomains via APIs, brute-force\n\nflags:\n  # enable every module with the subdomain-enum flag\n  - subdomain-enum\n\noutput_modules:\n  # output unique subdomains to TXT file\n  - subdomains\n\nconfig:\n  dns:\n    threads: 25\n    brute_threads: 1000\n  # put your API keys here\n  # modules:\n  #   github:\n  #     api_key: \"\"\n  #   chaos:\n  #     api_key: \"\"\n  #   securitytrails:\n  #     api_key: \"\"\n</code></pre> <p>Modules: 52</p>"},{"location":"scanning/presets_list/#web-basic","title":"web-basic","text":"<p>Quick web scan</p> <code>web-basic.yml</code> ~/.bbot/presets/web-basic.yml<pre><code>description: Quick web scan\n\ninclude:\n  - iis-shortnames\n\nflags:\n  - web-basic\n</code></pre> <p>Modules: 19</p>"},{"location":"scanning/presets_list/#web-screenshots","title":"web-screenshots","text":"<p>Take screenshots of webpages</p> <code>web-screenshots.yml</code> ~/.bbot/presets/web-screenshots.yml<pre><code>description: Take screenshots of webpages\n\nflags:\n  - web-screenshots\n\nconfig:\n  modules:\n    gowitness:\n      resolution_x: 1440\n      resolution_y: 900\n      # folder to output web screenshots (default is inside ~/.bbot/scans/scan_name)\n      output_path: \"\"\n      # whether to take screenshots of social media pages\n      social: True\n</code></pre> <p>Modules: 3</p>"},{"location":"scanning/presets_list/#web-thorough","title":"web-thorough","text":"<p>Aggressive web scan</p> <code>web-thorough.yml</code> ~/.bbot/presets/web-thorough.yml<pre><code>description: Aggressive web scan\n\ninclude:\n  # include the web-basic preset\n  - web-basic\n\nflags:\n  - web-thorough\n</code></pre> <p>Modules: 30</p>"},{"location":"scanning/presets_list/#table-of-default-presets","title":"Table of Default Presets","text":"<p>Here is a the same data, but in a table:</p> Preset Category Description # Modules Modules baddns-thorough Run all baddns modules and submodules. 4 baddns, baddns_direct, baddns_zone, httpx cloud-enum Enumerate cloud resources such as storage buckets, etc. 
59 anubisdb, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, social, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, virustotal, wayback, zoomeye code-enum Enumerate Git repositories, Docker images, etc. 16 apkpure, code_repository, docker_pull, dockerhub, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, httpx, postman, postman_download, social, trufflehog dirbust-heavy web Recursive web directory brute-force (aggressive) 5 ffuf, ffuf_shortnames, httpx, iis_shortnames, wayback dirbust-light web Basic web directory brute-force (surface-level directories only) 4 ffuf, ffuf_shortnames, httpx, iis_shortnames dotnet-audit web Comprehensive scan for all IIS/.NET specific modules and module settings 8 ajaxpro, badsecrets, dotnetnuke, ffuf, ffuf_shortnames, httpx, iis_shortnames, telerik email-enum Enumerate email addresses from APIs, web crawling, etc. 7 dehashed, dnscaa, emailformat, hunterio, pgp, skymem, sslcert fast Scan only the provided targets as fast as possible - no extra discovery 0 iis-shortnames web Recursively enumerate IIS shortnames 3 ffuf_shortnames, httpx, iis_shortnames kitchen-sink Everything everywhere all at once 86 anubisdb, apkpure, asn, azure_realm, azure_tenant, baddns, baddns_direct, baddns_zone, badsecrets, bevigil, binaryedge, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_file_enum, bucket_firebase, bucket_google, bufferoverrun, builtwith, c99, censys, certspotter, chaos, code_repository, columbus, crt, dehashed, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, docker_pull, dockerhub, emailformat, ffuf, ffuf_shortnames, filedownload, fullhunt, git, git_clone, github_codesearch, github_org, github_workflows, gitlab, google_playstore, gowitness, hackertarget, httpx, hunterio, iis_shortnames, internetdb, ipneighbor, leakix, myssl, ntlm, oauth, otx, paramminer_cookies, paramminer_getparams, paramminer_headers, passivetotal, pgp, postman, postman_download, rapiddns, robots, secretsdb, securitytrails, securitytxt, shodan_dns, sitedossier, skymem, social, sslcert, subdomaincenter, subdomainradar, trickest, trufflehog, urlscan, virustotal, wappalyzer, wayback, zoomeye paramminer web Discover new web parameters via brute-force 4 httpx, paramminer_cookies, paramminer_getparams, paramminer_headers spider Recursive web spider 1 httpx subdomain-enum Enumerate subdomains via APIs, brute-force 52 anubisdb, asn, azure_realm, azure_tenant, baddns_direct, baddns_zone, bevigil, binaryedge, bufferoverrun, builtwith, c99, censys, certspotter, chaos, columbus, crt, digitorus, dnsbimi, dnsbrute, dnsbrute_mutations, dnscaa, dnscommonsrv, dnsdumpster, fullhunt, github_codesearch, github_org, hackertarget, httpx, hunterio, internetdb, ipneighbor, leakix, myssl, oauth, otx, passivetotal, postman, postman_download, rapiddns, securitytrails, securitytxt, shodan_dns, sitedossier, social, sslcert, subdomaincenter, subdomainradar, trickest, urlscan, virustotal, wayback, zoomeye 
web-basic Quick web scan 19 azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_firebase, bucket_google, ffuf_shortnames, filedownload, git, httpx, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, sslcert, wappalyzer web-screenshots Take screenshots of webpages 3 gowitness, httpx, social web-thorough Aggressive web scan 30 ajaxpro, azure_realm, baddns, badsecrets, bucket_amazon, bucket_azure, bucket_digitalocean, bucket_firebase, bucket_google, bypass403, dastardly, dotnetnuke, ffuf_shortnames, filedownload, generic_ssrf, git, host_header, httpx, hunt, iis_shortnames, ntlm, oauth, robots, secretsdb, securitytxt, smuggler, sslcert, telerik, url_manipulation, wappalyzer"},{"location":"scanning/tips_and_tricks/","title":"Tips and Tricks","text":"<p>Below are some helpful tricks to help you in your adventures.</p>"},{"location":"scanning/tips_and_tricks/#change-verbosity-during-scan","title":"Change Verbosity During Scan","text":"<p>Press enter during a BBOT scan to change the log level. This will allow you to see debugging messages, etc.</p> <p></p>"},{"location":"scanning/tips_and_tricks/#kill-individual-module-during-scan","title":"Kill Individual Module During Scan","text":"<p>Sometimes a certain module can get stuck or slow down the scan. If this happens and you want to kill it, just type \"<code>kill &lt;module&gt;</code>\" in the terminal and press enter. This will kill and disable the module for the rest of the scan.</p> <p>You can also kill multiple modules at a time by specifying them in a space- or comma-separated list:</p> <pre><code>kill httpx sslcert\n</code></pre> <p></p>"},{"location":"scanning/tips_and_tricks/#common-config-changes","title":"Common Config Changes","text":""},{"location":"scanning/tips_and_tricks/#speed-up-slow-modules","title":"Speed Up Slow Modules","text":"<p>BBOT modules can be parallelized so that more than one instance runs at a time. Many modules are already set to reasonable defaults:</p> <pre><code>class baddns(BaseModule):\n    module_threads = 8\n</code></pre> <p>To override this, you can set a module's <code>module_threads</code> in the config:</p> <pre><code># increase baddns threads to 20\nbbot -t evilcorp.com -m baddns -c modules.baddns.module_threads=20\n</code></pre>"},{"location":"scanning/tips_and_tricks/#boost-dns-brute-force-speed","title":"Boost DNS Brute-force Speed","text":"<p>If you have a fast internet connection or are running BBOT from a cloud VM, you can speed up subdomain enumeration by cranking the threads for <code>massdns</code>. The default is <code>1000</code>, which is about 1MB/s of DNS traffic:</p> <pre><code># massdns with 5000 resolvers, about 5MB/s\nbbot -t evilcorp.com -f subdomain-enum -c dns.brute_threads=5000\n</code></pre>"},{"location":"scanning/tips_and_tricks/#web-spider","title":"Web Spider","text":"<p>The web spider is great for finding juicy data like subdomains, email addresses, and javascript secrets buried in webpages. However, since it can lengthen the duration of a scan, it's disabled by default. To enable the web spider, you must increase the value of <code>web.spider_distance</code>.</p> <p>The web spider is controlled with three config values:</p> <ul> <li><code>web.spider_depth</code> (default: <code>1</code>): the maximum directory depth allowed. 
This is to prevent the spider from delving too deep into a website.</li> <li><code>web.spider_distance</code> (<code>0</code> == all spidering disabled, default: <code>0</code>): the maximum number of links that can be followed in a row. This is designed to limit the spider in cases where <code>web.spider_depth</code> fails (e.g. for an ecommerce website with thousands of base-level URLs).</li> <li><code>web.spider_links_per_page</code> (default: <code>25</code>): the maximum number of links per page that can be followed. This is designed to save you in cases where a single page has hundreds or thousands of links.</li> </ul> <p>Here is a typical example:</p> spider.yml<pre><code>config:\n  web:\n    spider_depth: 2\n    spider_distance: 2\n    spider_links_per_page: 25\n</code></pre> <pre><code># run the web spider against www.evilcorp.com\nbbot -t www.evilcorp.com -m httpx -c spider.yml\n</code></pre> <p>You can also pair the web spider with subdomain enumeration:</p> <pre><code># spider every subdomain of evilcorp.com\nbbot -t evilcorp.com -f subdomain-enum -c spider.yml\n</code></pre>"},{"location":"scanning/tips_and_tricks/#ingesting-bbot-data-into-siem-elastic-splunk","title":"Ingesting BBOT Data Into SIEM (Elastic, Splunk)","text":"<p>If your goal is to run a BBOT scan and later feed its data into a SIEM such as Elastic, be sure to enable this option when scanning:</p> <pre><code>bbot -t evilcorp.com -c modules.json.siem_friendly=true\n</code></pre> <p>This ensures the <code>.data</code> event attribute is always the same type (a dictionary), by nesting it like so: <pre><code>{\n  \"type\": \"DNS_NAME\",\n  \"data\": {\n    \"DNS_NAME\": \"blacklanternsecurity.com\"\n  }\n}\n</code></pre></p>"},{"location":"scanning/tips_and_tricks/#custom-http-proxy","title":"Custom HTTP Proxy","text":"<p>Web pentesters may appreciate BBOT's ability to quickly populate Burp Suite site maps for all subdomains in a target. If your scan includes gowitness, this will capture the traffic as if you manually visited each website in your browser -- including auxiliary web resources and javascript API calls. To accomplish this, set the <code>web.http_proxy</code> config option like so:</p> <pre><code># enumerate subdomains, take web screenshots, proxy through Burp\nbbot -t evilcorp.com -f subdomain-enum -m gowitness -c web.http_proxy=http://127.0.0.1:8080\n</code></pre>"},{"location":"scanning/tips_and_tricks/#display-http_response-events","title":"Display <code>HTTP_RESPONSE</code> Events","text":"<p>BBOT's <code>httpx</code> module emits <code>HTTP_RESPONSE</code> events, but by default they're hidden from output. These events contain the full raw HTTP body along with headers, etc. If you want to see them, you can modify <code>omit_event_types</code> in the config:</p> ~/.bbot/config/bbot.yml<pre><code>omit_event_types:\n  - URL_UNVERIFIED\n  # - HTTP_RESPONSE\n</code></pre>"},{"location":"scanning/tips_and_tricks/#display-out-of-scope-events","title":"Display Out-of-scope Events","text":"<p>By default, BBOT only shows in-scope events (with a few exceptions for things like storage buckets). 
If you want to see events that BBOT is emitting internally (such as for DNS resolution, etc.), you can increase <code>scope.report_distance</code> in the config or on the command line like so: <pre><code># display events up to scope distance 2 (default == 0)\nbbot -f subdomain-enum -t evilcorp.com -c scope.report_distance=2\n</code></pre></p>"},{"location":"scanning/tips_and_tricks/#speed-up-scans-by-disabling-dns-resolution","title":"Speed Up Scans By Disabling DNS Resolution","text":"<p>If you already have a list of discovered targets (e.g. URLs), you can speed up the scan by skipping BBOT's DNS resolution. You can do this by setting <code>dns.disable</code> to <code>true</code>:</p> <pre><code># completely disable DNS resolution\nbbot -m httpx gowitness wappalyzer -t urls.txt -c dns.disable=true\n</code></pre> <p>Note that the above setting completely disables DNS resolution, meaning even <code>A</code> and <code>AAAA</code> records are not resolved. This can cause problems if you're using an IP whitelist or blacklist. In this case, you'll want to use <code>dns.minimal</code> instead:</p> <pre><code># only resolve A and AAAA records\nbbot -m httpx gowitness wappalyzer -t urls.txt -c dns.minimal=true\n</code></pre>"},{"location":"scanning/tips_and_tricks/#faq","title":"FAQ","text":""},{"location":"scanning/tips_and_tricks/#what-is-url_unverified","title":"What is <code>URL_UNVERIFIED</code>?","text":"<p><code>URL_UNVERIFIED</code> events are URLs that haven't yet been visited by <code>httpx</code>. Once <code>httpx</code> visits them, it re-emits them as <code>URL</code>s, tagged with their resulting status code.</p> <p>For example, when <code>excavate</code> gets an <code>HTTP_RESPONSE</code> event, it extracts links from the raw HTTP response as <code>URL_UNVERIFIED</code>s and then passes them back to <code>httpx</code> to be visited.</p> <p>By default, <code>URL_UNVERIFIED</code>s are hidden from output. If you want to see all of them, including the out-of-scope ones, you can do so by changing <code>omit_event_types</code> and <code>scope.report_distance</code> in the config like so:</p> <pre><code># visit www.evilcorp.com and extract all the links\nbbot -t www.evilcorp.com -m httpx -c omit_event_types=[] scope.report_distance=2\n</code></pre>"}]}
\ No newline at end of file
diff --git a/Dev/troubleshooting/index.html b/Dev/troubleshooting/index.html
index 1837fbb2a..100c71c58 100644
--- a/Dev/troubleshooting/index.html
+++ b/Dev/troubleshooting/index.html
@@ -20,7 +20,7 @@
 <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
 <link href="../assets/_mkdocstrings.css" rel="stylesheet"/>
 <script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce(((e,_)=>(e<<5)-e+_.charCodeAt(0)),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
-<link href="../assets/stylesheets/extra-style.vcmriqom.min.css" rel="stylesheet"/></head>
+<link href="../assets/stylesheets/extra-style.b78wdczn.min.css" rel="stylesheet"/></head>
 <body data-md-color-accent="deep-orange" data-md-color-primary="black" data-md-color-scheme="slate" dir="ltr">
 <input autocomplete="off" class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
 <input autocomplete="off" class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>