From 7c72d5607b1426996bd6770822467609f091217c Mon Sep 17 00:00:00 2001 From: fullmetalcache Date: Thu, 4 Mar 2021 06:57:31 -0700 Subject: [PATCH] Beacon Detection by FQDN (#604) * initial poc layout and mongo query setup * added dissector and sorter portions of the beaconing process to the fqdn beacons package * beaconing by fqdn table creation functionality finished * Add ResolvedIPs to data sent to sorter * Working MVP for FQDN Beaconing Detection * moved datatypes/functions only used in fqdn beacons out of the data package * Added show-beacons-fqdn command and updated supporting files * initial all sources modification * stashin changes * Changed module fields to beaconsFQDN * removed mongodb_test.go * Addid static config for BeaconFQDN * Fixed name in static config, added flag in fsimporter * Filter out strobes for now * Fixed reporting output for dst/src IPs * updated strobes flagging and storage for fqdn beacons * Added BeaconFQDN to rita.yaml * Updated a comment * Removed todo comment for config file update * Move countAndRemoveConsecutiveDuplicates to the analyzer where it is used in beacons and beaconsFQDN, update comments in beacon and beaconfqdn sorter, add typing info to FqdnInput.DstBSONList * added initial additional worker functionality for beacons fqdn * fixed closing bug in new worker * Fix bug where dissector query was matching on src_network_name, clean up unique ip data structures. 
Co-authored-by: lisaSW Co-authored-by: Logan Lembke Co-authored-by: lisaSW Co-authored-by: Logan L --- commands/show-beacons-fqdn.go | 156 ++++++++++ config/static.go | 7 + config/tables.go | 6 + etc/rita.yaml | 245 ++++++++-------- parser/fsimporter.go | 30 +- pkg/beacon/analyzer.go | 31 +- pkg/beacon/dissector.go | 7 + pkg/beacon/sorter.go | 26 +- pkg/beaconfqdn/accumulator.go | 95 ++++++ pkg/beaconfqdn/analyzer.go | 481 +++++++++++++++++++++++++++++++ pkg/beaconfqdn/dissector.go | 183 ++++++++++++ pkg/beaconfqdn/mongodb.go | 155 ++++++++++ pkg/beaconfqdn/repository.go | 90 ++++++ pkg/beaconfqdn/results.go | 20 ++ pkg/beaconfqdn/sorter.go | 66 +++++ pkg/beaconfqdn/writer.go | 100 +++++++ pkg/data/repository.go | 96 ++++-- pkg/hostname/repository.go | 47 ++- pkg/uconn/analyzer.go | 7 +- reporting/report-beaconsfqdn.go | 77 +++++ reporting/report.go | 5 + reporting/templates/templates.go | 29 ++ 22 files changed, 1770 insertions(+), 189 deletions(-) create mode 100644 commands/show-beacons-fqdn.go create mode 100644 pkg/beaconfqdn/accumulator.go create mode 100644 pkg/beaconfqdn/analyzer.go create mode 100644 pkg/beaconfqdn/dissector.go create mode 100644 pkg/beaconfqdn/mongodb.go create mode 100644 pkg/beaconfqdn/repository.go create mode 100644 pkg/beaconfqdn/results.go create mode 100644 pkg/beaconfqdn/sorter.go create mode 100644 pkg/beaconfqdn/writer.go create mode 100644 reporting/report-beaconsfqdn.go diff --git a/commands/show-beacons-fqdn.go b/commands/show-beacons-fqdn.go new file mode 100644 index 00000000..63bff139 --- /dev/null +++ b/commands/show-beacons-fqdn.go @@ -0,0 +1,156 @@ +package commands + +import ( + "fmt" + "os" + "strings" + + "github.com/activecm/rita/pkg/beaconfqdn" + "github.com/activecm/rita/resources" + "github.com/olekukonko/tablewriter" + "github.com/urfave/cli" +) + +func init() { + command := cli.Command{ + Name: "show-beacons-fqdn", + Usage: "Print hosts which show signs of C2 software (FQDN Analysis)", + ArgsUsage: "", + Flags: 
[]cli.Flag{ + humanFlag, + configFlag, + delimFlag, + netNamesFlag, + }, + Action: showBeaconsFQDN, + } + + bootstrapCommands(command) +} + +func showBeaconsFQDN(c *cli.Context) error { + db := c.Args().Get(0) + if db == "" { + return cli.NewExitError("Specify a database", -1) + } + res := resources.InitResources(c.String("config")) + res.DB.SelectDB(db) + + data, err := beaconfqdn.Results(res, 0) + + if err != nil { + res.Log.Error(err) + return cli.NewExitError(err, -1) + } + + if !(len(data) > 0) { + return cli.NewExitError("No results were found for "+db, -1) + } + + showNetNames := c.Bool("network-names") + + if c.Bool("human-readable") { + err := showBeaconsFQDNHuman(data, showNetNames) + if err != nil { + return cli.NewExitError(err.Error(), -1) + } + return nil + } + + err = showBeaconsFQDNDelim(data, c.String("delimiter"), showNetNames) + if err != nil { + return cli.NewExitError(err.Error(), -1) + } + return nil +} + +func showBeaconsFQDNHuman(data []beaconfqdn.Result, showNetNames bool) error { + table := tablewriter.NewWriter(os.Stdout) + var headerFields []string + if showNetNames { + headerFields = []string{ + "Score", "Source Network", "Source IP", "FQDN", "Resolved IPs", + "Connections", "Avg. Bytes", "Intvl Range", "Size Range", "Top Intvl", + "Top Size", "Top Intvl Count", "Top Size Count", "Intvl Skew", + "Size Skew", "Intvl Dispersion", "Size Dispersion", + } + } else { + headerFields = []string{ + "Score", "Source IP", "FQDN", "Resolved IPs", + "Connections", "Avg. 
Bytes", "Intvl Range", "Size Range", "Top Intvl", + "Top Size", "Top Intvl Count", "Top Size Count", "Intvl Skew", + "Size Skew", "Intvl Dispersion", "Size Dispersion", + } + } + + table.SetHeader(headerFields) + + for _, d := range data { + var row []string + + if showNetNames { + row = []string{ + f(d.Score), d.SrcNetworkName, + d.SrcIP, d.FQDN, i(d.Connections), f(d.AvgBytes), + i(d.Ts.Range), i(d.Ds.Range), i(d.Ts.Mode), i(d.Ds.Mode), + i(d.Ts.ModeCount), i(d.Ds.ModeCount), f(d.Ts.Skew), f(d.Ds.Skew), + i(d.Ts.Dispersion), i(d.Ds.Dispersion), + } + } else { + row = []string{ + f(d.Score), d.SrcIP, d.FQDN, i(d.Connections), f(d.AvgBytes), + i(d.Ts.Range), i(d.Ds.Range), i(d.Ts.Mode), i(d.Ds.Mode), + i(d.Ts.ModeCount), i(d.Ds.ModeCount), f(d.Ts.Skew), f(d.Ds.Skew), + i(d.Ts.Dispersion), i(d.Ds.Dispersion), + } + } + table.Append(row) + } + table.Render() + return nil +} + +func showBeaconsFQDNDelim(data []beaconfqdn.Result, delim string, showNetNames bool) error { + var headerFields []string + if showNetNames { + headerFields = []string{ + "Score", "Source Network", "Source IP", "FQDN", + "Connections", "Avg. Bytes", "Intvl Range", "Size Range", "Top Intvl", + "Top Size", "Top Intvl Count", "Top Size Count", "Intvl Skew", + "Size Skew", "Intvl Dispersion", "Size Dispersion", + } + } else { + headerFields = []string{ + "Score", "Source IP", "FQDN", + "Connections", "Avg. 
Bytes", "Intvl Range", "Size Range", "Top Intvl", + "Top Size", "Top Intvl Count", "Top Size Count", "Intvl Skew", + "Size Skew", "Intvl Dispersion", "Size Dispersion", + } + } + + // Print the headers and analytic values, separated by a delimiter + fmt.Println(strings.Join(headerFields, delim)) + for _, d := range data { + + var row []string + if showNetNames { + row = []string{ + f(d.Score), d.SrcNetworkName, + d.SrcIP, d.FQDN, i(d.Connections), f(d.AvgBytes), + i(d.Ts.Range), i(d.Ds.Range), i(d.Ts.Mode), i(d.Ds.Mode), + i(d.Ts.ModeCount), i(d.Ds.ModeCount), f(d.Ts.Skew), f(d.Ds.Skew), + i(d.Ts.Dispersion), i(d.Ds.Dispersion), + } + } else { + row = []string{ + f(d.Score), d.SrcIP, d.FQDN, i(d.Connections), f(d.AvgBytes), + i(d.Ts.Range), i(d.Ds.Range), i(d.Ts.Mode), i(d.Ds.Mode), + i(d.Ts.ModeCount), i(d.Ds.ModeCount), f(d.Ts.Skew), f(d.Ds.Skew), + i(d.Ts.Dispersion), i(d.Ds.Dispersion), + } + } + + fmt.Println(strings.Join(row, delim)) + } + return nil +} diff --git a/config/static.go b/config/static.go index 8d1a73c9..b86a90cc 100644 --- a/config/static.go +++ b/config/static.go @@ -19,6 +19,7 @@ type ( Log LogStaticCfg `yaml:"LogConfig"` Blacklisted BlacklistedStaticCfg `yaml:"BlackListed"` Beacon BeaconStaticCfg `yaml:"Beacon"` + BeaconFQDN BeaconFQDNStaticCfg `yaml:"BeaconFQDN"` DNS DNSStaticCfg `yaml:"DNS"` UserAgent UserAgentStaticCfg `yaml:"UserAgent"` Bro BroStaticCfg `yaml:"Bro"` // kept in for MetaDB backwards compatibility @@ -86,6 +87,12 @@ type ( DefaultConnectionThresh int `yaml:"DefaultConnectionThresh" default:"20"` } + //BeaconFQDNStaticCfg is used to control the beaconing analysis module + BeaconFQDNStaticCfg struct { + Enabled bool `yaml:"Enabled" default:"true"` + DefaultConnectionThresh int `yaml:"DefaultConnectionThresh" default:"20"` + } + //DNSStaticCfg is used to control the DNS analysis module DNSStaticCfg struct { Enabled bool `yaml:"Enabled" default:"true"` diff --git a/config/tables.go b/config/tables.go index 89910881..fe8ff8ab 
100644 --- a/config/tables.go +++ b/config/tables.go @@ -7,6 +7,7 @@ type ( DNS DNSTableCfg Structure StructureTableCfg Beacon BeaconTableCfg + BeaconFQDN BeaconFQDNTableCfg UserAgent UserAgentTableCfg Cert CertificateTableCfg Meta MetaTableCfg @@ -38,6 +39,11 @@ type ( BeaconTable string `default:"beacon"` } + //BeaconFQDNTableCfg is used to control the beaconing analysis module + BeaconFQDNTableCfg struct { + BeaconFQDNTable string `default:"beaconFQDN"` + } + //UserAgentTableCfg is used to control the useragent analysis module UserAgentTableCfg struct { UserAgentTable string `default:"useragent"` diff --git a/etc/rita.yaml b/etc/rita.yaml index 4dba1832..a3395842 100644 --- a/etc/rita.yaml +++ b/etc/rita.yaml @@ -1,142 +1,155 @@ # This section configures the connection to the MongoDB server and the database name to use MongoDB: - # See https://docs.mongodb.com/manual/reference/connection-string/ - ConnectionString: mongodb://localhost:27017 - # Example with authentication. Be sure to change the AuthenticationMechanism as well. - # ConnectionString: mongodb://username:password@localhost:27017 + # See https://docs.mongodb.com/manual/reference/connection-string/ + ConnectionString: mongodb://localhost:27017 + # Example with authentication. Be sure to change the AuthenticationMechanism as well. + # ConnectionString: mongodb://username:password@localhost:27017 - # Accepted Values: null, "SCRAM-SHA-1", "MONGODB-CR", "PLAIN" - # Since Mongo version 3.0 the default authentication mechanism is SCRAM-SHA-1 - AuthenticationMechanism: null + # Accepted Values: null, "SCRAM-SHA-1", "MONGODB-CR", "PLAIN" + # Since Mongo version 3.0 the default authentication mechanism is SCRAM-SHA-1 + AuthenticationMechanism: null - # The time in hours before RITA's connection to MongoDB times out. 0 waits indefinitely. - SocketTimeout: 2 + # The time in hours before RITA's connection to MongoDB times out. 0 waits indefinitely. 
+ SocketTimeout: 2 - # For encrypting data on the wire between RITA and MongoDB - TLS: - Enable: false - #If set, RITA will verify the MongoDB certificate's hostname and validity - VerifyCertificate: false - #If set, RITA will use the provided CA file instead of the system's CA's - CAFile: null + # For encrypting data on the wire between RITA and MongoDB + TLS: + Enable: false + #If set, RITA will verify the MongoDB certificate's hostname and validity + VerifyCertificate: false + #If set, RITA will use the provided CA file instead of the system's CA's + CAFile: null - # This database holds information about the procesed files and databases. - MetaDB: MetaDatabase + # This database holds information about the procesed files and databases. + MetaDB: MetaDatabase Rolling: - # This is the default number of chunks to keep in rolling databases. - # This only is used if the --numchunks command argument isn't supplied. - DefaultChunks: 24 + # This is the default number of chunks to keep in rolling databases. + # This only is used if the --numchunks command argument isn't supplied. + DefaultChunks: 24 LogConfig: - # LogLevel - # 3 = debug - # 2 = info - # 1 = warn - # 0 = error - LogLevel: 2 + # LogLevel + # 3 = debug + # 2 = info + # 1 = warn + # 0 = error + LogLevel: 2 - # LogPath is the path for Rita's logs. Make sure permissions are set accordingly. - # Logs will only be written here if LogToFile is true - RitaLogPath: /var/lib/rita/logs + # LogPath is the path for Rita's logs. Make sure permissions are set accordingly. + # Logs will only be written here if LogToFile is true + RitaLogPath: /var/lib/rita/logs - LogToFile: true - LogToDB: true + LogToFile: true + LogToDB: true UserConfig: - # Number of days before checking for a new version of RITA. - # A value of zero here will disable checking. - UpdateCheckFrequency: 14 + # Number of days before checking for a new version of RITA. + # A value of zero here will disable checking. 
+ UpdateCheckFrequency: 14 Filtering: - # These are filters that affect the import of connection logs. They - # currently do not apply to dns or http logs. - # A good reference for networks you may wish to consider is RFC 5735. - # https://tools.ietf.org/html/rfc5735#section-4 - - # Example: AlwaysInclude: ["192.168.1.2/32"] - # This functionality overrides the NeverInclude and InternalSubnets - # section, making sure that any connection records containing addresses from - # this range are kept and not filtered - AlwaysInclude: [] - - # Example: NeverInclude: ["255.255.255.255/32"] - # This functions as a whitelisting setting, and connections involving - # ranges entered into this section are filtered out at import time - NeverInclude: - - 0.0.0.0/32 # "This" Host RFC 1122, Section 3.2.1.3 - - 127.0.0.0/8 # Loopback RFC 1122, Section 3.2.1.3 - - 169.254.0.0/16 # Link Local RFC 3927 - - 224.0.0.0/4 # Multicast RFC 3171 - - 255.255.255.255/32 # Limited Broadcast RFC 919, Section 7 - - ::1/128 # Loopback RFC 4291, Section 2.5.3 - - fe80::/10 # Link local RFC 4291, Section 2.5.6 - - ff00::/8 # Multicast RFC 4291, Section 2.7 - - # Example: InternalSubnets: ["10.0.0.0/8","172.16.0.0/12","192.168.0.0/16"] - # This allows a user to identify their internal network, which will result - # in any internal to internal and external to external connections being - # filtered out at import time. Reasonable defaults are provided below - # but need to be manually verified against each installation before enabling. - InternalSubnets: - - 10.0.0.0/8 # Private-Use Networks RFC 1918 - - 172.16.0.0/12 # Private-Use Networks RFC 1918 - - 192.168.0.0/16 # Private-Use Networks RFC 1918 + # These are filters that affect the import of connection logs. They + # currently do not apply to dns or http logs. + # A good reference for networks you may wish to consider is RFC 5735. 
+ # https://tools.ietf.org/html/rfc5735#section-4 + + # Example: AlwaysInclude: ["192.168.1.2/32"] + # This functionality overrides the NeverInclude and InternalSubnets + # section, making sure that any connection records containing addresses from + # this range are kept and not filtered + AlwaysInclude: [] + + # Example: NeverInclude: ["255.255.255.255/32"] + # This functions as a whitelisting setting, and connections involving + # ranges entered into this section are filtered out at import time + NeverInclude: + - 0.0.0.0/32 # "This" Host RFC 1122, Section 3.2.1.3 + - 127.0.0.0/8 # Loopback RFC 1122, Section 3.2.1.3 + - 169.254.0.0/16 # Link Local RFC 3927 + - 224.0.0.0/4 # Multicast RFC 3171 + - 255.255.255.255/32 # Limited Broadcast RFC 919, Section 7 + - ::1/128 # Loopback RFC 4291, Section 2.5.3 + - fe80::/10 # Link local RFC 4291, Section 2.5.6 + - ff00::/8 # Multicast RFC 4291, Section 2.7 + + # Example: InternalSubnets: ["10.0.0.0/8","172.16.0.0/12","192.168.0.0/16"] + # This allows a user to identify their internal network, which will result + # in any internal to internal and external to external connections being + # filtered out at import time. Reasonable defaults are provided below + # but need to be manually verified against each installation before enabling. + InternalSubnets: + - 10.0.0.0/8 # Private-Use Networks RFC 1918 + - 172.16.0.0/12 # Private-Use Networks RFC 1918 + - 192.168.0.0/16 # Private-Use Networks RFC 1918 BlackListed: - Enabled: true - # These are blacklists built into rita-blacklist. Set these to false - # to disable checks against them. - MalwareDomains.com: true - feodotracker.abuse.ch: true - - # This is the name of the database which will be created as a master list of - # blacklisted ips and hostnames by rita-blacklist - BlacklistDatabase: "rita-bl" - - # These are custom blacklists that you may define. They are lists of either - # file paths or urls. 
These custom blacklists are expected to be simple, - # line separated text documents containing a list of blacklisted entries. - - # Example: CustomIPBlacklists: ["$HOME/.rita/myIPBlacklist.txt"] - # myIPBlacklist.txt would look like this: - # 192.168.0.1 - # 10.10.174.1 - - # Lists containing both IPv4 and IPv6 addresses are acceptable - CustomIPBlacklists: [] - # Lists containing hostnames, domain names, and FQDNs are acceptable - CustomHostnameBlacklists: [] + Enabled: true + # These are blacklists built into rita-blacklist. Set these to false + # to disable checks against them. + MalwareDomains.com: true + feodotracker.abuse.ch: true + + # This is the name of the database which will be created as a master list of + # blacklisted ips and hostnames by rita-blacklist + BlacklistDatabase: "rita-bl" + + # These are custom blacklists that you may define. They are lists of either + # file paths or urls. These custom blacklists are expected to be simple, + # line separated text documents containing a list of blacklisted entries. + + # Example: CustomIPBlacklists: ["$HOME/.rita/myIPBlacklist.txt"] + # myIPBlacklist.txt would look like this: + # 192.168.0.1 + # 10.10.174.1 + + # Lists containing both IPv4 and IPv6 addresses are acceptable + CustomIPBlacklists: [] + # Lists containing hostnames, domain names, and FQDNs are acceptable + CustomHostnameBlacklists: [] Beacon: - Enabled: true - # The default minimum number of connections used for beacons analysis. - # Any two hosts connecting fewer than this number will not be analyzed. - # 20 was chosen as it is a little bit less than once per hour in a day, - # and allows for any packet loss that could occur. - - # If you choose a lower value, this will significantly increase both - # the analysis time and the number of false positives. You can safely - # increase this value to improve performance if you are not concerned - # about slow beacons. 
- DefaultConnectionThresh: 20 + Enabled: true + # The default minimum number of connections used for beacons analysis. + # Any two hosts connecting fewer than this number will not be analyzed. + # 20 was chosen as it is a little bit less than once per hour in a day, + # and allows for any packet loss that could occur. + + # If you choose a lower value, this will significantly increase both + # the analysis time and the number of false positives. You can safely + # increase this value to improve performance if you are not concerned + # about slow beacons. + DefaultConnectionThresh: 20 + +BeaconFQDN: + Enabled: true + # The default minimum number of connections used for beacons FQDN analysis. + # Any two hosts connecting fewer than this number will not be analyzed. + # 20 was chosen as it is a little bit less than once per hour in a day, + # and allows for any packet loss that could occur. + + # If you choose a lower value, this will significantly increase both + # the analysis time and the number of false positives. You can safely + # increase this value to improve performance if you are not concerned + # about slow beacons. + DefaultConnectionThresh: 20 DNS: - Enabled: true + Enabled: true UserAgent: - Enabled: true + Enabled: true Strobe: - # This sets the maximum number of connections between any two given hosts that are stored. - # Connections above this limit will be deleted and not used in other analysis modules. This will - # also trigger an entry in the strobe module. A lower value will reduce import & analysis time and - # hide more potential false positives from other modules. A higher value will increase import & - # analysis time, increase false positives, but reduce the risk of false negatives. 
- # Recommended values for this setting are: - # 86400 - One connection every second for 24 hours - # 250000 - (Default) Good middle of the road value - # 700000 - Safe max value that is unlikely to cause errors - # The theoretical limit due to implementation limitations is ~1,048,573 - # but in practice timeouts have occurred at lower values. - ConnectionLimit: 250000 + # This sets the maximum number of connections between any two given hosts that are stored. + # Connections above this limit will be deleted and not used in other analysis modules. This will + # also trigger an entry in the strobe module. A lower value will reduce import & analysis time and + # hide more potential false positives from other modules. A higher value will increase import & + # analysis time, increase false positives, but reduce the risk of false negatives. + # Recommended values for this setting are: + # 86400 - One connection every second for 24 hours + # 250000 - (Default) Good middle of the road value + # 700000 - Safe max value that is unlikely to cause errors + # The theoretical limit due to implementation limitations is ~1,048,573 + # but in practice timeouts have occurred at lower values. 
+ ConnectionLimit: 250000 diff --git a/parser/fsimporter.go b/parser/fsimporter.go index a37573cb..03d3d2d7 100644 --- a/parser/fsimporter.go +++ b/parser/fsimporter.go @@ -14,10 +14,12 @@ import ( fpt "github.com/activecm/rita/parser/fileparsetypes" "github.com/activecm/rita/parser/parsetypes" "github.com/activecm/rita/pkg/beacon" + "github.com/activecm/rita/pkg/beaconfqdn" "github.com/activecm/rita/pkg/blacklist" "github.com/activecm/rita/pkg/certificate" "github.com/activecm/rita/pkg/data" "github.com/activecm/rita/pkg/explodeddns" + "github.com/activecm/rita/pkg/host" "github.com/activecm/rita/pkg/hostname" "github.com/activecm/rita/pkg/remover" @@ -190,6 +192,9 @@ func (fs *FSImporter) Run(indexedFiles []*fpt.IndexedFile) { // build or update Beacons table fs.buildBeacons(uconnMap) + // build or update the FQDN Beacons Table + fs.buildFQDNBeacons(hostnameMap) + // build or update UserAgent table fs.buildUserAgent(useragentMap) @@ -778,11 +783,10 @@ func (fs *FSImporter) buildExplodedDNS(domainMap map[string]int) { } else { fmt.Println("\t[!] No DNS data to analyze") } - } } -//buildExplodedDNS ..... +//buildCertificates ..... func (fs *FSImporter) buildCertificates(certMap map[string]*certificate.Input) { if len(certMap) > 0 { @@ -799,7 +803,7 @@ func (fs *FSImporter) buildCertificates(certMap map[string]*certificate.Input) { } -//buildHostnames ..... +//removeAnalysisChunk ..... func (fs *FSImporter) removeAnalysisChunk(cid int) error { // Set up the remover @@ -851,7 +855,6 @@ func (fs *FSImporter) buildUconns(uconnMap map[string]*uconn.Input) { fmt.Printf("\t\t[!!] 
No local network traffic found, please check ") fmt.Println("InternalSubnets in your RITA config (/etc/rita/config.yaml)") } - } func (fs *FSImporter) buildHosts(hostMap map[string]*host.Input) { @@ -902,6 +905,25 @@ func (fs *FSImporter) buildBeacons(uconnMap map[string]*uconn.Input) { } +func (fs *FSImporter) buildFQDNBeacons(hostnameMap map[string]*hostname.Input) { + if fs.res.Config.S.BeaconFQDN.Enabled { + if len(hostnameMap) > 0 { + beaconFQDNRepo := beaconfqdn.NewMongoRepository(fs.res) + + err := beaconFQDNRepo.CreateIndexes() + if err != nil { + fs.res.Log.Error(err) + } + + // send uconns to beacon analysis + beaconFQDNRepo.Upsert(hostnameMap) + } else { + fmt.Println("\t[!] No FQDN Beacon data to analyze") + } + } + +} + //buildUserAgent ..... func (fs *FSImporter) buildUserAgent(useragentMap map[string]*useragent.Input) { diff --git a/pkg/beacon/analyzer.go b/pkg/beacon/analyzer.go index d96b6c9b..523449a9 100644 --- a/pkg/beacon/analyzer.go +++ b/pkg/beacon/analyzer.go @@ -1,12 +1,13 @@ package beacon import ( - "github.com/activecm/rita/pkg/data" "math" "sort" "strconv" "sync" + "github.com/activecm/rita/pkg/data" + "github.com/activecm/rita/config" "github.com/activecm/rita/database" "github.com/activecm/rita/pkg/uconn" @@ -228,8 +229,8 @@ func (a *analyzer) start() { selector: res.Hosts.BSONKey(), } - output.hostIcert = a.hostIcertQuery(res.InvalidCertFlag, res.Hosts.Source(), res.Hosts.Destination()) - output.hostBeacon = a.hostBeaconQuery(score, res.Hosts.Source(), res.Hosts.Destination()) + output.hostIcert = a.hostIcertQuery(res.InvalidCertFlag, res.Hosts.UniqueSrcIP.Unpair(), res.Hosts.UniqueDstIP.Unpair()) + output.hostBeacon = a.hostBeaconQuery(score, res.Hosts.UniqueSrcIP.Unpair(), res.Hosts.UniqueDstIP.Unpair()) // set to writer channel a.analyzedCallback(output) @@ -260,6 +261,30 @@ func createCountMap(sortedIn []int64) ([]int64, []int64, int64, int64) { return distinct, countsArr, mode, max } +//countAndRemoveConsecutiveDuplicates 
removes consecutive +//duplicates in an array of integers and counts how many +//instances of each number exist in the array. +//Similar to `uniq -c`, but counts all duplicates, not just +//consecutive duplicates. +func countAndRemoveConsecutiveDuplicates(numberList []int64) ([]int64, map[int64]int64) { + //Avoid some reallocations + result := make([]int64, 0, len(numberList)/2) + counts := make(map[int64]int64) + + last := numberList[0] + result = append(result, last) + counts[last]++ + + for idx := 1; idx < len(numberList); idx++ { + if last != numberList[idx] { + result = append(result, numberList[idx]) + } + last = numberList[idx] + counts[last]++ + } + return result, counts +} + func (a *analyzer) hostIcertQuery(icert bool, src data.UniqueIP, dst data.UniqueIP) updateInfo { ssn := a.db.Session.Copy() defer ssn.Close() diff --git a/pkg/beacon/dissector.go b/pkg/beacon/dissector.go index 1668348f..d92068cd 100644 --- a/pkg/beacon/dissector.go +++ b/pkg/beacon/dissector.go @@ -55,6 +55,13 @@ func (d *dissector) start() { for datum := range d.dissectChannel { matchNoStrobeKey := datum.Hosts.BSONKey() + + // we are able to filter out already flagged strobes here + // because we use the uconns table to access them. The uconns table has + // already had its counts and stats updated. 
Note that this can't be the + // case with the fqdn beacon strobes, as they are also stored in the fqdn + // strobes collection and cannot be stored in uconns, needing the updated + // counts and stats matchNoStrobeKey["strobe"] = bson.M{"$ne": true} // This will work for both updating and inserting completely new Beacons diff --git a/pkg/beacon/sorter.go b/pkg/beacon/sorter.go index 9a4059a2..bf38beef 100644 --- a/pkg/beacon/sorter.go +++ b/pkg/beacon/sorter.go @@ -52,7 +52,7 @@ func (s *sorter) start() { for data := range s.sortChannel { if (data.TsList) != nil { - //sort the size and timestamps since they may have arrived out of order + //sort the size and timestamps to compute quantiles in the analyzer sort.Sort(util.SortableInt64(data.TsList)) sort.Sort(util.SortableInt64(data.OrigBytesList)) @@ -64,27 +64,3 @@ func (s *sorter) start() { s.sortWg.Done() }() } - -//CountAndRemoveConsecutiveDuplicates removes consecutive -//duplicates in an array of integers and counts how many -//instances of each number exist in the array. -//Similar to `uniq -c`, but counts all duplicates, not just -//consecutive duplicates. 
-func countAndRemoveConsecutiveDuplicates(numberList []int64) ([]int64, map[int64]int64) { - //Avoid some reallocations - result := make([]int64, 0, len(numberList)/2) - counts := make(map[int64]int64) - - last := numberList[0] - result = append(result, last) - counts[last]++ - - for idx := 1; idx < len(numberList); idx++ { - if last != numberList[idx] { - result = append(result, numberList[idx]) - } - last = numberList[idx] - counts[last]++ - } - return result, counts -} diff --git a/pkg/beaconfqdn/accumulator.go b/pkg/beaconfqdn/accumulator.go new file mode 100644 index 00000000..45775665 --- /dev/null +++ b/pkg/beaconfqdn/accumulator.go @@ -0,0 +1,95 @@ +package beaconfqdn + +import ( + "sync" + + "github.com/activecm/rita/config" + "github.com/activecm/rita/database" + "github.com/activecm/rita/pkg/data" + "github.com/activecm/rita/pkg/hostname" + "github.com/globalsign/mgo/bson" +) + +type ( + accumulator struct { + db *database.DB // provides access to MongoDB + conf *config.Config // contains details needed to access MongoDB + accumulatedCallback func(*hostname.FqdnInput) // called on each analyzed result + closedCallback func() // called when .close() is called and no more calls to analyzedCallback will be made + accumulateChannel chan *hostname.Input // holds unanalyzed data + accumulateWg sync.WaitGroup // wait for analysis to finish + } +) + +//newAccumulator creates a new collector for gathering data +func newAccumulator(db *database.DB, conf *config.Config, accumulatedCallback func(*hostname.FqdnInput), closedCallback func()) *accumulator { + return &accumulator{ + db: db, + conf: conf, + accumulatedCallback: accumulatedCallback, + closedCallback: closedCallback, + accumulateChannel: make(chan *hostname.Input), + } +} + +//collect sends a chunk of data to be analyzed +func (c *accumulator) collect(entry *hostname.Input) { + c.accumulateChannel <- entry +} + +//close waits for the collector to finish +func (c *accumulator) close() { + 
	close(c.accumulateChannel)
	c.accumulateWg.Wait()
	c.closedCallback()
}

//start kicks off a new analysis thread
func (c *accumulator) start() {
	c.accumulateWg.Add(1)
	go func() {
		ssn := c.db.Session.Copy()
		defer ssn.Close()

		for entry := range c.accumulateChannel {
			// create resolved dst array for match query
			var dstList []bson.M
			for _, dst := range entry.ResolvedIPs {
				dstList = append(dstList, dst.AsDst().BSONKey())
			}

			// create match query
			srcMatchQuery := []bson.M{
				{"$match": bson.M{
					"$or": dstList,
				}},
				{"$project": bson.M{
					"src":              1,
					"src_network_uuid": 1,
					"src_network_name": 1,
				}},
			}

			// get all src ips that connected to the resolved ips
			var srcRes []data.UniqueSrcIP

			// execute query
			// NOTE(review): the pipeline error is silently discarded; on failure
			// srcRes stays empty and this hostname is skipped without logging
			_ = ssn.DB(c.db.GetSelectedDB()).C(c.conf.T.Structure.UniqueConnTable).Pipe(srcMatchQuery).AllowDiskUse().All(&srcRes)

			// for each src that connected to a resolved ip...
			for _, src := range srcRes {

				input := &hostname.FqdnInput{
					Src:         src,
					FQDN:        entry.Host,
					DstBSONList: dstList,
					ResolvedIPs: entry.ResolvedIPs,
				}

				c.accumulatedCallback(input)
			}

		}
		c.accumulateWg.Done()
	}()
}
diff --git a/pkg/beaconfqdn/analyzer.go b/pkg/beaconfqdn/analyzer.go
new file mode 100644
index 00000000..4112e69e
--- /dev/null
+++ b/pkg/beaconfqdn/analyzer.go
@@ -0,0 +1,481 @@
package beaconfqdn

import (
	"math"
	"sort"
	"strconv"
	"sync"

	"github.com/activecm/rita/config"
	"github.com/activecm/rita/database"
	"github.com/activecm/rita/pkg/data"
	"github.com/activecm/rita/pkg/hostname"
	"github.com/activecm/rita/util"
	"github.com/globalsign/mgo/bson"
)

type (
	//analyzer scores each (src, fqdn) pairing for beacon-like behavior
	analyzer struct {
		tsMin            int64                    // min timestamp for the whole dataset
		tsMax            int64                    // max timestamp for the whole dataset
		chunk            int                      //current chunk (0 if not on rolling analysis)
		chunkStr         string                   //current chunk (0 if not on rolling analysis)
		db               *database.DB             // provides access to MongoDB
		conf             *config.Config           // contains details needed to access MongoDB
		analyzedCallback func(*update)            // called on each analyzed result
		closedCallback   func()                   // called when .close() is called and no more calls to analyzedCallback will be made
		analysisChannel  chan *hostname.FqdnInput // holds unanalyzed data
		analysisWg       sync.WaitGroup           // wait for analysis to finish
	}
)

//newAnalyzer creates a new collector for gathering data
func newAnalyzer(min int64, max int64, chunk int, db *database.DB, conf *config.Config, analyzedCallback func(*update), closedCallback func()) *analyzer {
	return &analyzer{
		tsMin:            min,
		tsMax:            max,
		chunk:            chunk,
		chunkStr:         strconv.Itoa(chunk),
		db:               db,
		conf:             conf,
		analyzedCallback: analyzedCallback,
		closedCallback:   closedCallback,
		analysisChannel:  make(chan *hostname.FqdnInput),
	}
}

//collect sends a chunk of data to be analyzed
func (a *analyzer) collect(data *hostname.FqdnInput) {
	a.analysisChannel <- data
}

//close waits for the collector to finish
func (a *analyzer) close() {
	close(a.analysisChannel)
	a.analysisWg.Wait()
	a.closedCallback()
}

//start kicks off a new analysis thread
func (a *analyzer) start() {
	a.analysisWg.Add(1)
	go func() {

		for entry := range a.analysisChannel {

			// set up beacon writer output
			output := &update{}

			// create selector pair object
			selectorPair := uniqueSrcHostnamePair{
				entry.Src.SrcIP,
				entry.Src.SrcNetworkUUID,
				entry.FQDN,
			}

			// create query
			query := bson.M{}

			// if beacon has turned into a strobe, we will not have any timestamps here,
			// and need to update beaconFQDN table with the strobeFQDN flag.
			if (entry.TsList) == nil {

				// set strobe info
				// NOTE(review): avg_bytes uses integer division; ConnectionCount
				// exceeds the strobe limit on this path, so it is never zero
				query["$set"] = bson.M{
					"strobeFQDN":       true,
					"total_bytes":      entry.TotalBytes,
					"avg_bytes":        entry.TotalBytes / entry.ConnectionCount,
					"connection_count": entry.ConnectionCount,
					"src_network_name": entry.Src.SrcNetworkName,
					"resolved_ips":     entry.ResolvedIPs,
					"cid":              a.chunk,
				}

				// unset any beacon calculations since this
				// is now a strobe and those would be inaccurate
				// (this will only apply to chunked imports)
				query["$unset"] = bson.M{
					"ts":    1,
					"ds":    1,
					"score": 1,
				}

				// create selector for output
				output.beacon.query = query
				output.beacon.selector = selectorPair.BSONKey()

				// set to writer channel
				a.analyzedCallback(output)

			} else {
				//store the diff slice length since we use it a lot
				//for timestamps this is one less then the data slice length
				//since we are calculating the times in between readings
				tsLength := len(entry.TsList) - 1
				dsLength := len(entry.OrigBytesList)

				//find the delta times between the timestamps
				diff := make([]int64, tsLength)
				for i := 0; i < tsLength; i++ {
					diff[i] = entry.TsList[i+1] - entry.TsList[i]
				}

				//perfect beacons should have symmetric delta time and size distributions
				//Bowley's measure of skew is used to check symmetry
				sort.Sort(util.SortableInt64(diff))
				tsSkew := float64(0)
				dsSkew := float64(0)

				//tsLength -1 is used since diff is a zero based slice
				tsLow := diff[util.Round(.25*float64(tsLength-1))]
				tsMid := diff[util.Round(.5*float64(tsLength-1))]
				tsHigh := diff[util.Round(.75*float64(tsLength-1))]
				tsBowleyNum := tsLow + tsHigh - 2*tsMid
				tsBowleyDen := tsHigh - tsLow

				//we do the same for datasizes
				dsLow := entry.OrigBytesList[util.Round(.25*float64(dsLength-1))]
				dsMid := entry.OrigBytesList[util.Round(.5*float64(dsLength-1))]
				dsHigh := entry.OrigBytesList[util.Round(.75*float64(dsLength-1))]
				dsBowleyNum := dsLow + dsHigh - 2*dsMid
				dsBowleyDen := dsHigh - dsLow

				//tsSkew should equal zero if the denominator equals zero
				//bowley skew is unreliable if Q2 = Q1 or Q2 = Q3
				if tsBowleyDen != 0 && tsMid != tsLow && tsMid != tsHigh {
					tsSkew = float64(tsBowleyNum) / float64(tsBowleyDen)
				}

				if dsBowleyDen != 0 && dsMid != dsLow && dsMid != dsHigh {
					dsSkew = float64(dsBowleyNum) / float64(dsBowleyDen)
				}

				//perfect beacons should have very low dispersion around the
				//median of their delta times
				//Median Absolute Deviation About the Median
				//is used to check dispersion
				devs := make([]int64, tsLength)
				for i := 0; i < tsLength; i++ {
					devs[i] = util.Abs(diff[i] - tsMid)
				}

				dsDevs := make([]int64, dsLength)
				for i := 0; i < dsLength; i++ {
					dsDevs[i] = util.Abs(entry.OrigBytesList[i] - dsMid)
				}

				sort.Sort(util.SortableInt64(devs))
				sort.Sort(util.SortableInt64(dsDevs))

				tsMadm := devs[util.Round(.5*float64(tsLength-1))]
				dsMadm := dsDevs[util.Round(.5*float64(dsLength-1))]

				//Store the range for human analysis
				tsIntervalRange := diff[tsLength-1] - diff[0]
				dsRange := entry.OrigBytesList[dsLength-1] - entry.OrigBytesList[0]

				//get a list of the intervals found in the data,
				//the number of times the interval was found,
				//and the most occurring interval
				intervals, intervalCounts, tsMode, tsModeCount := createCountMap(diff)
				dsSizes, dsCounts, dsMode, dsModeCount := createCountMap(entry.OrigBytesList)

				//more skewed distributions receive a lower score
				//less skewed distributions receive a higher score
				tsSkewScore := 1.0 - math.Abs(tsSkew) //smush tsSkew
				dsSkewScore := 1.0 - math.Abs(dsSkew) //smush dsSkew

				//lower dispersion is better, cutoff dispersion scores at 30 seconds
				tsMadmScore := 1.0 - float64(tsMadm)/30.0
				if tsMadmScore < 0 {
					tsMadmScore = 0
				}

				//lower dispersion is better, cutoff dispersion scores at 32 bytes
				dsMadmScore := 1.0 - float64(dsMadm)/32.0
				if dsMadmScore < 0 {
					dsMadmScore = 0
				}

				//smaller data sizes receive a higher score
				dsSmallnessScore := 1.0 - float64(dsMode)/65535.0
				if dsSmallnessScore < 0 {
					dsSmallnessScore = 0
				}

				// connection count scoring
				tsConnDiv := (float64(a.tsMax) - float64(a.tsMin)) / 10.0
				tsConnCountScore := float64(entry.ConnectionCount) / tsConnDiv
				if tsConnCountScore > 1.0 {
					tsConnCountScore = 1.0
				}

				//score numerators
				tsSum := tsSkewScore + tsMadmScore + tsConnCountScore
				dsSum := dsSkewScore + dsMadmScore + dsSmallnessScore

				//score averages
				tsScore := math.Ceil((tsSum/3.0)*1000) / 1000
				dsScore := math.Ceil((dsSum/3.0)*1000) / 1000
				score := math.Ceil(((tsSum+dsSum)/6.0)*1000) / 1000

				// update beacon query
				query["$set"] = bson.M{
					"connection_count":   entry.ConnectionCount,
					"avg_bytes":          entry.TotalBytes / entry.ConnectionCount,
					"ts.range":           tsIntervalRange,
					"ts.mode":            tsMode,
					"ts.mode_count":      tsModeCount,
					"ts.intervals":       intervals,
					"ts.interval_counts": intervalCounts,
					"ts.dispersion":      tsMadm,
					"ts.skew":            tsSkew,
					"ts.conns_score":     tsConnCountScore,
					"ts.score":           tsScore,
					"ds.range":           dsRange,
					"ds.mode":            dsMode,
					"ds.mode_count":      dsModeCount,
					"ds.sizes":           dsSizes,
					"ds.counts":          dsCounts,
					"ds.dispersion":      dsMadm,
					"ds.skew":            dsSkew,
					"ds.score":           dsScore,
					"score":              score,
					"cid":                a.chunk,
					"src_network_name":   entry.Src.SrcNetworkName,
					"resolved_ips":       entry.ResolvedIPs,
				}

				// set query
				output.beacon.query = query

				// create selector for output
				output.beacon.selector = selectorPair.BSONKey()

				// updates source entry in hosts table to flag for invalid certificates
				output.hostIcert = a.hostIcertQuery(entry.InvalidCertFlag, entry.Src.Unpair(), entry.FQDN)

				// updates max FQDN beacon score for the source entry in the hosts table
				output.hostBeacon = a.hostBeaconQuery(score, entry.Src.Unpair(), entry.FQDN)

				// set to writer channel
				a.analyzedCallback(output)
			}
		}
		a.analysisWg.Done()
	}()
}

// createCountMap returns a distinct data array, data count array, the mode,
// and the number of times the mode occurred
func createCountMap(sortedIn []int64) ([]int64, []int64, int64, int64) {
	//Since the data is already sorted, we can call this without fear
	distinct, countsMap := countAndRemoveConsecutiveDuplicates(sortedIn)
	countsArr := make([]int64, len(distinct))
	mode := distinct[0]
	max := countsMap[mode]
	for i, datum := range distinct {
		count := countsMap[datum]
		countsArr[i] = count
		if count > max {
			max = count
			mode = datum
		}
	}
	return distinct, countsArr, mode, max
}

//countAndRemoveConsecutiveDuplicates removes consecutive
//duplicates in an array of integers and counts how many
//instances of each number exist in the array.
//Similar to `uniq -c`, but counts all duplicates, not just
//consecutive duplicates.
func countAndRemoveConsecutiveDuplicates(numberList []int64) ([]int64, map[int64]int64) {
	//Avoid some reallocations
	result := make([]int64, 0, len(numberList)/2)
	counts := make(map[int64]int64)

	last := numberList[0]
	result = append(result, last)
	counts[last]++

	for idx := 1; idx < len(numberList); idx++ {
		if last != numberList[idx] {
			result = append(result, numberList[idx])
		}
		last = numberList[idx]
		counts[last]++
	}
	return result, counts
}

//hostIcertQuery builds a host-table update which flags the src host as having
//connected to this fqdn with an invalid certificate. Returns a zero-value
//updateInfo (nil query) when icert is false.
func (a *analyzer) hostIcertQuery(icert bool, src data.UniqueIP, fqdn string) updateInfo {
	ssn := a.db.Session.Copy()
	defer ssn.Close()

	var output updateInfo

	// create query
	query := bson.M{}

	// update host table if there is an invalid cert record between pair
	if icert == true {

		newFlag := false

		var resList []interface{}

		hostSelector := src.BSONKey()
		hostSelector["dat.icfqdn"] = fqdn

		// NOTE(review): find error is discarded; a failed lookup is treated
		// the same as "no existing record" and results in a $push
		_ = ssn.DB(a.db.GetSelectedDB()).C(a.conf.T.Structure.HostTable).Find(hostSelector).All(&resList)

		if len(resList) <= 0 {
			newFlag = true
		}

		if newFlag {

			query["$push"] = bson.M{
				"dat": bson.M{
					"icfqdn": fqdn,
					"icert":  1,
					"cid":    a.chunk,
				}}

			// create selector for output
			output.query = query
			output.selector = src.BSONKey()

		} else {

			query["$set"] = bson.M{
				"dat.$.icert": 1,
				"dat.$.cid":   a.chunk,
			}

			// create selector for output
			output.query = query
			output.selector = hostSelector
		}
	}

	return output
}

//hostBeaconQuery builds a host-table update which records the maximum FQDN
//beacon score seen for the src host.
func (a *analyzer) hostBeaconQuery(score float64, src data.UniqueIP, fqdn string) updateInfo {
	ssn := a.db.Session.Copy()
	defer ssn.Close()

	var output updateInfo

	// create query
	query := bson.M{}

	// check if we need to update
	// we do this before the other queries because otherwise if a beacon
	// starts out with a high score which reduces over time, it will keep
	// the incorrect high max for that specific destination.
	var resListExactMatch []interface{}

	maxBeaconMatchExactQuery := src.BSONKey()
	maxBeaconMatchExactQuery["dat.mbfqdn"] = fqdn

	_ = ssn.DB(a.db.GetSelectedDB()).C(a.conf.T.Structure.HostTable).Find(maxBeaconMatchExactQuery).All(&resListExactMatch)

	// if we have exact matches, update to new score and return
	if len(resListExactMatch) > 0 {
		query["$set"] = bson.M{
			"dat.$.max_beacon_fqdn_score": score,
			"dat.$.mbfqdn":                fqdn,
			"dat.$.cid":                   a.chunk,
		}

		// create selector for output
		output.query = query

		// using the same find query we created above will allow us to match and
		// update the exact chunk we need to update
		output.selector = maxBeaconMatchExactQuery

		return output
	}

	// The below is only for cases where the ip is not currently listed as a max beacon
	// for a source
	// update max beacon score
	newFlag := false
	updateFlag := false

	// this query will find any matching chunk that is reporting a lower
	// max beacon score than the current one we are working with
	maxBeaconMatchLowerQuery := src.BSONKey()
	maxBeaconMatchLowerQuery["dat"] = bson.M{
		"$elemMatch": bson.M{
			"cid":                   a.chunk,
			"max_beacon_fqdn_score": bson.M{"$lte": score},
		},
	}
	// find matching lower chunks
	var resListLower []interface{}

	_ = ssn.DB(a.db.GetSelectedDB()).C(a.conf.T.Structure.HostTable).Find(maxBeaconMatchLowerQuery).All(&resListLower)

	// if no matching chunks are found, we will set the new flag
	if len(resListLower) <= 0 {

		maxBeaconMatchUpperQuery := src.BSONKey()
		maxBeaconMatchUpperQuery["dat"] = bson.M{
			"$elemMatch": bson.M{
				"cid":                   a.chunk,
				"max_beacon_fqdn_score": bson.M{"$gte": score},
			},
		}

		// find matching upper chunks
		var resListUpper []interface{}
		_ = ssn.DB(a.db.GetSelectedDB()).C(a.conf.T.Structure.HostTable).Find(maxBeaconMatchUpperQuery).All(&resListUpper)
		// update if no upper chunks are found
		if len(resListUpper) <= 0 {
			newFlag = true
		}
	} else {
		updateFlag = true
	}

	// since we didn't find any changeable lower max beacon scores, we will
	// set the condition to push a new entry with the current score listed as the
	// max beacon ONLY if no matching chunks reporting higher max beacon scores
	// are found.

	if newFlag {

		query["$push"] = bson.M{
			"dat": bson.M{
				"max_beacon_fqdn_score": score,
				"mbfqdn":                fqdn,
				"cid":                   a.chunk,
			}}

		// create selector for output
		output.query = query
		output.selector = src.BSONKey()

	} else if updateFlag {

		query["$set"] = bson.M{
			"dat.$.max_beacon_fqdn_score": score,
			"dat.$.mbfqdn":                fqdn,
			"dat.$.cid":                   a.chunk,
		}

		// create selector for output
		output.query = query

		// using the same find query we created above will allow us to match and
		// update the exact chunk we need to update
		output.selector = maxBeaconMatchLowerQuery
	}

	return output
}
diff --git a/pkg/beaconfqdn/dissector.go b/pkg/beaconfqdn/dissector.go
new file mode 100644
index 00000000..f58534ac
--- /dev/null
+++ b/pkg/beaconfqdn/dissector.go
@@ -0,0 +1,183 @@
package beaconfqdn

import (
	"sync"

	"github.com/activecm/rita/config"
	"github.com/activecm/rita/database"
	"github.com/activecm/rita/pkg/hostname"
	"github.com/globalsign/mgo/bson"
)

type (
	dissector struct
	{
		connLimit         int64                     // limit for strobe classification
		db                *database.DB              // provides access to MongoDB
		conf              *config.Config            // contains details needed to access MongoDB
		dissectedCallback func(*hostname.FqdnInput) // called on each analyzed result
		closedCallback    func()                    // called when .close() is called and no more calls to analyzedCallback will be made
		dissectChannel    chan *hostname.FqdnInput  // holds unanalyzed data
		dissectWg         sync.WaitGroup            // wait for analysis to finish
	}
)

//newdissector creates a new collector for gathering data
func newDissector(connLimit int64, db *database.DB, conf *config.Config, dissectedCallback func(*hostname.FqdnInput), closedCallback func()) *dissector {
	return &dissector{
		connLimit:         connLimit,
		db:                db,
		conf:              conf,
		dissectedCallback: dissectedCallback,
		closedCallback:    closedCallback,
		dissectChannel:    make(chan *hostname.FqdnInput),
	}
}

//collect sends a chunk of data to be analyzed
func (d *dissector) collect(entry *hostname.FqdnInput) {
	d.dissectChannel <- entry
}

//close waits for the collector to finish
func (d *dissector) close() {
	close(d.dissectChannel)
	d.dissectWg.Wait()
	d.closedCallback()
}

//start kicks off a new analysis thread
func (d *dissector) start() {
	d.dissectWg.Add(1)
	go func() {
		ssn := d.db.Session.Copy()
		defer ssn.Close()

		for entry := range d.dissectChannel {

			// This will work for both updating and inserting completely new Beacons
			// for every new hostnames record we have, we will check every entry in the
			// uconn table where the source IP from the hostnames record connected to one
			// of the associated IPs for FQDN. This
			// will always return a result because even with a brand new database, we already
			// created the uconns table. It will only continue and analyze if the connection
			// meets the required specs, again working for both an update and a new src-fqdn
			// pair. We would have to perform this check regardless if we want the rolling
			// update option to remain, and this gets us the vetting for both situations, and
			// Only works on the current entries - not a re-aggregation on the whole collection,
			// and individual lookups like this are really fast. This also ensures a unique
			// set of timestamps for analysis.
			uconnFindQuery := []bson.M{
				// beacons strobe ignores any already flagged strobes, but we don't want to do
				// that here. Beacons relies on the uconn table for having the updated connection info
				// we do not have that, so the calculation must happen. We don't necessarily need to store
				// the tslist or byte list, but I don't think that leaving it in will significantly impact
				// performance on a few strobes.
				{"$match": entry.Src.BSONKey()},
				{"$match": bson.M{"$or": entry.DstBSONList}},
				{"$project": bson.M{
					"src": 1,
					"ts": bson.M{
						"$reduce": bson.M{
							"input":        "$dat.ts",
							"initialValue": []interface{}{},
							"in":           bson.M{"$concatArrays": []interface{}{"$$value", "$$this"}},
						},
					},
					"bytes": bson.M{
						"$reduce": bson.M{
							"input":        "$dat.bytes",
							"initialValue": []interface{}{},
							"in":           bson.M{"$concatArrays": []interface{}{"$$value", "$$this"}},
						},
					},
					"count":  bson.M{"$sum": "$dat.count"},
					"tbytes": bson.M{"$sum": "$dat.tbytes"},
					"icerts": bson.M{"$anyElementTrue": []interface{}{"$dat.icerts"}},
				}},
				{"$group": bson.M{
					"_id":    "$src",
					"ts":     bson.M{"$push": "$ts"},
					"bytes":  bson.M{"$push": "$bytes"},
					"count":  bson.M{"$sum": "$count"},
					"tbytes": bson.M{"$sum": "$tbytes"},
					"icerts": bson.M{"$push": "$icerts"},
				}},
				{"$unwind": "$ts"},
				{"$unwind": "$ts"},
				{"$group": bson.M{
					"_id": "$_id",
					// need to unique-ify timestamps or else results
					// will be skewed by "0 distant" data points
					"ts":     bson.M{"$addToSet": "$ts"},
					"bytes":  bson.M{"$first": "$bytes"},
					"count":  bson.M{"$first": "$count"},
					"tbytes": bson.M{"$first": "$tbytes"},
					"icerts": bson.M{"$first": "$icerts"},
				}},
				{"$unwind": "$bytes"},
				{"$unwind": "$bytes"},
				{"$group": bson.M{
					"_id":    "$_id",
					"ts":     bson.M{"$first": "$ts"},
					"bytes":  bson.M{"$push": "$bytes"},
					"count":  bson.M{"$first": "$count"},
					"tbytes": bson.M{"$first": "$tbytes"},
					"icerts": bson.M{"$first": "$icerts"},
				}},
				{"$project": bson.M{
					"_id":    0,
					"ts":     1,
					"bytes":  1,
					"count":  1,
					"tbytes": 1,
					"icerts": bson.M{"$anyElementTrue": []interface{}{"$icerts"}},
				}},
			}

			var res struct {
				Count  int64   `bson:"count"`
				Ts     []int64 `bson:"ts"`
				Bytes  []int64 `bson:"bytes"`
				TBytes int64   `bson:"tbytes"`
				ICerts bool    `bson:"icerts"`
			}

			// NOTE(review): pipeline error is discarded; res stays zero-valued
			// on failure, so the entry is silently dropped by the count check below
			_ = ssn.DB(d.db.GetSelectedDB()).C(d.conf.T.Structure.UniqueConnTable).Pipe(uconnFindQuery).AllowDiskUse().One(&res)

			// Check for errors and parse results
			// this is here because it will still return an empty document even if there are no results
			if res.Count > 0 {

				analysisInput := &hostname.FqdnInput{
					FQDN:            entry.FQDN,
					Src:             entry.Src,
					ConnectionCount: res.Count,
					TotalBytes:      res.TBytes,
					InvalidCertFlag: res.ICerts,
					ResolvedIPs:     entry.ResolvedIPs,
				}

				// check if beacon has become a strobe
				if analysisInput.ConnectionCount > d.connLimit {

					// set to writer channel
					// (TsList left nil marks this entry as a strobe for the analyzer)
					d.dissectedCallback(analysisInput)

				} else { // otherwise, parse timestamps and orig ip bytes

					analysisInput.TsList = res.Ts
					analysisInput.OrigBytesList = res.Bytes

					// send to writer channel if we have over UNIQUE 3 timestamps (analysis needs this verification)
					if len(analysisInput.TsList) > 3 {
						d.dissectedCallback(analysisInput)
					}

				}

			}

		}
		d.dissectWg.Done()
	}()
}
diff --git a/pkg/beaconfqdn/mongodb.go b/pkg/beaconfqdn/mongodb.go
new file mode 100644
index 00000000..4f024888
--- /dev/null
+++ b/pkg/beaconfqdn/mongodb.go
@@ -0,0 +1,155 @@
package beaconfqdn

import (
	"runtime"
	"time"

	"github.com/activecm/rita/pkg/hostname"
	"github.com/activecm/rita/resources"
	"github.com/activecm/rita/util"
	"github.com/globalsign/mgo"
	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

//repo implements Repository against MongoDB, carrying the dataset's
//timestamp range for connection-count scoring.
type repo struct {
	res *resources.Resources
	min int64
	max int64
}

//NewMongoRepository create new repository
func NewMongoRepository(res *resources.Resources) Repository {
	// NOTE(review): GetTSRange error is discarded; min/max default to zero
	min, max, _ := res.MetaDB.GetTSRange(res.DB.GetSelectedDB())
	return &repo{
		res: res,
		min: min,
		max: max,
	}
}

//CreateIndexes creates the beaconFQDN collection and its indexes if the
//collection does not already exist.
func (r *repo) CreateIndexes() error {
	session := r.res.DB.Session.Copy()
	defer session.Close()

	// set collection name
	collectionName := r.res.Config.T.BeaconFQDN.BeaconFQDNTable

	// check if collection already exists
	names, _ := session.DB(r.res.DB.GetSelectedDB()).CollectionNames()

	// if collection exists, we don't need to do anything else
	for _, name := range names {
		if name == collectionName {
			return nil
		}
	}

	// set desired indexes
	indexes := []mgo.Index{
		{Key: []string{"-score"}},
		{Key: []string{"src", "src_network_uuid"}},
		{Key: []string{"fqdn"}},
		{Key: []string{"-connection_count"}},
	}

	// create collection
	err := r.res.DB.CreateCollection(collectionName, indexes)
	if err != nil {
		return err
	}

	return nil
}

//Upsert loops through every new hostname ....
func (r *repo) Upsert(hostnameMap map[string]*hostname.Input) {

	session := r.res.DB.Session.Copy()
	defer session.Close()

	// Create the workers

	// stage 5 - write out results
	writerWorker := newWriter(
		r.res.Config.T.BeaconFQDN.BeaconFQDNTable,
		r.res.DB,
		r.res.Config,
		r.res.Log,
	)

	// stage 4 - perform the analysis
	analyzerWorker := newAnalyzer(
		r.min,
		r.max,
		r.res.Config.S.Rolling.CurrentChunk,
		r.res.DB,
		r.res.Config,
		writerWorker.collect,
		writerWorker.close,
	)

	// stage 3 - sort data
	sorterWorker := newSorter(
		r.res.DB,
		r.res.Config,
		analyzerWorker.collect,
		analyzerWorker.close,
	)

	// stage 2 - get and vet beacon details
	dissectorWorker := newDissector(
		int64(r.res.Config.S.Strobe.ConnectionLimit),
		r.res.DB,
		r.res.Config,
		sorterWorker.collect,
		sorterWorker.close,
	)

	// stage 1 - get all the sources that connected
	// to a resolved FQDN
	accumulatorWorker := newAccumulator(
		r.res.DB,
		r.res.Config,
		dissectorWorker.collect,
		dissectorWorker.close,
	)

	//kick off the threaded goroutines
	for i := 0; i < util.Max(1, runtime.NumCPU()/2); i++ {
		accumulatorWorker.start()
		dissectorWorker.start()
		sorterWorker.start()
		analyzerWorker.start()
		writerWorker.start()
	}

	// progress bar for troubleshooting
	p := mpb.New(mpb.WithWidth(20))
	bar := p.AddBar(int64(len(hostnameMap)),
		mpb.PrependDecorators(
			decor.Name("\t[-] FQDN Beacon Analysis:", decor.WC{W: 30, C: decor.DidentRight}),
			decor.CountersNoUnit(" %d / %d ", decor.WCSyncWidth),
		),
		mpb.AppendDecorators(decor.Percentage()),
	)

	// loop over map entries (each hostname)
	for _, entry := range hostnameMap {

		start := time.Now()

		// check to make sure hostname has resolved ips, skip otherwise
		if len(entry.ResolvedIPs) > 0 {
			accumulatorWorker.collect(entry)
		}

		// progress bar increment
		bar.IncrBy(1, time.Since(start))

	}
	p.Wait()

	// start the closing cascade (this will also close the other channels)
	accumulatorWorker.close()
}
diff --git a/pkg/beaconfqdn/repository.go b/pkg/beaconfqdn/repository.go
new file mode 100644
index 00000000..4778fbd1
--- /dev/null
+++ b/pkg/beaconfqdn/repository.go
@@ -0,0 +1,90 @@
package beaconfqdn

import (
	"github.com/activecm/rita/pkg/data"
	"github.com/activecm/rita/pkg/hostname"
	"github.com/globalsign/mgo/bson"
)

type (

	// Repository for host collection
	Repository interface {
		CreateIndexes() error
		Upsert(hostnameMap map[string]*hostname.Input)
	}

	//updateInfo pairs a selector with its update query
	updateInfo struct {
		selector bson.M
		query    bson.M
	}

	//update ....
	update struct {
		beacon     updateInfo
		hostIcert  updateInfo
		hostBeacon updateInfo
	}

	//TSData ...
	TSData struct {
		Range      int64   `bson:"range"`
		Mode       int64   `bson:"mode"`
		ModeCount  int64   `bson:"mode_count"`
		Skew       float64 `bson:"skew"`
		Dispersion int64   `bson:"dispersion"`
		Duration   float64 `bson:"duration"`
	}

	//DSData ...
	DSData struct {
		Skew       float64 `bson:"skew"`
		Dispersion int64   `bson:"dispersion"`
		Range      int64   `bson:"range"`
		Mode       int64   `bson:"mode"`
		ModeCount  int64   `bson:"mode_count"`
	}

	//Result represents a beacon FQDN between a source IP and
	// an FQDN. An FQDN can be comprised of one or more destination IPs.
	// Contains information on connection delta times and the amount of data transferred
	Result struct {
		FQDN           string          `bson:"fqdn"`
		SrcIP          string          `bson:"src"`
		SrcNetworkName string          `bson:"src_network_name"`
		SrcNetworkUUID bson.Binary     `bson:"src_network_uuid"`
		Connections    int64           `bson:"connection_count"`
		AvgBytes       float64         `bson:"avg_bytes"`
		Ts             TSData          `bson:"ts"`
		Ds             DSData          `bson:"ds"`
		Score          float64         `bson:"score"`
		ResolvedIPs    []data.UniqueIP `bson:"resolved_ips"`
	}

	//TODO: Implement strobe for beacons FQDN
	//StrobeResult represents a unique connection with a large amount
	//of connections between the hosts
	StrobeResult struct {
		data.UniqueIPPair `bson:",inline"`
		ConnectionCount   int64 `bson:"connection_count"`
	}

	//uniqueSrcHostnamePair ...
	uniqueSrcHostnamePair struct {
		SrcIP          string      `bson:"src"`
		SrcNetworkUUID bson.Binary `bson:"src_network_uuid"`
		FQDN           string      `bson:"fqdn"`
	}
)

//BSONKey generates a BSON map which may be used to index a given a unique src
// fqdn pair
//Includes IP and Network UUID.
func (p uniqueSrcHostnamePair) BSONKey() bson.M {
	key := bson.M{
		"src":              p.SrcIP,
		"src_network_uuid": p.SrcNetworkUUID,
		"fqdn":             p.FQDN,
	}
	return key
}
diff --git a/pkg/beaconfqdn/results.go b/pkg/beaconfqdn/results.go
new file mode 100644
index 00000000..6673b17b
--- /dev/null
+++ b/pkg/beaconfqdn/results.go
@@ -0,0 +1,20 @@
package beaconfqdn

import (
	"github.com/activecm/rita/resources"
	"github.com/globalsign/mgo/bson"
)

//Results finds beacons FQDN in the database greater than a given cutoffScore
func Results(res *resources.Resources, cutoffScore float64) ([]Result, error) {
	ssn := res.DB.Session.Copy()
	defer ssn.Close()

	var beaconsFQDN []Result

	beaconFQDNQuery := bson.M{"score": bson.M{"$gt": cutoffScore}}

	err := ssn.DB(res.DB.GetSelectedDB()).C(res.Config.T.BeaconFQDN.BeaconFQDNTable).Find(beaconFQDNQuery).Sort("-score").All(&beaconsFQDN)

	return beaconsFQDN, err
}
diff --git a/pkg/beaconfqdn/sorter.go b/pkg/beaconfqdn/sorter.go
new file mode 100644
index 00000000..2328c80b
--- /dev/null
+++ b/pkg/beaconfqdn/sorter.go
@@ -0,0 +1,66 @@
package beaconfqdn

import (
	"sort"
	"sync"

	"github.com/activecm/rita/config"
	"github.com/activecm/rita/database"
	"github.com/activecm/rita/pkg/hostname"
	"github.com/activecm/rita/util"
)

type (
	sorter struct {
		db             *database.DB              // provides access to MongoDB
		conf           *config.Config            // contains details needed to access MongoDB
		sortedCallback func(*hostname.FqdnInput) // called on each analyzed result
		closedCallback func()                    // called when .close() is called and no more calls to analyzedCallback will be made
		sortChannel    chan *hostname.FqdnInput  // holds unanalyzed data
		sortWg         sync.WaitGroup            // wait for analysis to finish
	}
)

//newsorter creates a new collector for gathering data
func newSorter(db *database.DB, conf *config.Config, sortedCallback func(*hostname.FqdnInput), closedCallback func()) *sorter {
	return &sorter{
		db:   db,
		conf: conf,
		sortedCallback: sortedCallback,
		closedCallback: closedCallback,
		sortChannel:    make(chan *hostname.FqdnInput),
	}
}

//collect sends a chunk of data to be analyzed
func (s *sorter) collect(entry *hostname.FqdnInput) {
	s.sortChannel <- entry
}

//close waits for the collector to finish
func (s *sorter) close() {
	close(s.sortChannel)
	s.sortWg.Wait()
	s.closedCallback()
}

//start kicks off a new analysis thread
func (s *sorter) start() {
	s.sortWg.Add(1)
	go func() {

		for entry := range s.sortChannel {

			// strobes (nil TsList) pass through unsorted
			if (entry.TsList) != nil {
				//sort the size and timestamps to compute quantiles in the analyzer
				sort.Sort(util.SortableInt64(entry.TsList))
				sort.Sort(util.SortableInt64(entry.OrigBytesList))

			}

			s.sortedCallback(entry)

		}
		s.sortWg.Done()
	}()
}
diff --git a/pkg/beaconfqdn/writer.go b/pkg/beaconfqdn/writer.go
new file mode 100644
index 00000000..f883665c
--- /dev/null
+++ b/pkg/beaconfqdn/writer.go
@@ -0,0 +1,100 @@
package beaconfqdn

import (
	"sync"

	"github.com/activecm/rita/config"
	"github.com/activecm/rita/database"
	log "github.com/sirupsen/logrus"
)

type (
	writer struct {
		targetCollection string
		db               *database.DB   // provides access to MongoDB
		conf             *config.Config // contains details needed to access MongoDB
		log              *log.Logger    // main logger for RITA
		writeChannel     chan *update   // holds analyzed data
		writeWg          sync.WaitGroup // wait for writing to finish
	}
)

//newWriter creates a new writer object to write output data to blacklisted collections
func newWriter(targetCollection string, db *database.DB, conf *config.Config, log *log.Logger) *writer {
	return &writer{
		targetCollection: targetCollection,
		db:               db,
		conf:             conf,
		log:              log,
		writeChannel:     make(chan *update),
	}
}

//collect sends a group of results to the writer for writing out to the database
func (w *writer) collect(data *update) {
	w.writeChannel <- data
}

//close waits for the write threads to finish
func (w *writer) close() {
	close(w.writeChannel)
	w.writeWg.Wait()
}

//start kicks off a new write thread
func (w *writer) start() {
	w.writeWg.Add(1)
	go func() {
		ssn := w.db.Session.Copy()
		defer ssn.Close()

		for data := range w.writeChannel {

			// NOTE(review): the hostIcert/hostBeacon upserts below only run when
			// beacon.query is non-nil — confirm this nesting is intentional
			if data.beacon.query != nil {
				// update beacons table
				info, err := ssn.DB(w.db.GetSelectedDB()).C(w.targetCollection).Upsert(data.beacon.selector, data.beacon.query)

				if err != nil ||
					((info.Updated == 0) && (info.UpsertedId == nil)) {
					w.log.WithFields(log.Fields{
						"Module": "beaconsFQDN",
						"Info":   info,
						"Data":   data,
					}).Error(err)
				}

				// update hosts table with icert updates
				if data.hostIcert.query != nil {

					info, err = ssn.DB(w.db.GetSelectedDB()).C(w.conf.T.Structure.HostTable).Upsert(data.hostIcert.selector, data.hostIcert.query)

					if err != nil ||
						((info.Updated == 0) && (info.UpsertedId == nil) && (info.Matched == 0)) {
						w.log.WithFields(log.Fields{
							"Module": "beaconsFQDN",
							"Info":   info,
							"Data":   data,
						}).Error(err)
					}
				}

				// update hosts table with max beacon updates
				if data.hostBeacon.query != nil {

					// update hosts table
					info, err = ssn.DB(w.db.GetSelectedDB()).C(w.conf.T.Structure.HostTable).Upsert(data.hostBeacon.selector, data.hostBeacon.query)

					if err != nil ||
						((info.Updated == 0) && (info.UpsertedId == nil) && (info.Matched == 0)) {
						w.log.WithFields(log.Fields{
							"Module": "beaconsFQDN",
							"Info":   info,
							"Data":   data,
						}).Error(err)
					}
				}
			}
		}
		w.writeWg.Done()
	}()
}
diff --git a/pkg/data/repository.go b/pkg/data/repository.go
index a42090dc..de9d2cbf 100644
--- a/pkg/data/repository.go
+++ b/pkg/data/repository.go
@@ -77,43 +77,95 @@ func (u UniqueIP) BSONKey() bson.M {
 	return key
 }
 
-//UniqueIPPair binds a pair of UniqueIPs where direction matters.
-type UniqueIPPair struct { +//UniqueSrcIP is a unique IP which acts as the source in an IP pair +type UniqueSrcIP struct { SrcIP string `bson:"src"` SrcNetworkUUID bson.Binary `bson:"src_network_uuid"` SrcNetworkName string `bson:"src_network_name"` +} + +//AsSrc returns the UniqueIP in the UniqueSrcIP format +func (u UniqueIP) AsSrc() UniqueSrcIP { + return UniqueSrcIP{ + SrcIP: u.IP, + SrcNetworkUUID: u.NetworkUUID, + SrcNetworkName: u.NetworkName, + } +} + +//Unpair returns a copy of the SrcUniqueIP in UniqueIP format +func (u UniqueSrcIP) Unpair() UniqueIP { + return UniqueIP{ + IP: u.SrcIP, + NetworkUUID: u.SrcNetworkUUID, + NetworkName: u.SrcNetworkName, + } +} + +//BSONKey generates a BSON map which may be used to index a the source of a UniqueIP pair. +//Includes IP and Network UUID. +func (u UniqueSrcIP) BSONKey() bson.M { + key := bson.M{ + "src": u.SrcIP, + "src_network_uuid": u.SrcNetworkUUID, + } + return key +} + +//UniqueDstIP is a unique IP which acts as the destination in an IP Pair +type UniqueDstIP struct { DstIP string `bson:"dst"` DstNetworkUUID bson.Binary `bson:"dst_network_uuid"` DstNetworkName string `bson:"dst_network_name"` } -//NewUniqueIPPair binds a pair of UniqueIPs where direction matters. -func NewUniqueIPPair(source UniqueIP, destination UniqueIP) UniqueIPPair { - return UniqueIPPair{ - SrcIP: source.IP, - DstIP: destination.IP, - SrcNetworkUUID: source.NetworkUUID, - DstNetworkUUID: destination.NetworkUUID, - SrcNetworkName: source.NetworkName, - DstNetworkName: destination.NetworkName, +//AsDst returns the UniqueIP in the UniqueDstIP format +func (u UniqueIP) AsDst() UniqueDstIP { + return UniqueDstIP{ + DstIP: u.IP, + DstNetworkUUID: u.NetworkUUID, + DstNetworkName: u.NetworkName, } } -//Source returns the source UniqueIP from the pair. 
-func (p UniqueIPPair) Source() UniqueIP { +//Unpair returns a copy of the DstUniqueIP in UniqueIP format +func (u UniqueDstIP) Unpair() UniqueIP { return UniqueIP{ - IP: p.SrcIP, - NetworkUUID: p.SrcNetworkUUID, - NetworkName: p.SrcNetworkName, + IP: u.DstIP, + NetworkUUID: u.DstNetworkUUID, + NetworkName: u.DstNetworkName, } } -//Destination returns the destination UniqueIP from the pair. -func (p UniqueIPPair) Destination() UniqueIP { - return UniqueIP{ - IP: p.DstIP, - NetworkUUID: p.DstNetworkUUID, - NetworkName: p.DstNetworkName, +//BSONKey generates a BSON map which may be used to index a the destination of a UniqueIP pair. +//Includes IP and Network UUID. +func (u UniqueDstIP) BSONKey() bson.M { + key := bson.M{ + "dst": u.DstIP, + "dst_network_uuid": u.DstNetworkUUID, + } + return key +} + +//UniqueIPPair binds a pair of UniqueIPs where direction matters. +type UniqueIPPair struct { + UniqueSrcIP `bson:",inline"` + UniqueDstIP `bson:",inline"` +} + +//NewUniqueIPPair binds a pair of UniqueIPs where direction matters. +func NewUniqueIPPair(source UniqueIP, destination UniqueIP) UniqueIPPair { + return UniqueIPPair{ + UniqueSrcIP: UniqueSrcIP{ + SrcIP: source.IP, + SrcNetworkUUID: source.NetworkUUID, + SrcNetworkName: source.NetworkName, + }, + UniqueDstIP: UniqueDstIP{ + DstIP: destination.IP, + DstNetworkUUID: destination.NetworkUUID, + DstNetworkName: destination.NetworkName, + }, } } diff --git a/pkg/hostname/repository.go b/pkg/hostname/repository.go index 382cf3fe..61158341 100644 --- a/pkg/hostname/repository.go +++ b/pkg/hostname/repository.go @@ -5,21 +5,36 @@ import ( "github.com/globalsign/mgo/bson" ) -// Repository for hostnames collection -type Repository interface { - CreateIndexes() error - Upsert(domainMap map[string]*Input) -} +type ( + // Repository for hostnames collection + Repository interface { + CreateIndexes() error + Upsert(domainMap map[string]*Input) + } -//update .... 
-type update struct { - selector bson.M - query bson.M -} + //update .... + update struct { + selector bson.M + query bson.M + } -//Input .... -type Input struct { - Host string //A hostname - ResolvedIPs data.UniqueIPSet //Set of resolved UniqueIPs associated with a given hostname - ClientIPs data.UniqueIPSet //Set of DNS Client UniqueIPs which issued queries for a given hostname -} + //Input .... + Input struct { + Host string //A hostname + ResolvedIPs data.UniqueIPSet //Set of resolved UniqueIPs associated with a given hostname + ClientIPs data.UniqueIPSet //Set of DNS Client UniqueIPs which issued queries for a given hostname + } + + //FqdnInput holds the accumulated connection data between a single source IP and a hostname for FQDN beacon analysis + FqdnInput struct { + FQDN string //A hostname + Src data.UniqueSrcIP // Single src that connected to a hostname + ResolvedIPs data.UniqueIPSet //Set of resolved UniqueIPs associated with a given hostname + InvalidCertFlag bool + ConnectionCount int64 + TotalBytes int64 + TsList []int64 + OrigBytesList []int64 + DstBSONList []bson.M // set of resolved UniqueDstIPs since we need it in that format + } +) diff --git a/pkg/uconn/analyzer.go b/pkg/uconn/analyzer.go index 86634f54..8b2337cb 100644 --- a/pkg/uconn/analyzer.go +++ b/pkg/uconn/analyzer.go @@ -1,10 +1,11 @@ package uconn import ( - "github.com/activecm/rita/pkg/data" "strconv" "sync" + "github.com/activecm/rita/pkg/data" + "github.com/activecm/rita/config" "github.com/activecm/rita/database" "github.com/globalsign/mgo/bson" @@ -122,9 +123,9 @@ func (a *analyzer) start() { // user in the file), we need to customize the query to update based on // which ip in the connection was local. 
if datum.IsLocalSrc { - output.hostMaxDur = a.hostMaxDurQuery(datum.MaxDuration, datum.Hosts.Source(), datum.Hosts.Destination()) + output.hostMaxDur = a.hostMaxDurQuery(datum.MaxDuration, datum.Hosts.UniqueSrcIP.Unpair(), datum.Hosts.UniqueDstIP.Unpair()) } else if datum.IsLocalDst { - output.hostMaxDur = a.hostMaxDurQuery(datum.MaxDuration, datum.Hosts.Destination(), datum.Hosts.Source()) + output.hostMaxDur = a.hostMaxDurQuery(datum.MaxDuration, datum.Hosts.UniqueDstIP.Unpair(), datum.Hosts.UniqueSrcIP.Unpair()) } // set to writer channel diff --git a/reporting/report-beaconsfqdn.go b/reporting/report-beaconsfqdn.go new file mode 100644 index 00000000..13711700 --- /dev/null +++ b/reporting/report-beaconsfqdn.go @@ -0,0 +1,77 @@ +package reporting + +import ( + "bytes" + "html/template" + "os" + + "github.com/activecm/rita/pkg/beaconfqdn" + "github.com/activecm/rita/reporting/templates" + "github.com/activecm/rita/resources" +) + +func printBeaconsFQDN(db string, showNetNames bool, res *resources.Resources) error { + var w string + f, err := os.Create("beaconsfqdn.html") + if err != nil { + return err + } + defer f.Close() + + var beaconsFQDNTempl string + if showNetNames { + beaconsFQDNTempl = templates.BeaconsFQDNNetNamesTempl + } else { + beaconsFQDNTempl = templates.BeaconsFQDNTempl + } + + out, err := template.New("beaconfqdn.html").Parse(beaconsFQDNTempl) + if err != nil { + return err + } + + data, err := beaconfqdn.Results(res, 0) + if err != nil { + return err + } + + if len(data) == 0 { + w = "" + } else { + w, err = getBeaconFQDNWriter(data, showNetNames) + if err != nil { + return err + } + } + + return out.Execute(f, &templates.ReportingInfo{DB: db, Writer: template.HTML(w)}) +} + +func getBeaconFQDNWriter(beaconsFQDN []beaconfqdn.Result, showNetNames bool) (string, error) { + tmpl := "{{printf \"%.3f\" .Score}}" + if showNetNames { + tmpl += "{{.SrcNetworkName}}{{.SrcIP}}{{.FQDN}}" + } else { + tmpl += "{{.SrcIP}}{{.FQDN}}" + } + tmpl += 
"{{.Connections}}{{printf \"%.3f\" .AvgBytes}}" + tmpl += "{{.Ts.Range}}{{.Ds.Range}}{{.Ts.Mode}}{{.Ds.Mode}}{{.Ts.ModeCount}}{{.Ds.ModeCount}}" + tmpl += "{{printf \"%.3f\" .Ts.Skew}}{{printf \"%.3f\" .Ds.Skew}}{{.Ts.Dispersion}}{{.Ds.Dispersion}}" + tmpl += "\n" + + out, err := template.New("beaconfqdn").Parse(tmpl) + if err != nil { + return "", err + } + + w := new(bytes.Buffer) + + for _, result := range beaconsFQDN { + err = out.Execute(w, result) + if err != nil { + return "", err + } + } + + return w.String(), nil +} diff --git a/reporting/report.go b/reporting/report.go index a288969d..c5553966 100644 --- a/reporting/report.go +++ b/reporting/report.go @@ -169,6 +169,11 @@ func writeDB(db string, wd string, showNetNames bool, res *resources.Resources) fmt.Println("[-] Error writing beacons page: " + err.Error()) } + err = printBeaconsFQDN(db, showNetNames, res) + if err != nil { + fmt.Println("[-] Error writing beaconsFQDN page: " + err.Error()) + } + err = printStrobes(db, showNetNames, res) if err != nil { fmt.Println("[-] Error writing strobes page: " + err.Error()) diff --git a/reporting/templates/templates.go b/reporting/templates/templates.go index 4ec9c330..37372f4e 100644 --- a/reporting/templates/templates.go +++ b/reporting/templates/templates.go @@ -24,6 +24,7 @@ var dbHeader = `
  • RITA
  • Viewing: {{.DB}}
  • Beacons
  • +
  • Beacons FQDN
  • Strobes
  • DNS
  • BL Source IPs
  • @@ -115,6 +116,34 @@ var BeaconsNetNamesTempl = dbHeader + ` ` +// BeaconsFQDNTempl is our beaconsFQDN html template +var BeaconsFQDNTempl = dbHeader + ` +
    + + + + {{.Writer}} +
    ScoreSourceFQDNConnectionsAvg. Bytes + Intvl. RangeSize RangeIntvl. ModeSize ModeIntvl. Mode CountSize Mode CountIntvl. SkewSize SkewIntvl. DispersionSize Dispersion +
    +
    +` + +// BeaconsFQDNNetNamesTempl is our beacons html template with network names +var BeaconsFQDNNetNamesTempl = dbHeader + ` +
    + + + + + + + + {{.Writer}} +
    ScoreSource NetworkSourceFQDNConnectionsAvg. BytesIntvl. RangeSize RangeIntvl. ModeSize ModeIntvl. Mode CountSize Mode CountIntvl. SkewSize SkewIntvl. DispersionSize Dispersion
    +
    +` + //StrobesTempl is the strobes html template var StrobesTempl = dbHeader + `