From ffb7ec6fd96bb6b0321c3286fedfc7686833966b Mon Sep 17 00:00:00 2001
From: djerfy
Date: Wed, 10 Jan 2024 19:54:12 +0100
Subject: [PATCH] feat: prepare modules integration

Signed-off-by: djerfy
---
 src/config.yaml                               | 12 ++-
 src/modules/common/functions.py               | 11 +++
 src/modules/kubernetes/openebs/__init__.py    |  1 +
 .../kubernetes/openebs/cstorpoolclusters.py   | 88 +++++++++++++++++++
 src/modules/kubernetes/trivy/__init__.py      |  1 +
 .../kubernetes/trivy/vulnerabilityreports.py  | 19 ++++
 src/zabbix-kubernetes-discovery.py            | 18 ++--
 7 files changed, 140 insertions(+), 10 deletions(-)
 create mode 100644 src/modules/kubernetes/openebs/__init__.py
 create mode 100644 src/modules/kubernetes/openebs/cstorpoolclusters.py
 create mode 100644 src/modules/kubernetes/trivy/__init__.py
 create mode 100644 src/modules/kubernetes/trivy/vulnerabilityreports.py

diff --git a/src/config.yaml b/src/config.yaml
index 90d40c7..3cc1110 100644
--- a/src/config.yaml
+++ b/src/config.yaml
@@ -45,24 +45,28 @@ monitoring:
     labels:
       include: []
       exclude: []
-  # optional
+  # openebs
   openebs:
     enabled: False
+    engine: cstor
     labels:
       include: []
       exclude: []
+  # velero
   velero:
-    enabled: True
+    enabled: False
     labels:
       include: []
       exclude: []
+  # trivy
   trivy:
-    enabled: True
+    enabled: False
     labels:
       include: []
       exclude: []
+  # certificates
   certs:
-    enabled: True
+    enabled: False
     labels:
       include: []
       exclude: []
diff --git a/src/modules/common/functions.py b/src/modules/common/functions.py
index fe20eae..6080e5e 100644
--- a/src/modules/common/functions.py
+++ b/src/modules/common/functions.py
@@ -20,3 +20,14 @@ def matchLabels(match_labels=None, object_labels=None):
                 return True
 
     return False
+
+def rawObjects(data={}):
+    """
+    description: get objects from a raw api response and return only the items list
+    return: list
+    """
+    for key, value in data.items():
+        if key == "items":
+            return value
+
+    return []
diff --git a/src/modules/kubernetes/openebs/__init__.py b/src/modules/kubernetes/openebs/__init__.py
new file mode 100644
index 0000000..f4a76e9
--- /dev/null
+++ b/src/modules/kubernetes/openebs/__init__.py
@@ -0,0 +1 @@
+from modules.kubernetes.openebs.cstorpoolclusters import openebsGetCstorpoolclusters, baseOpenebsCstorpoolclusters
diff --git a/src/modules/kubernetes/openebs/cstorpoolclusters.py b/src/modules/kubernetes/openebs/cstorpoolclusters.py
new file mode 100644
index 0000000..dcf8a07
--- /dev/null
+++ b/src/modules/kubernetes/openebs/cstorpoolclusters.py
@@ -0,0 +1,88 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def openebsGetCstorpoolclusters(config=None):
+    """
+    description: get cstorpoolclusters data
+    return: list
+    """
+    kubernetes = client.CustomObjectsApi()
+
+    cstorpoolclusters = []
+
+    for cstorpoolcluster in rawObjects(kubernetes.list_cluster_custom_object(group="cstor.openebs.io", version="v1", plural="cstorpoolclusters")):
+        output = {
+            "name": cstorpoolcluster['metadata']['name'],
+            "namespace": cstorpoolcluster['metadata']['namespace'],
+            "instances": {
+                "desired": cstorpoolcluster['status']['desiredInstances'],
+                "healthy": cstorpoolcluster['status']['healthyInstances'],
+                "provisioned": cstorpoolcluster['status']['provisionedInstances']
+            },
+            "version": {
+                "desired": cstorpoolcluster['status']['versionDetails']['desired'],
+                "current": cstorpoolcluster['status']['versionDetails']['status']['current']
+            }
+        }
+
+        if matchLabels(config['labels']['exclude'], cstorpoolcluster['metadata'].get('labels', {})):
+            continue
+
+        if config['labels']['include'] != []:
+            if not matchLabels(config['labels']['include'], cstorpoolcluster['metadata'].get('labels', {})):
+                continue
+
+        if any(c['name'] == output['name'] and c['namespace'] == output['namespace'] for c in cstorpoolclusters):
+            continue
+
+        cstorpoolclusters.append(output)
+
+    return cstorpoolclusters
+
+def ZabbixDiscoveryCstorpoolclusters(clustername, cstorpoolclusters=[]):
+    """
+    description: create a discovery for cstorpoolclusters, per namespace
+    return: class ZabbixMetric
+    """
+    discovery = {"data":[]}
+
+    for cstorpoolcluster in cstorpoolclusters:
+        output = {
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAMESPACE}": cstorpoolcluster['namespace'],
+            "{#KUBERNETES_OPENEBS_CSTORPOOLCLUSTER_NAME}": cstorpoolcluster['name']}
+        discovery['data'].append(output)
+
+    sender = [ZabbixMetric(clustername, "kubernetes.openebs.cstorpoolclusters.discovery", json.dumps(discovery))]
+
+    return sender
+
+def ZabbixItemCstorpoolclusters(clustername, cstorpoolclusters=[]):
+    """
+    description: create an item for cstorpoolclusters, per namespace
+    return: class ZabbixMetric
+    """
+    sender = []
+
+    for cstorpoolcluster in cstorpoolclusters:
+        sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['desired']))
+        sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.healthyInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['healthy']))
+        sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.provisionedInstances[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['instances']['provisioned']))
+        sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.desiredVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['desired']))
+        sender.append(ZabbixMetric(clustername, f"kubernetes.openebs.cstorpoolclusters.currentVersion[{cstorpoolcluster['namespace']},{cstorpoolcluster['name']}]", cstorpoolcluster['version']['current']))
+
+    return sender
+
+def baseOpenebsCstorpoolclusters(mode=None, config=None):
+    """
+    description: monitoring openebs cstorpoolclusters
+    return: class ZabbixMetric
+    """
+    if mode == "discovery":
+        return ZabbixDiscoveryCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs']))
+    if mode == "item":
+        return ZabbixItemCstorpoolclusters(config['kubernetes']['name'], openebsGetCstorpoolclusters(config['monitoring']['openebs']))
+    return []
diff --git a/src/modules/kubernetes/trivy/__init__.py b/src/modules/kubernetes/trivy/__init__.py
new file mode 100644
index 0000000..9ffd180
--- /dev/null
+++ b/src/modules/kubernetes/trivy/__init__.py
@@ -0,0 +1 @@
+from modules.kubernetes.trivy.vulnerabilityreports import trivyGetVulnerabilityreports
diff --git a/src/modules/kubernetes/trivy/vulnerabilityreports.py b/src/modules/kubernetes/trivy/vulnerabilityreports.py
new file mode 100644
index 0000000..1fad38f
--- /dev/null
+++ b/src/modules/kubernetes/trivy/vulnerabilityreports.py
@@ -0,0 +1,19 @@
+from kubernetes import client
+from pyzabbix import ZabbixMetric
+from modules.common.functions import *
+import json, urllib3
+
+urllib3.disable_warnings()
+
+def trivyGetVulnerabilityreports(config=None):
+    """
+    description: get vulnerabilityreports data
+    return: list
+    """
+    kubernetes = client.CustomObjectsApi()
+
+    reports = []
+
rawObjects(kubernetes.list_cluster_custom_object(group="aquasecurity.github.io", version="v1alpha1", plural="vulnerabilityreports")): + print(vuln['metadata']['name']) + print(vuln['report']['summary']) diff --git a/src/zabbix-kubernetes-discovery.py b/src/zabbix-kubernetes-discovery.py index 90b190d..863f6eb 100644 --- a/src/zabbix-kubernetes-discovery.py +++ b/src/zabbix-kubernetes-discovery.py @@ -6,6 +6,7 @@ from kubernetes import config as kube_config from pyzabbix import ZabbixSender from modules.kubernetes.base import * +from modules.kubernetes.openebs import * parser = argparse.ArgumentParser() parser.add_argument("--config-file", dest="config_file", action="store", required=False, help="Configuration file (default: config.yaml)", default="config.yaml") @@ -56,36 +57,41 @@ def mainThread(func): if __name__ == "__main__": logging.info("Application zabbix-kubernetes-discovery started") - # cronjobs + # cronjobs (base) if config['monitoring']['cronjobs']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseCronjobs(mode="item", config=config))) - # daemonsets + # daemonsets (base) if config['monitoring']['daemonsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDaemonsets(mode="item", config=config))) - # deployments + # deployments (base) if config['monitoring']['deployments']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseDeployments(mode="item", config=config))) - # nodes + # nodes (base) if config['monitoring']['nodes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseNodes(mode="item", config=config))) - # statefulsets + # statefulsets (base) if config['monitoring']['statefulsets']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseStatefulsets(mode="item", config=config))) - # volumes + # volumes (base) if config['monitoring']['volumes']['enabled']: schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="discovery", config=config))) schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseVolumes(mode="item", config=config))) + # cstorpoolclusters (openebs) + if config['monitoring']['openebs']['enabled']: + schedule.every(config['zabbix']['schedule']['discovery']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="discovery", config=config))) + schedule.every(config['zabbix']['schedule']['items']).seconds.do(mainThread, lambda: mainSend(baseOpenebsCstorpoolclusters(mode="item", config=config))) + # tasks while True: 
         schedule.run_pending()