-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsys_resource.py
371 lines (314 loc) · 11.6 KB
/
sys_resource.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
#!/usr/bin/env python
# coding=utf-8
"""System base-resource statistics.

Totals and top-5 rankings for CPU, memory, disk, and network traffic.
Depends on the collectors that gather the raw CPU/memory/disk/network
source data.
"""
# NOTE(review): `os`, `heapq`, `get_client` and `check_tsdb` appear unused
# in this chunk — confirm against the rest of the file before removing.
import os
import json
import heapq
import logging
import time
from argus_statistics.utils import get_client, urlopen, send_to_db, check_tsdb, check_scale
from argus_statistics.settings import OPENTSDB_HOST, OPENTSDB_PORT
# MongoDB database / collection that receive the aggregated document.
db = "argus-statistics"
top_collection = "sys_resource"
interval_time = 10 * 60  # seconds; lookback window for TSDB queries
logger = logging.getLogger("top 5 module")
def update_sys_resource():
    """Collect cluster-wide resource statistics and store them in MongoDB.

    Gathers gauge totals (CPU cores/usage, memory, disk) plus the top-5
    rankings (CPU, memory, disk, network in/out) and writes a single
    document into the configured database/collection.
    """
    logger.debug("starting top module")
    cpu_cores = get_cpu_cores()
    cpu_usage = get_cpu_usage()
    mem_usage = get_mem_usage()
    df_usage = get_df_usage()
    logger.debug("round instrument data ready, next is top 5data")
    fields = [
        'check_time', 'cpu_usage', 'cpu_all', 'cpu_using', 'mem_usage',
        'mem_all', 'mem_using', 'disk_usage', 'disk_all', 'disk_using',
        'cpu_topN', 'mem_topN', 'disk_topN', 'net_in_topN', 'net_out_topN'
    ]
    result = {i: None for i in fields}
    result['check_time'] = int(time.time())
    result['cpu_all'] = cpu_cores
    result['cpu_usage'] = cpu_usage
    result['mem_all'], result['mem_using'] = mem_usage
    result['disk_all'], result['disk_using'] = df_usage
    # Guard against an empty cluster: no samples means totals of 0.
    try:
        result['mem_usage'] = result['mem_using'] / result['mem_all']
    except ZeroDivisionError:
        result['mem_usage'] = 0.0
    try:
        result['disk_usage'] = result['disk_using'] / result['disk_all']
    except ZeroDivisionError:
        result['disk_usage'] = 0.0
    result['cpu_topN'] = get_topN_cpu()
    net_topn_data = get_topN_netIO()
    mnd_topn_data = get_topN_mnd()
    result["mem_topN"] = mnd_topn_data["mem.bytes.memavailable"]
    result["disk_topN"] = mnd_topn_data["df.bytes.used"]
    result["net_in_topN"] = net_topn_data["cluster.net.dev.receive"]
    result["net_out_topN"] = net_topn_data["cluster.net.dev.transmit"]
    # Consistency fix: use the module-level constants instead of
    # repeating the 'argus-statistics' / 'sys_resource' literals.
    send_to_db(db, top_collection, result)
    logger.debug("update is already success")
def get_cpu_cores():
    """Return the summed CPU core count across the cluster.

    Uses metric <cluster.cpu.cores>:
    /api/query/?start=10m-ago&m=sum:cluster.cpu.cores{host=*}
    """
    # The cluster-scale count is not needed here, only the summed cores.
    _, cores = get_usage("cluster.cpu.cores", interval_time)
    return cores
def get_cpu_usage():
    """Return the average CPU usage over the hosts that reported.

    Uses metric <cluster.cpu.usage>:
    /api/query/?start=10m-ago&m=sum:cluster.cpu.usage{host=*}

    :return: mean usage, or 0.0 when no host reported in the window.
    """
    count, sum_result = get_usage("cluster.cpu.usage", interval_time)
    # Idiom fix: the original returned `float(count)` for the empty case,
    # which only works because count is 0 there — say 0.0 explicitly.
    return sum_result / count if count else 0.0
def get_mem_usage():
    """Return cluster-wide memory totals.

    Uses metrics <mem.bytes.memtotal> and <mem.bytes.used>.

    :return: (total_bytes, used_bytes) summed over the cluster.
    :rtype: tuple
    """
    # The scale counts from get_usage are unused here.
    _, total_sum = get_usage('mem.bytes.memtotal', interval_time)
    _, used_sum = get_usage('mem.bytes.used', interval_time)
    return (total_sum, used_sum)
def get_df_usage():
    """Return cluster-wide disk totals.

    Uses metrics <df.bytes.total> and <df.bytes.used>.

    :return: (total_bytes, used_bytes) summed over the cluster.
    :rtype: tuple
    """
    # The scale counts from get_usage are unused here.
    _, total_sum = get_usage('df.bytes.total', interval_time)
    _, used_sum = get_usage('df.bytes.used', interval_time)
    return total_sum, used_sum
def get_usage(metric: str, interval_time: int):
    r"""Get a metric's cluster scale and the sum of its latest datapoints.

    :param metric: metric name to query.
    :param interval_time: lookback window in seconds (start=<N>s-ago).
    :return: (count, total) — the cluster scale reported by check_scale,
             and the sum of each valid series' most recent datapoint that
             falls inside the query window.
    :rtype: tuple
    """
    count, ignored = check_scale(metric, f'{interval_time}s')
    param = {
        'start': f'{interval_time}s-ago',
        'm': f'sum:{metric}' + '{host=*}',
    }
    start = time.time()
    resp = urlopen(f'http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?', param)
    # Bug fix: the original fell through with `_total` undefined on a
    # non-200 response and raised NameError; treat it as "no data".
    if resp is None or resp.status != 200:
        return count, 0
    series_list = json.load(resp)
    # Drop series whose tags check_scale marked as ignorable.
    valid_source = [s for s in series_list if s['tags'] not in ignored]
    total = 0
    for series in valid_source:
        # Newest timestamp; dps keys are numeric-string epochs.
        last = max(series['dps'].keys(), key=int)
        # Only aggregate series whose latest point is near the query time.
        if (start - interval_time) <= int(last) <= (start + interval_time):
            total += series['dps'][last]
    return count, total
def get_raw_data(url, Param):
    """Query the TSDB and reduce every series to its newest datapoint.

    Raw series:
        {"metric": "mem.bytes.memavailable", "tags": {"host": "cdh180"},
         "aggregateTags": [], "dps": {"1510535675": 12250939392}}
    Reduced form:
        {"host": "cdh180", "data": 12250939392}

    :param url: TSDB query endpoint.
    :param Param: query-string parameters passed through to urlopen.
    :return: list of {"host", "data"} dicts, or 0 on request failure.
    """
    response = urlopen(url, Param)
    if response is None or response.status != 200:
        return 0
    response_data = json.load(response)
    available_data = []
    for item in response_data:
        if not item["dps"]:
            # Host produced no datapoints in the window.
            continue
        # Consistency fix: pick the newest timestamp explicitly (as
        # get_usage does) instead of relying on JSON insertion order.
        newest_key = max(item["dps"].keys(), key=int)
        available_data.append({
            "host": item["tags"]["host"],
            "data": item["dps"][newest_key],
        })
    return available_data
def sorting_data(a_data, metric_name):
    """Sort reduced metric entries in descending order.

    CPU and network metrics sort on the raw value under "data"; memory
    and disk metrics sort on the precomputed percentage under "per".

    Example:
        in:  [{"host": "cdh180", "data": 1234}, {"host": "cdh182", "data": 55555}]
        out: [{"host": "cdh182", "data": 55555}, {"host": "cdh180", "data": 1234}]

    :param a_data: list produced by get_raw_data / get_mnd_usage.
    :return: a new sorted list; [] for empty/falsy input.
    """
    if not a_data:
        return []
    normal = ("cluster.cpu.usage", "cluster.net.dev.receive", "cluster.net.dev.transmit")
    key_field = 'data' if metric_name in normal else 'per'
    return sorted(a_data, key=lambda entry: entry[key_field], reverse=True)
def add_percentage(s_data, metric_name):
    """Annotate sorted entries with usage / percentage / display fields.

    Input (already sorted, largest first):
        [{'data': 55555, 'host': 'cdh182'}, {'data': 3333, 'host': 'cdh251'}]
    Output:
        [{'host': 'cdh182', 'usage': ..., 'per': ..., 'usageprint': ...}, ...]

    The 'data' key is consumed and removed from every entry.

    :param s_data: list already sorted descending by sorting_data.
    :return: the same list, mutated in place; [] for empty input.
    """
    if not s_data:
        return []
    # Largest raw value — entries arrive sorted descending.
    max_data = s_data[0]['data']
    normal = ("cluster.cpu.usage", "cluster.net.dev.receive", "cluster.net.dev.transmit")
    for item in s_data:
        value = item.pop("data")
        if metric_name not in normal:
            # Memory/disk entries already carry 'per'; just pretty-print.
            item["usage"] = value
            item["usageprint"] = tranfor_printusage(value)
            continue
        item["usage"] = round(value, 2)
        if metric_name == "cluster.cpu.usage":
            # CPU usage is a ratio; render it as a percentage string.
            item["per"] = item["usage"] * 100
            item["usageprint"] = f'{round(item["per"], 2)}%'
        else:
            # Network rates: percentage relative to the busiest host.
            item["per"] = round(round(value / max_data, 2) * 100, 2)
            item["usageprint"] = tranfor_printusage(value)
    return s_data
def tranfor_printusage(usage):
    """Render a raw byte count as a human-readable unit string.

    Thin wrapper around tranfer_unit, kept for call-site readability.

    :param usage: numeric byte value.
    :return: formatted string such as "1.5Gb".
    """
    return tranfer_unit(usage)
def get_raw_data_formnd(url, Param):
    """Fetch raw TSDB series for the memory/disk metrics, unreduced.

    Memory and disk need the full series so usage can be divided by the
    matching totals; keeping this separate from get_raw_data avoids
    coupling the two processing paths.

    :param url: TSDB query endpoint.
    :param Param: query-string parameters.
    :return: parsed JSON list on success, 0 on failure.
    """
    response = urlopen(url, Param)
    # Robustness fix: urlopen may return None (get_raw_data guards for
    # this); the original raised AttributeError on `.status` here.
    if response is None or response.status != 200:
        return 0
    return json.load(response)
def get_topN_cpu():
    """Return the top-5 hosts by CPU usage.

    :return: list of annotated entries (see add_percentage).
    """
    metric = "cluster.cpu.usage"
    url = f"http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?"
    param = {'start': '10m-ago', 'm': f'sum:{metric}' + '{host=*}'}
    metric_data = get_raw_data(url, param)
    # Bug fix: sorting_data sorts descending, so the top 5 are the FIRST
    # five entries; the old [-5:] slice selected the five smallest.
    sort_data = sorting_data(metric_data, metric)[:5]
    return add_percentage(sort_data, metric)
def get_mnd_usage(m_data, t_data):
    """Combine per-host usage series with their matching total series.

    :param m_data: raw series for the usage metric (one item per host).
    :param t_data: raw series for the corresponding total metric.
    :return: list of {"host", "data", "per"} dicts; hosts with no
             datapoints or no matching total are skipped.
    """
    # Bug fix: the original paired m_data and t_data by a running index,
    # which silently misaligned hosts whenever either list skipped an
    # entry. Match each usage series to its total by host tag instead.
    totals_by_host = {}
    for total_item in t_data:
        dps = total_item["dps"]
        if dps:
            newest = list(dps.keys())[-1]
            totals_by_host[total_item["tags"]["host"]] = dps[newest]
    available_data = []
    for item in m_data:
        if not item["dps"]:
            continue
        host = item["tags"]["host"]
        if host not in totals_by_host:
            # No total reported for this host; cannot compute a ratio.
            continue
        newest_key = list(item["dps"].keys())[-1]
        data = item["dps"][newest_key]
        # Keep the original rounding order to preserve output values.
        per = round(data / totals_by_host[host], 2) * 100
        available_data.append({"host": host, "data": data, "per": per})
    return available_data
def get_topN_mnd():
    """Return top-5 memory-available and disk-used rankings per host.

    :return: dict keyed by metric name, each value an annotated, sorted
             list of at most five entries.
    """
    metrics = ("mem.bytes.memavailable", "df.bytes.used")
    url = f"http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?"
    data_dict = {}
    for metric in metrics:
        param = {'start': '10m-ago', 'm': f'sum:{metric}' + '{host=*}'}
        if metric == "df.bytes.used":
            total_item = "df.bytes.total"
        else:
            total_item = "mem.bytes.memtotal"
        # NOTE(review): totals use a 5m window while usage uses 10m —
        # presumably intentional, but confirm they cannot desynchronize.
        total_param = {'start': '5m-ago', 'm': f'sum:{total_item}' + '{host=*}'}
        metric_data = get_raw_data_formnd(url, param)
        total_metric_data = get_raw_data_formnd(url, total_param)
        # Robustness fix: get_raw_data_formnd returns 0 on failure, which
        # previously crashed get_mnd_usage with a TypeError.
        if not metric_data or not total_metric_data:
            available_data = []
        else:
            available_data = get_mnd_usage(metric_data, total_metric_data)
        # Consistency fix: the other top-N helpers cap the ranking at
        # five entries; this one previously returned every host.
        sorted_data = sorting_data(available_data, metric)[:5]
        data_dict[metric] = add_percentage(sorted_data, metric)
    return data_dict
def get_topN_netIO():
    """Return the top-5 hosts by network receive and transmit rates.

    :return: dict keyed by metric name, each value an annotated top-5 list.
    """
    metrics = ("cluster.net.dev.receive", "cluster.net.dev.transmit")
    url = f"http://{OPENTSDB_HOST}:{OPENTSDB_PORT}/api/query?"
    data_dict = {}
    for metric in metrics:
        param = {'start': '10m-ago', 'm': f'sum:{metric}' + '{host=*}'}
        metric_data = get_raw_data(url, param)
        # Bug fix: descending sort means the top 5 are the FIRST five
        # entries, not the last five as the old [-5:] slice assumed.
        sort_data = sorting_data(metric_data, metric)[:5]
        data_dict[metric] = add_percentage(sort_data, metric)
    return data_dict
def tranfer_unit(number):
    """Convert a byte count into a short human-readable string.

    :param number: numeric byte value.
    :return: formatted string, e.g. "512b", "2.0Kb", "1.5Gb".
    """
    # Boundary fix: the original strict comparisons left exact powers of
    # 1024 (1024, 2**20, 2**30) unconverted, printing e.g. "1048576b".
    if number >= 2 ** 30:
        unit_name, count = "Gb", 3
    elif number >= 2 ** 20:
        unit_name, count = "Mb", 2
    elif number >= 2 ** 10:
        unit_name, count = "Kb", 1
    else:
        unit_name, count = "b", 0
    if count != 0:
        unit_number = round(number / ((2 ** 10) ** count), 2)
    else:
        unit_number = round(number, 2)
    return "{num}{name}".format(num=unit_number, name=unit_name)
if __name__ == '__main__':
    # Run a single collection cycle when invoked as a script.
    update_sys_resource()