-
Notifications
You must be signed in to change notification settings - Fork 0
/
isilon_collector.py
247 lines (214 loc) · 10.3 KB
/
isilon_collector.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
import datetime
# Project-local helpers: logging setup, the Collector (postgres/rabbitmq)
# facade, and an HTTPS GET that returns decoded JSON.
from common_functions import get_logger
from common_functions import Collector
from common_functions import get_https_response_with_json
# Cluster address/credentials are read from the project's params module.
import params as param
# Module-wide logger and the prefix used in every log message below.
logger = get_logger('Isilon')
strmark = 'ISILON'
def get_isilon_information(ip, user, passwd):
    """Fetch cluster-level configuration from an Isilon cluster.

    Queries the OneFS platform API endpoint /platform/1/cluster/config over
    HTTPS (port 8080) and logs a human-readable summary (serial number,
    cluster name, OneFS version, per-node device IDs/GUIDs).

    Args:
        ip: Management IP address or hostname of the cluster.
        user: API user name.
        passwd: API password.

    Returns:
        The decoded JSON response (dict) on success, or None when the
        request fails. Failures are logged (with traceback), not re-raised,
        so callers must handle a None result.
    """
    api = 'https://' + ip + ':8080' + '/platform/1/cluster/config'
    try:
        # Execute HTTPS GET and decode the JSON body
        ret = get_https_response_with_json(user, passwd, api)
        # Lazy %-style args avoid building the strings when INFO is disabled
        logger.info('S/N : %s', ret['local_serial'])
        logger.info('Cluster Name : %s', ret['name'])
        logger.info('OneFS Version : %s <<Build : %s>>',
                    ret['onefs_version']['release'], ret['onefs_version']['build'])
        logger.info('Nodes count : %s', len(ret['devices']))
        logger.info('Nodes information : ')
        for d in ret['devices']:
            logger.info('DeviceID : %s <<GUID : %s>>', d['devid'], d['guid'])
        return ret
    except Exception:
        # logger.exception records the traceback, which the original version
        # of this handler silently discarded.
        logger.exception(strmark + '_Collector>>> Exception was thrown by common function. '
                         'Error when getting information from Isilon ...')
        return None
def calculate_average(t, json):
    """Compute a cluster-wide average utilization from a statistics payload.

    Args:
        t: Metric flag, either 'cpu' or 'bandwidth'.
        json: Decoded response from /platform/1/statistics/history. Expected
            shape: {'stats': [{'devid': ..., 'values': [{'value': n}, ...]},
            ...]} — TODO confirm against the OneFS API in use.

    Returns:
        The average (float) across all devices for the requested metric, or
        None when *t* is not a recognised flag (an error is logged).
    """
    if t == 'cpu':
        avg_sum = 0
        for stat in json['stats']:
            # OneFS reports idle time in tenths of a percent; /10 gives idle
            # percent, and (100 - idle) gives the busy (utilization) percent.
            idle_pct = sum(v['value'] / 10 for v in stat['values']) / len(stat['values'])
            cpu_average_byid = 100 - idle_pct
            logger.info(strmark + '_Collector>>> CPU Average Util on DeviceID-' + str(stat['devid']) + ' : ' + str(cpu_average_byid))
            avg_sum += cpu_average_byid
        cpu_average = avg_sum / len(json['stats'])
        logger.info(strmark + '_Collector>>> Average CPU Utilization : ' + str(cpu_average))
        return cpu_average
    elif t == 'bandwidth':
        avg_sum = 0
        for stat in json['stats']:
            # Mean of the sampled byte rates for this device
            bandwidth_average_byid = sum(v['value'] for v in stat['values']) / len(stat['values'])
            avg_sum += bandwidth_average_byid
        bandwidth_average = avg_sum / len(json['stats'])
        logger.info(strmark + '_Collector>>> Bandwidth average Utilization : ' + str(bandwidth_average))
        return bandwidth_average
    else:
        logger.error('Specified flag is wrong...')
        return None
def _ddl_columns(column_maps):
    """Render an ordered {column: sql_type} mapping as a '(col type,col type)' DDL fragment."""
    return '(' + ','.join(k + ' ' + v for k, v in column_maps.items()) + ')'


def _timestamp_now():
    """Current local time formatted the way the collector tables store it."""
    return datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')


def main():
    """Collect capacity, quota, CPU and bandwidth stats from an Isilon cluster.

    Reads connection parameters from the params module, creates one postgres
    table per metric via the Collector helper, pulls each metric from the
    OneFS platform API, and pushes the rows to postgres, bracketing the run
    with 'Start'/'END' rabbitmq messages.
    """
    logger.info(strmark + '_Collector>>> Isilon Collector boots up...!!')
    # Setting parameters for target Isilon
    str_ipaddress = param.isilon_address
    str_username = param.isilon_user
    str_password = param.isilon_pass
    # Getting general Isilon information (may be None on failure — see get_isilon_information)
    logger.info(strmark + '_Collector>>> Target Isilon : ' + str_ipaddress)
    logger.info(strmark + '_Collector>>> General Information : ')
    isilon_info = get_isilon_information(str_ipaddress, str_username, str_password)
    # Instantiate Collector class with constructor
    isilon_collector = Collector(strmark='isilon')
    # Send start message to rabbitmq
    isilon_collector.send_message('Start')

    # --- Run main task (capacity)
    # Create capacity table in postgres
    capacity_maps = {'clustername': 'varchar',
                     'timestamp': 'varchar',
                     'ifs_bytes_total': 'bigint',
                     'ifs_bytes_used': 'bigint',
                     'ifs_percent_used': 'double precision',
                     'ifs_bytes_free': 'bigint',
                     'ifs_percent_free': 'double precision'
                     }
    isilon_collector.create_table(metric='capacity', columns=_ddl_columns(capacity_maps))
    # Get capacity information
    c_results = {}
    for col in capacity_maps:
        if 'clustername' in col:
            c_results[col] = isilon_info['name']
        elif 'timestamp' in col:
            c_results[col] = _timestamp_now()
        else:
            # Column names map 1:1 to OneFS statistic keys ('ifs_bytes_used' -> 'ifs.bytes.used')
            uri = 'https://' + str_ipaddress + ':8080' + '/platform/1/statistics/current?key=' + col.replace('_', '.')
            ret = get_https_response_with_json(str_username, str_password, uri)
            c_results[col] = ret['stats'][0]['value']
    # Insert capacity information to postgres
    isilon_collector.send_data_to_postgres(data=c_results, data_type='capacity')

    # --- Run main task (quota)
    # Create quota table in postgres
    quota_maps = {'clustername': 'varchar',
                  'timestamp': 'varchar',
                  'path': 'varchar',
                  'hard_threshold': 'bigint',
                  'logical_with_overhead': 'bigint',
                  'physical_with_overhead': 'bigint'
                  }
    isilon_collector.create_table(metric='quota', columns=_ddl_columns(quota_maps))
    # Get quota information (one API call; each column is a list over all quotas)
    uri = 'https://' + str_ipaddress + ':8080' + '/platform/1/quota/quotas'
    ret = get_https_response_with_json(str_username, str_password, uri)
    q_results = {}
    for col in quota_maps:
        value_list = []
        if 'clustername' in col:
            q_results[col] = isilon_info['name']
        elif 'timestamp' in col:
            q_results[col] = _timestamp_now()
        elif col == 'path':
            for v in ret['quotas']:
                value_list.append(v['path'])
            q_results[col] = value_list
        elif col == 'hard_threshold':
            for v in ret['quotas']:
                # Quotas without a hard threshold report None; store 0 instead
                if v['thresholds']['hard'] is None:
                    value_list.append(0)
                else:
                    value_list.append(v['thresholds']['hard'])
            q_results[col] = value_list
        elif col == 'logical_with_overhead':
            for v in ret['quotas']:
                value_list.append(v['usage']['logical'])
            q_results[col] = value_list
        elif col == 'physical_with_overhead':
            for v in ret['quotas']:
                value_list.append(v['usage']['physical'])
            q_results[col] = value_list
        else:
            logger.error(strmark + '_Collector>>> Some Errors...')
    # Insert quota information to postgres
    isilon_collector.send_data_to_postgres(data=q_results, data_type='quota')

    # --- Run main task (performance: CPU/bandwidth)
    # Common URI prefix for history statistics (CPU/bandwidth)
    uri_prefix = 'https://' + str_ipaddress + ':8080' + '/platform/1/statistics/history'
    # Create performance (CPU) table in postgres
    cpu_maps = {'clustername': 'varchar',
                'timestamp': 'varchar',
                'average_cpu': 'double precision'
                }
    isilon_collector.create_table(metric='cpu', columns=_ddl_columns(cpu_maps))
    # Get CPU information
    cpu_results = {}
    for col in cpu_maps:
        if 'clustername' in col:
            cpu_results[col] = isilon_info['name']
        elif 'timestamp' in col:
            cpu_results[col] = _timestamp_now()
        else:
            # 24-hour window ending now, in Unix seconds
            unix_today = int(datetime.datetime.now().timestamp())
            unix_yesterday = unix_today - 86400
            # Query idle-CPU history across all nodes
            uri_key_cpu = 'node.cpu.idle.avg&nodes=all'
            uri = uri_prefix + '?begin=' + str(unix_yesterday) + '&end=' + str(unix_today) + '&key=' + uri_key_cpu
            ret_cpu = get_https_response_with_json(str_username, str_password, uri)
            # Calculate daily CPU utilization
            daily_cpu_util = calculate_average('cpu', ret_cpu)
            cpu_results[col] = daily_cpu_util
    # Insert CPU information to postgres
    isilon_collector.send_data_to_postgres(data=cpu_results, data_type='cpu')

    # Create performance (bandwidth) table in postgres
    bandwidth_maps = {'clustername': 'varchar',
                      'timestamp': 'varchar',
                      'ext1_rdavg_day': 'double precision', 'ext1_wtavg_day': 'double precision',
                      'ext2_rdavg_day': 'double precision', 'ext2_wtavg_day': 'double precision',
                      'gb1_rdavg_day': 'double precision', 'gb1_wtavg_day': 'double precision',
                      'gb2_rdavg_day': 'double precision', 'gb2_wtavg_day': 'double precision'
                      }
    isilon_collector.create_table(metric='bandwidth', columns=_ddl_columns(bandwidth_maps))
    # Statistics keys positionally matching bandwidth_maps entries 2..9
    # (out.rate = read from the client's view, in.rate = write)
    uri_keys_bandwidth = (
        'node.net.iface.bytes.out.rate.2', 'node.net.iface.bytes.in.rate.2',
        'node.net.iface.bytes.out.rate.3', 'node.net.iface.bytes.in.rate.3',
        'node.net.iface.bytes.out.rate.4', 'node.net.iface.bytes.in.rate.4',
        'node.net.iface.bytes.out.rate.5', 'node.net.iface.bytes.in.rate.5'
    )
    # Get bandwidth information
    bandwidth_results = {}
    for idx, col in enumerate(bandwidth_maps):
        if 'clustername' in col:
            bandwidth_results[col] = isilon_info['name']
        elif 'timestamp' in col:
            bandwidth_results[col] = _timestamp_now()
        else:
            unix_today = int(datetime.datetime.now().timestamp())
            unix_yesterday = unix_today - 86400
            # idx-2 skips the 'clustername'/'timestamp' columns so the
            # remaining columns line up with uri_keys_bandwidth
            uri = uri_prefix + '?begin=' + str(unix_yesterday) + '&end=' + str(unix_today) + '&key=' + uri_keys_bandwidth[idx - 2]
            ret_bandwidth = get_https_response_with_json(str_username, str_password, uri)
            bandwidth_results[col] = calculate_average('bandwidth', ret_bandwidth)
    # Insert bandwidth information to postgres
    isilon_collector.send_data_to_postgres(data=bandwidth_results, data_type='bandwidth')

    # Send end message to rabbitmq
    isilon_collector.send_message('END')
    logger.info(strmark + '_Collector>>> Isilon Collector has done its task...!!')
# Run the collector only when executed as a script (not when imported)
if __name__ == '__main__':
    main()