-
Notifications
You must be signed in to change notification settings - Fork 0
/
gpu.py
37 lines (32 loc) · 1.19 KB
/
gpu.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# GPU_availability returns [least_occupied_GPU, ..., most_occupied_GPU].
# Each element of the list is a GPU index (starting from 0).
# It is ensured that the performance of each GPU in the list is at most P2.
# P0 is the maximum performance, indicating that one GPU is completely occupied.
# Credit to Gaiyu
def parse_gpu_availability(smi_text):
    """Parse raw `nvidia-smi` output into GPU indices, least occupied first.

    smi_text: the full text emitted by `nvidia-smi`.
    Returns a list of GPU indices whose performance state is P2 or higher
    (i.e. more idle), ordered from highest P-state (most idle) to lowest.
    GPUs in P0/P1 (busy) are excluded.
    """
    import itertools
    import re
    # Maps performance-state number -> list of GPU indices in that state.
    performance = {}
    index = 0  # counts GPU entries in order of appearance
    lines = smi_text.split('\n')
    for i in range(len(lines)):
        # NOTE(review): only GeForce GTX cards are matched; other GPU
        # models (RTX, Tesla, ...) are silently skipped — confirm intent.
        if 'GTX' in lines[i]:
            # The performance state (e.g. "P2", "P12") is on the line
            # below the GPU-name line. Use group(1) so two-digit states
            # parse correctly: the old code took only the last character,
            # turning P12 into 2.
            match = re.search(r'P(\d\d?)', lines[i + 1])
            if match is not None:
                p = int(match.group(1))
                if p > 1:
                    # setdefault replaces the old bare `except:` which
                    # could mask unrelated errors.
                    performance.setdefault(p, []).append(index)
            index += 1
    # Higher P-number means more idle, so emit states from highest to lowest.
    return list(itertools.chain(
        *[performance[key] for key in sorted(performance.keys(), reverse=True)]))

def GPU_availability():
    """Return GPU indices ordered least- to most-occupied (see parser above)."""
    from subprocess import Popen, PIPE
    output = Popen(['nvidia-smi'], stdout=PIPE).communicate()[0]
    # communicate() yields bytes on Python 3 (str on Python 2); decode so
    # the text parsing works on both.
    if isinstance(output, bytes):
        output = output.decode('utf-8', 'replace')
    return parse_gpu_availability(output)
# ===================================================================================
import os
def define_gpu(num):
    """Claim the `num` least-occupied GPUs via CUDA_VISIBLE_DEVICES.

    Sets the environment variable for the current process (and children)
    and returns the list of chosen GPU indices. If fewer than `num` GPUs
    are available, only those available are used.
    """
    chosen = GPU_availability()[:num]
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(idx) for idx in chosen)
    return chosen
if __name__ == '__main__':
    # print() as a function with a single argument is valid on both
    # Python 2 and Python 3; the old `print x` statement is Py2-only.
    print(GPU_availability())