eval_edge.py
import os
import glob
import time
import sys
import pickle
import numpy as np
from datetime import datetime
from parameters import configs
from environment.env import *
from instance_generator import one_instance_gen

###
# Reward TEST: run the environment while every task is placed on the best device in terms of latency
###
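
# Usage note (a sketch; the exact parameter names live in parameters.py and are
# assumptions beyond what this file shows): the script is meant to be run directly,
#   python eval_edge.py
# and, when configs.record_ppo / configs.record_alloc are enabled, it pickles the
# per-episode reward log and the final task-to-device allocations under logs/.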

def main():
    np.random.seed(configs.np_seed_train)
    number_all_device_features = len(configs.feature_labels)  # TODO fix
    envs = [SPP(number_jobs=configs.n_jobs,
                number_devices=configs.n_devices,
                number_features=number_all_device_features) for _ in range(configs.num_envs)]
    # evaluation loop
    log = []
    for i_update in range(configs.max_updates):
        ep_rewards = np.zeros(configs.num_envs)
        init_rewards = np.zeros(configs.num_envs)
        candidate_envs = []
        mask_envs = []
        logAlloc = []
        machine = []

        # Init all the environments
        for i, env in enumerate(envs):
            _, _, candidate, mask = env.reset(*one_instance_gen(n_jobs=configs.n_jobs,
                                                                n_devices=configs.n_devices,
                                                                cloud_features=configs.cloud_features,
                                                                dependency_degree=configs.DAG_rand_dependencies_factor))
            candidate_envs.append(candidate)
            mask_envs.append(mask)
            ep_rewards[i] = -env.initQuality
            # print("\tR%i: %f " % (0, ep_rewards[i]))
            init_rewards[i] = -env.initQuality
            # Alternative baseline: machine.append(env.selectDevicePriorizingCost())
            machine.append(env.selectDevicePriorizingLat())
        print(machine)
        steps = 0
        while True:
            action_envs = []
            steps += 1
            for i in range(configs.num_envs):
                # V0. Select a random task among the unmasked candidates ("friendly" version);
                # the device is the latency-prioritizing one chosen at reset.
                ix_job = np.random.choice(len(candidate_envs[i][~mask_envs[i]]))
                candidate_task = candidate_envs[i][~mask_envs[i]][ix_job]
                device = machine[i]
                # V1. First any action without the candidate set, then applying the candidates
                # (as in the ppo_train.py model).
                ### DOES NOT WORK - the candidate mask has to be applied
                # action = np.random.randint(0, envs[i].action_dim)
                action_envs.append((candidate_task, device))
            candidate_envs = []
            mask_envs = []
            # Saving episode data
            for i in range(configs.num_envs):
                # print("Task:", action_envs[i][0])
                _, _, reward, done, candidate, mask = envs[i].step(task=action_envs[i][0],
                                                                   device=action_envs[i][1])
                candidate_envs.append(candidate)
                mask_envs.append(mask)
                # print("MASK ", mask)
                # print("DONE ", done)
                ep_rewards[i] += reward
                # print("\tR%i\t %f \t %f \t %f \t %f" % (steps, reward, envs[i].max_endTime, ep_rewards[i], np.sum(envs[i].LBs)))
            if envs[0].done():  # all environments are DONE (same number of tasks)
                assert steps == envs[0].step_count
                break
        if i_update in configs.record_alloc_episodes:
            # print("Final placement: ", i_update)
            # print(" -" * 30)
            for i in range(configs.num_envs):  # Makespan
                # print(i, envs[i].opIDsOnMchs, envs[i].feat_copy[envs[i].opIDsOnMchs][:, 0], envs[i].feat_copy[envs[i].opIDsOnMchs][:, 2])
                logAlloc.append([i,
                                 envs[i].opIDsOnMchs.tolist(),
                                 envs[i].feat_copy[envs[i].opIDsOnMchs][:, 0].tolist(),
                                 envs[i].feat_copy[envs[i].opIDsOnMchs][:, 2].tolist()])

        for j in range(configs.num_envs):  # Get the last one
            ep_rewards[j] -= envs[j].posRewards  # same actions/states as in the initial maximum-goal state
        # for i in range(configs.num_envs):
        #     print("%i ENV allocations: %i " % (i_update + 1, i))
        #     print(envs[i].opIDsOnMchs)
        #     print(envs[i].feat_copy)

        # ep_rewards represents the computational and network time of the current allocation
        mean_rewards_all_env = ep_rewards.mean()  # mean of the computation-network time
        mean_all_init_rewards = init_rewards.mean()
        log.append([i_update, mean_rewards_all_env, mean_all_init_rewards])
        print('Episode {}\t Last reward: {:.2f} \t Init reward: {:.2f}'.format(i_update + 1, mean_rewards_all_env, mean_all_init_rewards))

        if configs.record_ppo:
            with open('logs/log_edge2_' + str(configs.name) + "_" + str(configs.n_jobs) + '_' + str(configs.n_devices) + '.pkl', 'wb') as f:
                pickle.dump(log, f)
        if configs.record_alloc:
            with open('logs/log_edge_alloc_PCost' + str(configs.name) + "_" + str(configs.n_jobs) + '_' + str(configs.n_devices) + '.pkl', 'wb') as f:
                pickle.dump(logAlloc, f)


if __name__ == '__main__':
    print("Edge-strategy test: using default parameters")
    start_time = datetime.now().replace(microsecond=0)
    print("Started evaluation: ", start_time)
    print("=" * 30)
    main()
    end_time = datetime.now().replace(microsecond=0)
    print("Finished evaluation: ", end_time)
    print("Total time: ", (end_time - start_time))
    print("Done.")