-
Notifications
You must be signed in to change notification settings - Fork 0
/
oracle_layer.py
executable file
·467 lines (329 loc) · 23 KB
/
oracle_layer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
import numpy as np
from experience_buffer import ExperienceBuffer
import torch
from collections import defaultdict
from utils import oracle_action
class OracleLayer():
    """One level of a hierarchical agent that acts via a hard-coded oracle
    policy (utils.oracle_action) instead of learned actor/critic networks.

    Mirrors the interface of a learned layer (train/learn/choose_action/...)
    so it can be dropped into the same agent training loop.
    """

    def __init__(self, layer_number, FLAGS, env, sess, agent_params):
        # Index of this layer in the hierarchy (0 = lowest, atomic actions).
        self.layer_number = layer_number
        self.relative_subgoals = False
        self.FLAGS = FLAGS
        # NOTE(review): `sess` is later used as a torch device target in
        # train() (`torch.from_numpy(action).to(self.sess, ...)`) — confirm
        # it holds a torch.device rather than a TF session.
        self.sess = sess

        # Set time limit for each layer. If agent uses only 1 layer, time limit
        # is the max number of low-level actions allowed in the episode
        # (i.e., env.max_actions).
        if FLAGS.layers > 1:
            self.time_limit = FLAGS.time_scale
        else:
            self.time_limit = env.max_actions

        self.current_state = None
        self.goal = None

        # Initialize Replay Buffer. Below variables determine size of replay buffer.
        # Ceiling on buffer size
        self.buffer_size_ceiling = 10**7
        # Number of full episodes stored in replay buffer
        self.episodes_to_store = agent_params["episodes_to_store"]
        # Set number of transitions to serve as replay goals during goal replay
        self.num_replay_goals = 4

        # Number of the transitions created for each attempt
        # (i.e., action replay + goal replay + subgoal testing)
        if self.layer_number == 0:
            self.trans_per_attempt = (1 + self.num_replay_goals) * self.time_limit
        else:
            self.trans_per_attempt = (1 + self.num_replay_goals) * self.time_limit + int(self.time_limit/3)

        # Buffer size = transitions per attempt * # attempts per episode * num of episodes stored
        self.buffer_size = min(self.trans_per_attempt * self.time_limit**(self.FLAGS.layers-1 - self.layer_number) * self.episodes_to_store, self.buffer_size_ceiling)
        # self.buffer_size = 10000000
        self.batch_size = 1024
        # NOTE(review): the replay buffer is never instantiated here, yet
        # perform_action_replay / finalize_goal_replay / penalize_subgoal all
        # call self.replay_buffer.add(...). Those calls are only reachable
        # from code that is commented out in train() — confirm before
        # re-enabling any of it.
        # self.replay_buffer = ExperienceBuffer(self.buffer_size, self.batch_size)

        # Create buffer to store not yet finalized goal replay transitions
        self.temp_goal_replay_storage = []

        # Initialize actor and critic networks
        # self.actor = Actor(sess, env, self.batch_size, self.layer_number, FLAGS)
        # self.critic = Critic(sess, env, self.layer_number, FLAGS)

        # Parameter determines degree of noise added to actions during training
        # self.noise_perc = noise_perc
        if self.layer_number == 0:
            self.noise_perc = agent_params["atomic_noise"]
        else:
            self.noise_perc = agent_params["oracle_noise"]

        # Create flag to indicate when layer has ran out of attempts to achieve
        # goal. This will be important for subgoal testing
        self.maxed_out = False

        self.subgoal_penalty = agent_params["subgoal_penalty"]
        # Per-episode diagnostic metrics, keyed by metric name.
        self.agg_metrics = defaultdict(list)
# Add noise to provided action
def add_noise(self,action, env):
# Noise added will be percentage of range
action_bounds = env.subgoal_bounds_symmetric[:env.end_goal_dim]
action_offset = env.subgoal_bounds_offset[:env.end_goal_dim]
assert len(action) == len(action_bounds), ("Action bounds must have same dimension as action", len(action), len(action_bounds))
assert len(action) == len(self.noise_perc), "Noise percentage vector must have same dimension as action"
# Add noise to action and ensure remains within bounds
for i in range(len(action)):
action[i] += np.random.normal(0,self.noise_perc[i] * action_bounds[i])
action[i] = max(min(action[i], action_bounds[i]+action_offset[i]), -action_bounds[i]+action_offset[i])
return action
# Select random action
def get_random_action(self, env):
action = np.zeros((env.end_goal_dim))
# Each dimension of random action should take some value in the dimension's range
for i in range(len(action)):
if self.layer_number == 0:
action[i] = np.random.uniform(-env.action_bounds[i] + env.action_offset[i], env.action_bounds[i] + env.action_offset[i])
else:
action[i] = np.random.uniform(env.subgoal_bounds[i][0],env.subgoal_bounds[i][1])
return action
# Function selects action using an epsilon-greedy policy
def choose_action(self,agent, env, subgoal_test):
# If testing mode or testing subgoals, action is output of actor network without noise
if agent.FLAGS.test or subgoal_test:
return oracle_action(self.FLAGS, self.current_state.cpu().numpy(), self.goal.cpu().numpy(), env), "Policy", subgoal_test
else:
if np.random.random_sample() > 0.2:
# Choose noisy action
action = self.add_noise(oracle_action(self.FLAGS, self.current_state.cpu().numpy(), self.goal.cpu().numpy(), env), env)
action_type = "Noisy Policy"
# Otherwise, choose random action
else:
action = self.get_random_action(env)
action_type = "Random"
# Determine whether to test upcoming subgoal
if np.random.random_sample() < agent.subgoal_test_perc:
next_subgoal_test = True
else:
next_subgoal_test = False
return action, action_type, next_subgoal_test
# Create action replay transition by evaluating hindsight action given original goal
def perform_action_replay(self, hindsight_action, next_state, goal_status):
# Determine reward (0 if goal achieved, -1 otherwise) and finished boolean. The finished boolean is used for determining the target for Q-value updates
if goal_status[self.layer_number]:
reward = 0
finished = True
else:
reward = -1
finished = False
# Transition will take the form [old state, hindsight_action, reward, next_state, goal, terminate boolean, None]
transition = [self.current_state, hindsight_action, reward, next_state, self.goal, finished, None]
if self.FLAGS.all_trans or self.FLAGS.hind_action:
print("\nLevel %d Hindsight Action: " % self.layer_number, transition)
# Add action replay transition to layer's replay buffer
self.replay_buffer.add(np.copy(transition))
# Create initial goal replay transitions
def create_prelim_goal_replay_trans(self, hindsight_action, next_state, env, total_layers):
# Create transition evaluating hindsight action for some goal to be determined in future. Goal will be ultimately be selected from states layer has traversed through. Transition will be in the form [old state, hindsight action, reward = None, next state, goal = None, finished = None, next state projeted to subgoal/end goal space]
if self.layer_number == total_layers - 1:
hindsight_goal = env.project_state_to_end_goal(env.sim, next_state)
else:
hindsight_goal = env.project_state_to_subgoal(env.sim, next_state)
transition = [self.current_state, hindsight_action, None, next_state, None, None, hindsight_goal]
if self.FLAGS.all_trans or self.FLAGS.prelim_HER:
print("\nLevel %d Prelim HER: " % self.layer_number, transition)
self.temp_goal_replay_storage.append(np.copy(transition))
"""
# Designer can create some additional goal replay transitions. For instance, higher level transitions can be replayed with the subgoal achieved in hindsight as the original goal.
if self.layer_number > 0:
transition_b = [self.current_state, hindsight_action, 0, next_state, hindsight_goal, True, None]
# print("\nGoal Replay B: ", transition_b)
self.replay_buffer.add(np.copy(transition_b))
"""
# Return reward given provided goal and goal achieved in hindsight
def get_reward(self,new_goal, hindsight_goal, goal_thresholds):
assert len(new_goal) == len(hindsight_goal) == len(goal_thresholds), "Goal, hindsight goal, and goal thresholds do not have same dimensions"
# If the difference in any dimension is greater than threshold, goal not achieved
for i in range(len(new_goal)):
if np.absolute(new_goal[i]-hindsight_goal[i]) > goal_thresholds[i]:
return -1
# Else goal is achieved
return 0
    # Finalize goal replay by filling in goal, reward, and finished boolean for
    # the preliminary goal replay transitions created before.
    # For each buffered transition, samples num_replay_goals hindsight goals
    # from transitions at or after it (the last sample is always the episode's
    # final transition), relabels goal/reward/finished, and pushes the
    # completed transitions to the replay buffer.
    def finalize_goal_replay(self,goal_thresholds):
        # Choose transitions to serve as goals during goal replay. The last transition will always be used
        num_trans = len(self.temp_goal_replay_storage)
        num_replay_goals = self.num_replay_goals
        # If fewer transitions that ordinary number of replay goals, lower number of replay goals
        if num_trans < self.num_replay_goals:
            num_replay_goals = num_trans
        if self.FLAGS.all_trans or self.FLAGS.HER:
            print("\n\nPerforming Goal Replay for Level %d\n\n" % self.layer_number)
            print("Num Trans: ", num_trans, ", Num Replay Goals: ", num_replay_goals)
        # NOTE(review): `indices` is only used by the debug print below; the
        # replay loop re-samples `future_index` independently per transition
        # rather than using these indices.
        indices = np.zeros((num_replay_goals))
        indices[:num_replay_goals-1] = np.random.randint(num_trans,size=num_replay_goals-1)
        indices[num_replay_goals-1] = num_trans - 1
        indices = np.sort(indices)
        if self.FLAGS.all_trans or self.FLAGS.HER:
            print("Selected Indices: ", indices)
        # For each selected transition, update the goal dimension of the selected transition and all prior transitions by using the next state of the selected transition as the new goal. Given new goal, update the reward and finished boolean as well.
        for index in range(num_trans):
            # trans_copy = np.copy(self.temp_goal_replay_storage)
            # if self.FLAGS.all_trans or self.FLAGS.HER:
            #     print("GR Iteration: %d, Index %d" % (i, indices[i]))
            # new_goal = trans_copy[int(indices[i])][6]
            # for index in range(int(indices[i])+1):
            for i in range(num_replay_goals):
                # HER "future" strategy: last replay goal is always the final
                # transition; the rest are drawn uniformly from [index, num_trans).
                if i == num_replay_goals -1:
                    future_index = num_trans-1
                else:
                    future_index = np.random.randint(index, num_trans)
                new_goal = np.copy(self.temp_goal_replay_storage[future_index][6])
                trans_copy = np.copy(self.temp_goal_replay_storage[index])
                # Update goal to new goal
                trans_copy[4] = new_goal
                # Update reward
                trans_copy[2] = self.get_reward(new_goal, trans_copy[6], goal_thresholds)
                # Update finished boolean based on reward
                if trans_copy[2] == 0:
                    trans_copy[5] = True
                else:
                    trans_copy[5] = False
                # Add finished transition to replay buffer
                if self.FLAGS.all_trans or self.FLAGS.HER:
                    print("\nNew Goal: ", new_goal)
                    print("Upd Trans %d: " % index, trans_copy)
                self.replay_buffer.add(trans_copy)
        # Clear storage for preliminary goal replay transitions at end of goal replay
        self.temp_goal_replay_storage = []
# Create transition penalizing subgoal if necessary. The target Q-value when this transition is used will ignore next state as the finished boolena = True. Change the finished boolean to False, if you would like the subgoal penalty to depend on the next state.
def penalize_subgoal(self, subgoal, next_state, high_level_goal_achieved):
transition = [self.current_state, subgoal, self.subgoal_penalty, next_state, self.goal, True, None]
if self.FLAGS.all_trans or self.FLAGS.penalty:
print("Level %d Penalty Trans: " % self.layer_number, transition)
self.replay_buffer.add(np.copy(transition))
# Determine whether layer is finished training
def return_to_higher_level(self, max_lay_achieved, agent, env, attempts_made):
# Return to higher level if (i) a higher level goal has been reached, (ii) maxed out episode time steps (env.max_actions), (iii) not testing and layer is out of attempts, and (iv) testing, layer is not the highest level, and layer is out of attempts. NOTE: during testing, highest level will continue to ouput subgoals until either (i) the maximum number of episdoe time steps or (ii) the end goal has been achieved.
# Return to previous level when any higher level goal achieved. NOTE: if not testing and agent achieves end goal, training will continue until out of time (i.e., out of time steps or highest level runs out of attempts). This will allow agent to experience being around the end goal.
if max_lay_achieved is not None and max_lay_achieved >= self.layer_number:
return True
# Return when out of time
elif agent.steps_taken >= env.max_actions:
return True
# Return when layer has maxed out attempts
elif not agent.FLAGS.test and attempts_made >= self.time_limit:
return True
# NOTE: During testing, agent will have env.max_action attempts to achieve goal
elif agent.FLAGS.test and self.layer_number < agent.FLAGS.layers-1 and attempts_made >= self.time_limit:
return True
else:
return False
    # Learn to achieve goals with actions belonging to appropriate time scale.
    # "goal_array" contains the goal states for the current layer and all
    # higher layers. Runs this layer for up to self.time_limit attempts,
    # delegating each chosen subgoal to the layer below, and returns
    # (goal_status, max_lay_achieved) from the final delegation.
    # NOTE(review): this method always recurses into
    # agent.layers[self.layer_number - 1], so OracleLayer appears to be
    # intended only for layers >= 1 — confirm against the agent construction.
    def train(self, agent, env, metrics, subgoal_test = False, episode_num = None):
        # print("\nTraining Layer %d" % self.layer_number)
        # Set layer's current state and new goal state
        self.goal = agent.goal_array[self.layer_number]
        self.current_state = agent.current_state
        # Reset flag indicating whether layer has ran out of attempts. This will be used for subgoal testing.
        self.maxed_out = False
        # Display all subgoals if visualizing training and current layer is bottom layer
        if self.layer_number == 0 and agent.FLAGS.show and agent.FLAGS.layers > 1:
            env.display_subgoals([arr.cpu().numpy() for arr in agent.goal_array], agent.FLAGS)
            # env.sim.data.mocap_pos[3] = env.project_state_to_end_goal(env.sim,self.current_state)
            # print("Subgoal Pos: ", env.sim.data.mocap_pos[1])
        # Current layer has self.time_limit attempts to each its goal state.
        attempts_made = 0
        while True:
            # Select action to achieve goal state using epsilon-greedy policy or greedy policy if in test mode
            action, action_type, next_subgoal_test = self.choose_action(agent, env, subgoal_test)
            # Diagnostic: distance of the proposed subgoal from the current
            # position (first two state dimensions).
            self.agg_metrics['subgoal_distances'].append(np.linalg.norm(action[:2] if self.FLAGS.relative_subgoals else (action[:2] - self.current_state[:2])))
            # lower_action = agent.layers[self.layer_number-1].actor.get_action(np.reshape(self.current_state,(1,len(self.current_state))), np.reshape(np.copy(action),(1,len(action))))[0]
            # lower_Q = agent.layers[self.layer_number-1].critic.get_Q_value(np.reshape(self.current_state,(1,len(self.current_state))), np.reshape(np.copy(action),(1,len(action))), np.reshape(lower_action,(1,len(lower_action))))
            # self.agg_metrics['lower_Q_val'].append(lower_Q)
            # NOTE(review): self.critic is never created for the oracle layer,
            # so this debug path would raise AttributeError if FLAGS.Q_values
            # is set — confirm the flag is never enabled with oracle layers.
            if self.FLAGS.Q_values:
                # print("\nLayer %d Action: " % self.layer_number, action)
                print("Layer %d Q-Value: " % self.layer_number, self.critic.get_Q_value(np.reshape(self.current_state,(1,len(self.current_state))), np.reshape(self.goal,(1,len(self.goal))), np.reshape(action,(1,len(action)))))
                if self.layer_number == 2:
                    test_action = np.copy(action)
                    test_action[:3] = self.goal
                    print("Layer %d Goal Q-Value: " % self.layer_number, self.critic.get_Q_value(np.reshape(self.current_state,(1,len(self.current_state))), np.reshape(self.goal,(1,len(self.goal))), np.reshape(test_action,(1,len(test_action)))))
            # Hand the chosen subgoal down as the next layer's goal, then run
            # the layer below to completion.
            agent.goal_array[self.layer_number - 1] = torch.from_numpy(action).to(self.sess, dtype=torch.float32)
            goal_status, max_lay_achieved = agent.layers[self.layer_number - 1].train(agent, env, metrics, next_subgoal_test, episode_num)
            attempts_made += 1
            # Print if goal from current layer as been achieved.
            # NOTE(review): "and False" makes this debug branch unreachable.
            if goal_status[self.layer_number] and False:
                if self.layer_number < agent.FLAGS.layers - 1:
                    print("SUBGOAL ACHIEVED")
                print("\nEpisode %d, Layer %d, Attempt %d Goal Achieved" % (episode_num, self.layer_number, attempts_made))
                print("Goal: ", self.goal)
                if self.layer_number == agent.FLAGS.layers - 1:
                    print("Hindsight Goal: ", env.project_state_to_end_goal(env.sim, agent.current_state))
                else:
                    print("Hindsight Goal: ", env.project_state_to_subgoal(env.sim, agent.current_state))
            # Perform hindsight learning using action actually executed (low-level action or hindsight subgoal)
            if self.layer_number == 0:
                hindsight_action = action
            else:
                # If subgoal action was achieved by layer below, use this as hindsight action
                if goal_status[self.layer_number-1]:
                    hindsight_action = action
                # Otherwise, use subgoal that was achieved in hindsight
                else:
                    hindsight_action = env.project_state_to_subgoal(None, agent.current_state)
            # # Next, create hindsight transitions if not testing
            # if not agent.FLAGS.test:
            #     # Create action replay transition by evaluating hindsight action given current goal
            #     self.perform_action_replay(hindsight_action, agent.current_state, goal_status)
            #     # Create preliminary goal replay transitions. The goal and reward in these transitions will be finalized when this layer has run out of attempts or the goal has been achieved.
            #     self.create_prelim_goal_replay_trans(hindsight_action, agent.current_state, env, agent.FLAGS.layers)
            #     # Penalize subgoals if subgoal testing and subgoal was missed by lower layers after maximum number of attempts
            #     if self.layer_number > 0 and next_subgoal_test and agent.layers[self.layer_number-1].maxed_out:
            #         self.penalize_subgoal(action, agent.current_state, goal_status[self.layer_number])
            # Print summary of transition
            if agent.FLAGS.verbose:
                print("\nEpisode %d, Level %d, Attempt %d" % (episode_num, self.layer_number,attempts_made))
                # print("Goal Array: ", agent.goal_array, "Max Lay Achieved: ", max_lay_achieved)
                print("Old State: ", self.current_state)
                print("Hindsight Action: ", hindsight_action)
                print("Original Action: ", action)
                print("Next State: ", agent.current_state)
                print("Goal: ", self.goal)
                if self.layer_number == agent.FLAGS.layers - 1:
                    print("Hindsight Goal: ", env.project_state_to_end_goal(env.sim, agent.current_state))
                else:
                    print("Hindsight Goal: ", env.project_state_to_subgoal(env.sim, agent.current_state))
                print("Goal Status: ", goal_status, "\n")
                print("All Goals: ", agent.goal_array)
            # Update state of current layer
            self.current_state = agent.current_state
            if self.FLAGS.ddl and self.layer_number == self.FLAGS.layers-1:
                agent.DDL.add_to_path(env.project_state_to_end_goal(None, agent.current_state))
            # Return to previous level to receive next subgoal if applicable
            # if self.return_to_higher_level(max_lay_achieved, agent, env, attempts_made):
            if (max_lay_achieved is not None and max_lay_achieved >= self.layer_number) or agent.steps_taken >= env.max_actions or attempts_made >= self.time_limit:
                # If goal was not achieved after max number of attempts, set maxed out flag to true
                if attempts_made >= self.time_limit and not goal_status[self.layer_number]:
                    self.maxed_out = True
                    # print("Layer %d Out of Attempts" % self.layer_number)
                # If not testing, finish goal replay by filling in missing goal and reward values before returning to prior level.
                # if not agent.FLAGS.test:
                #     if self.layer_number == agent.FLAGS.layers - 1:
                #         goal_thresholds = env.end_goal_thresholds
                #     else:
                #         goal_thresholds = env.subgoal_thresholds
                #     self.finalize_goal_replay(goal_thresholds)
                # Under certain circumstances, the highest layer will not seek a new end goal
                if self.return_to_higher_level(max_lay_achieved, agent, env, attempts_made):
                    if self.layer_number == agent.FLAGS.layers-1 and agent.FLAGS.test:
                        print("HL Attempts Made: ", attempts_made)
                    return goal_status, max_lay_achieved
# Update actor and critic networks
def learn(self, env, agent, num_updates, metrics):
pass
# if self.FLAGS.no_target_net:
# for _ in range(num_updates):
# # Update weights of non-target networks
# # if self.replay_buffer.size >= self.batch_size:
# if self.replay_buffer.size > 250:
# old_states, actions, rewards, new_states, goals, is_terminals = self.replay_buffer.get_batch()
# next_batch_size = min(self.replay_buffer.size, self.replay_buffer.batch_size)
# self.critic.update(old_states, actions, rewards, new_states, goals, self.actor.get_action(new_states,goals), is_terminals, metrics)
# action_derivs = self.critic.get_gradients_for_actions(old_states, goals, self.actor.get_action(old_states, goals))
# self.actor.update(old_states, goals, action_derivs, next_batch_size, metrics)
# else:
# # To use target networks comment for loop above and uncomment for loop below
# for _ in range(num_updates):
# # Update weights of non-target networks
# if self.replay_buffer.size >= 250:
# old_states, actions, rewards, new_states, goals, is_terminals = self.replay_buffer.get_batch()
# next_batch_size = min(self.replay_buffer.size, self.replay_buffer.batch_size)
# self.critic.update(old_states, actions, rewards, new_states, goals, self.actor.get_target_action(new_states,goals), is_terminals, metrics)
# action_derivs = self.critic.get_gradients_for_actions(old_states, goals, self.actor.get_action(old_states, goals))
# self.actor.update(old_states, goals, action_derivs, next_batch_size, metrics)
# # Update weights of target networks
# self.sess.run(self.critic.update_target_weights)
# self.sess.run(self.actor.update_target_weights)