nn.py (forked from jangirrishabh/toyCarIRL)
"""
The design of this comes from here:
http://outlace.com/Reinforcement-Learning-Part-3/
"""
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.optimizers import RMSprop
from keras.callbacks import Callback


class LossHistory(Callback):
    """Keras callback that records the loss after every training batch."""

    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
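
# A minimal usage sketch for LossHistory (assumptions: `model` is a compiled
# Keras model, and X, y are training arrays supplied by the caller):
#
#     history = LossHistory()
#     model.fit(X, y, batch_size=64, epochs=1, callbacks=[history])
#     print(history.losses)  # one loss value per batch
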
def neural_net(num_sensors, params, load=''):
    """Build a feed-forward Q-network: two hidden ReLU layers, linear output."""
    model = Sequential()

    # First hidden layer.
    model.add(Dense(
        params[0], kernel_initializer='lecun_uniform',
        input_shape=(num_sensors,)
    ))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Second hidden layer.
    model.add(Dense(params[1], kernel_initializer='lecun_uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    # Output layer: one linear Q-value per action (3 actions).
    model.add(Dense(3, kernel_initializer='lecun_uniform'))
    model.add(Activation('linear'))

    rms = RMSprop()
    model.compile(loss='mse', optimizer=rms)

    # Optionally resume from saved weights.
    if load:
        model.load_weights(load)

    return model
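
# A minimal sketch of building and reloading the feed-forward net; the sensor
# count, layer sizes, and weights path below are illustrative assumptions, not
# values taken from the original repo:
#
#     model = neural_net(num_sensors=3, params=[164, 150])
#     model.save_weights('example-weights.h5')
#     model = neural_net(3, [164, 150], load='example-weights.h5')
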
def lstm_net(num_sensors, load=False):
    """Build a recurrent Q-network: two stacked LSTMs, linear output."""
    model = Sequential()
    model.add(LSTM(
        512, input_shape=(None, num_sensors), return_sequences=True
    ))
    model.add(Dropout(0.2))
    model.add(LSTM(512, return_sequences=False))
    model.add(Dropout(0.2))
    # Output layer: one linear Q-value per action (3 actions).
    model.add(Dense(3))
    model.add(Activation("linear"))
    model.compile(loss="mean_squared_error", optimizer="rmsprop")

    # Optionally resume from saved weights, mirroring neural_net above.
    if load:
        model.load_weights(load)

    return model
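
# lstm_net expects 3-D input shaped (samples, timesteps, num_sensors). The
# demo below is a minimal sketch using random data purely for illustration;
# the sizes are assumptions, not values from the original repo.
if __name__ == '__main__':
    import numpy as np

    net = lstm_net(num_sensors=3)
    X = np.random.random((8, 10, 3))  # 8 sequences, 10 timesteps, 3 sensors
    q_values = net.predict(X)         # shape (8, 3): one Q-value per action
    print(q_values.shape)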