DualAttnRNN.py (forked from Ceruleanacg/Personae)
# coding=utf-8
import tensorflow as tf
import logging
import os

from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseSLTFModel
from sklearn.preprocessing import MinMaxScaler
from helper.args_parser import model_launcher_parser


class Algorithm(BaseSLTFModel):

    def __init__(self, session, env, seq_length, x_space, y_space, **options):
        super(Algorithm, self).__init__(session, env, **options)

        self.seq_length, self.x_space, self.y_space = seq_length, x_space, y_space

        try:
            self.hidden_size = options['hidden_size']
        except KeyError:
            # Fall back to a single hidden unit when no size is supplied.
            self.hidden_size = 1

        self._init_input()
        self._init_nn()
        self._init_op()
        self._init_saver()
        self._init_summary_writer()

    def _init_input(self):
        # x: a window of seq_length steps with x_space features each; label: the y_space targets.
        self.x = tf.placeholder(tf.float32, [None, self.seq_length, self.x_space])
        self.label = tf.placeholder(tf.float32, [None, self.y_space])

    def _init_nn(self):
        # First attention stage: encode the input sequence, score each encoder
        # output with a dense + tanh layer, and softmax the scores into weights.
        with tf.variable_scope("1st_encoder"):
            self.f_encoder_rnn = self.add_rnn(1, self.hidden_size)
            self.f_encoder_outputs, _ = tf.nn.dynamic_rnn(self.f_encoder_rnn, self.x, dtype=tf.float32)
            self.f_attn_inputs = self.add_fc(self.f_encoder_outputs, self.hidden_size, tf.tanh)
            self.f_attn_outputs = tf.nn.softmax(self.f_attn_inputs)
        with tf.variable_scope("1st_decoder"):
            # Re-weight the encoder outputs with the attention weights, then decode.
            self.f_decoder_input = tf.multiply(self.f_encoder_outputs, self.f_attn_outputs)
            self.f_decoder_rnn = self.add_rnn(1, self.hidden_size)
            self.f_decoder_outputs, _ = tf.nn.dynamic_rnn(self.f_decoder_rnn, self.f_decoder_input, dtype=tf.float32)
        # Second attention stage: repeat the score / re-weight / decode pattern
        # on the first decoder's outputs.
        with tf.variable_scope("2nd_encoder"):
            self.s_attn_input = self.add_fc(self.f_decoder_outputs, self.hidden_size, tf.tanh)
            self.s_attn_outputs = tf.nn.softmax(self.s_attn_input)
        with tf.variable_scope("2nd_decoder"):
            self.s_decoder_input = tf.multiply(self.f_decoder_outputs, self.s_attn_outputs)
            self.s_decoder_rnn = self.add_rnn(1, self.hidden_size)
            # f_decoder_outputs is reused here to hold the second decoder's outputs.
            self.f_decoder_outputs, _ = tf.nn.dynamic_rnn(self.s_decoder_rnn, self.s_decoder_input, dtype=tf.float32)
            # Dense head on the last time step produces the prediction.
            self.f_decoder_outputs_dense = self.add_fc(self.f_decoder_outputs[:, -1], 16)
            self.y = self.add_fc(self.f_decoder_outputs_dense, self.y_space)
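
    # Shape sketch for _init_nn (assuming the base-class helpers add_rnn and
    # add_fc build a hidden_size-unit recurrent cell and a dense layer applied
    # to the last axis; their exact types live in BaseSLTFModel, not here):
    #
    #   x                 -> (batch, seq_length, x_space)
    #   f_encoder_outputs -> (batch, seq_length, hidden_size)
    #   f_attn_outputs    -> (batch, seq_length, hidden_size), softmax over the last axis
    #   f_decoder_input   -> element-wise product of the two tensors above
    #   (the second attention stage repeats the same pattern)
    #   y                 -> (batch, y_space), taken from the last time step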
    def _init_op(self):
        with tf.variable_scope('loss'):
            # Mean squared error between the prediction and the target.
            self.loss = tf.losses.mean_squared_error(self.y, self.label)
        with tf.variable_scope('train'):
            self.global_step = tf.Variable(0, trainable=False)
            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)
        self.session.run(tf.global_variables_initializer())

    def train(self):
        for step in range(self.train_steps):
            batch_x, batch_y = self.env.get_batch_data(self.batch_size)
            _, loss = self.session.run([self.train_op, self.loss], feed_dict={self.x: batch_x, self.label: batch_y})
            # Log progress every 1000 steps and checkpoint periodically when saving is enabled.
            if (step + 1) % 1000 == 0:
                logging.warning("Step: {0} | Loss: {1:.7f}".format(step + 1, loss))
            if step > 0 and (step + 1) % self.save_step == 0:
                if self.enable_saver:
                    self.save(step)

    def predict(self, x):
        return self.session.run(self.y, feed_dict={self.x: x})

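
# Usage sketch (illustrative only): once `env` and `algorithm` are built as in
# main() below, prediction on a batch looks like this; get_batch_data is the
# same helper that train() uses above.
#
#   batch_x, batch_y = env.get_batch_data(algorithm.batch_size)
#   predictions = algorithm.predict(batch_x)  # shape: (batch_size, y_space)
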

def main(args):
    mode = args.mode
    # mode = "test"
    codes = ["600036"]
    # codes = ["600036", "601998"]
    # codes = args.codes
    # codes = ["AU88", "RB88", "CU88", "AL88"]
    market = args.market
    # train_steps = args.train_steps
    train_steps = 30000
    # training_data_ratio = 0.98
    training_data_ratio = args.training_data_ratio

    # Market environment: data for the selected codes, scaled with MinMaxScaler
    # and served as fixed-length sequences (use_sequence=True).
    env = Market(codes, start_date="2008-01-01", end_date="2018-01-01", **{
        "market": market,
        "use_sequence": True,
        "scaler": MinMaxScaler,
        "mix_index_state": True,
        "training_data_ratio": training_data_ratio,
    })

    model_name = os.path.basename(__file__).split('.')[0]

    # Checkpoints and summaries are written under CHECKPOINTS_DIR/SL/<model>/<market>/.
    algorithm = Algorithm(tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{
        "mode": mode,
        "hidden_size": 5,
        "enable_saver": True,
        "train_steps": train_steps,
        "enable_summary_writer": True,
        "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"),
    })

    algorithm.run()
    algorithm.eval_and_plot()


if __name__ == '__main__':
    main(model_launcher_parser.parse_args())
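
# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original model): the attention
# step used twice in _init_nn, written out in NumPy. A dense + tanh layer
# scores every encoder output, the scores are softmaxed over the last
# (feature) axis, and the encoder outputs are re-weighted element-wise before
# being fed to the decoder RNN. `w` and `b` are hypothetical names for the
# dense layer's parameters.
#
#   import numpy as np
#
#   def soft_attention(encoder_outputs, w, b):
#       # encoder_outputs: (batch, seq_length, hidden); w: (hidden, hidden); b: (hidden,)
#       scores = np.tanh(encoder_outputs @ w + b)
#       weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
#       return encoder_outputs * weights  # decoder input, same shape as encoder_outputs
# ---------------------------------------------------------------------------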