From d8ec4ad35f142b570cbd7aa0bc52cf887acb8d19 Mon Sep 17 00:00:00 2001 From: Mike Kroutikov Date: Wed, 24 Aug 2016 08:53:04 -0400 Subject: [PATCH] do not add bias term twice to all_input_sums makes the model a tiny bit smaller with no change in expressive power --- model/LSTMTDNN.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/LSTMTDNN.lua b/model/LSTMTDNN.lua index 6c4f8c3..879c7de 100644 --- a/model/LSTMTDNN.lua +++ b/model/LSTMTDNN.lua @@ -80,7 +80,7 @@ function LSTMTDNN.lstmtdnn(rnn_size, n, dropout, word_vocab_size, word_vec_size, end -- evaluate the input sums at once for efficiency local i2h = nn.Linear(input_size_L, 4 * rnn_size)(x) - local h2h = nn.Linear(rnn_size, 4 * rnn_size)(prev_h) + local h2h = nn.Linear(rnn_size, 4 * rnn_size, false)(prev_h) local all_input_sums = nn.CAddTable()({i2h, h2h}) local sigmoid_chunk = nn.Narrow(2, 1, 3*rnn_size)(all_input_sums)