From d7d7d35dcf65fb09729a5ef2ba8fa9c09b3fe0c0 Mon Sep 17 00:00:00 2001 From: GitHub Action <52708150+marcpinet@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:26:15 +0100 Subject: [PATCH 1/5] feat: add lstm and rnn support --- .../sentiment_analysis.ipynb | 97 ++-- neuralnetlib/layers.py | 539 ++++++++++++++++-- neuralnetlib/model.py | 26 +- 3 files changed, 574 insertions(+), 88 deletions(-) diff --git a/examples/classification-regression/sentiment_analysis.ipynb b/examples/classification-regression/sentiment_analysis.ipynb index 3922dee..a6cd6b7 100644 --- a/examples/classification-regression/sentiment_analysis.ipynb +++ b/examples/classification-regression/sentiment_analysis.ipynb @@ -21,8 +21,8 @@ "execution_count": 1, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:10:57.538645900Z", - "start_time": "2024-09-22T23:10:55.233016Z" + "end_time": "2024-11-06T16:45:12.192249300Z", + "start_time": "2024-11-06T16:45:03.226068300Z" } }, "outputs": [], @@ -31,8 +31,9 @@ "import pandas as pd\n", "\n", "from neuralnetlib.model import Model\n", - "from neuralnetlib.layers import Input, Dense, Embedding, Flatten\n", + "from neuralnetlib.layers import Input, Dense, Embedding, LSTM, Bidirectional, Dropout\n", "from neuralnetlib.preprocessing import Tokenizer, pad_sequences, CountVectorizer\n", + "from neuralnetlib.optimizers import Adam\n", "from neuralnetlib.metrics import accuracy_score\n", "from neuralnetlib.utils import train_test_split\n", "\n", @@ -48,11 +49,11 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:13:42.739941500Z", - "start_time": "2024-09-22T23:13:41.184859600Z" + "end_time": "2024-11-06T16:45:13.728513500Z", + "start_time": "2024-11-06T16:45:12.196249Z" } }, "outputs": [], @@ -69,11 +70,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:13:43.449172100Z", - "start_time": "2024-09-22T23:13:43.200238700Z" + "end_time": "2024-11-06T16:45:13.979364300Z", + "start_time": "2024-11-06T16:45:13.715497900Z" } }, "outputs": [ @@ -122,7 +123,6 @@ "max_words = 10000\n", "max_len = 200\n", "\n", - "tokenizer = Tokenizer(num_words=max_words)\n", "x_train = pad_sequences(x_train, max_length=max_len)\n", "x_test = pad_sequences(x_test, max_length=max_len)\n", "\n", @@ -148,20 +148,19 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:13:48.701766500Z", - "start_time": "2024-09-22T23:13:48.692765600Z" + "end_time": "2024-11-06T17:25:37.243193300Z", + "start_time": "2024-11-06T17:25:37.228686400Z" } }, "outputs": [], "source": [ "model = Model()\n", - "model.add(Input(input_shape=(max_len,)))\n", - "model.add(Embedding(max_words, 50, input_length=max_len))\n", - "model.add(Flatten())\n", - "model.add(Dense(10, activation='relu'))\n", + "model.add(Input(max_len))\n", + "model.add(Embedding(max_words, 100, weights_init='xavier'))\n", + "model.add(Bidirectional(LSTM(32, return_sequences=True)))\n", "model.add(Dense(1, activation='sigmoid'))" ] }, @@ -174,11 +173,11 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:13:50.151043500Z", - "start_time": "2024-09-22T23:13:50.140043900Z" + "end_time": "2024-11-06T16:45:14.039651700Z", + "start_time": "2024-11-06T16:45:13.995383300Z" } }, "outputs": [ @@ -189,21 +188,19 @@ "Model\n", 
"-------------------------------------------------\n", "Layer 1: Input(input_shape=(200,))\n", - "Layer 2: Embedding(input_dim=10000, output_dim=50, input_length=200)\n", - "Layer 3: Flatten\n", - "Layer 4: Dense(units=10)\n", - "Layer 5: Activation(ReLU)\n", - "Layer 6: Dense(units=1)\n", - "Layer 7: Activation(Sigmoid)\n", + "Layer 2: \n", + "Layer 3: \n", + "Layer 4: Dense(units=1)\n", + "Layer 5: Activation(Sigmoid)\n", "-------------------------------------------------\n", "Loss function: BinaryCrossentropy\n", - "Optimizer: Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", + "Optimizer: Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", "-------------------------------------------------\n" ] } ], "source": [ - "model.compile(optimizer='adam', loss_function='binary_crossentropy')\n", + "model.compile(optimizer=Adam(learning_rate=0.0001), loss_function='binary_crossentropy')\n", "\n", "model.summary()" ] @@ -217,11 +214,11 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 6, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:15:06.090102500Z", - "start_time": "2024-09-22T23:13:52.475373900Z" + "end_time": "2024-11-06T17:07:31.271424100Z", + "start_time": "2024-11-06T16:45:14.010615500Z" } }, "outputs": [ @@ -229,17 +226,25 @@ "name": "stdout", "output_type": "stream", "text": [ - "[==============================] 100% Epoch 1/10 - loss: 0.6922 - accuracy: 0.5208 - 7.01s - val_accuracy: 0.5466\n", - "[==============================] 100% Epoch 2/10 - loss: 0.6494 - accuracy: 0.6512 - 7.02s - val_accuracy: 0.5763\n", - "[==============================] 100% Epoch 3/10 - loss: 0.5619 - accuracy: 0.7295 - 6.99s - val_accuracy: 0.5831\n", - "[==============================] 100% Epoch 4/10 - loss: 0.4977 - accuracy: 0.7723 - 6.97s - val_accuracy: 0.5838\n", - "[==============================] 100% Epoch 5/10 - loss: 0.4506 - accuracy: 0.7991 - 7.05s - val_accuracy: 0.5842\n", - "[==============================] 100% Epoch 6/10 - loss: 0.4123 - accuracy: 0.8224 - 6.98s - val_accuracy: 0.5840\n", - "[==============================] 100% Epoch 7/10 - loss: 0.3792 - accuracy: 0.8418 - 7.01s - val_accuracy: 0.5838\n", - "[==============================] 100% Epoch 8/10 - loss: 0.3495 - accuracy: 0.8586 - 7.06s - val_accuracy: 0.5818\n", - "[==============================] 100% Epoch 9/10 - loss: 0.3219 - accuracy: 0.8752 - 6.99s - val_accuracy: 0.5793\n", - "[==============================] 100% Epoch 10/10 - loss: 0.2963 - accuracy: 0.8907 - 6.98s - val_accuracy: 0.5761\n" + "[==============================] 100% Epoch 1/10 - loss: 0.6769 - accuracy: 0.6458 - 101.45s - val_accuracy: 0.7067\n", + "[==============================] 100% Epoch 2/10 - loss: 0.6020 - accuracy: 0.7501 - 99.85s - val_accuracy: 0.7363\n", + "[==============================] 100% Epoch 3/10 - loss: 0.5234 - accuracy: 0.7831 - 99.19s - val_accuracy: 0.7556\n", + "[==============================] 100% Epoch 4/10 - loss: 0.4632 - accuracy: 0.8075 - 99.11s - val_accuracy: 0.7734\n", + "[==============================] 100% Epoch 5/10 - loss: 0.4166 - accuracy: 0.8300 - 98.83s - val_accuracy: 0.7837\n", + "[==============================] 100% Epoch 6/10 - loss: 0.3784 - accuracy: 0.8466 - 100.29s - val_accuracy: 0.7926\n", + "[==============================] 100% Epoch 7/10 - loss: 0.3461 - accuracy: 0.8626 - 100.78s - val_accuracy: 0.8002\n", + "[==============================] 100% Epoch 8/10 - loss: 0.3193 - accuracy: 0.8748 - 99.56s - 
val_accuracy: 0.8032\n", + "[==============================] 100% Epoch 9/10 - loss: 0.2973 - accuracy: 0.8845 - 100.20s - val_accuracy: 0.8055\n", + "[==============================] 100% Epoch 10/10 - loss: 0.2789 - accuracy: 0.8924 - 100.57s - val_accuracy: 0.8086\n" ] + }, + { + "data": { + "text/plain": "" + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ @@ -255,11 +260,11 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T23:15:08.910264900Z", - "start_time": "2024-09-22T23:15:08.577100100Z" + "end_time": "2024-11-06T17:23:02.137602800Z", + "start_time": "2024-11-06T17:22:48.822292400Z" } }, "outputs": [ @@ -267,8 +272,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Loss: 1.1821619656925941\n", - "Accuracy: 0.5884\n" + "Loss: 1.2941937617667434\n", + "Accuracy: 0.8002\n" ] } ], diff --git a/neuralnetlib/layers.py b/neuralnetlib/layers.py index 47b969e..b457b76 100644 --- a/neuralnetlib/layers.py +++ b/neuralnetlib/layers.py @@ -108,7 +108,8 @@ def initialize_weights(self, input_size: int): stddev = np.sqrt(2 / input_size) self.weights = self.rng.normal(0, stddev, (input_size, self.units)) elif self.weights_init == "default": - self.weights = self.rng.normal(0, 0.01, (input_size, self.units)) + scale = np.sqrt(1.0 / input_size) + self.weights = self.rng.normal(0, scale, (input_size, self.units)) elif self.weights_init == "lecun": stddev = np.sqrt(1 / input_size) self.weights = self.rng.normal(0, stddev, (input_size, self.units)) @@ -771,62 +772,71 @@ def __init__(self, input_dim: int, output_dim: int, input_length: int = None, we self.output_dim = output_dim self.input_length = input_length self.weights = None + self.bias = None self.weights_init = weights_init self.random_state = random_state self.clipped_input = None + + def __str__(self): + return f'Embedding(input_dim={self.input_dim}, output_dim={self.output_dim})' def initialize_weights(self): self.rng = np.random.default_rng( self.random_state if self.random_state is not None else int(time.time_ns())) + if self.weights_init == "xavier": - self.weights = self.rng.normal(0, np.sqrt(2 / (self.input_dim + self.output_dim)), - (self.input_dim, self.output_dim)) - elif self.weights_init == "he": - self.weights = self.rng.normal(0, np.sqrt( - 2 / self.input_dim), (self.input_dim, self.output_dim)) - elif self.weights_init == "default": - self.weights = self.rng.normal( - 0, 0.01, (self.input_dim, self.output_dim)) + scale = np.sqrt(2.0 / (self.input_dim + self.output_dim)) + self.weights = self.rng.normal(0, scale, (self.input_dim, self.output_dim)) + elif self.weights_init == "uniform": + limit = np.sqrt(3.0 / self.output_dim) + self.weights = self.rng.uniform(-limit, limit, (self.input_dim, self.output_dim)) else: - raise ValueError( - "Invalid weights_init value. 
Possible values are 'xavier', 'he', and 'default'.") - - def __str__(self): - return f'Embedding(input_dim={self.input_dim}, output_dim={self.output_dim}, input_length={self.input_length})' + scale = 0.05 + self.weights = self.rng.normal(0, scale, (self.input_dim, self.output_dim)) + + self.bias = np.zeros((1, 1, self.output_dim)) + + self.d_weights = np.zeros_like(self.weights) + self.d_bias = np.zeros_like(self.bias) def forward_pass(self, input_data: np.ndarray) -> np.ndarray: if self.weights is None: - assert len(input_data.shape) == 2, f"Embedding input must be 2D (batch_size, sequence_length), got {input_data.shape}" self.initialize_weights() self.input = input_data - if not np.issubdtype(input_data.dtype, np.integer): input_data = np.round(input_data).astype(int) - + + if np.any(input_data >= self.input_dim) or np.any(input_data < 0): + print(f"Warning: input indices out of bounds [0, {self.input_dim-1}]") self.clipped_input = np.clip(input_data, 0, self.input_dim - 1) - + output = self.weights[self.clipped_input] + output = output + self.bias return output def backward_pass(self, output_error: np.ndarray) -> np.ndarray: - input_error = np.zeros( - (self.input.shape[0], self.input.shape[1], self.input_dim)) - output_error = output_error.reshape( - output_error.shape[0], output_error.shape[1], -1) + if output_error.ndim != 3: + raise ValueError(f"Expected 3D output_error, got shape {output_error.shape}") + + batch_size, seq_length, emb_dim = output_error.shape + grad_weights = np.zeros_like(self.weights) - for i, index in enumerate(self.clipped_input): - np.add.at(input_error[i], (np.arange(index.shape[0]), index), np.sum(output_error[i], axis=1)) + for i in range(batch_size): + for j in range(seq_length): + idx = self.clipped_input[i, j] + grad_weights[idx] += output_error[i, j] - if not np.issubdtype(self.input.dtype, np.integer): - return np.zeros_like(self.input) + self.d_bias = np.sum(output_error, axis=(0, 1), keepdims=True).reshape(1, 1, -1) + self.d_weights = grad_weights - return input_error + return np.zeros_like(self.input, dtype=np.float32) def get_config(self) -> dict: return { 'name': self.__class__.__name__, 'weights': self.weights.tolist() if self.weights is not None else None, + 'bias': self.bias.tolist() if self.bias is not None else None, 'input_dim': self.input_dim, 'output_dim': self.output_dim, 'input_length': self.input_length, @@ -840,6 +850,7 @@ def from_config(config: dict): config['random_state']) if config['weights'] is not None: layer.weights = np.array(config['weights']) + layer.bias = np.array(config['bias']) return layer @@ -1235,27 +1246,475 @@ def from_config(config: dict): return Reshape(config['target_shape']) +class LSTMCell: + def __init__(self, input_dim: int, units: int, random_state=None): + self.input_dim = input_dim + self.units = units + self.random_state = random_state + self.rng = np.random.default_rng( + random_state if random_state is not None else int(time.time_ns())) + + total_input_dim = input_dim + units + + scale = np.sqrt(6.0 / (total_input_dim + units)) + self.Wf = self.rng.uniform(-scale, scale, (total_input_dim, units)) + self.Wi = self.rng.uniform(-scale, scale, (total_input_dim, units)) + self.Wc = self.rng.uniform(-scale, scale, (total_input_dim, units)) + self.Wo = self.rng.uniform(-scale, scale, (total_input_dim, units)) + + self.bf = np.full((1, units), 1.0) + self.bi = np.zeros((1, units)) + self.bc = np.zeros((1, units)) + self.bo = np.zeros((1, units)) + + self.dWf = np.zeros_like(self.Wf) + self.dWi = 
np.zeros_like(self.Wi) + self.dWc = np.zeros_like(self.Wc) + self.dWo = np.zeros_like(self.Wo) + + self.dbf = np.zeros_like(self.bf) + self.dbi = np.zeros_like(self.bi) + self.dbc = np.zeros_like(self.bc) + self.dbo = np.zeros_like(self.bo) + + self.grad_clip = 1.0 + + def __str__(self): + return f'LSTMCell(units={self.units}, input_dim={self.input_dim}, random_state={self.random_state})' + + def forward(self, x, prev_h, prev_c): + self.x = x + self.prev_h = prev_h + self.prev_c = prev_c + + concat = np.concatenate((x, prev_h), axis=1) + self.concat = concat + + f_gate = self._sigmoid(self._clip(np.dot(concat, self.Wf) + self.bf)) + i_gate = self._sigmoid(self._clip(np.dot(concat, self.Wi) + self.bi)) + c_tilde = np.tanh(self._clip(np.dot(concat, self.Wc) + self.bc)) + o_gate = self._sigmoid(self._clip(np.dot(concat, self.Wo) + self.bo)) + + c = self._clip(f_gate * prev_c + i_gate * c_tilde) + h = self._clip(o_gate * np.tanh(c)) + + self.gates = (f_gate, i_gate, o_gate, c_tilde) + self.c = c + self.h = h + + return h, c + + def backward(self, dh, dc): + f_gate, i_gate, o_gate, c_tilde = self.gates + + dh = self._clip(dh) + dc = self._clip(dc) + + do = dh * np.tanh(self.c) * o_gate * (1 - o_gate) + dc = dc + dh * o_gate * (1 - np.tanh(self.c)**2) + + df = dc * self.prev_c * f_gate * (1 - f_gate) + di = dc * c_tilde * i_gate * (1 - i_gate) + dc_tilde = dc * i_gate * (1 - c_tilde**2) + + self.dWf = self._clip(np.dot(self.concat.T, df)) + self.dWi = self._clip(np.dot(self.concat.T, di)) + self.dWc = self._clip(np.dot(self.concat.T, dc_tilde)) + self.dWo = self._clip(np.dot(self.concat.T, do)) + + self.dbf = np.sum(df, axis=0, keepdims=True) + self.dbi = np.sum(di, axis=0, keepdims=True) + self.dbc = np.sum(dc_tilde, axis=0, keepdims=True) + self.dbo = np.sum(do, axis=0, keepdims=True) + + dconcat = (np.dot(df, self.Wf.T) + + np.dot(di, self.Wi.T) + + np.dot(dc_tilde, self.Wc.T) + + np.dot(do, self.Wo.T)) + + dx = dconcat[:, :self.input_dim] + dprev_h = dconcat[:, self.input_dim:] + dprev_c = dc * f_gate + + return (self._clip(dx), + self._clip(dprev_h), + self._clip(dprev_c)) + + def _clip(self, x): + return np.clip(x, -self.grad_clip, self.grad_clip) + + @staticmethod + def _sigmoid(x): + x = np.clip(x, -15, 15) + return 1.0 / (1.0 + np.exp(-x)) + + @staticmethod + def _sigmoid_derivative(x): + return x * (1 - x) + + @staticmethod + def _tanh_derivative(x): + return 1 - np.square(np.tanh(x)) + + def get_config(self): + return { + 'name': self.__class__.__name__, + 'units': self.units, + 'random_state': self.random_state + } + + @staticmethod + def from_config(config): + return LSTMCell(config['units'], config['random_state'], config['random_state']) + + +class LSTM(Layer): + def __init__(self, units, return_sequences=False, return_state=False, random_state=None): + super().__init__() + self.units = units + self.return_sequences = return_sequences + self.return_state = return_state + self.random_state = random_state + self.initialized = False + + def __str__(self): + return f'LSTM(units={self.units}, return_sequences={self.return_sequences}, return_state={self.return_state}, random_state={self.random_state})' + + def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndarray: + if len(input_data.shape) != 3: + raise ValueError(f"Expected 3D input (batch, timesteps, features), got {input_data.shape}") + + batch_size, timesteps, input_dim = input_data.shape + + if not self.initialized: + self.cell = LSTMCell(input_dim, self.units, self.random_state) + self.initialized = True 
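+            # The cell is built lazily because input_dim is only known when
+            # the first batch arrives (e.g. 100 features per timestep coming
+            # out of the Embedding layer in the notebook above). Each step of
+            # the loop below maps x[:, t, :] of shape (batch, input_dim) and
+            # the previous (h_t, c_t), each of shape (batch, units), to the
+            # next state.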
+
+        if self.return_sequences:
+            h_seq = np.zeros((batch_size, timesteps, self.units))
+
+        h_t = np.zeros((batch_size, self.units))
+        c_t = np.zeros((batch_size, self.units))
+
+        self.states = []
+        self.inputs = input_data
+
+        for t in range(timesteps):
+            h_t, c_t = self.cell.forward(input_data[:, t, :], h_t, c_t)
+            if self.return_sequences:
+                h_seq[:, t, :] = h_t
+            self.states.append((h_t.copy(), c_t.copy()))
+
+        if self.return_sequences:
+            output = h_seq
+        else:
+            output = h_t
+
+        if self.return_state:
+            return output, h_t, c_t
+        return output
+
+    def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
+        batch_size, timesteps, _ = self.inputs.shape
+
+        if not self.return_sequences and len(output_error.shape) == 2:
+            temp_error = np.zeros((batch_size, timesteps, self.units))
+            temp_error[:, -1, :] = output_error
+            output_error = temp_error
+
+        dx = np.zeros_like(self.inputs)
+        dh_next = np.zeros((batch_size, self.units))
+        dc_next = np.zeros((batch_size, self.units))
+
+        for t in reversed(range(timesteps)):
+            dh = output_error[:, t, :] + dh_next
+
+            dx_t, dh_next, dc_next = self.cell.backward(dh, dc_next)
+            dx[:, t, :] = dx_t
+
+        return dx
+
+    def get_config(self):
+        return {
+            'name': self.__class__.__name__,
+            'units': self.units,
+            'return_sequences': self.return_sequences,
+            'return_state': self.return_state,
+            'random_state': self.random_state
+        }
+
+    @staticmethod
+    def from_config(config):
+        return LSTM(
+            config['units'],
+            config['return_sequences'],
+            config['return_state'],
+            config['random_state']
+        )
+
+
+class Bidirectional(Layer):
+    def __init__(self, layer):
+        super().__init__()
+        if not isinstance(layer, LSTM):
+            raise ValueError("Bidirectional layer only supports LSTM layers")
+
+        self.forward_layer = layer
+        self.backward_layer = LSTM(
+            layer.units,
+            layer.return_sequences,
+            layer.return_state,
+            layer.random_state
+        )
+
+    def __str__(self):
+        return f'Bidirectional(layer={str(self.forward_layer)})'
+
+    def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndarray:
+        self.forward_output = self.forward_layer.forward_pass(input_data, training)
+        backward_input = input_data[:, ::-1, :]  # reverse the time axis
+        self.backward_output = self.backward_layer.forward_pass(backward_input, training)
+
+        if isinstance(self.forward_output, tuple):
+            forward_seq, forward_h, forward_c = self.forward_output
+            backward_seq, backward_h, backward_c = self.backward_output
+
+            if self.forward_layer.return_sequences:
+                backward_seq = backward_seq[:, ::-1, :]
+                return np.concatenate([forward_seq, backward_seq], axis=-1), \
+                    np.concatenate([forward_h, backward_h], axis=-1), \
+                    np.concatenate([forward_c, backward_c], axis=-1)
+            else:
+                return np.concatenate([forward_h, backward_h], axis=-1)
+        else:
+            if self.forward_layer.return_sequences:
+                self.backward_output = self.backward_output[:, ::-1, :]
+            return np.concatenate([self.forward_output, self.backward_output], axis=-1)
+
+    def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
+        forward_dim = output_error.shape[-1] // 2
+
+        if len(output_error.shape) == 3:
+            forward_error = output_error[:, :, :forward_dim]
+            backward_error = output_error[:, :, forward_dim:]
+            backward_error = backward_error[:, ::-1, :]
+        else:
+            forward_error = output_error[:, :forward_dim]
+            backward_error = output_error[:, forward_dim:]
+
+        forward_dx = self.forward_layer.backward_pass(forward_error)
+        backward_dx = self.backward_layer.backward_pass(backward_error)
+
+        if len(output_error.shape) == 3:
+            backward_dx = 
backward_dx[:, ::-1, :] + + return forward_dx + backward_dx + + def get_config(self): + return { + 'name': self.__class__.__name__, + 'layer': self.forward_layer.get_config() + } + + @staticmethod + def from_config(config): + layer = LSTM.from_config(config['layer']) + return Bidirectional(layer) + + +class Unidirectional(Layer): + """Wrapper class that makes it explicit that a layer processes sequences in one direction""" + + def __init__(self, layer): + super().__init__() + if not isinstance(layer, LSTM): + raise ValueError("Unidirectional layer only supports LSTM layers") + self.layer = layer + + def __str__(self): + return f'Unidirectional(layer={str(self.layer)})' + + def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndarray: + return self.layer.forward_pass(input_data, training) + + def backward_pass(self, output_error: np.ndarray) -> np.ndarray: + return self.layer.backward_pass(output_error) + + def get_config(self): + return { + 'name': self.__class__.__name__, + 'layer': self.layer.get_config() + } + + @staticmethod + def from_config(config): + layer = LSTM.from_config(config['layer']) + return Unidirectional(layer) + + +class Attention(Layer): + def __init__(self, use_scale=True, score_type='dot', random_state=None): + super().__init__() + self.use_scale = use_scale + self.score_type = score_type + self.random_state = random_state + self.weights = None + self.bias = None + + if score_type not in ['dot', 'additive']: + raise ValueError("score_type must be either 'dot' or 'additive'") + + def __str__(self): + return f'Attention(score_type={self.score_type}, use_scale={self.use_scale})' + + def initialize_weights(self, query_dim): + if self.score_type == 'additive': + self.rng = np.random.default_rng( + self.random_state if self.random_state is not None else int(time.time_ns())) + + self.Wq = self.rng.normal(0, 0.1, (query_dim, query_dim)) + self.Wk = self.rng.normal(0, 0.1, (query_dim, query_dim)) + self.v = self.rng.normal(0, 0.1, (query_dim, 1)) + + self.dWq = np.zeros_like(self.Wq) + self.dWk = np.zeros_like(self.Wk) + self.dv = np.zeros_like(self.v) + + def forward_pass(self, inputs, mask=None): + query, key, value = inputs + + self.query = query + self.key = key + self.value = value + + if self.weights is None and self.score_type == 'additive': + self.initialize_weights(query.shape[-1]) + + if self.score_type == 'dot': + scores = np.matmul(query, np.transpose(key, (0, 2, 1))) + if self.use_scale: + scores = scores / np.sqrt(key.shape[-1]) + else: + q_transformed = np.dot(query, self.Wq) + k_transformed = np.dot(key, self.Wk) + + q_expanded = q_transformed[:, :, np.newaxis, :] + k_expanded = k_transformed[:, np.newaxis, :, :] + + # Compute scores + scores = np.tanh(q_expanded + k_expanded) + scores = np.dot(scores, self.v) + scores = scores.squeeze(-1) + + if mask is not None: + scores = np.where(mask, scores, -np.inf) + + self.attention_weights = self._softmax(scores) + + outputs = np.matmul(self.attention_weights, value) + + return outputs + + def backward_pass(self, output_error): + batch_size = output_error.shape[0] + + d_value = np.matmul(np.transpose(self.attention_weights, (0, 2, 1)), output_error) + + d_weights = np.matmul(output_error, np.transpose(self.value, (0, 2, 1))) + + d_scores = self._softmax_derivative(self.attention_weights) * d_weights + + if self.score_type == 'dot': + scaling = 1/np.sqrt(self.key.shape[-1]) if self.use_scale else 1 + d_query = scaling * np.matmul(d_scores, self.key) + d_key = scaling * 
np.matmul(np.transpose(d_scores, (0, 2, 1)), self.query) + else: + d_scores_expanded = d_scores[..., np.newaxis] + d_tanh = d_scores_expanded * self.v.T + d_tanh = d_tanh * (1 - np.tanh(self.scores) ** 2) + + self.dWq = np.zeros_like(self.Wq) + self.dWk = np.zeros_like(self.Wk) + self.dv = np.zeros_like(self.v) + + for b in range(batch_size): + self.dWq += np.dot(self.query[b].T, np.sum(d_tanh[b], axis=1)) + self.dWk += np.dot(self.key[b].T, np.sum(d_tanh[b], axis=0)) + self.dv += np.sum(np.dot(np.transpose(d_scores[b]), + np.tanh(np.dot(self.query[b], self.Wq) + np.dot(self.key[b], self.Wk)))) + + d_query = np.dot(d_tanh, self.Wq.T) + d_key = np.dot(d_tanh, self.Wk.T) + + return (d_query, d_key, d_value) + + def _softmax(self, x): + exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True)) + return exp_x / np.sum(exp_x, axis=-1, keepdims=True) + + def _softmax_derivative(self, softmax_output): + softmax_output = softmax_output[..., np.newaxis] + return softmax_output * (np.eye(softmax_output.shape[-2]) - np.transpose(softmax_output, (0, 1, 3, 2))) + + def get_config(self): + return { + 'name': self.__class__.__name__, + 'use_scale': self.use_scale, + 'score_type': self.score_type, + 'random_state': self.random_state, + 'weights': { + 'Wq': self.Wq.tolist() if hasattr(self, 'Wq') else None, + 'Wk': self.Wk.tolist() if hasattr(self, 'Wk') else None, + 'v': self.v.tolist() if hasattr(self, 'v') else None + } if self.score_type == 'additive' else None + } + + @staticmethod + def from_config(config): + layer = Attention( + use_scale=config['use_scale'], + score_type=config['score_type'], + random_state=config['random_state'] + ) + + if config['weights'] is not None: + if config['weights']['Wq'] is not None: + layer.Wq = np.array(config['weights']['Wq']) + layer.Wk = np.array(config['weights']['Wk']) + layer.v = np.array(config['weights']['v']) + layer.dWq = np.zeros_like(layer.Wq) + layer.dWk = np.zeros_like(layer.Wk) + layer.dv = np.zeros_like(layer.v) + + return layer + + # -------------------------------------------------------------------------------------------------------------- compatibility_dict = { - Input: [Dense, Conv2D, Conv1D, Embedding, Permute, TextVectorization, Reshape], - Dense: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape], + Input: [Dense, Conv2D, Conv1D, Embedding, Permute, TextVectorization, Reshape, LSTM, Bidirectional, Unidirectional], + Dense: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], Activation: [Dense, Conv2D, Conv1D, MaxPooling2D, AveragePooling2D, MaxPooling1D, AveragePooling1D, Flatten, - Dropout, Permute, Reshape], + Dropout, Permute, Reshape, LSTM, Bidirectional, Unidirectional], Conv2D: [Conv2D, MaxPooling2D, AveragePooling2D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape], MaxPooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Permute, Reshape], AveragePooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Permute, Reshape], - Conv1D: [Conv1D, MaxPooling1D, AveragePooling1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape], - MaxPooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape], - AveragePooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape], - Flatten: [Dense, Dropout, Permute, Reshape], - Dropout: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape], - Embedding: [Conv1D, Flatten, Dense, Permute, Reshape], - BatchNormalization: [Dense, Conv2D, Conv1D, Activation, Permute, 
Reshape], + Conv1D: [Conv1D, MaxPooling1D, AveragePooling1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + MaxPooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + AveragePooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Flatten: [Dense, Dropout, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Dropout: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Embedding: [Conv1D, Flatten, Dense, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + BatchNormalization: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional], Permute: [Dense, Conv2D, Conv1D, Activation, - Dropout, Flatten, BatchNormalization, Permute, Reshape], - TextVectorization: [Embedding, Dense, Conv1D, Reshape], + Dropout, Flatten, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + TextVectorization: [Embedding, Dense, Conv1D, Reshape, LSTM, Bidirectional, Unidirectional], Reshape: [Dense, Conv2D, Conv1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape, - TextVectorization, Embedding, Input, MaxPooling2D, AveragePooling2D, MaxPooling1D, AveragePooling1D] + TextVectorization, Embedding, Input, MaxPooling2D, AveragePooling2D, MaxPooling1D, AveragePooling1D, + LSTM, Bidirectional, Unidirectional], + LSTM: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Bidirectional: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Unidirectional: [Dense, Activation, Dropout, BatchNormalization, + Permute, Reshape, LSTM, Bidirectional, Unidirectional] } diff --git a/neuralnetlib/model.py b/neuralnetlib/model.py index 098ccfc..81ca867 100644 --- a/neuralnetlib/model.py +++ b/neuralnetlib/model.py @@ -6,7 +6,7 @@ import numpy as np from neuralnetlib.activations import ActivationFunction -from neuralnetlib.layers import Layer, Input, Activation, Dropout, TextVectorization, compatibility_dict +from neuralnetlib.layers import Layer, Input, Activation, Dropout, TextVectorization, LSTM, Bidirectional, Embedding, compatibility_dict from neuralnetlib.losses import LossFunction, CategoricalCrossentropy from neuralnetlib.optimizers import Optimizer from neuralnetlib.preprocessing import PCA @@ -72,7 +72,7 @@ def compile(self, loss_function: LossFunction | str, optimizer: Optimizer | str, def forward_pass(self, X: np.ndarray, training: bool = True) -> np.ndarray: for layer in self.layers: - if isinstance(layer, Dropout): + if isinstance(layer, (Dropout, LSTM, Bidirectional)): X = layer.forward_pass(X, training) elif isinstance(layer, TextVectorization): X = layer.forward_pass(X) @@ -96,6 +96,16 @@ def backward_pass(self, error: np.ndarray): elif hasattr(layer, 'd_weights'): self.optimizer.update( len(self.layers) - 1 - i, layer.weights, layer.d_weights) + + if isinstance(layer, LSTM): + self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wf, layer.cell.dWf, layer.cell.bf, layer.cell.dbf) + self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wi, layer.cell.dWi, layer.cell.bi, layer.cell.dbi) + self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wc, layer.cell.dWc, layer.cell.bc, layer.cell.dbc) + self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wo, layer.cell.dWo, layer.cell.bo, 
layer.cell.dbo) + elif hasattr(layer, 'd_weights') and hasattr(layer, 'd_bias'): + self.optimizer.update(len(self.layers) - 1 - i, layer.weights, layer.d_weights, layer.bias, layer.d_bias) + elif hasattr(layer, 'd_weights'): + self.optimizer.update(len(self.layers) - 1 - i, layer.weights, layer.d_weights) def train_on_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> float: self.y_true = y_batch @@ -106,6 +116,8 @@ def train_on_batch(self, x_batch: np.ndarray, y_batch: np.ndarray) -> float: if error.ndim == 1: error = error[:, None] + elif isinstance(self.layers[-1], (LSTM, Bidirectional)) and self.layers[-1].return_sequences: + error = error.reshape(error.shape[0], error.shape[1], -1) self.backward_pass(error) return loss @@ -143,6 +155,16 @@ def fit(self, x_train: np.ndarray, y_train: np.ndarray, epochs: int, batch_size: x_train = np.array(x_train) if not isinstance(x_train, np.ndarray) else x_train y_train = np.array(y_train) if not isinstance(y_train, np.ndarray) else y_train + has_lstm = any(isinstance(layer, (LSTM, Bidirectional)) for layer in self.layers) + has_embedding = any(isinstance(layer, Embedding) for layer in self.layers) + + if has_lstm and not has_embedding: + if len(x_train.shape) != 3: + raise ValueError("Input data must be 3D (batch_size, time_steps, features) for LSTM layers without Embedding") + elif has_embedding: + if len(x_train.shape) != 2: + raise ValueError("Input data must be 2D (batch_size, sequence_length) when using Embedding layer") + if validation_data is not None: x_test, y_test = validation_data x_test = np.array(x_test) From 8d778eb8d5a4492a91e67e54d9b834a3610f01a4 Mon Sep 17 00:00:00 2001 From: GitHub Action <52708150+marcpinet@users.noreply.github.com> Date: Wed, 6 Nov 2024 20:28:22 +0100 Subject: [PATCH 2/5] feat: add attention --- .../sentiment_analysis.ipynb | 91 ++- neuralnetlib/layers.py | 590 +++++++++--------- neuralnetlib/model.py | 4 +- 3 files changed, 342 insertions(+), 343 deletions(-) diff --git a/examples/classification-regression/sentiment_analysis.ipynb b/examples/classification-regression/sentiment_analysis.ipynb index a6cd6b7..63d5cfd 100644 --- a/examples/classification-regression/sentiment_analysis.ipynb +++ b/examples/classification-regression/sentiment_analysis.ipynb @@ -18,11 +18,11 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T16:45:12.192249300Z", - "start_time": "2024-11-06T16:45:03.226068300Z" + "end_time": "2024-11-06T18:44:44.255458200Z", + "start_time": "2024-11-06T18:44:32.435539700Z" } }, "outputs": [], @@ -31,7 +31,7 @@ "import pandas as pd\n", "\n", "from neuralnetlib.model import Model\n", - "from neuralnetlib.layers import Input, Dense, Embedding, LSTM, Bidirectional, Dropout\n", + "from neuralnetlib.layers import Input, Dense, Embedding, LSTM, Bidirectional, Attention, GlobalAveragePooling1D\n", "from neuralnetlib.preprocessing import Tokenizer, pad_sequences, CountVectorizer\n", "from neuralnetlib.optimizers import Adam\n", "from neuralnetlib.metrics import accuracy_score\n", @@ -49,11 +49,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T16:45:13.728513500Z", - "start_time": "2024-11-06T16:45:12.196249Z" + "end_time": "2024-11-06T18:44:45.772697800Z", + "start_time": "2024-11-06T18:44:44.256962100Z" } }, "outputs": [], @@ -70,11 +70,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": { 
"ExecuteTime": { - "end_time": "2024-11-06T16:45:13.979364300Z", - "start_time": "2024-11-06T16:45:13.715497900Z" + "end_time": "2024-11-06T18:44:46.040708400Z", + "start_time": "2024-11-06T18:44:45.774698100Z" } }, "outputs": [ @@ -148,11 +148,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T17:25:37.243193300Z", - "start_time": "2024-11-06T17:25:37.228686400Z" + "end_time": "2024-11-06T18:44:46.054955900Z", + "start_time": "2024-11-06T18:44:46.040708400Z" } }, "outputs": [], @@ -161,6 +161,8 @@ "model.add(Input(max_len))\n", "model.add(Embedding(max_words, 100, weights_init='xavier'))\n", "model.add(Bidirectional(LSTM(32, return_sequences=True)))\n", + "model.add(Attention())\n", + "model.add(GlobalAveragePooling1D())\n", "model.add(Dense(1, activation='sigmoid'))" ] }, @@ -173,11 +175,11 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T16:45:14.039651700Z", - "start_time": "2024-11-06T16:45:13.995383300Z" + "end_time": "2024-11-06T18:44:46.100743200Z", + "start_time": "2024-11-06T18:44:46.054955900Z" } }, "outputs": [ @@ -188,19 +190,21 @@ "Model\n", "-------------------------------------------------\n", "Layer 1: Input(input_shape=(200,))\n", - "Layer 2: \n", - "Layer 3: \n", - "Layer 4: Dense(units=1)\n", - "Layer 5: Activation(Sigmoid)\n", + "Layer 2: Embedding(input_dim=10000, output_dim=100)\n", + "Layer 3: Bidirectional(layer=LSTM(units=32, return_sequences=True, return_state=False, random_state=None))\n", + "Layer 4: Attention(score_mode=dot, use_scale=False, dropout=0.0)\n", + "Layer 5: GlobalAveragePooling1D\n", + "Layer 6: Dense(units=1)\n", + "Layer 7: Activation(Sigmoid)\n", "-------------------------------------------------\n", "Loss function: BinaryCrossentropy\n", - "Optimizer: Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", + "Optimizer: Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", "-------------------------------------------------\n" ] } ], "source": [ - "model.compile(optimizer=Adam(learning_rate=0.0001), loss_function='binary_crossentropy')\n", + "model.compile(optimizer='adam', loss_function='binary_crossentropy')\n", "\n", "model.summary()" ] @@ -214,11 +218,11 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T17:07:31.271424100Z", - "start_time": "2024-11-06T16:45:14.010615500Z" + "end_time": "2024-11-06T19:27:56.073804Z", + "start_time": "2024-11-06T19:27:56.052756700Z" } }, "outputs": [ @@ -226,25 +230,18 @@ "name": "stdout", "output_type": "stream", "text": [ - "[==============================] 100% Epoch 1/10 - loss: 0.6769 - accuracy: 0.6458 - 101.45s - val_accuracy: 0.7067\n", - "[==============================] 100% Epoch 2/10 - loss: 0.6020 - accuracy: 0.7501 - 99.85s - val_accuracy: 0.7363\n", - "[==============================] 100% Epoch 3/10 - loss: 0.5234 - accuracy: 0.7831 - 99.19s - val_accuracy: 0.7556\n", - "[==============================] 100% Epoch 4/10 - loss: 0.4632 - accuracy: 0.8075 - 99.11s - val_accuracy: 0.7734\n", - "[==============================] 100% Epoch 5/10 - loss: 0.4166 - accuracy: 0.8300 - 98.83s - val_accuracy: 0.7837\n", - "[==============================] 100% Epoch 6/10 - loss: 0.3784 - accuracy: 0.8466 - 100.29s - val_accuracy: 0.7926\n", - "[==============================] 100% Epoch 7/10 - loss: 0.3461 - accuracy: 
0.8626 - 100.78s - val_accuracy: 0.8002\n", - "[==============================] 100% Epoch 8/10 - loss: 0.3193 - accuracy: 0.8748 - 99.56s - val_accuracy: 0.8032\n", - "[==============================] 100% Epoch 9/10 - loss: 0.2973 - accuracy: 0.8845 - 100.20s - val_accuracy: 0.8055\n", - "[==============================] 100% Epoch 10/10 - loss: 0.2789 - accuracy: 0.8924 - 100.57s - val_accuracy: 0.8086\n" + "\n", + "[==============================] 100% Epoch 1/10 - loss: 0.5315 - accuracy: 0.7552 - 290.73s - val_accuracy: 0.8314\n", + "[==============================] 100% Epoch 2/10 - loss: 0.3029 - accuracy: 0.8838 - 269.72s - val_accuracy: 0.8680\n", + "[==============================] 100% Epoch 3/10 - loss: 0.2369 - accuracy: 0.9095 - 316.64s - val_accuracy: 0.8778\n", + "[==============================] 100% Epoch 4/10 - loss: 0.1979 - accuracy: 0.9251 - 270.75s - val_accuracy: 0.8815\n", + "[==============================] 100% Epoch 5/10 - loss: 0.1687 - accuracy: 0.9382 - 304.63s - val_accuracy: 0.8824\n", + "[==============================] 100% Epoch 6/10 - loss: 0.1447 - accuracy: 0.9503 - 300.43s - val_accuracy: 0.8810\n", + "[==============================] 100% Epoch 7/10 - loss: 0.1240 - accuracy: 0.9594 - 303.27s - val_accuracy: 0.8779\n", + "[==============================] 100% Epoch 8/10 - loss: 0.1063 - accuracy: 0.9666 - 303.07s - val_accuracy: 0.8748\n", + "[==============================] 100% Epoch 9/10 - loss: 0.0911 - accuracy: 0.9726 - 303.07s - val_accuracy: 0.8708\n", + "[==============================] 100% Epoch 10/10 - loss: 0.0781 - accuracy: 0.9776 - 303.07s - val_accuracy: 0.8676\n" ] - }, - { - "data": { - "text/plain": "" - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ @@ -263,8 +260,8 @@ "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T17:23:02.137602800Z", - "start_time": "2024-11-06T17:22:48.822292400Z" + "end_time": "2024-11-06T19:27:20.060588Z", + "start_time": "2024-11-06T19:27:03.414934400Z" } }, "outputs": [ @@ -272,8 +269,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Loss: 1.2941937617667434\n", - "Accuracy: 0.8002\n" + "Loss: 2.566060102842103\n", + "Accuracy: 0.8926\n" ] } ], diff --git a/neuralnetlib/layers.py b/neuralnetlib/layers.py index b457b76..674abdd 100644 --- a/neuralnetlib/layers.py +++ b/neuralnetlib/layers.py @@ -776,26 +776,29 @@ def __init__(self, input_dim: int, output_dim: int, input_length: int = None, we self.weights_init = weights_init self.random_state = random_state self.clipped_input = None - + def __str__(self): return f'Embedding(input_dim={self.input_dim}, output_dim={self.output_dim})' def initialize_weights(self): self.rng = np.random.default_rng( self.random_state if self.random_state is not None else int(time.time_ns())) - + if self.weights_init == "xavier": scale = np.sqrt(2.0 / (self.input_dim + self.output_dim)) - self.weights = self.rng.normal(0, scale, (self.input_dim, self.output_dim)) + self.weights = self.rng.normal( + 0, scale, (self.input_dim, self.output_dim)) elif self.weights_init == "uniform": limit = np.sqrt(3.0 / self.output_dim) - self.weights = self.rng.uniform(-limit, limit, (self.input_dim, self.output_dim)) + self.weights = self.rng.uniform(-limit, + limit, (self.input_dim, self.output_dim)) else: scale = 0.05 - self.weights = self.rng.normal(0, scale, (self.input_dim, self.output_dim)) - + self.weights = self.rng.normal( + 0, scale, (self.input_dim, self.output_dim)) + self.bias = np.zeros((1, 
1, self.output_dim))
-
+
         self.d_weights = np.zeros_like(self.weights)
         self.d_bias = np.zeros_like(self.bias)
@@ -808,7 +811,8 @@ def forward_pass(self, input_data: np.ndarray) -> np.ndarray:
             input_data = np.round(input_data).astype(int)
 
         if np.any(input_data >= self.input_dim) or np.any(input_data < 0):
-            print(f"Warning: input indices out of bounds [0, {self.input_dim-1}]")
+            print(
+                f"Warning: input indices out of bounds [0, {self.input_dim-1}]")
         self.clipped_input = np.clip(input_data, 0, self.input_dim - 1)
 
         output = self.weights[self.clipped_input]
@@ -817,19 +821,21 @@ def forward_pass(self, input_data: np.ndarray) -> np.ndarray:
 
     def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
         if output_error.ndim != 3:
-            raise ValueError(f"Expected 3D output_error, got shape {output_error.shape}")
-
+            raise ValueError(
+                f"Expected 3D output_error, got shape {output_error.shape}")
+
         batch_size, seq_length, emb_dim = output_error.shape
         grad_weights = np.zeros_like(self.weights)
-
+
         for i in range(batch_size):
             for j in range(seq_length):
                 idx = self.clipped_input[i, j]
                 grad_weights[idx] += output_error[i, j]
-
-        self.d_bias = np.sum(output_error, axis=(0, 1), keepdims=True).reshape(1, 1, -1)
+
+        self.d_bias = np.sum(output_error, axis=(
+            0, 1), keepdims=True).reshape(1, 1, -1)
         self.d_weights = grad_weights
-
+
         return np.zeros_like(self.input, dtype=np.float32)
 
     def get_config(self) -> dict:
@@ -1111,6 +1117,54 @@ def _pool_backward(output_error: np.ndarray, input_data: np.ndarray, pool_size:
     return d_input
 
 
+class GlobalAveragePooling1D(Layer):
+    def __init__(self):
+        self.input_shape = None
+
+    def __str__(self):
+        return 'GlobalAveragePooling1D'
+
+    def forward_pass(self, input_data: np.ndarray) -> np.ndarray:
+        assert len(
+            input_data.shape) == 3, f"GlobalAveragePooling1D input must be 3D (batch_size, steps, features), got {input_data.shape}"
+        self.input_shape = input_data.shape
+        return np.mean(input_data, axis=1)
+
+    def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
+        # Gradient of a mean over `steps` entries: broadcast, then divide by steps.
+        return np.repeat(output_error[:, np.newaxis, :], self.input_shape[1], axis=1) / self.input_shape[1]
+
+    def get_config(self) -> dict:
+        return {'name': self.__class__.__name__}
+
+    @staticmethod
+    def from_config(config: dict):
+        return GlobalAveragePooling1D()
+
+
+class GlobalAveragePooling2D(Layer):
+    def __init__(self):
+        self.input_shape = None
+
+    def __str__(self):
+        return 'GlobalAveragePooling2D'
+
+    def forward_pass(self, input_data: np.ndarray) -> np.ndarray:
+        assert len(
+            input_data.shape) == 4, f"GlobalAveragePooling2D input must be 4D (batch_size, channels, height, width), got {input_data.shape}"
+        self.input_shape = input_data.shape
+        return np.mean(input_data, axis=(2, 3))
+
+    def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
+        # Broadcast over both spatial axes, then divide by height * width.
+        d_input = np.repeat(
+            output_error[:, :, np.newaxis, np.newaxis], self.input_shape[2], axis=2)
+        d_input = np.repeat(d_input, self.input_shape[3], axis=3)
+        return d_input / (self.input_shape[2] * self.input_shape[3])
+
+    def get_config(self) -> dict:
+        return {'name': self.__class__.__name__}
+
+    @staticmethod
+    def from_config(config: dict):
+        return GlobalAveragePooling2D()
+
+
 class Permute(Layer):
     def __init__(self, dims):
         self.dims = dims
@@ -1253,182 +1307,124 @@ def __init__(self, input_dim: int, units: int, random_state=None):
         self.random_state = random_state
         self.rng = np.random.default_rng(
            random_state if random_state is not None else int(time.time_ns()))
-
-        total_input_dim = input_dim + units
-
-        scale = np.sqrt(6.0 / (total_input_dim + units))
-        self.Wf = self.rng.uniform(-scale, scale, (total_input_dim, units))
-        self.Wi = 
self.rng.uniform(-scale, scale, (total_input_dim, units)) - self.Wc = self.rng.uniform(-scale, scale, (total_input_dim, units)) - self.Wo = self.rng.uniform(-scale, scale, (total_input_dim, units)) - - self.bf = np.full((1, units), 1.0) - self.bi = np.zeros((1, units)) - self.bc = np.zeros((1, units)) - self.bo = np.zeros((1, units)) - - self.dWf = np.zeros_like(self.Wf) - self.dWi = np.zeros_like(self.Wi) - self.dWc = np.zeros_like(self.Wc) - self.dWo = np.zeros_like(self.Wo) - - self.dbf = np.zeros_like(self.bf) - self.dbi = np.zeros_like(self.bi) - self.dbc = np.zeros_like(self.bc) - self.dbo = np.zeros_like(self.bo) - - self.grad_clip = 1.0 - - def __str__(self): - return f'LSTMCell(units={self.units}, input_dim={self.input_dim}, random_state={self.random_state})' + total_dim = input_dim + units + self.W = self.rng.uniform( + -np.sqrt(1 / total_dim), np.sqrt(1 / total_dim), (total_dim, 4 * units)) + self.b = np.zeros((1, 4 * units)) - def forward(self, x, prev_h, prev_c): - self.x = x - self.prev_h = prev_h - self.prev_c = prev_c - - concat = np.concatenate((x, prev_h), axis=1) - self.concat = concat - - f_gate = self._sigmoid(self._clip(np.dot(concat, self.Wf) + self.bf)) - i_gate = self._sigmoid(self._clip(np.dot(concat, self.Wi) + self.bi)) - c_tilde = np.tanh(self._clip(np.dot(concat, self.Wc) + self.bc)) - o_gate = self._sigmoid(self._clip(np.dot(concat, self.Wo) + self.bo)) - - c = self._clip(f_gate * prev_c + i_gate * c_tilde) - h = self._clip(o_gate * np.tanh(c)) - - self.gates = (f_gate, i_gate, o_gate, c_tilde) - self.c = c - self.h = h - + def forward(self, x, h_prev, c_prev): + combined = np.hstack((x, h_prev)) + gates = combined @ self.W + self.b + + i = self.sigmoid(gates[:, :self.units]) + f = self.sigmoid(gates[:, self.units:2*self.units]) + o = self.sigmoid(gates[:, 2*self.units:3*self.units]) + g = np.tanh(gates[:, 3*self.units:]) + + c = f * c_prev + i * g + h = o * np.tanh(c) + + self.cache = (combined, i, f, o, g, c_prev, c) return h, c - def backward(self, dh, dc): - f_gate, i_gate, o_gate, c_tilde = self.gates - - dh = self._clip(dh) - dc = self._clip(dc) - - do = dh * np.tanh(self.c) * o_gate * (1 - o_gate) - dc = dc + dh * o_gate * (1 - np.tanh(self.c)**2) - - df = dc * self.prev_c * f_gate * (1 - f_gate) - di = dc * c_tilde * i_gate * (1 - i_gate) - dc_tilde = dc * i_gate * (1 - c_tilde**2) - - self.dWf = self._clip(np.dot(self.concat.T, df)) - self.dWi = self._clip(np.dot(self.concat.T, di)) - self.dWc = self._clip(np.dot(self.concat.T, dc_tilde)) - self.dWo = self._clip(np.dot(self.concat.T, do)) - - self.dbf = np.sum(df, axis=0, keepdims=True) - self.dbi = np.sum(di, axis=0, keepdims=True) - self.dbc = np.sum(dc_tilde, axis=0, keepdims=True) - self.dbo = np.sum(do, axis=0, keepdims=True) - - dconcat = (np.dot(df, self.Wf.T) + - np.dot(di, self.Wi.T) + - np.dot(dc_tilde, self.Wc.T) + - np.dot(do, self.Wo.T)) - - dx = dconcat[:, :self.input_dim] - dprev_h = dconcat[:, self.input_dim:] - dprev_c = dc * f_gate - - return (self._clip(dx), - self._clip(dprev_h), - self._clip(dprev_c)) + def backward(self, dh_next, dc_next): + combined, i, f, o, g, c_prev, c = self.cache - def _clip(self, x): - return np.clip(x, -self.grad_clip, self.grad_clip) - - @staticmethod - def _sigmoid(x): - x = np.clip(x, -15, 15) - return 1.0 / (1.0 + np.exp(-x)) + do = dh_next * np.tanh(c) + dc = dh_next * o * (1 - np.tanh(c)**2) + dc_next + di = dc * g + dg = dc * i + df = dc * c_prev + dc_prev = dc * f + + di_input = di * i * (1 - i) + df_input = df * f * (1 - f) + do_input = do * o * 
(1 - o) + dg_input = dg * (1 - g**2) + + d_gates = np.hstack((di_input, df_input, do_input, dg_input)) + + self.dW = combined.T @ d_gates + self.db = np.sum(d_gates, axis=0, keepdims=True) + d_combined = d_gates @ self.W.T + + dx = d_combined[:, :self.input_dim] + dh_prev = d_combined[:, self.input_dim:] + + return dx, dh_prev, dc_prev + + def sigmoid(self, x): + return 1 / (1 + np.exp(-x)) - @staticmethod - def _sigmoid_derivative(x): - return x * (1 - x) - @staticmethod - def _tanh_derivative(x): - return 1 - np.square(np.tanh(x)) - def get_config(self): return { 'name': self.__class__.__name__, 'units': self.units, 'random_state': self.random_state } - + @staticmethod def from_config(config): - return LSTMCell(config['units'], config['random_state'], config['random_state']) + return LSTMCell(config['units'], config['random_state']) class LSTM(Layer): - def __init__(self, units, return_sequences=False, return_state=False, random_state=None): + def __init__(self, units, return_sequences=False, return_state=False, random_state=None, **kwargs): super().__init__() self.units = units self.return_sequences = return_sequences self.return_state = return_state self.random_state = random_state self.initialized = False + self.cell = None + self.states = None + self.h_t = None + self.c_t = None + self.inputs = None + for key, value in kwargs.items(): + setattr(self, key, value) + def __str__(self): return f'LSTM(units={self.units}, return_sequences={self.return_sequences}, return_state={self.return_state}, random_state={self.random_state})' - def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndarray: - if len(input_data.shape) != 3: - raise ValueError(f"Expected 3D input (batch, timesteps, features), got {input_data.shape}") - - batch_size, timesteps, input_dim = input_data.shape - + def forward_pass(self, x, training=True): + batch_size, timesteps, input_dim = x.shape if not self.initialized: self.cell = LSTMCell(input_dim, self.units, self.random_state) self.initialized = True - if self.return_sequences: - h_seq = np.zeros((batch_size, timesteps, self.units)) - - h_t = np.zeros((batch_size, self.units)) - c_t = np.zeros((batch_size, self.units)) - - self.states = [] - self.inputs = input_data - + h = np.zeros((batch_size, self.units)) + c = np.zeros((batch_size, self.units)) + + self.cache = [] + outputs = [] + for t in range(timesteps): - h_t, c_t = self.cell.forward(input_data[:, t, :], h_t, c_t) - if self.return_sequences: - h_seq[:, t, :] = h_t - self.states.append((h_t.copy(), c_t.copy())) + x_t = x[:, t, :] + h, c = self.cell.forward(x_t, h, c) + outputs.append(h) + self.cache.append(self.cell.cache) + + outputs = np.stack(outputs, axis=1) + self.h = h + self.c = c if self.return_sequences: - output = h_seq + return outputs else: - output = h_t - - if self.return_state: - return output, h_t, c_t - return output + return outputs[:, -1, :] - def backward_pass(self, output_error: np.ndarray) -> np.ndarray: - batch_size, timesteps, _ = self.inputs.shape - - if not self.return_sequences and len(output_error.shape) == 2: - temp_error = np.zeros((batch_size, timesteps, self.units)) - temp_error[:, -1, :] = output_error - output_error = temp_error - - dx = np.zeros_like(self.inputs) + def backward_pass(self, dout): + batch_size, timesteps, _ = dout.shape + dx = np.zeros((batch_size, timesteps, self.cell.input_dim)) dh_next = np.zeros((batch_size, self.units)) dc_next = np.zeros((batch_size, self.units)) for t in reversed(range(timesteps)): - dh = output_error[:, t, :] + 
dh_next
+            self.cell.cache = self.cache[t]
             dx_t, dh_next, dc_next = self.cell.backward(dh, dc_next)
             dx[:, t, :] = dx_t
 
@@ -1440,7 +1436,12 @@ def get_config(self):
             'units': self.units,
             'return_sequences': self.return_sequences,
             'return_state': self.return_state,
-            'random_state': self.random_state
+            'random_state': self.random_state,
+            'cell': self.cell.get_config() if self.cell is not None else None,
+            'states': [(h.tolist(), c.tolist()) for h, c in self.states] if self.states is not None else None,
+            'h_t': self.h_t.tolist() if self.h_t is not None else None,
+            'c_t': self.c_t.tolist() if self.c_t is not None else None,
+            'inputs': self.inputs.tolist() if self.inputs is not None else None
         }
 
     @staticmethod
@@ -1449,7 +1450,12 @@ def from_config(config):
             config['units'],
             config['return_sequences'],
             config['return_state'],
-            config['random_state']
+            config['random_state'],
+            cell=LSTMCell.from_config(config['cell']) if config['cell'] is not None else None,
+            states=[(np.array(h), np.array(c)) for h, c in config['states']] if config['states'] is not None else None,
+            h_t=np.array(config['h_t']) if config['h_t'] is not None else None,
+            c_t=np.array(config['c_t']) if config['c_t'] is not None else None,
+            inputs=np.array(config['inputs']) if config['inputs'] is not None else None,
         )
 
 
@@ -1466,14 +1472,16 @@ def __init__(self, layer):
             layer.return_state,
             layer.random_state
         )
-
+
     def __str__(self):
         return f'Bidirectional(layer={str(self.forward_layer)})'
 
     def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndarray:
-        self.forward_output = self.forward_layer.forward_pass(input_data, training)
+        self.forward_output = self.forward_layer.forward_pass(
+            input_data, training)
         backward_input = input_data[:, ::-1, :]  # reverse the time axis
-        self.backward_output = self.backward_layer.forward_pass(backward_input, training)
+        self.backward_output = self.backward_layer.forward_pass(
+            backward_input, training)
 
         if isinstance(self.forward_output, tuple):
             forward_seq, forward_h, forward_c = self.forward_output
@@ -1493,7 +1501,7 @@ def forward_pass(self, input_data: np.ndarray, training: bool = True) -> np.ndar
 
     def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
         forward_dim = output_error.shape[-1] // 2
-
+
         if len(output_error.shape) == 3:
             forward_error = output_error[:, :, :forward_dim]
             backward_error = output_error[:, :, forward_dim:]
@@ -1507,7 +1515,7 @@ def backward_pass(self, output_error: np.ndarray) -> np.ndarray:
 
         if len(output_error.shape) == 3:
             backward_dx = backward_dx[:, ::-1, :]
-
+
         return forward_dx + backward_dx
 
     def get_config(self):
@@ -1530,7 +1538,7 @@ def __init__(self, layer):
         if not isinstance(layer, LSTM):
             raise ValueError("Unidirectional layer only supports LSTM layers")
         self.layer = layer
-
+
     def __str__(self):
         return f'Unidirectional(layer={str(self.layer)})'
 
@@ -1553,168 +1561,160 @@ class Attention(Layer):
-    def __init__(self, use_scale=True, score_type='dot', random_state=None):
+    def __init__(self, use_scale=False, score_mode="dot", dropout=0.0, seed=None, **kwargs):
         super().__init__()
         self.use_scale = use_scale
-        self.score_type = score_type
-        self.random_state = random_state
-        self.weights = None
-        self.bias = None
-
-        if score_type not in ['dot', 'additive']:
-            raise ValueError("score_type must be either 'dot' or 'additive'")
-
+        self.score_mode = score_mode
+        self.dropout = dropout
+        self.seed = seed
+        self.supports_masking = True
+
+        if score_mode not in ["dot", "concat"]:
+            raise 
ValueError("score_mode must be either 'dot' or 'concat'") + def __str__(self): - return f'Attention(score_type={self.score_type}, use_scale={self.use_scale})' - - def initialize_weights(self, query_dim): - if self.score_type == 'additive': - self.rng = np.random.default_rng( - self.random_state if self.random_state is not None else int(time.time_ns())) - - self.Wq = self.rng.normal(0, 0.1, (query_dim, query_dim)) - self.Wk = self.rng.normal(0, 0.1, (query_dim, query_dim)) - self.v = self.rng.normal(0, 0.1, (query_dim, 1)) - - self.dWq = np.zeros_like(self.Wq) - self.dWk = np.zeros_like(self.Wk) - self.dv = np.zeros_like(self.v) - - def forward_pass(self, inputs, mask=None): - query, key, value = inputs - - self.query = query - self.key = key - self.value = value - - if self.weights is None and self.score_type == 'additive': - self.initialize_weights(query.shape[-1]) - - if self.score_type == 'dot': - scores = np.matmul(query, np.transpose(key, (0, 2, 1))) + return f'Attention(score_mode={self.score_mode}, use_scale={self.use_scale}, dropout={self.dropout})' + + def _compute_attention(self, query, key, value, mask=None, training=None, return_attention_scores=False, use_causal_mask=False): + if self.score_mode == "dot": + scores = np.matmul(query, key.transpose(0, 2, 1)) if self.use_scale: - scores = scores / np.sqrt(key.shape[-1]) + scores /= np.sqrt(query.shape[-1]) else: - q_transformed = np.dot(query, self.Wq) - k_transformed = np.dot(key, self.Wk) - - q_expanded = q_transformed[:, :, np.newaxis, :] - k_expanded = k_transformed[:, np.newaxis, :, :] - - # Compute scores - scores = np.tanh(q_expanded + k_expanded) - scores = np.dot(scores, self.v) - scores = scores.squeeze(-1) - + q_expanded = np.expand_dims(query, axis=2) + k_expanded = np.expand_dims(key, axis=1) + concat = np.concatenate([q_expanded, k_expanded], axis=-1) + scores = np.tanh(concat) + scores = np.sum(scores, axis=-1) + + if use_causal_mask: + seq_len = query.shape[1] + causal_mask = np.triu(np.ones((seq_len, seq_len)), k=1).astype(bool) + scores = np.where(causal_mask, -np.inf, scores) + if mask is not None: scores = np.where(mask, scores, -np.inf) - - self.attention_weights = self._softmax(scores) - - outputs = np.matmul(self.attention_weights, value) - + + attention_weights = self._softmax(scores) + + if self.dropout > 0 and training: + rng = np.random.default_rng(self.seed) + dropout_mask = rng.uniform(size=attention_weights.shape) >= self.dropout + attention_weights *= dropout_mask + attention_weights /= 1 - self.dropout + + # Calcul de la sortie + outputs = np.matmul(attention_weights, value) + + if return_attention_scores: + return outputs, attention_weights return outputs - + + def forward_pass(self, input_data, mask=None, training=True, return_attention_scores=False, use_causal_mask=False): + if isinstance(input_data, (list, tuple)): + if len(input_data) == 3: + query, key, value = input_data + elif len(input_data) == 2: + query, value = input_data + key = value + else: + raise ValueError("Attention layer expects 1, 2, or 3 inputs (query, value, key)") + else: + query = key = value = input_data + + self.query = query + self.key = key + self.value = value + self.input = input_data + + return self._compute_attention( + query, key, value, + mask=mask, + training=training, + return_attention_scores=return_attention_scores, + use_causal_mask=use_causal_mask + ) + def backward_pass(self, output_error): - batch_size = output_error.shape[0] - - d_value = np.matmul(np.transpose(self.attention_weights, (0, 2, 1)), 
output_error) - - d_weights = np.matmul(output_error, np.transpose(self.value, (0, 2, 1))) - - d_scores = self._softmax_derivative(self.attention_weights) * d_weights - - if self.score_type == 'dot': - scaling = 1/np.sqrt(self.key.shape[-1]) if self.use_scale else 1 - d_query = scaling * np.matmul(d_scores, self.key) - d_key = scaling * np.matmul(np.transpose(d_scores, (0, 2, 1)), self.query) + _, attention_weights = self._compute_attention( + self.query, self.key, self.value, return_attention_scores=True + ) + + d_value = np.matmul(attention_weights.transpose(0, 2, 1), output_error) + + d_attention_weights = np.matmul(output_error, self.value.transpose(0, 2, 1)) + + d_scores = d_attention_weights * attention_weights + d_scores -= attention_weights * np.sum(d_attention_weights * attention_weights, axis=-1, keepdims=True) + + if self.score_mode == "dot": + if self.use_scale: + scaling = 1.0 / np.sqrt(self.key.shape[-1]) + else: + scaling = 1.0 + + d_query = np.matmul(d_scores, self.key) * scaling + d_key = np.matmul(d_scores.transpose(0, 2, 1), self.query) * scaling else: - d_scores_expanded = d_scores[..., np.newaxis] - d_tanh = d_scores_expanded * self.v.T - d_tanh = d_tanh * (1 - np.tanh(self.scores) ** 2) - - self.dWq = np.zeros_like(self.Wq) - self.dWk = np.zeros_like(self.Wk) - self.dv = np.zeros_like(self.v) - - for b in range(batch_size): - self.dWq += np.dot(self.query[b].T, np.sum(d_tanh[b], axis=1)) - self.dWk += np.dot(self.key[b].T, np.sum(d_tanh[b], axis=0)) - self.dv += np.sum(np.dot(np.transpose(d_scores[b]), - np.tanh(np.dot(self.query[b], self.Wq) + np.dot(self.key[b], self.Wk)))) - - d_query = np.dot(d_tanh, self.Wq.T) - d_key = np.dot(d_tanh, self.Wk.T) - - return (d_query, d_key, d_value) - - def _softmax(self, x): - exp_x = np.exp(x - np.max(x, axis=-1, keepdims=True)) - return exp_x / np.sum(exp_x, axis=-1, keepdims=True) - - def _softmax_derivative(self, softmax_output): - softmax_output = softmax_output[..., np.newaxis] - return softmax_output * (np.eye(softmax_output.shape[-2]) - np.transpose(softmax_output, (0, 1, 3, 2))) - + raise NotImplementedError("Backward pass for 'concat' score mode is not implemented in this optimization.") + + if not isinstance(self.input, (list, tuple)): + d_input = d_query + d_key + d_value + return d_input + + return [d_query, d_key, d_value] + + @staticmethod + def _softmax(x): + x_max = np.max(x, axis=-1, keepdims=True) + exp_x = np.exp(x - x_max) + sum_exp_x = np.sum(exp_x, axis=-1, keepdims=True) + return exp_x / sum_exp_x + def get_config(self): return { 'name': self.__class__.__name__, 'use_scale': self.use_scale, - 'score_type': self.score_type, - 'random_state': self.random_state, - 'weights': { - 'Wq': self.Wq.tolist() if hasattr(self, 'Wq') else None, - 'Wk': self.Wk.tolist() if hasattr(self, 'Wk') else None, - 'v': self.v.tolist() if hasattr(self, 'v') else None - } if self.score_type == 'additive' else None + 'score_mode': self.score_mode, + 'dropout': self.dropout, + 'seed': self.seed, } - + @staticmethod def from_config(config): - layer = Attention( + return Attention( use_scale=config['use_scale'], - score_type=config['score_type'], - random_state=config['random_state'] + score_mode=config['score_mode'], + dropout=config['dropout'], + seed=config['seed'] ) - - if config['weights'] is not None: - if config['weights']['Wq'] is not None: - layer.Wq = np.array(config['weights']['Wq']) - layer.Wk = np.array(config['weights']['Wk']) - layer.v = np.array(config['weights']['v']) - layer.dWq = np.zeros_like(layer.Wq) - layer.dWk 
= np.zeros_like(layer.Wk) - layer.dv = np.zeros_like(layer.v) - - return layer + # -------------------------------------------------------------------------------------------------------------- compatibility_dict = { - Input: [Dense, Conv2D, Conv1D, Embedding, Permute, TextVectorization, Reshape, LSTM, Bidirectional, Unidirectional], - Dense: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Activation: [Dense, Conv2D, Conv1D, MaxPooling2D, AveragePooling2D, MaxPooling1D, AveragePooling1D, Flatten, - Dropout, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Conv2D: [Conv2D, MaxPooling2D, AveragePooling2D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape], - MaxPooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Permute, Reshape], - AveragePooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Permute, Reshape], - Conv1D: [Conv1D, MaxPooling1D, AveragePooling1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - MaxPooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - AveragePooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Input: [Dense, Conv2D, Conv1D, Embedding, Permute, TextVectorization, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Dense: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Activation: [Dense, Conv2D, Conv1D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, Flatten, Dropout, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Conv2D: [Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape], + MaxPooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, Flatten, Permute, Reshape], + AveragePooling2D: [Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, Flatten, Permute, Reshape], + GlobalAveragePooling2D: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape], + Conv1D: [Conv1D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + MaxPooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + AveragePooling1D: [Conv1D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, Flatten, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + GlobalAveragePooling1D: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape], Flatten: [Dense, Dropout, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Dropout: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Embedding: [Conv1D, Flatten, Dense, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - BatchNormalization: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Permute: [Dense, Conv2D, Conv1D, Activation, - Dropout, Flatten, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], + Dropout: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Embedding: [Conv1D, 
Flatten, GlobalAveragePooling1D, Dense, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + BatchNormalization: [Dense, Conv2D, Conv1D, Activation, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Permute: [Dense, Conv2D, Conv1D, Activation, Dropout, Flatten, GlobalAveragePooling1D, GlobalAveragePooling2D, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], TextVectorization: [Embedding, Dense, Conv1D, Reshape, LSTM, Bidirectional, Unidirectional], - Reshape: [Dense, Conv2D, Conv1D, Activation, Dropout, Flatten, BatchNormalization, Permute, Reshape, - TextVectorization, Embedding, Input, MaxPooling2D, AveragePooling2D, MaxPooling1D, AveragePooling1D, - LSTM, Bidirectional, Unidirectional], - LSTM: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Bidirectional: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional], - Unidirectional: [Dense, Activation, Dropout, BatchNormalization, - Permute, Reshape, LSTM, Bidirectional, Unidirectional] + Reshape: [Dense, Conv2D, Conv1D, Activation, Dropout, Flatten, GlobalAveragePooling1D, GlobalAveragePooling2D, BatchNormalization, Permute, Reshape, TextVectorization, Embedding, Input, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, LSTM, Bidirectional, Unidirectional, Attention], + LSTM: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention], + Bidirectional: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention, GlobalAveragePooling1D], + Unidirectional: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, Attention, GlobalAveragePooling1D], + Attention: [Dense, Activation, Dropout, BatchNormalization, Permute, Reshape, LSTM, Bidirectional, Unidirectional, GlobalAveragePooling1D], } diff --git a/neuralnetlib/model.py b/neuralnetlib/model.py index 81ca867..475c5e3 100644 --- a/neuralnetlib/model.py +++ b/neuralnetlib/model.py @@ -6,7 +6,7 @@ import numpy as np from neuralnetlib.activations import ActivationFunction -from neuralnetlib.layers import Layer, Input, Activation, Dropout, TextVectorization, LSTM, Bidirectional, Embedding, compatibility_dict +from neuralnetlib.layers import compatibility_dict, Layer, Input, Activation, Dropout, TextVectorization, LSTM, Bidirectional, Embedding, Attention from neuralnetlib.losses import LossFunction, CategoricalCrossentropy from neuralnetlib.optimizers import Optimizer from neuralnetlib.preprocessing import PCA @@ -76,6 +76,8 @@ def forward_pass(self, X: np.ndarray, training: bool = True) -> np.ndarray: X = layer.forward_pass(X, training) elif isinstance(layer, TextVectorization): X = layer.forward_pass(X) + elif isinstance(layer, Attention): + X = layer.forward_pass(X) else: X = layer.forward_pass(X) return X From 96f2f5a66e7eb1508f249064ae89436108b94a2f Mon Sep 17 00:00:00 2001 From: GitHub Action <52708150+marcpinet@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:21:10 +0100 Subject: [PATCH 3/5] feat: improve attention --- .../mnist_multiclass.ipynb | 102 ++++++++++-------- neuralnetlib/layers.py | 93 ++++++++-------- 2 files changed, 98 insertions(+), 97 deletions(-) diff --git a/examples/classification-regression/mnist_multiclass.ipynb b/examples/classification-regression/mnist_multiclass.ipynb 
index 7876dd9..9679b1d 100644 --- a/examples/classification-regression/mnist_multiclass.ipynb +++ b/examples/classification-regression/mnist_multiclass.ipynb @@ -18,11 +18,11 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 12, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:17.470315300Z", - "start_time": "2024-09-22T21:23:15.274765600Z" + "end_time": "2024-11-06T20:18:10.124074700Z", + "start_time": "2024-11-06T20:18:10.099037300Z" } }, "outputs": [], @@ -49,11 +49,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 13, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:17.612787400Z", - "start_time": "2024-09-22T21:23:17.472315400Z" + "end_time": "2024-11-06T20:18:10.272828Z", + "start_time": "2024-11-06T20:18:10.103037700Z" } }, "outputs": [], @@ -70,11 +70,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 14, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:17.702612600Z", - "start_time": "2024-09-22T21:23:17.609786900Z" + "end_time": "2024-11-06T20:18:10.367490500Z", + "start_time": "2024-11-06T20:18:10.273827500Z" } }, "outputs": [], @@ -94,11 +94,11 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 15, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:17.718270700Z", - "start_time": "2024-09-22T21:23:17.704611500Z" + "end_time": "2024-11-06T20:18:10.383019Z", + "start_time": "2024-11-06T20:18:10.370001700Z" } }, "outputs": [], @@ -131,11 +131,11 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 16, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:17.763653100Z", - "start_time": "2024-09-22T21:23:17.719270900Z" + "end_time": "2024-11-06T20:18:10.429113700Z", + "start_time": "2024-11-06T20:18:10.384020200Z" } }, "outputs": [ @@ -174,11 +174,11 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 17, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.493706600Z", - "start_time": "2024-09-22T21:23:17.734301400Z" + "end_time": "2024-11-06T20:19:13.012124400Z", + "start_time": "2024-11-06T20:18:10.400054100Z" } }, "outputs": [ @@ -186,17 +186,25 @@ "name": "stdout", "output_type": "stream", "text": [ - "[==============================] 100% Epoch 1/10 - loss: 0.5703 - accuracy_score: 0.8109 - 1.10s\n", - "[==============================] 100% Epoch 2/10 - loss: 0.2287 - accuracy_score: 0.9336 - 1.05s\n", - "[==============================] 100% Epoch 3/10 - loss: 0.1950 - accuracy_score: 0.9437 - 1.13s\n", - "[==============================] 100% Epoch 4/10 - loss: 0.1791 - accuracy_score: 0.9468 - 1.02s\n", - "[==============================] 100% Epoch 5/10 - loss: 0.1600 - accuracy_score: 0.9525 - 1.12s\n", - "[==============================] 100% Epoch 6/10 - loss: 0.1469 - accuracy_score: 0.9567 - 1.01s\n", - "[==============================] 100% Epoch 7/10 - loss: 0.1398 - accuracy_score: 0.9582 - 1.10s\n", - "[==============================] 100% Epoch 8/10 - loss: 0.1337 - accuracy_score: 0.9601 - 1.03s\n", - "[==============================] 100% Epoch 9/10 - loss: 0.1292 - accuracy_score: 0.9620 - 1.12s\n", - "[==============================] 100% Epoch 10/10 - loss: 0.1243 - accuracy_score: 0.9631 - 1.02s\n" + "[==============================] 100% Epoch 1/10 - loss: 1.4752 - accuracy: 0.4954 - 5.55s\n", + "[==============================] 100% Epoch 2/10 - loss: 0.5681 - accuracy: 0.8444 - 6.01s\n", + "[==============================] 100% Epoch 3/10 
- loss: 0.4600 - accuracy: 0.8731 - 6.73s\n", + "[==============================] 100% Epoch 4/10 - loss: 0.3906 - accuracy: 0.8941 - 6.53s\n", + "[==============================] 100% Epoch 5/10 - loss: 0.3485 - accuracy: 0.9059 - 6.23s\n", + "[==============================] 100% Epoch 6/10 - loss: 0.3427 - accuracy: 0.9060 - 6.16s\n", + "[==============================] 100% Epoch 7/10 - loss: 0.3277 - accuracy: 0.9108 - 6.23s\n", + "[==============================] 100% Epoch 8/10 - loss: 0.3007 - accuracy: 0.9177 - 6.15s\n", + "[==============================] 100% Epoch 9/10 - loss: 0.2872 - accuracy: 0.9197 - 6.33s\n", + "[==============================] 100% Epoch 10/10 - loss: 0.2879 - accuracy: 0.9206 - 6.60s\n" ] + }, + { + "data": { + "text/plain": "" + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ @@ -212,11 +220,11 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 18, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.536750900Z", - "start_time": "2024-09-22T21:23:28.490707700Z" + "end_time": "2024-11-06T20:19:13.030541800Z", + "start_time": "2024-11-06T20:19:12.985039Z" } }, "outputs": [ @@ -224,7 +232,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Test loss: 0.16901475773235153\n" + "Test loss: 0.3128473793440952\n" ] } ], @@ -242,11 +250,11 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 19, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.568699500Z", - "start_time": "2024-09-22T21:23:28.537750700Z" + "end_time": "2024-11-06T20:19:13.075362600Z", + "start_time": "2024-11-06T20:19:13.032540100Z" } }, "outputs": [], @@ -263,11 +271,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 20, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.582991Z", - "start_time": "2024-09-22T21:23:28.567699400Z" + "end_time": "2024-11-06T20:19:13.091908700Z", + "start_time": "2024-11-06T20:19:13.076867Z" } }, "outputs": [ @@ -275,9 +283,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "accuracy: 0.9551\n", - "f1_score: 0.9549572674105582\n", - "recall_score 0.9543577978545592\n" + "accuracy: 0.9172\n", + "f1_score: 0.9176016478811294\n", + "recall_score 0.9158401245111591\n" ] } ], @@ -296,11 +304,11 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 21, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.814879800Z", - "start_time": "2024-09-22T21:23:28.583991Z" + "end_time": "2024-11-06T20:19:13.263086800Z", + "start_time": "2024-11-06T20:19:13.092899Z" } }, "outputs": [ @@ -331,11 +339,11 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 22, "metadata": { "ExecuteTime": { - "end_time": "2024-09-22T21:23:28.867661200Z", - "start_time": "2024-09-22T21:23:28.815905Z" + "end_time": "2024-11-06T20:19:13.316170700Z", + "start_time": "2024-11-06T20:19:13.264088200Z" } }, "outputs": [], diff --git a/neuralnetlib/layers.py b/neuralnetlib/layers.py index 674abdd..e112179 100644 --- a/neuralnetlib/layers.py +++ b/neuralnetlib/layers.py @@ -108,8 +108,7 @@ def initialize_weights(self, input_size: int): stddev = np.sqrt(2 / input_size) self.weights = self.rng.normal(0, stddev, (input_size, self.units)) elif self.weights_init == "default": - scale = np.sqrt(1.0 / input_size) - self.weights = self.rng.normal(0, scale, (input_size, self.units)) + self.weights = self.rng.normal(0, 0.01, (input_size, self.units)) elif self.weights_init == "lecun": 
stddev = np.sqrt(1 / input_size) self.weights = self.rng.normal(0, stddev, (input_size, self.units)) @@ -1610,59 +1609,53 @@ def _compute_attention(self, query, key, value, mask=None, training=None, return return outputs, attention_weights return outputs - def forward_pass(self, input_data, mask=None, training=True, return_attention_scores=False, use_causal_mask=False): - if isinstance(input_data, (list, tuple)): - if len(input_data) == 3: - query, key, value = input_data - elif len(input_data) == 2: - query, value = input_data - key = value - else: - raise ValueError("Attention layer expects 1, 2, or 3 inputs (query, value, key)") - else: - query = key = value = input_data +class Attention(Layer): + def __init__(self, use_scale=True, score_mode="dot", **kwargs): + super().__init__() + self.use_scale = use_scale + self.score_mode = score_mode + self.supports_masking = True - self.query = query - self.key = key - self.value = value + def forward_pass(self, input_data: np.ndarray) -> np.ndarray: self.input = input_data - - return self._compute_attention( - query, key, value, - mask=mask, - training=training, - return_attention_scores=return_attention_scores, - use_causal_mask=use_causal_mask - ) - - def backward_pass(self, output_error): - _, attention_weights = self._compute_attention( - self.query, self.key, self.value, return_attention_scores=True - ) - - d_value = np.matmul(attention_weights.transpose(0, 2, 1), output_error) - - d_attention_weights = np.matmul(output_error, self.value.transpose(0, 2, 1)) - - d_scores = d_attention_weights * attention_weights - d_scores -= attention_weights * np.sum(d_attention_weights * attention_weights, axis=-1, keepdims=True) - + + self.query = input_data[:, -1:, :] + self.key = self.value = input_data + if self.score_mode == "dot": + self.scores = np.matmul(self.query, self.key.transpose(0, 2, 1)) if self.use_scale: - scaling = 1.0 / np.sqrt(self.key.shape[-1]) - else: - scaling = 1.0 - - d_query = np.matmul(d_scores, self.key) * scaling - d_key = np.matmul(d_scores.transpose(0, 2, 1), self.query) * scaling - else: - raise NotImplementedError("Backward pass for 'concat' score mode is not implemented in this optimization.") - - if not isinstance(self.input, (list, tuple)): - d_input = d_query + d_key + d_value - return d_input + self.scores /= np.sqrt(self.query.shape[-1]) + + self.attention_weights = self._softmax(self.scores) + + context = np.matmul(self.attention_weights, self.value) + + return context.squeeze(1) - return [d_query, d_key, d_value] + def backward_pass(self, output_error: np.ndarray) -> np.ndarray: + output_error = output_error[:, np.newaxis, :] + + d_value = np.matmul(self.attention_weights.transpose(0, 2, 1), output_error) + + d_attention = np.matmul(output_error, self.value.transpose(0, 2, 1)) + + d_scores = d_attention * self.attention_weights + d_scores -= self.attention_weights * np.sum(d_attention * self.attention_weights, axis=-1, keepdims=True) + + if self.use_scale: + scale = np.sqrt(self.query.shape[-1]) + d_scores /= scale + + d_query = np.matmul(d_scores, self.key) + d_key = np.matmul(d_scores.transpose(0, 2, 1), self.query) + + d_input = np.zeros_like(self.input) + d_input[:, -1:, :] = d_query + d_input += d_key + d_input += d_value + + return d_input @staticmethod def _softmax(x): From ce06d1e68458d1d893247071b1318caf5782a99e Mon Sep 17 00:00:00 2001 From: GitHub Action <52708150+marcpinet@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:50:43 +0100 Subject: [PATCH 4/5] refactor: remove useless instructions --- 
.../mnist_multiclass.ipynb | 88 +++++++++---------- neuralnetlib/model.py | 4 - 2 files changed, 44 insertions(+), 48 deletions(-) diff --git a/examples/classification-regression/mnist_multiclass.ipynb b/examples/classification-regression/mnist_multiclass.ipynb index 9679b1d..f763de3 100644 --- a/examples/classification-regression/mnist_multiclass.ipynb +++ b/examples/classification-regression/mnist_multiclass.ipynb @@ -18,11 +18,11 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 1, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:18:10.124074700Z", - "start_time": "2024-11-06T20:18:10.099037300Z" + "end_time": "2024-11-06T20:32:53.645985800Z", + "start_time": "2024-11-06T20:32:44.756007600Z" } }, "outputs": [], @@ -49,11 +49,11 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 2, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:18:10.272828Z", - "start_time": "2024-11-06T20:18:10.103037700Z" + "end_time": "2024-11-06T20:32:53.800264300Z", + "start_time": "2024-11-06T20:32:53.647493400Z" } }, "outputs": [], @@ -70,11 +70,11 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:18:10.367490500Z", - "start_time": "2024-11-06T20:18:10.273827500Z" + "end_time": "2024-11-06T20:32:53.893648Z", + "start_time": "2024-11-06T20:32:53.802266700Z" } }, "outputs": [], @@ -94,11 +94,11 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 4, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:18:10.383019Z", - "start_time": "2024-11-06T20:18:10.370001700Z" + "end_time": "2024-11-06T20:32:53.909173100Z", + "start_time": "2024-11-06T20:32:53.895647900Z" } }, "outputs": [], @@ -131,11 +131,11 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 5, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:18:10.429113700Z", - "start_time": "2024-11-06T20:18:10.384020200Z" + "end_time": "2024-11-06T20:32:53.954260Z", + "start_time": "2024-11-06T20:32:53.910180200Z" } }, "outputs": [ @@ -174,11 +174,11 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 6, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.012124400Z", - "start_time": "2024-11-06T20:18:10.400054100Z" + "end_time": "2024-11-06T20:33:50.410121200Z", + "start_time": "2024-11-06T20:32:53.925195800Z" } }, "outputs": [ @@ -186,23 +186,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "[==============================] 100% Epoch 1/10 - loss: 1.4752 - accuracy: 0.4954 - 5.55s\n", - "[==============================] 100% Epoch 2/10 - loss: 0.5681 - accuracy: 0.8444 - 6.01s\n", - "[==============================] 100% Epoch 3/10 - loss: 0.4600 - accuracy: 0.8731 - 6.73s\n", - "[==============================] 100% Epoch 4/10 - loss: 0.3906 - accuracy: 0.8941 - 6.53s\n", - "[==============================] 100% Epoch 5/10 - loss: 0.3485 - accuracy: 0.9059 - 6.23s\n", - "[==============================] 100% Epoch 6/10 - loss: 0.3427 - accuracy: 0.9060 - 6.16s\n", - "[==============================] 100% Epoch 7/10 - loss: 0.3277 - accuracy: 0.9108 - 6.23s\n", - "[==============================] 100% Epoch 8/10 - loss: 0.3007 - accuracy: 0.9177 - 6.15s\n", - "[==============================] 100% Epoch 9/10 - loss: 0.2872 - accuracy: 0.9197 - 6.33s\n", - "[==============================] 100% Epoch 10/10 - loss: 0.2879 - accuracy: 0.9206 - 6.60s\n" + "[==============================] 100% Epoch 1/10 
- loss: 1.4752 - accuracy: 0.4954 - 5.43s\n", + "[==============================] 100% Epoch 2/10 - loss: 0.5681 - accuracy: 0.8444 - 5.59s\n", + "[==============================] 100% Epoch 3/10 - loss: 0.4600 - accuracy: 0.8731 - 5.63s\n", + "[==============================] 100% Epoch 4/10 - loss: 0.3906 - accuracy: 0.8941 - 5.64s\n", + "[==============================] 100% Epoch 5/10 - loss: 0.3485 - accuracy: 0.9059 - 5.89s\n", + "[==============================] 100% Epoch 6/10 - loss: 0.3427 - accuracy: 0.9060 - 5.72s\n", + "[==============================] 100% Epoch 7/10 - loss: 0.3277 - accuracy: 0.9108 - 5.65s\n", + "[==============================] 100% Epoch 8/10 - loss: 0.3007 - accuracy: 0.9177 - 5.58s\n", + "[==============================] 100% Epoch 9/10 - loss: 0.2872 - accuracy: 0.9197 - 5.63s\n", + "[==============================] 100% Epoch 10/10 - loss: 0.2879 - accuracy: 0.9206 - 5.64s\n" ] }, { "data": { "text/plain": "" }, - "execution_count": 17, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -220,11 +220,11 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 7, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.030541800Z", - "start_time": "2024-11-06T20:19:12.985039Z" + "end_time": "2024-11-06T20:33:50.454199800Z", + "start_time": "2024-11-06T20:33:50.409122300Z" } }, "outputs": [ @@ -250,11 +250,11 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.075362600Z", - "start_time": "2024-11-06T20:19:13.032540100Z" + "end_time": "2024-11-06T20:33:50.485263300Z", + "start_time": "2024-11-06T20:33:50.455200200Z" } }, "outputs": [], @@ -271,11 +271,11 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 9, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.091908700Z", - "start_time": "2024-11-06T20:19:13.076867Z" + "end_time": "2024-11-06T20:33:50.500716Z", + "start_time": "2024-11-06T20:33:50.486768Z" } }, "outputs": [ @@ -304,11 +304,11 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 10, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.263086800Z", - "start_time": "2024-11-06T20:19:13.092899Z" + "end_time": "2024-11-06T20:33:50.674233200Z", + "start_time": "2024-11-06T20:33:50.501716Z" } }, "outputs": [ @@ -339,11 +339,11 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 11, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:19:13.316170700Z", - "start_time": "2024-11-06T20:19:13.264088200Z" + "end_time": "2024-11-06T20:33:50.739851100Z", + "start_time": "2024-11-06T20:33:50.672234200Z" } }, "outputs": [], diff --git a/neuralnetlib/model.py b/neuralnetlib/model.py index 475c5e3..378460e 100644 --- a/neuralnetlib/model.py +++ b/neuralnetlib/model.py @@ -74,10 +74,6 @@ def forward_pass(self, X: np.ndarray, training: bool = True) -> np.ndarray: for layer in self.layers: if isinstance(layer, (Dropout, LSTM, Bidirectional)): X = layer.forward_pass(X, training) - elif isinstance(layer, TextVectorization): - X = layer.forward_pass(X) - elif isinstance(layer, Attention): - X = layer.forward_pass(X) else: X = layer.forward_pass(X) return X From a785241151018f25d0360cbf59ded2feb5aa1bff Mon Sep 17 00:00:00 2001 From: GitHub Action <52708150+marcpinet@users.noreply.github.com> Date: Wed, 6 Nov 2024 23:34:46 +0100 Subject: [PATCH 5/5] fix: some fixes and improvements --- README.md | 7 +- 
.../mnist_multiclass.ipynb | 72 ++-- .../sentiment_analysis.ipynb | 75 ++-- .../cnn_classification_mnist.ipynb | 388 ++++++++++++++++++ neuralnetlib/layers.py | 133 +++--- neuralnetlib/model.py | 6 +- setup.py | 2 +- 7 files changed, 518 insertions(+), 165 deletions(-) create mode 100644 examples/cnn-classification/cnn_classification_mnist.ipynb diff --git a/README.md b/README.md index 62f4cdf..efa096d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ I intend to improve the neural networks and add more features in the future. ## 📦 Features -- Many layers (input, activation, dense, dropout, conv1d/2d, maxpooling1d/2d, flatten, embedding, batchnormalization, and more) 🧠 +- Many layers (wrappers, dense, dropout, conv1d/2d, pooling1d/2d, flatten, embedding, batchnormalization, lstm, attention and more) 🧠 - Many activation functions (sigmoid, tanh, relu, leaky relu, softmax, linear, elu, selu) 📈 - Many loss functions (mean squared error, mean absolute error, categorical crossentropy, binary crossentropy, huber loss) 📉 - Many optimizers (sgd, momentum, rmsprop, adam) 📊 @@ -32,8 +32,9 @@ pip install neuralnetlib ## 💡 How to use -See [this file](examples/classification-regression/simple_mnist_multiclass.py) for a simple example of how to use the library. -For a more advanced example, see [this file](examples/cnn-classification/simple_cnn_classification_mnist.py). +See [this file](examples/classification-regression/mnist_multiclass.ipynb) for a simple example of how to use the library.
+For a more advanced example, see [this file](examples/cnn-classification/cnn_classification_mnist.ipynb) for using CNN.
+You can also check [this file](examples/classification-regression/sentiment_analysis.ipynb) for text classification using RNN.
More examples in [this folder](examples). diff --git a/examples/classification-regression/mnist_multiclass.ipynb b/examples/classification-regression/mnist_multiclass.ipynb index f763de3..b2ac07c 100644 --- a/examples/classification-regression/mnist_multiclass.ipynb +++ b/examples/classification-regression/mnist_multiclass.ipynb @@ -21,8 +21,8 @@ "execution_count": 1, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:32:53.645985800Z", - "start_time": "2024-11-06T20:32:44.756007600Z" + "end_time": "2024-11-06T21:20:11.860716600Z", + "start_time": "2024-11-06T21:20:03.030565100Z" } }, "outputs": [], @@ -52,8 +52,8 @@ "execution_count": 2, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:32:53.800264300Z", - "start_time": "2024-11-06T20:32:53.647493400Z" + "end_time": "2024-11-06T21:20:12.002523Z", + "start_time": "2024-11-06T21:20:11.862717900Z" } }, "outputs": [], @@ -73,8 +73,8 @@ "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:32:53.893648Z", - "start_time": "2024-11-06T20:32:53.802266700Z" + "end_time": "2024-11-06T21:20:12.091137200Z", + "start_time": "2024-11-06T21:20:11.999925Z" } }, "outputs": [], @@ -97,8 +97,8 @@ "execution_count": 4, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:32:53.909173100Z", - "start_time": "2024-11-06T20:32:53.895647900Z" + "end_time": "2024-11-06T21:20:12.107204400Z", + "start_time": "2024-11-06T21:20:12.092135900Z" } }, "outputs": [], @@ -134,8 +134,8 @@ "execution_count": 5, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:32:53.954260Z", - "start_time": "2024-11-06T20:32:53.910180200Z" + "end_time": "2024-11-06T21:20:12.152371800Z", + "start_time": "2024-11-06T21:20:12.108612300Z" } }, "outputs": [ @@ -177,8 +177,8 @@ "execution_count": 6, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.410121200Z", - "start_time": "2024-11-06T20:32:53.925195800Z" + "end_time": "2024-11-06T21:21:10.172232400Z", + "start_time": "2024-11-06T21:20:12.124120500Z" } }, "outputs": [ @@ -186,16 +186,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "[==============================] 100% Epoch 1/10 - loss: 1.4752 - accuracy: 0.4954 - 5.43s\n", - "[==============================] 100% Epoch 2/10 - loss: 0.5681 - accuracy: 0.8444 - 5.59s\n", - "[==============================] 100% Epoch 3/10 - loss: 0.4600 - accuracy: 0.8731 - 5.63s\n", - "[==============================] 100% Epoch 4/10 - loss: 0.3906 - accuracy: 0.8941 - 5.64s\n", - "[==============================] 100% Epoch 5/10 - loss: 0.3485 - accuracy: 0.9059 - 5.89s\n", - "[==============================] 100% Epoch 6/10 - loss: 0.3427 - accuracy: 0.9060 - 5.72s\n", - "[==============================] 100% Epoch 7/10 - loss: 0.3277 - accuracy: 0.9108 - 5.65s\n", - "[==============================] 100% Epoch 8/10 - loss: 0.3007 - accuracy: 0.9177 - 5.58s\n", - "[==============================] 100% Epoch 9/10 - loss: 0.2872 - accuracy: 0.9197 - 5.63s\n", - "[==============================] 100% Epoch 10/10 - loss: 0.2879 - accuracy: 0.9206 - 5.64s\n" + "[==============================] 100% Epoch 1/10 - loss: 0.5703 - accuracy: 0.8109 - 5.33s\n", + "[==============================] 100% Epoch 2/10 - loss: 0.2287 - accuracy: 0.9336 - 5.37s\n", + "[==============================] 100% Epoch 3/10 - loss: 0.1950 - accuracy: 0.9437 - 5.41s\n", + "[==============================] 100% Epoch 4/10 - loss: 0.1791 - accuracy: 0.9468 - 5.75s\n", + "[==============================] 100% Epoch 5/10 - loss: 0.1600 - accuracy: 
0.9525 - 5.87s\n", + "[==============================] 100% Epoch 6/10 - loss: 0.1469 - accuracy: 0.9567 - 6.02s\n", + "[==============================] 100% Epoch 7/10 - loss: 0.1398 - accuracy: 0.9582 - 6.17s\n", + "[==============================] 100% Epoch 8/10 - loss: 0.1337 - accuracy: 0.9601 - 6.02s\n", + "[==============================] 100% Epoch 9/10 - loss: 0.1292 - accuracy: 0.9620 - 5.99s\n", + "[==============================] 100% Epoch 10/10 - loss: 0.1243 - accuracy: 0.9631 - 6.00s\n" ] }, { @@ -223,8 +223,8 @@ "execution_count": 7, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.454199800Z", - "start_time": "2024-11-06T20:33:50.409122300Z" + "end_time": "2024-11-06T21:21:10.188691300Z", + "start_time": "2024-11-06T21:21:10.145550200Z" } }, "outputs": [ @@ -232,7 +232,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Test loss: 0.3128473793440952\n" + "Test loss: 0.16901475773235153\n" ] } ], @@ -253,8 +253,8 @@ "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.485263300Z", - "start_time": "2024-11-06T20:33:50.455200200Z" + "end_time": "2024-11-06T21:21:10.223168Z", + "start_time": "2024-11-06T21:21:10.189691600Z" } }, "outputs": [], @@ -274,8 +274,8 @@ "execution_count": 9, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.500716Z", - "start_time": "2024-11-06T20:33:50.486768Z" + "end_time": "2024-11-06T21:21:10.235337900Z", + "start_time": "2024-11-06T21:21:10.221169700Z" } }, "outputs": [ @@ -283,9 +283,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "accuracy: 0.9172\n", - "f1_score: 0.9176016478811294\n", - "recall_score 0.9158401245111591\n" + "accuracy: 0.9551\n", + "f1_score: 0.9549572674105582\n", + "recall_score 0.9543577978545592\n" ] } ], @@ -307,8 +307,8 @@ "execution_count": 10, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.674233200Z", - "start_time": "2024-11-06T20:33:50.501716Z" + "end_time": "2024-11-06T21:21:10.404184900Z", + "start_time": "2024-11-06T21:21:10.236337600Z" } }, "outputs": [ @@ -342,8 +342,8 @@ "execution_count": 11, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T20:33:50.739851100Z", - "start_time": "2024-11-06T20:33:50.672234200Z" + "end_time": "2024-11-06T21:21:10.456973200Z", + "start_time": "2024-11-06T21:21:10.406688900Z" } }, "outputs": [], diff --git a/examples/classification-regression/sentiment_analysis.ipynb b/examples/classification-regression/sentiment_analysis.ipynb index 63d5cfd..41abc63 100644 --- a/examples/classification-regression/sentiment_analysis.ipynb +++ b/examples/classification-regression/sentiment_analysis.ipynb @@ -18,11 +18,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T18:44:44.255458200Z", - "start_time": "2024-11-06T18:44:32.435539700Z" + "end_time": "2024-11-06T21:51:28.948615200Z", + "start_time": "2024-11-06T21:51:19.721136Z" } }, "outputs": [], @@ -32,8 +32,7 @@ "\n", "from neuralnetlib.model import Model\n", "from neuralnetlib.layers import Input, Dense, Embedding, LSTM, Bidirectional, Attention, GlobalAveragePooling1D\n", - "from neuralnetlib.preprocessing import Tokenizer, pad_sequences, CountVectorizer\n", - "from neuralnetlib.optimizers import Adam\n", + "from neuralnetlib.preprocessing import Tokenizer, pad_sequences\n", "from neuralnetlib.metrics import accuracy_score\n", "from neuralnetlib.utils import train_test_split\n", "\n", @@ -49,11 +48,11 @@ }, { "cell_type": "code", - 
"execution_count": 3, + "execution_count": 2, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T18:44:45.772697800Z", - "start_time": "2024-11-06T18:44:44.256962100Z" + "end_time": "2024-11-06T21:51:30.589179800Z", + "start_time": "2024-11-06T21:51:28.950619500Z" } }, "outputs": [], @@ -70,11 +69,11 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T18:44:46.040708400Z", - "start_time": "2024-11-06T18:44:45.774698100Z" + "end_time": "2024-11-06T21:51:30.871205900Z", + "start_time": "2024-11-06T21:51:30.590182500Z" } }, "outputs": [ @@ -148,11 +147,11 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T18:44:46.054955900Z", - "start_time": "2024-11-06T18:44:46.040708400Z" + "end_time": "2024-11-06T21:51:30.899961500Z", + "start_time": "2024-11-06T21:51:30.871205900Z" } }, "outputs": [], @@ -162,7 +161,6 @@ "model.add(Embedding(max_words, 100, weights_init='xavier'))\n", "model.add(Bidirectional(LSTM(32, return_sequences=True)))\n", "model.add(Attention())\n", - "model.add(GlobalAveragePooling1D())\n", "model.add(Dense(1, activation='sigmoid'))" ] }, @@ -175,11 +173,11 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T18:44:46.100743200Z", - "start_time": "2024-11-06T18:44:46.054955900Z" + "end_time": "2024-11-06T21:51:30.904961800Z", + "start_time": "2024-11-06T21:51:30.886456800Z" } }, "outputs": [ @@ -192,10 +190,9 @@ "Layer 1: Input(input_shape=(200,))\n", "Layer 2: Embedding(input_dim=10000, output_dim=100)\n", "Layer 3: Bidirectional(layer=LSTM(units=32, return_sequences=True, return_state=False, random_state=None))\n", - "Layer 4: Attention(score_mode=dot, use_scale=False, dropout=0.0)\n", - "Layer 5: GlobalAveragePooling1D\n", - "Layer 6: Dense(units=1)\n", - "Layer 7: Activation(Sigmoid)\n", + "Layer 4: Attention(use_scale=True, score_mode=dot)\n", + "Layer 5: Dense(units=1)\n", + "Layer 6: Activation(Sigmoid)\n", "-------------------------------------------------\n", "Loss function: BinaryCrossentropy\n", "Optimizer: Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", @@ -218,11 +215,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 7, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T19:27:56.073804Z", - "start_time": "2024-11-06T19:27:56.052756700Z" + "end_time": "2024-11-06T22:17:05.632380200Z", + "start_time": "2024-11-06T22:17:05.625379900Z" } }, "outputs": [ @@ -231,16 +228,16 @@ "output_type": "stream", "text": [ "\n", - "[==============================] 100% Epoch 1/10 - loss: 0.5315 - accuracy: 0.7552 - 290.73s - val_accuracy: 0.8314\n", - "[==============================] 100% Epoch 2/10 - loss: 0.3029 - accuracy: 0.8838 - 269.72s - val_accuracy: 0.8680\n", - "[==============================] 100% Epoch 3/10 - loss: 0.2369 - accuracy: 0.9095 - 316.64s - val_accuracy: 0.8778\n", - "[==============================] 100% Epoch 4/10 - loss: 0.1979 - accuracy: 0.9251 - 270.75s - val_accuracy: 0.8815\n", - "[==============================] 100% Epoch 5/10 - loss: 0.1687 - accuracy: 0.9382 - 304.63s - val_accuracy: 0.8824\n", - "[==============================] 100% Epoch 6/10 - loss: 0.1447 - accuracy: 0.9503 - 300.43s - val_accuracy: 0.8810\n", - "[==============================] 100% Epoch 7/10 - loss: 0.1240 - accuracy: 0.9594 - 303.27s - val_accuracy: 0.8779\n", - 
"[==============================] 100% Epoch 8/10 - loss: 0.1063 - accuracy: 0.9666 - 303.07s - val_accuracy: 0.8748\n", - "[==============================] 100% Epoch 9/10 - loss: 0.0911 - accuracy: 0.9726 - 303.07s - val_accuracy: 0.8708\n", - "[==============================] 100% Epoch 10/10 - loss: 0.0781 - accuracy: 0.9776 - 303.07s - val_accuracy: 0.8676\n" + "[==============================] 100% Epoch 1/10 - loss: 0.6193 - accuracy: 0.7079 - 248.72s - val_accuracy: 0.8013\n", + "[==============================] 100% Epoch 2/10 - loss: 0.4215 - accuracy: 0.8477 - 264.70s - val_accuracy: 0.8504\n", + "[==============================] 100% Epoch 3/10 - loss: 0.3301 - accuracy: 0.8799 - 266.74s - val_accuracy: 0.8624\n", + "[==============================] 100% Epoch 4/10 - loss: 0.2835 - accuracy: 0.8954 - 255.44s - val_accuracy: 0.8677\n", + "[==============================] 100% Epoch 5/10 - loss: 0.2519 - accuracy: 0.9093 - 239.53s - val_accuracy: 0.8710\n", + "[==============================] 100% Epoch 6/10 - loss: 0.2283 - accuracy: 0.9183 - 239.53s - val_accuracy: 0.8728\n", + "[==============================] 100% Epoch 7/10 - loss: 0.2090 - accuracy: 0.9260 - 239.53s - val_accuracy: 0.8802\n", + "[==============================] 100% Epoch 8/10 - loss: 0.1926 - accuracy: 0.9320 - 239.53s - val_accuracy: 0.8884\n", + "[==============================] 100% Epoch 9/10 - loss: 0.1784 - accuracy: 0.9376 - 239.53s - val_accuracy: 0.8902\n", + "[==============================] 100% Epoch 10/10 - loss: 0.1660 - accuracy: 0.9423 - 239.53s - val_accuracy: 0.9000\n" ] } ], @@ -260,8 +257,8 @@ "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2024-11-06T19:27:20.060588Z", - "start_time": "2024-11-06T19:27:03.414934400Z" + "end_time": "2024-11-06T22:17:25.754433600Z", + "start_time": "2024-11-06T22:17:14.398517800Z" } }, "outputs": [ @@ -269,8 +266,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Loss: 2.566060102842103\n", - "Accuracy: 0.8926\n" + "Loss: 1.4010948021794365\n", + "Accuracy: 0.881\n" ] } ], diff --git a/examples/cnn-classification/cnn_classification_mnist.ipynb b/examples/cnn-classification/cnn_classification_mnist.ipynb new file mode 100644 index 0000000..5f31580 --- /dev/null +++ b/examples/cnn-classification/cnn_classification_mnist.ipynb @@ -0,0 +1,388 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Simple MNIST multiclass classification (using CNN)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:32:07.913450400Z", + "start_time": "2024-09-22T21:32:05.718419200Z" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from tensorflow.keras.datasets import mnist\n", + "\n", + "from neuralnetlib.activations import ReLU, Softmax\n", + "from neuralnetlib.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Activation, AveragePooling2D\n", + "from neuralnetlib.losses import CategoricalCrossentropy\n", + "from neuralnetlib.model import Model\n", + "from neuralnetlib.optimizers import Adam\n", + "from neuralnetlib.preprocessing import one_hot_encode\n", + "from neuralnetlib.metrics import accuracy_score, f1_score, recall_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Loading a dataset (in this case, MNIST)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:32:08.056161400Z", + "start_time": "2024-09-22T21:32:07.915452400Z" + } + }, + "outputs": [], + "source": [ + "(x_train, y_train), (x_test, y_test) = mnist.load_data()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Preprocessing" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:32:08.147899800Z", + "start_time": "2024-09-22T21:32:08.053650300Z" + } + }, + "outputs": [], + "source": [ + "x_train = x_train.reshape(-1, 1, 28, 28) / 255.0 # Normalization and reshaping of the images for CNN\n", + "x_test = x_test.reshape(-1, 1, 28, 28) / 255.0 # Normalization and reshaping of the images for CNN\n", + "y_train = one_hot_encode(y_train, num_classes=10) # One-hot encoding of the labels\n", + "y_test = one_hot_encode(y_test, num_classes=10) # One-hot encoding of the labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Model definition" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:32:08.163408200Z", + "start_time": "2024-09-22T21:32:08.147899800Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": "\"\\n Side note: if you set the following:\\n \\n - filters to 8 and 16 (in this order)\\n - padding of the Conv2D layers to 'same'\\n - weights initialization to 'he'\\n \\n you'll get an accuracy of ~0.9975 which is actually pretty cool\\n\"" + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = Model()\n", + "model.add(Input(input_shape=(1, 28, 28)))\n", + "model.add(Conv2D(filters=4, kernel_size=2, random_state=42))\n", + "model.add(Activation(ReLU()))\n", + "model.add(MaxPooling2D(pool_size=2))\n", + "model.add(Conv2D(filters=8, kernel_size=2, random_state=42))\n", + "model.add(Activation(ReLU()))\n", + "model.add(AveragePooling2D(pool_size=2))\n", + "model.add(Flatten())\n", + "model.add(Dense(64, random_state=42))\n", + "model.add(Activation(ReLU()))\n", + "model.add(Dense(10, random_state=42, activation=\"softmax\")) # Yeah, you can also use strings for the activation functions, or directly the class\n", + "\n", + "\"\"\"\n", + " Side note: if you set the following:\n", + " \n", + " - filters to 8 and 16 (in this order)\n", + " - padding of the Conv2D layers to 'same'\n", + " - weights initialization to 'he'\n", + " \n", + " you'll get an accuracy of ~0.9975 which is actually pretty cool\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Model compilation" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:32:08.209470200Z", + "start_time": "2024-09-22T21:32:08.164406800Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model\n", + "-------------------------------------------------\n", + "Layer 1: Input(input_shape=(1, 28, 28))\n", + "Layer 2: Conv2D(num_filters=4, kernel_size=(2, 2), stride=(1, 1), padding=valid)\n", + "Layer 3: Activation(ReLU)\n", + "Layer 4: MaxPooling2D(pool_size=(2, 2), stride=(2, 2), padding=valid)\n", + "Layer 5: Conv2D(num_filters=8, kernel_size=(2, 2), stride=(1, 1), padding=valid)\n", + "Layer 6: Activation(ReLU)\n", + "Layer 7: AveragePooling2D(pool_size=(2, 2), stride=(2, 2), padding=valid)\n", + "Layer 8: Flatten\n", + "Layer 9: Dense(units=64)\n", + "Layer 10: Activation(ReLU)\n", + "Layer 11: Dense(units=10)\n", + "Layer 12: Activation(Softmax)\n", + "-------------------------------------------------\n", + "Loss function: CategoricalCrossentropy\n", + "Optimizer: Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n", + "-------------------------------------------------\n" + ] + } + ], + "source": [ + "model.compile(loss_function=\"cce\", optimizer=\"adam\") # You can also use strings for the loss function and the optimizer\n", + "\n", + "model.summary()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Model training" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:34:58.553485Z", + "start_time": "2024-09-22T21:32:08.179948200Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[==============================] 100% Epoch 1/10 - loss: 0.7200 - accuracy: 0.7635 - 15.83s - val_accuracy: 0.8955\n", + "[==============================] 100% Epoch 2/10 - loss: 0.3133 - accuracy: 0.9008 - 16.39s - val_accuracy: 0.9168\n", + "[==============================] 100% Epoch 3/10 - loss: 0.2532 - accuracy: 0.9204 - 16.10s - val_accuracy: 0.9295\n", + "[==============================] 100% Epoch 4/10 - loss: 0.2167 - accuracy: 0.9334 - 16.04s - val_accuracy: 0.9378\n", + "[==============================] 100% Epoch 5/10 - loss: 0.1920 - accuracy: 0.9416 - 15.89s - val_accuracy: 0.9419\n", + "[==============================] 100% Epoch 6/10 - loss: 0.1732 - accuracy: 0.9475 - 16.53s - val_accuracy: 0.9475\n", + "[==============================] 100% Epoch 7/10 - loss: 0.1574 - accuracy: 0.9524 - 15.98s - val_accuracy: 0.9501\n", + "[==============================] 100% Epoch 8/10 - loss: 0.1439 - accuracy: 0.9568 - 16.32s - val_accuracy: 0.9538\n", + "[==============================] 100% Epoch 9/10 - loss: 0.1328 - accuracy: 0.9597 - 16.38s - val_accuracy: 0.9572\n", + "[==============================] 100% Epoch 10/10 - loss: 0.1232 - accuracy: 0.9629 - 16.56s - val_accuracy: 0.9591\n" + ] + } + ], + "source": [ + "model.fit(x_train, y_train, epochs=10, batch_size=128, metrics=[\n", + " \"accuracy\"], random_state=42, validation_data=(x_test, y_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 
Model evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:34:59.411359900Z", + "start_time": "2024-09-22T21:34:58.555484800Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Test loss: 0.1342540788279363\n" + ] + } + ], + "source": [ + "loss = model.evaluate(x_test, y_test)\n", + "print(f'Test loss: {loss}')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. Model prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:35:00.252551Z", + "start_time": "2024-09-22T21:34:59.410359Z" + } + }, + "outputs": [], + "source": [ + "y_pred = model.predict(x_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. Printing some metrics" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:35:00.267930600Z", + "start_time": "2024-09-22T21:35:00.254057900Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "accuracy: 0.9591\n", + "f1_score: 0.9591824725913856\n", + "recall_score 0.9582045343696823\n" + ] + } + ], + "source": [ + "print(\"accuracy:\", accuracy_score(y_pred, y_test))\n", + "print(\"f1_score:\", f1_score(y_pred, y_test))\n", + "print(\"recall_score\", recall_score(y_pred, y_test))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 9. Plot the first 10 test images, their predicted labels, and the true labels." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "ExecuteTime": { + "end_time": "2024-09-22T21:35:00.500759500Z", + "start_time": "2024-09-22T21:35:00.269931600Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnIAAAMsCAYAAADQ3U+mAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAACBeUlEQVR4nO3deXRUVfb//U8FyBwwQAJEIIQoDgwik4rIJIMEpB0QI6hoiwZl1K+gAi2j2E4MDRK1W4lNAq2IgNgIiBJAnBGhQbABiUCDzCTMAeo8f/CkflRuhVRCJVWXvF9rZS3Ozrmndt0UOTun7rnlMMYYAQAAwHaC/J0AAAAAiodCDgAAwKYo5AAAAGyKQg4AAMCmKOQAAABsikIOAADApijkAAAAbIpCDgAAwKYo5AAAAGzKdoVcnTp19Mgjj7jamZmZcjgcyszM9FtO+eXPEZembdu2atu2rb/TAIACMTeVPY888ojq1Knj7zSKVsilpaXJ4XC4vkJDQ1WvXj0NGDBAe/fuLakcS8SiRYs0evRof6dhMXr0aLdznP9r9erVlzT+pk2bXD+7I0eOFHucCRMmaP78+ZeUS2nI/5rN/5WRkeHvFAFcIuamkrd582YNGzZMjRs3VlRUlGrUqKGuXbvqxx9/9Mn4R44cUWhoqBwOhzZt2lTscaZPn660tDSf5FSSDh48qNdee02tW7dWTEyMrrjiCt1888364IMPijxW+eIkMHbsWCUkJOjUqVP66quvlJqaqkWLFmnDhg0KDw8vzpDF1rp1a508eVLBwcFFOm7RokV68803A+4/zD333KOrrrrKEh8+fLiOHTum5s2bX9L46enpql69ug4fPqyPPvpIffv2LdY4EyZMUI8ePXTXXXddUj4lrXXr1po5c6YlPmnSJK1bt0633367H7ICUBKYm0rOP/7xD7377ru699579dRTTyk7O1tvv/22br75Zi1evFgdOnS4pPHnzJkjh8Oh6tWrKyMjQ+PHjy/WONOnT1fVqlUDfuXxm2++0YgRI5SUlKSRI0eqfPnymjt3rpKTk/XLL79ozJgxXo9VrEKuS5cuatasmSSpb9++qlKliiZOnKgFCxbogQce8HjM8ePHFRERUZyHu6igoCCFhob6fFx/adSokRo1auQW27lzp3bt2qW+ffsW+ZfChYwxmjVrlnr16qXt27crIyOj2IWcXdStW1d169Z1i508eVJPPfWU2rdvr+rVq/spMwC+xtxUch544AGNHj1akZGRrtif//xnXXfddRo9evQlF3Lp6elKSkpSfHy8Zs2aVexCzi7q16+vLVu2KD4+3hV76qmn1KFDB73yyisaNmyY169Ln1wj1759e0nS9u3bJZ1/3zgyMlLbtm1TUlKSoqKi1Lt3b0mS0+nU5MmTVb9+fYWGhqpatWpKSUnR4cOH3cY0xmj8+PGqWbOmwsPD1a5dO23cuNHy2AVdh/Ddd98pKSlJ0dHRioiIUKNGjTRlyhRXfm+++aYkuS3H5/F1jpK0bds2bdu2zdtT6mb27NkyxrjOYXGtXr1aWVlZSk5OVnJyslauXKldu3ZZ+jmdTk2ZMkUNGzZUaGioYmJidMcdd7iW0B0Oh44fP67333/fde7y/vop6JqBvLeMLzRjxgy1b99esbGxCgkJ0fXXX6/U1FSvnsuOHTu0efPmop2A/9/ChQt19OjRSz6fAAIbc5Pv5qamTZu6FXGSVKVKFd12222X9FaodP73+apVq1xz0/bt2/X111977Juenq4WLVooPDxc0dHRat26tZYuXSrp/DWAGzdu1IoVK1znLu/6ak9zkPT/3pbPyspyxRYsWKCuXbsqLi5OISEhSkxM1Lhx43Tu3LlCn8uePXu0efNmnTlz5qL9EhIS3Io46fzP/K677tLp06f122+/FfpYeYq1Ipdf3ougSpUqrtjZs2fVuXNntWrVSq+//rprWTslJUVpaWl69NFHNWjQIG3fvl3Tpk3T2rVrtXr1alWoUEGS9OKLL2r8+PFKSkpSUlKSfvrpJ3Xq1Em5ubmF5vP555+rW7duqlGjhgYPHqzq1atr06ZN+vTTTzV48GClpKRo9+7d+vzzzz2+7VYSOea9hXfhi8VbGRkZqlWrllq3bl3kY/OPk5iYqObNm6tBgwYKDw/X7NmzNXToULd+jz32mNLS0tSlSxf17dtXZ8+e1apVq/Ttt9+qWbNmmjlzpvr27asWLVroiSeekCQlJiYWOZ/U1FTVr19f3bt3V/ny5bVw4UI99dRTcjqd6t+//0WPffjhh7VixQoZY4r8uBkZGQoLC9M999xT5GMB2AdzU8nOTZL0xx9/qGrVqsU6Ns/s2bMVERGhbt26KSwsTImJicrIyFDLli3d+o0ZM0ajR49Wy5YtNXbsWAUHB+u7777Tl19+qU6dOmny5MkaOHCgIiMjNWLECElStWrVipxPWlqaIiMj9cwzzygyMlJffvmlXnzxReXk5Oi111676LEvvPCC3n//fW3fvr1YGyH++OMPSSraOTVFMGPGDCPJLFu2zOzfv9/s3LnT/Otf/zJVqlQxYWFhZteuXcYYY/r06WMkmeeff97t+FWrVhlJJiMjwy2+ePFit/i+fftMcHCw6dq1q3E6na5+w4cPN5JMnz59XLHly5cbSWb58uXGGGPOnj1rEhISTHx8vDl8+LDb41w4Vv/+/Y2np18SORpjTHx8vImPj7c8XmE2bNhgJJlhw4YV+dgL5ebmmipVqpgRI0a4Yr169TI33HCDW78vv/zSSDKDBg2yjHHh84yIiLA8R2PO/+w9Pc9Ro0ZZzveJEycs/Tp37mzq1q3rFmvTpo1p06aNJVbEl68xxpiDBw+a4OBg07NnzyIfCyAwMTeV/txkjDErV640DofD/OUvfynW8XkaNmxoevfu7WoPHz7cVK1a1Zw5c8YV27JliwkKCjJ33323OXfunNvxFz7P+vXrW+YLYzzPQcb8v9fO9u3bXTFPc1NKSooJDw83p06dcsU8zXd5r7ELx/PWwYMHTWxsrLntttuKdFyx3lrt0KGDYmJiVKtWLSUnJysyMlLz5s3TlVde6dbvySefdGvPmTNHlSpVUseOHXXgwAHXV96S7fLlyyVJy5YtU25urgYOHOi2FDpkyJBCc1u7dq22b9+uIUOG6IorrnD7nqdl1fxKKsesrKxir8ZJuuS3AT/77DMdPHjQ7TqRBx54QOvWrXNbcp87d64cDodGjRplGcOb81cUYWFhrn9nZ2frwIEDatOmjX777TdlZ2df9NjMzMxircZ99NFHys3N5W1V4DLE3FR6c9O+ffvUq1cvJSQkaNiwYUU+Ps/69ev1n//8xzI3HThwQEuWLHHF5s+fL6fTqRdffFFBQe6lS0nOTUePHtWBAwd022236cSJE4Ve0pOWliZjTJFX45xOp3r37q0jR45o6tSpRTq2WG+tvvnmm6pXr57Kly+vatWq6ZprrrGc2PLly6tmzZpusS1btig7O1uxsbEex923b58k6ffff5ckXX311W7fj4mJUXR0
[... base64 PNG data omitted: the cell's display_data output, a 5x2 grid of MNIST test digits, each titled "Predicted: …, Actual: …" ...]\n"
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "fig = plt.figure(figsize=(10, 10))\n",
+    "for i in range(10):\n",
+    "    ax = fig.add_subplot(5, 2, i + 1, xticks=[], yticks=[])\n",
+    "    ax.imshow(x_test[i].reshape(28, 28), cmap='gray')\n",
+    "    ax.set_title(f\"Predicted: {np.argmax(y_pred[i])}, Actual: {np.argmax(y_test[i])}\")\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 10. Save the model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-09-22T21:35:00.609076100Z",
+     "start_time": "2024-09-22T21:35:00.501759300Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "model.save(\"my_mnist_model.npz\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
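The notebook's last step serializes the trained network to a single `.npz` archive. As a quick round-trip check, the saved file can be restored and queried again — a minimal sketch, assuming the library exposes `Model.load` as the counterpart to `save` and a `predict` method (neither appears in this patch, so treat both calls as assumptions):

```python
import numpy as np

from neuralnetlib.model import Model

# Assumption: Model.load is the counterpart to the model.save(...) call above;
# neither it nor predict() is touched by this patch.
restored = Model.load("my_mnist_model.npz")

# x_test as prepared earlier in the notebook: flattened 28x28 grayscale digits.
y_pred = restored.predict(x_test[:10])
print(np.argmax(y_pred, axis=-1))  # one predicted digit per sample
```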
"dot": + scores[i] = np.dot(input_data[i], input_data[i].T) + if self.use_scale: + scores[i] *= 1.0 / np.sqrt(features) - if self.score_mode == "dot": - self.scores = np.matmul(self.query, self.key.transpose(0, 2, 1)) - if self.use_scale: - self.scores /= np.sqrt(self.query.shape[-1]) + attention_weights = np.zeros_like(scores) + for i in range(batch_size): + attention_weights[i] = self._softmax(scores[i]) - self.attention_weights = self._softmax(self.scores) + self.cache['input'] = input_data + self.cache['attention_weights'] = attention_weights - context = np.matmul(self.attention_weights, self.value) + context = np.zeros_like(input_data) + for i in range(batch_size): + context[i] = np.dot(attention_weights[i], input_data[i]) - return context.squeeze(1) + if not self.return_sequences: + return np.mean(context, axis=1) + return context def backward_pass(self, output_error: np.ndarray) -> np.ndarray: - output_error = output_error[:, np.newaxis, :] - - d_value = np.matmul(self.attention_weights.transpose(0, 2, 1), output_error) + input_data = self.cache['input'] + attention_weights = self.cache['attention_weights'] + batch_size, seq_length, features = self.cache['input_shape'] - d_attention = np.matmul(output_error, self.value.transpose(0, 2, 1)) + if not self.return_sequences: + output_error = np.expand_dims(output_error, 1) / seq_length + output_error = np.repeat(output_error, seq_length, axis=1) - d_scores = d_attention * self.attention_weights - d_scores -= self.attention_weights * np.sum(d_attention * self.attention_weights, axis=-1, keepdims=True) + d_input = np.zeros_like(input_data) - if self.use_scale: - scale = np.sqrt(self.query.shape[-1]) - d_scores /= scale + for i in range(batch_size): + d_context = output_error[i] + d_weights = np.dot(d_context, input_data[i].T) + d_scores = d_weights * attention_weights[i] + d_scores -= attention_weights[i] * np.sum(d_weights * attention_weights[i], axis=-1, keepdims=True) - d_query = np.matmul(d_scores, self.key) - d_key = np.matmul(d_scores.transpose(0, 2, 1), self.query) - - d_input = np.zeros_like(self.input) - d_input[:, -1:, :] = d_query - d_input += d_key - d_input += d_value + if self.use_scale: + d_scores *= 1.0 / np.sqrt(features) + + d_input[i] = np.dot(attention_weights[i].T, d_context) + + if self.score_mode == "dot": + d_input[i] += np.dot(d_scores + d_scores.T, input_data[i]) + self.cache.clear() return d_input @staticmethod def _softmax(x): x_max = np.max(x, axis=-1, keepdims=True) exp_x = np.exp(x - x_max) - sum_exp_x = np.sum(exp_x, axis=-1, keepdims=True) - return exp_x / sum_exp_x + return exp_x / np.sum(exp_x, axis=-1, keepdims=True) def get_config(self): return { 'name': self.__class__.__name__, 'use_scale': self.use_scale, 'score_mode': self.score_mode, - 'dropout': self.dropout, - 'seed': self.seed, + 'return_sequences': self.return_sequences } @staticmethod @@ -1678,12 +1645,10 @@ def from_config(config): return Attention( use_scale=config['use_scale'], score_mode=config['score_mode'], - dropout=config['dropout'], - seed=config['seed'] + return_sequences=config.get('return_sequences', False) ) - # -------------------------------------------------------------------------------------------------------------- diff --git a/neuralnetlib/model.py b/neuralnetlib/model.py index 378460e..0bb9870 100644 --- a/neuralnetlib/model.py +++ b/neuralnetlib/model.py @@ -6,7 +6,7 @@ import numpy as np from neuralnetlib.activations import ActivationFunction -from neuralnetlib.layers import compatibility_dict, Layer, Input, Activation, 
diff --git a/neuralnetlib/model.py b/neuralnetlib/model.py
index 378460e..0bb9870 100644
--- a/neuralnetlib/model.py
+++ b/neuralnetlib/model.py
@@ -6,7 +6,7 @@
 import numpy as np
 
 from neuralnetlib.activations import ActivationFunction
-from neuralnetlib.layers import compatibility_dict, Layer, Input, Activation, Dropout, TextVectorization, LSTM, Bidirectional, Embedding, Attention
+from neuralnetlib.layers import compatibility_dict, Layer, Input, Activation, Dropout, TextVectorization, LSTM, Bidirectional, Embedding, Attention, Dense
 from neuralnetlib.losses import LossFunction, CategoricalCrossentropy
 from neuralnetlib.optimizers import Optimizer
 from neuralnetlib.preprocessing import PCA
@@ -46,6 +46,8 @@ def add(self, layer: Layer):
             if type(layer) not in compatibility_dict[type(previous_layer)]:
                 raise ValueError(
                     f"{type(layer).__name__} layer cannot follow {type(previous_layer).__name__} layer.")
+        if isinstance(previous_layer, Attention) and isinstance(layer, Dense):
+            previous_layer.return_sequences = False
 
         self.layers.append(layer)
 
@@ -95,7 +97,7 @@ def backward_pass(self, error: np.ndarray):
             self.optimizer.update(
                 len(self.layers) - 1 - i, layer.weights, layer.d_weights)
-            if isinstance(layer, LSTM):
+            elif isinstance(layer, LSTM):
                 self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wf, layer.cell.dWf, layer.cell.bf, layer.cell.dbf)
                 self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wi, layer.cell.dWi, layer.cell.bi, layer.cell.dbi)
                 self.optimizer.update(len(self.layers) - 1 - i, layer.cell.Wc, layer.cell.dWc, layer.cell.bc, layer.cell.dbc)
diff --git a/setup.py b/setup.py
index d41305c..149d812 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 setup(
     name='neuralnetlib',
-    version='2.9.0',
+    version='3.0.0',
     author='Marc Pinet',
     description='A simple convolutional neural network library with only numpy as dependency',
     long_description=open('README.md', encoding="utf-8").read(),
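With these pieces in place, the intended usage is a recurrent encoder whose full sequence output is pooled by attention before the classifier head; `Model.add` flips the preceding `Attention` layer to `return_sequences=False` the moment a `Dense` follows it, so the head receives one vector per sample. A minimal sketch, assuming `compatibility_dict` (not shown in this patch) permits `Attention` after `LSTM`:

```python
from neuralnetlib.model import Model
from neuralnetlib.layers import Input, Embedding, LSTM, Attention, Dense

model = Model()
model.add(Input(200))                       # sequence length, as in the sentiment example
model.add(Embedding(10000, 64))             # vocabulary size, embedding dimension
model.add(LSTM(32, return_sequences=True))  # keep all timesteps for attention
model.add(Attention())                      # defaults: use_scale=True, score_mode="dot"
model.add(Dense(1, activation='sigmoid'))   # add() sets the Attention layer's
                                            # return_sequences to False at this point

model.compile(optimizer='adam', loss_function='binary_crossentropy')
```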