From 29675cc8829c0f82175177b690116236c84755eb Mon Sep 17 00:00:00 2001
From: John Schreck
Date: Thu, 11 Jul 2024 15:28:28 -0600
Subject: [PATCH] Updated torch model DNN to be more like the Keras version as
 far as config options

---
 mlguess/regression_metrics.py | 13 ++++++----
 mlguess/torch/models.py       | 48 ++++++++++++++++++-----------------
 2 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/mlguess/regression_metrics.py b/mlguess/regression_metrics.py
index 9271497..f100d68 100644
--- a/mlguess/regression_metrics.py
+++ b/mlguess/regression_metrics.py
@@ -44,11 +44,14 @@ def regression_metrics(y_true, y_pred, total=None, split="val"):
     # Add PIT skill-score
     pitd = []
     for i, col in enumerate(range(y_true.shape[1])):
-        pit_score = pit_deviation_skill_score(
-            y_true[:, i],
-            np.stack([y_pred[:, i], total[:, i]], -1),
-            pred_type="gaussian",
-        )
+        try:
+            pit_score = pit_deviation_skill_score(
+                y_true[:, i],
+                np.stack([y_pred[:, i], total[:, i]], -1),
+                pred_type="gaussian",
+            )
+        except ValueError:
+            pit_score = -1
         pitd.append(pit_score)
         metrics[f"{split}_pitd_{col}"] = pit_score

diff --git a/mlguess/torch/models.py b/mlguess/torch/models.py
index 86a379b..78940e0 100644
--- a/mlguess/torch/models.py
+++ b/mlguess/torch/models.py
@@ -77,51 +77,53 @@ def forward(self, x):
         return mu, self.evidence(logv), self.evidence(logalpha) + 1, self.evidence(logbeta)


-class DNN(torch.nn.Module):
-
+class DNN(nn.Module):
     def __init__(self,
                  input_size,
                  output_size,
-                 block_sizes=[1000],
+                 layer_size=[1000],
                  dr=[0.5],
                  batch_norm=True,
                  lng=False,
-                 weight_init=False
-                 ):
+                 weight_init=False,
+                 num_layers=None):

-        input_size = len(input_size)
-        output_size = len(output_size)
+        input_size = len(input_size) if isinstance(input_size, (list, tuple)) else input_size
+        output_size = len(output_size) if isinstance(output_size, (list, tuple)) else output_size
         super(DNN, self).__init__()
         self.lng = lng

-        if len(block_sizes) > 0:
-            blocks = self.block(input_size, block_sizes[0], dr[0], batch_norm)
-            if len(block_sizes) > 1:
-                for i in range(len(block_sizes)-1):
-                    blocks += self.block(block_sizes[i], block_sizes[i+1], dr[i], batch_norm)
+        if num_layers is not None and isinstance(layer_size, (int, float)):
+            layer_size = [layer_size] * num_layers
+            dr = [dr] * num_layers if isinstance(dr, (int, float)) else dr
+
+        if len(layer_size) > 0:
+            blocks = self.block(input_size, layer_size[0], dr[0], batch_norm)
+            if len(layer_size) > 1:
+                for i in range(len(layer_size) - 1):
+                    blocks += self.block(layer_size[i], layer_size[i + 1], dr[i], batch_norm)
             if lng:
-                blocks.append(LinearNormalGamma(block_sizes[-1], output_size))
+                blocks.append(LinearNormalGamma(layer_size[-1], output_size))
             else:
-                blocks.append(SpectralNorm(torch.nn.Linear(block_sizes[-1], output_size)))
+                blocks.append(SpectralNorm(nn.Linear(layer_size[-1], output_size)))
         else:
             if lng:
                 blocks = [LinearNormalGamma(input_size, output_size)]
             else:
-                blocks = [SpectralNorm(torch.nn.Linear(input_size, output_size))]
-        self.fcn = torch.nn.Sequential(*blocks)
+                blocks = [SpectralNorm(nn.Linear(input_size, output_size))]
+
+        self.fcn = nn.Sequential(*blocks)

         if weight_init:
-            self.apply(init_weights)
+            self.apply(self.init_weights)

     def block(self, input_size, output_size, dr, batch_norm):
-        block = [
-            SpectralNorm(torch.nn.Linear(input_size, output_size))
-        ]
+        block = [SpectralNorm(nn.Linear(input_size, output_size))]
         if batch_norm:
-            block.append(torch.nn.BatchNorm1d(output_size))
-            block.append(torch.nn.LeakyReLU())
+            block.append(nn.BatchNorm1d(output_size))
+            block.append(nn.LeakyReLU())
         if dr > 0.0:
-            block.append(torch.nn.Dropout(dr))
+            block.append(nn.Dropout(dr))
         return block

     def forward(self, x):
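
A minimal usage sketch (not part of the patch) of the new Keras-style config
options, assuming the patched class is importable as mlguess.torch.models.DNN;
the layer sizes, dropout rates, and tensor shapes below are illustrative
assumptions, not values taken from the repo.

    import torch
    from mlguess.torch.models import DNN

    # Keras-style config: a scalar layer_size and dr plus num_layers.
    # The constructor expands these to layer_size=[500, 500, 500] and
    # dr=[0.2, 0.2, 0.2] before building the blocks.
    model = DNN(input_size=8, output_size=1,
                layer_size=500, dr=0.2, num_layers=3,
                batch_norm=True, lng=False)

    # Explicit per-layer lists still work when num_layers is left as None.
    model = DNN(input_size=8, output_size=1,
                layer_size=[1000, 500], dr=[0.5, 0.3])

    x = torch.randn(16, 8)   # batch of 16 samples, 8 input features
    y = model(x)             # expected shape (16, 1) with the lng=False head

With lng=True the last block is a LinearNormalGamma head, so the network
outputs the evidential (mu, v, alpha, beta) tuple rather than a single tensor.
The regression_metrics.py change is independent of this: when
pit_deviation_skill_score raises a ValueError, the PIT skill score falls back
to a sentinel value of -1 instead of aborting the evaluation.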