Commit
Merge branch 'main' of github.com:ai2es/miles-guess into djgagne
djgagne committed Aug 26, 2024
2 parents dc4619f + ea65f40 commit 70b7513
Showing 12 changed files with 1,259 additions and 1,065 deletions.
8 changes: 6 additions & 2 deletions applications/train_classifier_ptype.py
@@ -83,7 +83,7 @@ def trainer(conf, evaluate=True, data_split=0, mc_forward_passes=0):
output_features = conf["output_features"]
metric = conf["metric"]
# flag for using the evidential model
if conf["model"]["loss"] == "dirichlet":
if conf["model"]["loss"] == "evidential":
use_uncertainty = True
else:
use_uncertainty = False
@@ -177,6 +177,10 @@ def trainer(conf, evaluate=True, data_split=0, mc_forward_passes=0):
x = scaled_data[f"{name}_x"]
if use_uncertainty:
pred_probs, u, ale, epi = mlp.predict(x, return_uncertainties=True)
+ pred_probs = pred_probs.numpy()
+ u = u.numpy()
+ ale = ale.numpy()
+ epi = epi.numpy()
entropy = np.zeros(pred_probs.shape)
mutual_info = np.zeros(pred_probs.shape)
elif mc_forward_passes > 0: # Compute epistemic uncertainty with MC dropout
@@ -185,7 +189,7 @@ def trainer(conf, evaluate=True, data_split=0, mc_forward_passes=0):
x, mc_forward_passes=mc_forward_passes)
u = np.zeros(pred_probs.shape)
else:
- pred_probs = mlp.predict(x)
+ pred_probs = mlp.predict(x, return_uncertainties=False)
ale = np.zeros(pred_probs.shape)
u = np.zeros(pred_probs.shape)
epi = np.zeros(pred_probs.shape)
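Note: the evidential branch now depends on `predict(x, return_uncertainties=True)` returning backend tensors, hence the `.numpy()` conversions added above. A minimal sketch of the calling pattern this diff establishes (the `mlp` model object and `use_uncertainty` flag are taken from the surrounding trainer code; nothing else is confirmed by the diff):

```python
import numpy as np

def predict_with_uncertainty(mlp, x, use_uncertainty):
    """Sketch of the trainer's branch on the evidential flag."""
    if use_uncertainty:
        # Evidential model: predict returns Keras tensors, so convert to NumPy
        pred_probs, u, ale, epi = mlp.predict(x, return_uncertainties=True)
        pred_probs, u = pred_probs.numpy(), u.numpy()
        ale, epi = ale.numpy(), epi.numpy()
    else:
        # Deterministic model: probabilities only, zero-filled uncertainties
        pred_probs = mlp.predict(x, return_uncertainties=False)
        u = np.zeros(pred_probs.shape)
        ale = np.zeros(pred_probs.shape)
        epi = np.zeros(pred_probs.shape)
    return pred_probs, u, ale, epi
```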
31 changes: 15 additions & 16 deletions config/ptype/evidential.yml
@@ -135,7 +135,7 @@ case_studies:
- '2021-02-17'
- '2021-02-18'
- '2021-02-19'
- data_path: /glade/p/cisl/aiml/ai2es/winter_ptypes/ptype_qc/mPING_interpolated_QC2.parquet
+ data_path: /glade/campaign/cisl/aiml/ai2es/winter_ptypes/ptype_qc/mPING_hourafter_interpolated_QC3.parquet
direction: max
ensemble:
mc_steps: 0
@@ -147,24 +147,23 @@ input_features:
- VGRD_m/s
metric: val_ave_acc
model:
- activation: leaky
- annealing_coeff: 34.593686950910275
- balanced_classes: 1
- batch_size: 100
- dropout_alpha: 0.20146936081973893
- epochs: 1000
- hidden_layers: 2
- hidden_neurons: 6461
+ activation: leaky_relu
+ annealing_coeff: 34
+ batch_size: 1130
+ dropout_alpha: 0.11676011477923032
+ epochs: 100
+ evidential: true
+ n_inputs: 84
+ hidden_layers: 4
+ hidden_neurons: 212
l2_weight: 0.000881889591229087
loss: evidential
loss_weights:
- 58.64242174310205
- 94.59680461256323
- 124.5896569779261
- 227.38800030539545
- lr: 0.0027750619126744817
+ lr: 0.004800502096767794
n_classes: 4
optimizer: adam
output_activation: linear
use_dropout: 1
- verbose: 0
+ verbose: 1
mping_path: /glade/p/cisl/aiml/ai2es/winter_ptypes/precip_rap/mPING_mixture/
output_features:
- ra_percent
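Note: the renamed loss key is what flips the trainer into the uncertainty path (see the first file above). A minimal sketch of loading this config with standard PyYAML (only keys shown in this diff are assumed):

```python
import yaml

# Load the evidential p-type config updated in this commit
with open("config/ptype/evidential.yml") as f:
    conf = yaml.safe_load(f)

# The trainer branches on the loss name, now "evidential" rather than "dirichlet"
use_uncertainty = conf["model"]["loss"] == "evidential"
print(use_uncertainty, conf["model"]["hidden_layers"], conf["model"]["lr"])
```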
2 changes: 1 addition & 1 deletion config/surface_layer/mlp.yml
@@ -42,7 +42,7 @@ model:
hidden_neurons: 500
activation: "relu"
optimizer: "adam"
metrics: "mae"
# metrics: "mae"
lr: 0.0004727390951751
kernel_reg: 'l2'
l1_weight: 0.0
9 changes: 5 additions & 4 deletions environment_gpu.yml
@@ -3,8 +3,8 @@ channels:
- conda-forge
- nvidia
dependencies:
- - python=3.11
- - numpy=1.23.*
+ - python=3.10
+ - numpy<1.24
- scipy
- matplotlib
- xarray
@@ -22,11 +22,12 @@ dependencies:
- seaborn
- sphinx
- numba
- - imbalanced-learn
- - pyarrow
- properscoring
+ - pyarrow
+ - imbalanced-learn
- pip:
- tensorflow[and-cuda]
+ - keras
- echo-opt
- hagelslag
- bridgescaler
19 changes: 13 additions & 6 deletions mlguess/keras/callbacks.py
@@ -13,9 +13,12 @@
import os
import keras
import numpy as np
+ from hagelslag.evaluation.ProbabilityMetrics import DistributedROC
+ from sklearn.metrics import precision_recall_fscore_support, roc_auc_score

+ logger = logging.getLogger(__name__)


def get_callbacks(config: Dict[str, str], path_extend=False) -> List[Callback]:
callbacks = []

@@ -72,13 +75,17 @@ def on_epoch_end(self, epoch: int, logs: Dict[str, float] = None) -> None:
logs = logs or {}
logs["lr"] = K.get_value(self.model.optimizer.lr)


+ @keras.saving.register_keras_serializable()
class ReportEpoch(keras.callbacks.Callback):
def __init__(self, epoch_var):
self.epoch_var = epoch_var

+ def on_epoch_begin(self, epoch, logs=None):
+ self.epoch_var.assign_add(1)
- def on_epoch_end(self, epoch, logs={}):
- self.epoch_var += 1

+ def get_config(self):
+ return {}


class MetricsCallback(keras.callbacks.Callback):
@@ -94,10 +101,11 @@ def __init__(self, x, y, name="val", n_bins=10, use_uncertainty=False, **kwargs):
self.bin_uppers = bin_boundaries[1:]

def on_epoch_end(self, epoch, logs={}):
- pred_probs = np.asarray(self.model.predict(self.x))
if self.use_uncertainty:
- pred_probs, _, _, _ = calc_prob_uncertainty(pred_probs)
+ pred_probs, _, _, _ = self.model.predict(self.x, return_uncertainties=True)
+ pred_probs = pred_probs.numpy()
else:
+ pred_probs = np.asarray(self.model.predict(self.x, return_uncertainties=False))
logs[f"{self.name}_csi"] = self.mean_csi(pred_probs)
true_labels = np.argmax(self.y, 1)
pred_labels = np.argmax(pred_probs, 1)
@@ -187,4 +195,3 @@ def ece(self, true_labels, pred_probs):
pass
mean = np.mean(ece) if np.isfinite(np.mean(ece)) else self.bin_lowers.shape[0]
return mean
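Note: the `ReportEpoch` rewrite follows the Keras 3 serialization pattern: register the class, mutate the tracked variable with `assign_add` (which works on backend variables, unlike the old in-place `+=`), and expose `get_config` so a model saved with this callback attached can be reloaded. Consolidating the diff above into one runnable sketch (assuming `epoch_var` is a backend variable such as one created with `keras.Variable`):

```python
import keras

@keras.saving.register_keras_serializable()
class ReportEpoch(keras.callbacks.Callback):
    """Track the current epoch in a backend variable, e.g. for annealed losses."""

    def __init__(self, epoch_var):
        super().__init__()
        self.epoch_var = epoch_var

    def on_epoch_begin(self, epoch, logs=None):
        # assign_add mutates the backend variable in place
        self.epoch_var.assign_add(1)

    def get_config(self):
        # No extra state to serialize; registration alone enables reloading
        return {}
```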

2 changes: 2 additions & 0 deletions mlguess/keras/layers.py
@@ -1,6 +1,8 @@
import keras
import keras.layers as layers
import keras.ops as ops
+ from keras.src import activations
+ from keras.src.layers.layer import Layer


@keras.saving.register_keras_serializable()
1 change: 1 addition & 0 deletions mlguess/keras/losses.py
@@ -95,6 +95,7 @@ def loss(y, y_pred):

return loss

+ @keras.saving.register_keras_serializable()
def gaussian_nll(y, y_pred, reduce=True):
"""
Loss function for a parametric Gaussian Loss.
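Note: registering the loss matters at load time, since `keras.saving.load_model` can only rebuild a compiled model whose custom loss is registered or passed via `custom_objects`. A hedged round-trip sketch (the toy model, input shape, and file name are placeholders, not part of this commit):

```python
import keras
from mlguess.keras.losses import gaussian_nll

# Toy regression head; assume it emits the parametric outputs gaussian_nll expects
model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(2)])
model.compile(optimizer="adam", loss=gaussian_nll)
model.save("gaussian_nll_demo.keras")

# Because gaussian_nll is registered, no custom_objects dict is needed here
restored = keras.saving.load_model("gaussian_nll_demo.keras")
```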