Regression #16

Merged · 5 commits · Feb 29, 2024
2 changes: 2 additions & 0 deletions mlguess/keras/layers.py
@@ -1,7 +1,9 @@
import keras
import keras.layers as layers
import keras.ops as ops


@keras.saving.register_keras_serializable()
class DenseNormalGamma(layers.Layer):
"""
    Implements a dense output layer for a deep evidential regression model.
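Registering `DenseNormalGamma` with `@keras.saving.register_keras_serializable()` lets a saved model that contains the layer be reloaded without passing `custom_objects`. A minimal sketch, assuming the layer's constructor takes the number of output tasks (the shapes and file name here are illustrative):

```python
import keras
from mlguess.keras.layers import DenseNormalGamma

inputs = keras.Input(shape=(10,))
hidden = keras.layers.Dense(64, activation="relu")(inputs)
outputs = DenseNormalGamma(1)(hidden)  # emits (gamma, v, alpha, beta) for one task
model = keras.Model(inputs, outputs)

model.save("dng_demo.keras")
reloaded = keras.models.load_model("dng_demo.keras")  # works without custom_objects
```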
39 changes: 39 additions & 0 deletions mlguess/keras/losses.py
@@ -160,6 +160,45 @@ def get_config(self):
config.update({"coeff": self.coeff})
return config

@keras.saving.register_keras_serializable()
def EvidentialRegLoss(evi_coef):
    """
    Loss function for an evidential regression model. The total loss is the negative log-likelihood of the
    Normal Inverse Gamma distribution summed with the regression error scaled by the evidential coefficient.
    The coefficient strongly influences the model's uncertainty estimates (less so the predictions
    themselves) and must be tuned for each dataset.
    Loss = loss_nll + evi_coef * loss_reg
    Args:
        evi_coef (float): Evidential coefficient
    """

def nig_nll(y, gamma, v, alpha, beta, reduce=True):
v = ops.maximum(v, keras.backend.epsilon())
twoBlambda = 2 * beta * (1 + v)
nll = (0.5 * ops.log(np.pi / v)
- alpha * ops.log(twoBlambda)
+ (alpha + 0.5) * ops.log(v * (y - gamma) ** 2 + twoBlambda)
+ lgamma(alpha)
- lgamma(alpha + 0.5))

return ops.mean(nll) if reduce else nll

def nig_reg(y, gamma, v, alpha, reduce=True):
error = ops.abs(y - gamma)
evi = 2 * v + alpha
reg = error * evi

        return ops.mean(reg) if reduce else reg

    @keras.saving.register_keras_serializable()
    def loss(y, y_pred):
        gamma, v, alpha, beta = ops.split(y_pred, 4, axis=-1)
loss_nll = nig_nll(y, gamma, v, alpha, beta)
loss_reg = nig_reg(y, gamma, v, alpha)

return loss_nll + evi_coef * loss_reg

return loss

def gaussian_nll(y, y_pred, reduce=True):
"""
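For reference, the quantity computed by `nig_nll` above is the Normal-Inverse-Gamma negative log-likelihood, and `nig_reg` is the error-weighted evidence penalty. In the code's notation (`gamma`, `v`, `alpha`, `beta` as γ, ν, α, β and `evi_coef` as λ):

```latex
\mathcal{L}_{\mathrm{NLL}} = \tfrac{1}{2}\log\frac{\pi}{\nu}
  - \alpha \log\bigl(2\beta(1+\nu)\bigr)
  + \bigl(\alpha + \tfrac{1}{2}\bigr)\log\bigl(\nu\,(y-\gamma)^2 + 2\beta(1+\nu)\bigr)
  + \log\Gamma(\alpha) - \log\Gamma\bigl(\alpha + \tfrac{1}{2}\bigr)

\mathcal{L} = \mathcal{L}_{\mathrm{NLL}} + \lambda \,\lvert y-\gamma \rvert \,(2\nu + \alpha)
```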
32 changes: 16 additions & 16 deletions mlguess/keras/models.py
@@ -5,9 +5,7 @@
import keras.ops as ops
import numpy as np
import pandas as pd
-# import tensorflow as tf
from keras import Input, Model
-# from tensorflow.python.keras import backend as K
from keras.regularizers import L1, L2, L1L2
from keras.layers import Dense, LeakyReLU, GaussianNoise, Dropout
from keras.optimizers import Adam, SGD
@@ -20,7 +18,6 @@
from collections import defaultdict
import logging


class BaseRegressor(object):
"""
A base class for regression models.
@@ -1392,7 +1389,7 @@ def __init__(
self.balanced_classes = balanced_classes
self.steps_per_epoch = steps_per_epoch
self.outputs = 4
-        self.current_epoch = keras.Variable(initializer=0, dtype='float32', trainable=False)
+        self.current_epoch = keras.Variable(initializer=20, dtype='float32', trainable=False)

"""
Create Keras neural network model and compile it.
Expand Down Expand Up @@ -1430,9 +1427,14 @@ def call(self, inputs):
for l in range(1, len(self.model_layers)):
layer_output = self.model_layers[l](layer_output)

self.current_epoch.assign_add(1)
return layer_output

# def fit(self, x, y, epochs):
#
# report_epoch_callback = ReportEpoch()
# self.fit(x, y, epochs=epochs)


def get_config(self):
base_config = super().get_config()
# parameter_config = {hp: getattr(self, hp) for hp in self.hyperparameters}
@@ -1570,18 +1572,18 @@ class EvidentialRegressorDNN_keras3(keras.models.Model):
"""
def __init__(
self,
-        hidden_layers=1,
-        hidden_neurons=4,
+        hidden_layers=2,
+        hidden_neurons=64,
activation="relu",
-        loss="evidentialReg",
+        # loss="evidentialReg",
        coupling_coef=1.0,  # currently alpha is defined in terms of v, so alpha will be coupled in the new loss
evidential_coef=0.05,
output_activation='linear',
optimizer="adam",
loss_weights=None,
use_noise=False,
noise_sd=0.01,
-        lr=0.001,
+        lr=0.00001,
use_dropout=False,
dropout_alpha=0.1,
batch_size=128,
@@ -1598,7 +1600,7 @@ def __init__(
metrics=None,
eps=1e-7,
**kwargs):

+        super().__init__(**kwargs)
self.hidden_layers = hidden_layers
self.hidden_neurons = hidden_neurons
self.activation = activation
@@ -1608,7 +1610,7 @@ def __init__(
self.sgd_momentum = sgd_momentum
self.adam_beta_1 = adam_beta_1
self.adam_beta_2 = adam_beta_2
-        self.loss = loss
+        # self.loss = loss
self.loss_weights = loss_weights
self.lr = lr
self.kernel_reg = kernel_reg
@@ -1623,15 +1625,14 @@ def __init__(
self.verbose = verbose
self.save_path = save_path
self.model_name = model_name
-        self.model = None
+        # self.model = None
self.optimizer_obj = None
self.training_std = None
self.training_var = []
-        self.metrics = metrics
+        # self.metrics = metrics
self.eps = eps
self.ensemble_member_files = []
self.n_output_params = 4
-        super().__init__(**kwargs)

if self.activation == "leaky":
self.activation = LeakyReLU()
@@ -1654,7 +1655,7 @@ def __init__(
if self.use_noise:
self.model_layers.append(GaussianNoise(self.noise_sd, name=f"noise_{h:02d}"))

-        self.model_layers.append(Dense(self.n_output_params, activation=self.output_activation, name="dense_output"))
+        self.model_layers.append(DenseNormalGamma(self.n_output_params, name="dense_output"))

def call(self, inputs):

@@ -1671,7 +1672,6 @@ def get_config(self):
return base_config



# self.coupling_coef = coupling_coef
# self.evidential_coef = evidential_coef
# self.eps = eps
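The swap of the plain `Dense` head for `DenseNormalGamma` means the network now emits the four NIG parameters that `EvidentialRegLoss` unpacks with `ops.split(y_pred, 4, axis=-1)`. A quick sketch of that shape contract (batch size and column count are illustrative):

```python
import numpy as np
import keras.ops as ops

# Stand-in for a model output: batch of 8 rows, 4 NIG parameters each.
y_pred = np.random.random((8, 4)).astype("float32")
gamma, v, alpha, beta = ops.split(y_pred, 4, axis=-1)
print(gamma.shape, v.shape, alpha.shape, beta.shape)  # (8, 1) each
```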
17 changes: 15 additions & 2 deletions mlguess/tests/test_models.py
@@ -9,8 +9,8 @@
from mlguess.keras.models import BaseRegressor as RegressorDNN
from mlguess.keras.models import GaussianRegressorDNN
from mlguess.keras.models import EvidentialRegressorDNN
-from mlguess.keras.models import CategoricalDNN_keras3
-from mlguess.keras.losses import DirichletEvidentialLoss, EvidentialCatLoss
+from mlguess.keras.models import CategoricalDNN_keras3, EvidentialRegressorDNN_keras3
+from mlguess.keras.losses import DirichletEvidentialLoss, EvidentialCatLoss, EvidentialRegLoss
from keras.models import load_model

class TestModels(unittest.TestCase):
@@ -100,5 +100,18 @@ def test_evi_cat(self):
model.save("test_model2.keras")
load_model("test_model2.keras")

    def test_evi_reg(self):
        x_train = np.random.random(size=(10000, 10)).astype('float32')
y_train = np.random.random(size=(10000, 1)).astype('float32')
model = EvidentialRegressorDNN_keras3(hidden_layers=2)
model.compile(loss=EvidentialRegLoss(0.01), optimizer="adam")
model.fit(x_train, y_train)
model.save("test_model3.keras")
load_model("test_model3.keras")


if __name__ == "__main__":
unittest.main()