
Commit

Docstring fixes
djgagne committed Aug 26, 2024
1 parent 13dc506 commit 87b1a76
Showing 24 changed files with 184 additions and 337 deletions.
13 changes: 7 additions & 6 deletions .github/workflows/python-package-conda.yml
@@ -14,24 +14,25 @@ jobs:
- uses: actions/checkout@v2
- uses: mamba-org/setup-micromamba@v1
with:
- environment-file: environment.yml
+ environment-file: environment_torch.yml
activate-environment: test
- shell: bash -l {0}
run: |
pip install --upgrade keras
conda info
conda list
conda config --show-sources
conda config --show
printenv | sort
- - name: Lint with flake8
+ - name: Lint with ruff
shell: bash -l {0}
run: |
- micromamba install flake8
+ micromamba install ruff
# stop the build if there are Python syntax errors or undefined names
- flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+ ruff . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- flake8 . --count --exit-zero --max-complexity=100 --max-line-length=127 --statistics
+ ruff . --count --exit-zero --max-complexity=100 --max-line-length=127 --statistics
+ # Checking documentation errors
+ ruff . --count --select=D --exit-zero --max-complexity=100 --max-line-length=127 --statistics
- name: Test with pytest
shell: bash -l {0}
run: |
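Note: the new `ruff --select=D` pass enforces pydocstyle conventions, which is exactly what the docstring edits in the files below implement: the summary sentence moves onto the same line as the opening quotes (rule D212). A minimal sketch of the before/after pattern, using a hypothetical function for illustration:

```python
# Before: the summary sits on its own line, which trips pydocstyle rule D212.
def scale(x):
    """
    Scale the input array.
    """
    return x / x.max()

# After: the summary starts on the first line of the docstring.
def scale(x):
    """Scale the input array."""
    return x / x.max()
```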
3 changes: 1 addition & 2 deletions mlguess/keras/data.py
@@ -86,8 +86,7 @@ def preprocess_data(
groups=[],
seed=1000,
):
"""
Function to select features and scale data for ML
"""Function to select features and scale data for ML
Args:
data (dictionary of dataframes for training and validation data):
input_features (list): Input features
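The body of `preprocess_data` is collapsed above; as a rough sketch of the select-then-scale pattern its docstring describes (the helper name, split keys, and scaler choice are assumptions for illustration, not this function's actual code):

```python
from sklearn.preprocessing import StandardScaler

def select_and_scale(data, input_features):
    # Fit the scaler on the training split only, then reuse it on the other
    # splits so their statistics never leak into the scaling.
    scaler = StandardScaler()
    scaled = {"train": scaler.fit_transform(data["train"][input_features])}
    for split in ("validation", "test"):
        if split in data:
            scaled[split] = scaler.transform(data[split][input_features])
    return scaled, scaler
```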
14 changes: 6 additions & 8 deletions mlguess/keras/deprecated/losses.py
@@ -15,8 +15,8 @@


class DirichletEvidentialLoss(keras.losses.Loss):
"""
Loss function for an evidential categorical model.
"""Loss function for an evidential categorical model.
Args:
callback (list): List of callbacks.
name (str): reference name
@@ -76,8 +76,7 @@ def __call__(self, y, output, sample_weight=None):
return ops.mean(A + B + C)

class EvidentialRegressionLoss(keras.losses.Loss):
"""
Loss function for an evidential regression model. The total loss is the Negative Log Likelihood of the
"""Loss function for an evidential regression model. The total loss is the Negative Log Likelihood of the
Normal Inverse Gamma summed with the error and scaled by the evidential coefficient. The coefficient has a strong
influence on the uncertainty predictions (less so for the predictions themselves) of the model and must be tuned
for individual datasets.
@@ -120,8 +119,8 @@ def get_config(self):
return config

def gaussian_nll(y, y_pred, reduce=True):
"""
Loss function for a parametric Gaussian Loss.
"""Loss function for a parametric Gaussian Loss.
Args:
y: Training data targets
y_pred: Model predictions
@@ -139,8 +138,7 @@ def gaussian_nll(y, y_pred, reduce=True):

class EvidentialRegressionCoupledLoss(keras.losses.Loss):
def __init__(self, r=1.0, coeff=1.0):
"""
implementation of the loss from meinert and lavin that fixes issues with the original
"""Implementation of the loss from meinert and lavin that fixes issues with the original
evidential loss for regression. The loss couples the virtual evidence values with coefficient r.
In this new loss, the regularizer is unnecessary.
"""
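For reference, the parametric Gaussian negative log-likelihood that `gaussian_nll` documents has a standard closed form. A minimal NumPy sketch, assuming the network predicts a mean and standard deviation (how the real function unpacks `y_pred` is hidden in the collapsed hunk):

```python
import numpy as np

def gaussian_nll_sketch(y, mu, sigma, reduce=True):
    # -log N(y | mu, sigma^2) = 0.5 * log(2*pi*sigma^2) + (y - mu)^2 / (2*sigma^2)
    nll = 0.5 * np.log(2.0 * np.pi * sigma ** 2) + (y - mu) ** 2 / (2.0 * sigma ** 2)
    return nll.mean() if reduce else nll
```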
64 changes: 24 additions & 40 deletions mlguess/keras/deprecated/models.py
@@ -19,8 +19,8 @@
import logging

class BaseRegressor(object):
"""
A base class for regression models.
"""A base class for regression models.
Attributes:
hidden_layers: Number of hidden layers
hidden_neurons: Number of neurons in each hidden layer
@@ -97,8 +97,7 @@ def __init__(
self.history = None

def build_neural_network(self, inputs, outputs, last_layer="Dense"):
"""
Create Keras neural network model and compile it.
"""Create Keras neural network model and compile it.
Args:
inputs (int): Number of input predictor variables.
@@ -169,8 +168,7 @@ def build_neural_network(self, inputs, outputs, last_layer="Dense"):
)

def build_from_sequential(self, model, optimizer="adam", loss="mse", metrics=None):
"""
Build the neural network model using a Keras Sequential model.
"""Build the neural network model using a Keras Sequential model.
Args:
model (tf.keras.Sequential): Keras Sequential model to use.
@@ -204,8 +202,8 @@ def fit(
shuffle=True,
**kwargs,
):
"""
Fit the regression model.
"""Fit the regression model.
Args:
x: Input data
y: Target data
@@ -217,7 +215,6 @@
use_multiprocessing: If True, use ProcessPoolExecutor to load data, which is faster but can cause issues with certain GPU setups. If False, use a ThreadPoolExecutor.
**kwargs: Additional arguments to be passed to the `fit` method
"""

if self.model is None:
raise ValueError("Model has not been built. Call build_neural_network first.")
if self.verbose:
@@ -240,8 +237,7 @@
)

def save_model(self):
"""
Save the trained model to a file.
"""Save the trained model to a file.
"""
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
@@ -259,8 +255,7 @@

@classmethod
def load_model(cls, conf):
"""
Load a trained model using args from a configuration
"""Load a trained model using args from a configuration
"""
# Check if weights file exists
weights = os.path.join(conf["model"]["save_path"], "best.h5")
@@ -306,7 +301,7 @@ def load_model(cls, conf):
return model_class

def mae(self, y_true, y_pred):
""" Compute the MAE """
"""Compute the MAE"""
num_splits = y_pred.shape[-1]
if num_splits == 4:
mu, _, _, _ = ops.split(y_pred, num_splits, axis=-1)
@@ -317,7 +312,7 @@ def mae(self, y_true, y_pred):
return keras.metrics.mean_absolute_error(y_true, mu)

def mse(self, y_true, y_pred):
""" Compute the MSE """
"""Compute the MSE"""
num_splits = y_pred.shape[-1]
if num_splits == 4:
mu, _, _, _ = ops.split(y_pred, num_splits, axis=-1)
@@ -329,8 +324,7 @@ def mse(self, y_true, y_pred):
return keras.metrics.mean_squared_error(y_true, mu)

def predict(self, x, scaler=None, batch_size=None):
- """
- Predict target values for input data.
+ """Predict target values for input data.
Args:
x (numpy.ndarray): Input data.
@@ -350,8 +344,7 @@
return y_out

def predict_ensemble(self, x, batch_size=None, scaler=None, num_outputs=1):
- """
- Predicts outcomes using an ensemble of trained Keras models.
+ """Predicts outcomes using an ensemble of trained Keras models.
Args:
x (numpy.ndarray): Input data for predictions.
@@ -429,8 +422,7 @@ def predict_ensemble(self, x, batch_size=None, scaler=None, num_outputs=1):
return ensemble_mu, ensemble_ale, ensemble_epi

def predict_monte_carlo(self, x_test, forward_passes, scaler=None, batch_size=None, num_outputs=1):
- """
- Perform Monte Carlo dropout predictions for the model.
+ """Perform Monte Carlo dropout predictions for the model.
Args:
x_test (numpy.ndarray): Input data for prediction.
@@ -442,7 +434,6 @@
Returns:
tuple: Tuple of arrays containing predicted target values and specified uncertainties.
"""

n_samples = x_test.shape[0]
pred_size = self.model.output_shape[-1]
_batch_size = self.batch_size if batch_size is None else batch_size
@@ -531,8 +522,7 @@ def __init__(


class GaussianRegressorDNN(BaseRegressor):
- """
- A Dense Neural Network Model that can support arbitrary numbers of hidden layers
+ """A Dense Neural Network Model that can support arbitrary numbers of hidden layers
and provides evidential uncertainty estimation.
Inherits from BaseRegressor.
@@ -581,8 +571,7 @@ def __init__(
metrics=None,
eps=1e-7
):
"""
Initialize the EvidentialRegressorDNN.
"""Initialize the EvidentialRegressorDNN.
Args:
coupling_coef: Coupling coefficient for loss fix
@@ -617,8 +606,7 @@ def __init__(
self.loss = gaussian_nll

def build_neural_network(self, inputs, outputs, last_layer="DenseNormal"):
"""
Create Keras neural network model and compile it.
"""Create Keras neural network model and compile it.
Args:
inputs (int): Number of input predictor variables.
@@ -714,8 +702,7 @@


class EvidentialRegressorDNN(BaseRegressor):
- """
- A Dense Neural Network Model that can support arbitrary numbers of hidden layers
+ """A Dense Neural Network Model that can support arbitrary numbers of hidden layers
and provides evidential uncertainty estimation.
Inherits from BaseRegressor.
@@ -765,8 +752,7 @@ def __init__(
metrics=None,
eps=1e-7
):
"""
Initialize the EvidentialRegressorDNN.
"""Initialize the EvidentialRegressorDNN.
Args:
coupling_coef: Coupling coefficient for loss fix
@@ -817,8 +803,7 @@ def __init__(
logging.info(f"Using loss: {loss}")

def build_neural_network(self, inputs, outputs):
"""
Create Keras neural network model and compile it.
"""Create Keras neural network model and compile it.
Args:
inputs (int): Number of input predictor variables.
@@ -932,8 +917,8 @@ def predict_monte_carlo(


class CategoricalDNN(object):
- """
- A Dense Neural Network Model that can support arbitrary numbers of hidden layers.
+ """A Dense Neural Network Model that can support arbitrary numbers of hidden layers.
Attributes:
hidden_layers: Number of hidden layers
hidden_neurons: Number of neurons in each hidden layer
@@ -1022,8 +1007,8 @@ def __init__(
self.steps_per_epoch = steps_per_epoch

def build_neural_network(self, inputs, outputs):
"""
Create Keras neural network model and compile it.
"""Create Keras neural network model and compile it.
Args:
inputs (int): Number of input predictor variables
outputs (int): Number of output predictor variables
@@ -1081,8 +1066,7 @@ def build_neural_network(self, inputs, outputs):
self.model.compile(optimizer=self.optimizer_obj, loss=self.loss)

def build_from_sequential(self, model, optimizer="adam", loss="mse", metrics=None):
"""
Build the neural network model using a Keras Sequential model.
"""Build the neural network model using a Keras Sequential model.
Args:
model (tf.keras.Sequential): Keras Sequential model to use.
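The bodies of `predict_ensemble` and `predict_monte_carlo` are collapsed above; the Monte Carlo variant follows the usual MC-dropout recipe. A hypothetical sketch of the idea (the helper name and summary statistics are assumptions, not the class's actual code):

```python
import numpy as np

def mc_dropout_predict(model, x, forward_passes=50):
    # Calling a Keras model with training=True keeps dropout active at
    # inference, so each forward pass samples a different thinned subnetwork.
    preds = np.stack(
        [np.asarray(model(x, training=True)) for _ in range(forward_passes)]
    )
    mu = preds.mean(axis=0)          # predictive mean over passes
    epistemic = preds.var(axis=0)    # spread across passes ~ epistemic uncertainty
    return mu, epistemic
```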
9 changes: 3 additions & 6 deletions mlguess/keras/layers.py
@@ -7,8 +7,7 @@

@keras.saving.register_keras_serializable()
class DenseNormalGamma(layers.Layer):
"""
Implements dense output layer for a deep evidential regression model.
"""Implements dense output layer for a deep evidential regression model.
Reference: https://www.mit.edu/~amini/pubs/pdf/deep-evidential-regression.pdf
Source: https://github.com/aamini/evidential-deep-learning
@@ -36,8 +35,7 @@ def __init__(self, units: int,
self.eps = eps

def evidence(self, x):
"""
Converts values from continuous space to greater than 0 using a softplus activation function.
"""Converts values from continuous space to greater than 0 using a softplus activation function.
Args:
x: input value
@@ -65,8 +63,7 @@ def get_config(self):


class DenseNormal(layers.Layer):
"""
Dense output layer for a Gaussian distribution regression neural network.
"""Dense output layer for a Gaussian distribution regression neural network.
Args:
units (int): Output size of regression tasks
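For orientation, the Amini et al. source cited in the `DenseNormalGamma` docstring builds the four Normal-Inverse-Gamma parameters by splitting one dense output and applying the softplus evidence transform. A sketch of that convention (mirroring the cited repository, not necessarily this layer's exact code):

```python
from keras import ops

def nig_parameters(raw, eps=1e-7):
    # raw has shape (..., 4 * units): split into gamma (mu), v, alpha, beta.
    mu, logv, logalpha, logbeta = ops.split(raw, 4, axis=-1)
    v = ops.softplus(logv) + eps          # evidence: v > 0
    alpha = ops.softplus(logalpha) + 1.0  # alpha > 1 keeps the predicted variance finite
    beta = ops.softplus(logbeta) + eps    # beta > 0
    return mu, v, alpha, beta
```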
10 changes: 4 additions & 6 deletions mlguess/keras/losses.py
@@ -57,8 +57,7 @@ def loss(y, y_pred):

@keras.saving.register_keras_serializable()
def evidential_reg_loss(evi_coef):
"""
Loss function for an evidential regression model. The total loss is the Negative Log Likelihood of the
"""Loss function for an evidential regression model. The total loss is the Negative Log Likelihood of the
Normal Inverse Gamma summed with the error and scaled by the evidential coefficient. The coefficient has a strong
influence on the uncertainty predictions (less so for the predictions themselves) of the model and must be tuned
for individual datasets.
@@ -97,8 +96,8 @@ def loss(y, y_pred):

@keras.saving.register_keras_serializable()
def gaussian_nll(y, y_pred, reduce=True):
"""
Loss function for a parametric Gaussian Loss.
"""Loss function for a parametric Gaussian Loss.
Args:
y: Training data targets
y_pred: Model predictions
@@ -116,8 +115,7 @@ def gaussian_nll(y, y_pred, reduce=True):

class EvidentialRegressionCoupledLoss(keras.losses.Loss):
def __init__(self, r=1.0, coeff=1.0):
"""
implementation of the loss from meinert and lavin that fixes issues with the original
"""Implementation of the loss from meinert and lavin that fixes issues with the original
evidential loss for regression. The loss couples the virtual evidence values with coefficient r.
In this new loss, the regularizer is unnecessary.
"""
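For reference, the Normal Inverse Gamma negative log-likelihood summarized in the `evidential_reg_loss` docstring is, following the Amini et al. paper cited in layers.py:

```latex
\mathcal{L}^{\mathrm{NLL}}
  = \tfrac{1}{2}\log\tfrac{\pi}{\nu}
  - \alpha\log\Omega
  + \big(\alpha + \tfrac{1}{2}\big)\log\big((y - \gamma)^2\nu + \Omega\big)
  + \log\tfrac{\Gamma(\alpha)}{\Gamma(\alpha + 1/2)},
  \qquad \Omega = 2\beta(1 + \nu)
```

The "summed with the error and scaled by the evidential coefficient" part then adds the regularizer |y - gamma| * (2*nu + alpha), multiplied by `evi_coef`.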
