diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index 61bc64c..4eaaa9c 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -16,9 +16,6 @@ jobs:
         with:
           environment-file: environment.yml
           activate-environment: test
-          create-args: >-
-            python=3.10
-            numpy
       - shell: bash -l {0}
         run: |
           pip install --upgrade keras
diff --git a/docs/source/conf.py b/docs/source/conf.py
index b58173f..ff20911 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -9,7 +9,7 @@
 project = 'miles-guess'
 copyright = '2024, MILES Group'
 author = 'MILES Group'
-release = '0.1'
+release = '2024.1.1'
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
diff --git a/docs/source/index.rst b/docs/source/index.rst
index ad0512e..023ce9b 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -9,9 +9,11 @@ Welcome to miles-guess's documentation!
 .. toctree::
    :maxdepth: 2
    :caption: Contents:
-
-   modules.rst
+   install.rst
+   mlguess.rst
+   notebooks/classifier_example.ipynb
+   notebooks/regression_example.ipynb
 
 Indices and tables
 ==================
diff --git a/docs/source/install.rst b/docs/source/install.rst
index 346209f..4e50240 100644
--- a/docs/source/install.rst
+++ b/docs/source/install.rst
@@ -1,6 +1,73 @@
 .. install
 
 Install MILES GUESS
--------------------
+===================
+
+MILES GUESS supports Python 3.8 to 3.11. Support
+for newer versions of Python depends on the choice
+of deep learning backend.
+
+The primary ML library dependency is Keras 3. Install one
+of the Keras 3 backends (tensorflow, pytorch, or jax).
+
+First, set up a base Python environment on
+your system. We highly recommend using miniconda or
+mambaforge to easily install all the dependencies.
+
+To install the stable version of the package:
+
+.. code-block:: bash
+
+    pip install miles-guess
+
+To use the latest development version of the package,
+first clone the repository:
+
+.. code-block:: bash
+
+    git clone git@github.com:ai2es/miles-guess.git
+    cd miles-guess
+
+Next, build the environment for the package.
+
+For CPU-based systems:
+
+.. code-block:: bash
+
+    mamba env create -f environment.yml
+
+For GPU-based systems:
+
+.. code-block:: bash
+
+    mamba env create -f environment_casper.yml
+
+To install miles-guess directly after building
+your environment, run:
+
+.. code-block:: bash
+
+    pip install .
+
+Keras 3 Installation
+--------------------
+MILES GUESS depends on Keras 3 as its primary
+ML backend and through Keras 3 can support
+tensorflow, pytorch, or jax. The current version
+of tensorflow (2.15) will downgrade Keras 3 to
+keras 2.15 upon installation. If you run into
+this issue, reinstall Keras 3 by running the
+following command:
+
+.. code-block:: bash
+
+    pip install --upgrade keras
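A note on the backend choice discussed in install.rst: once the package is installed, Keras 3 selects its backend from the `KERAS_BACKEND` environment variable, which must be set before `keras` is first imported. A minimal sketch (this is standard Keras 3 behavior; the choice of `"torch"` below is only an example):

    import os

    # Must be set before keras is imported; valid values are
    # "tensorflow", "torch", and "jax".
    os.environ["KERAS_BACKEND"] = "torch"

    import keras
    print(keras.backend.backend())  # confirms the active backend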
diff --git a/docs/source/mlguess.rst b/docs/source/mlguess.rst
index 4fa79cd..2a6618a 100644
--- a/docs/source/mlguess.rst
+++ b/docs/source/mlguess.rst
@@ -8,6 +8,7 @@ Subpackages
    :maxdepth: 4
 
    mlguess.keras
+   mlguess.tests
    mlguess.torch
 
 Submodules
diff --git a/docs/source/mlguess.tests.rst b/docs/source/mlguess.tests.rst
new file mode 100644
index 0000000..a5c21cf
--- /dev/null
+++ b/docs/source/mlguess.tests.rst
@@ -0,0 +1,45 @@
+mlguess.tests package
+=====================
+
+Submodules
+----------
+
+mlguess.tests.test\_environment module
+--------------------------------------
+
+.. automodule:: mlguess.tests.test_environment
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mlguess.tests.test\_layers module
+---------------------------------
+
+.. automodule:: mlguess.tests.test_layers
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mlguess.tests.test\_models module
+---------------------------------
+
+.. automodule:: mlguess.tests.test_models
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+mlguess.tests.test\_pit module
+------------------------------
+
+.. automodule:: mlguess.tests.test_pit
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: mlguess.tests
+   :members:
+   :undoc-members:
+   :show-inheritance:
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
deleted file mode 100644
index 3ca959a..0000000
--- a/docs/source/modules.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-mlguess
-=======
-
-.. toctree::
-   :maxdepth: 4
-
-   mlguess
diff --git a/docs/source/notebooks/classifier_example.ipynb b/docs/source/notebooks/classifier_example.ipynb
new file mode 120000
index 0000000..e614503
--- /dev/null
+++ b/docs/source/notebooks/classifier_example.ipynb
@@ -0,0 +1 @@
+../../../notebooks/classifier_example.ipynb
\ No newline at end of file
diff --git a/docs/source/notebooks/regression_example.ipynb b/docs/source/notebooks/regression_example.ipynb
new file mode 120000
index 0000000..08bc774
--- /dev/null
+++ b/docs/source/notebooks/regression_example.ipynb
@@ -0,0 +1 @@
+../../../notebooks/regression_example.ipynb
\ No newline at end of file
diff --git a/environment_casper.yml b/environment_casper.yml
index 64debc4..0563423 100644
--- a/environment_casper.yml
+++ b/environment_casper.yml
@@ -3,7 +3,7 @@ channels:
   - conda-forge
   - nvidia
 dependencies:
-  - python=3.10
+  - python=3.11
  - numpy=1.23.*
   - scipy
   - matplotlib
diff --git a/mlguess/keras/models_deprecated.py b/mlguess/keras/models_deprecated.py
index 4b26c96..0473e45 100644
--- a/mlguess/keras/models_deprecated.py
+++ b/mlguess/keras/models_deprecated.py
@@ -9,7 +9,7 @@ from tensorflow.keras.layers import Dense, LeakyReLU, GaussianNoise, Dropout
 from tensorflow.keras.optimizers import Adam, SGD
 from mlguess.keras.layers import DenseNormalGamma, DenseNormal
-from mlguess.keras.losses import EvidentialRegressionLoss, EvidentialRegressionCoupledLoss, GaussianNLL
+from mlguess.keras.losses import EvidentialRegressionLoss, EvidentialRegressionCoupledLoss, gaussian_nll
 from mlguess.keras.losses import DirichletEvidentialLoss
 from mlguess.keras.callbacks import ReportEpoch
 from imblearn.under_sampling import RandomUnderSampler
@@ -597,7 +597,7 @@ def build_neural_network(self, inputs, outputs):
             inputs (int): Number of input predictor variables
             outputs (int): Number of output predictor variables
         """
-        self.loss = GaussianNLL
+        self.loss = gaussian_nll
         nn_input = Input(shape=(inputs,), name="input")
         nn_model = nn_input
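The import change above tracks the rename in mlguess.keras.losses from the class-based `GaussianNLL` to the function-based `gaussian_nll`. A minimal sketch of compiling against the renamed loss, assuming it follows the usual Keras `(y_true, y_pred)` loss signature (the toy model here is hypothetical):

    import keras
    from mlguess.keras.losses import gaussian_nll

    # Toy head with two outputs (e.g., predicted mean and spread),
    # purely for illustration.
    model = keras.Sequential([
        keras.layers.Dense(64, activation="relu"),
        keras.layers.Dense(2),
    ])
    model.compile(optimizer="adam", loss=gaussian_nll)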
diff --git a/mlguess/torch/regression_losses.py b/mlguess/torch/regression_losses.py
index 067f841..e93f6c0 100644
--- a/mlguess/torch/regression_losses.py
+++ b/mlguess/torch/regression_losses.py
@@ -4,9 +4,9 @@
 import torch
 import torch.nn.functional as F
 
-
 tol = torch.finfo(torch.float32).eps
 
+
 def nig_nll(y, gamma, v, alpha, beta):
     """Implements Normal Inverse Gamma-Negative Log Likelihood for Deep Evidential Regression
 
@@ -16,13 +16,14 @@ def nig_nll(y, gamma, v, alpha, beta):
     """
     two_blambda = 2 * beta * (1 + v) + tol
     nll = 0.5 * torch.log(np.pi / (v + tol)) \
-        - alpha * torch.log(two_blambda + tol) \
-        + (alpha + 0.5) * torch.log(v * (y - gamma) ** 2 + two_blambda + tol) \
-        + torch.lgamma(alpha) \
-        - torch.lgamma(alpha + 0.5)
+          - alpha * torch.log(two_blambda + tol) \
+          + (alpha + 0.5) * torch.log(v * (y - gamma) ** 2 + two_blambda + tol) \
+          + torch.lgamma(alpha) \
+          - torch.lgamma(alpha + 0.5)
 
     return nll
 
+
 def nig_reg(y, gamma, v, alpha):
     """Implements Normal Inverse Gamma Regularizer for Deep Evidential Regression
 
@@ -34,6 +35,7 @@ def nig_reg(y, gamma, v, alpha):
     evi = 2 * v + alpha
     return error * evi
 
+
 def evidential_regression_loss(y, pred, coef=1.0):
     """Implements Evidential Regression Loss for Deep Evidential Regression
 
@@ -67,17 +69,18 @@ def modified_mse(gamma, nu, alpha, beta, target, reduction='mean'):
     Returns:
         [FloatTensor]: The loss value.
     """
-    mse = (gamma-target)**2
+    mse = (gamma - target) ** 2
     c = get_mse_coef(gamma, nu, alpha, beta, target).detach()
-    mod_mse = mse*c
-
-    if reduction == 'mean': 
+    mod_mse = mse * c
+
+    if reduction == 'mean':
         return mod_mse.mean()
     elif reduction == 'sum':
         return mod_mse.sum()
     else:
         return mod_mse
 
+
 def get_mse_coef(gamma, nu, alpha, beta, y):
     """
     Return the coefficient of the MSE loss for each prediction.
@@ -100,7 +103,7 @@ def get_mse_coef(gamma, nu, alpha, beta, y):
     nu_eff = check_mse_efficiency_nu(gamma, nu, alpha, beta, y)
     delta = (gamma - y).abs()
     min_bound = torch.min(nu_eff, alpha_eff).min()
-    c = (min_bound.sqrt()/(delta + tol)).detach()
+    c = (min_bound.sqrt() / (delta + tol)).detach()
     return torch.clip(c, min=False, max=1.)
 
 
@@ -123,7 +126,7 @@ def check_mse_efficiency_alpha(nu, alpha, beta):
 
     where f => the NLL loss (BayesianDTI.loss.MarginalLikelihood)
     """
-    right = (torch.exp((torch.digamma(alpha+0.5)-torch.digamma(alpha))) - 1)*2*beta*(1+nu) / (nu + 1e-8)
+    right = (torch.exp((torch.digamma(alpha + 0.5) - torch.digamma(alpha))) - 1) * 2 * beta * (1 + nu) / (nu + 1e-8)
 
     return right.detach()
 
@@ -161,9 +164,10 @@ class EvidentialMarginalLikelihood(torch.nn.modules.loss._Loss):
     Reference: https://www.mit.edu/~amini/pubs/pdf/deep-evidential-regression.pdf
     Source: https://github.com/deargen/MT-ENet/tree/468822188f52e517b1ee8e386eea607b2b7d8829
     """
+
     def __init__(self, size_average=None, reduce=None, reduction: str = 'mean'):
         super(EvidentialMarginalLikelihood, self).__init__(size_average, reduce, reduction)
-    
+
     def forward(self, gamma: torch.Tensor, nu: torch.Tensor, alpha: torch.Tensor, beta: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
         """
@@ -176,25 +180,21 @@ class EvidentialMarginalLikelihood(torch.nn.modules.loss._Loss):
 
         Return:
             (Tensor) Negative log marginal likelihood of EvidentialNet
-            p(y|m) = Student-t(y; gamma, (beta(1+nu))/(nu*alpha) , 2*alpha)
-            then, the negative log likelihood is (CAUTION QUITE COMPLEX!)
-            NLL = -log(p(y|m)) =
-                log(3.14/nu)*0.5 - alpha*log(2*beta*(1 + nu)) + (alpha + 0.5)*log( nu(target - gamma)^2 + 2*beta(1 + nu) )
-                + log(GammaFunc(alpha)/GammaFunc(alpha + 0.5))
+
         """
         pi = torch.tensor(np.pi)
-        x1 = torch.log(pi/(nu + tol))*0.5
-        x2 = -alpha*torch.log(2.*beta*(1.+ nu) + tol)
-        x3 = (alpha + 0.5)*torch.log( nu*(target - gamma)**2 + 2.*beta*(1. + nu) + tol)
+        x1 = torch.log(pi / (nu + tol)) * 0.5
+        x2 = -alpha * torch.log(2. * beta * (1. + nu) + tol)
+        x3 = (alpha + 0.5) * torch.log(nu * (target - gamma) ** 2 + 2. * beta * (1. + nu) + tol)
         x4 = torch.lgamma(alpha + tol) - torch.lgamma(alpha + 0.5 + tol)
-        if self.reduction == 'mean': 
+        if self.reduction == 'mean':
             return (x1 + x2 + x3 + x4).mean()
         elif self.reduction == 'sum':
             return (x1 + x2 + x3 + x4).sum()
         else:
             return x1 + x2 + x3 + x4
-    
+
 
 class EvidenceRegularizer(torch.nn.modules.loss._Loss):
     """
     Regularization for the regression prior network.
@@ -203,10 +203,11 @@ class EvidenceRegularizer(torch.nn.modules.loss._Loss):
     Reference: https://www.mit.edu/~amini/pubs/pdf/deep-evidential-regression.pdf
     Source: https://github.com/deargen/MT-ENet/tree/468822188f52e517b1ee8e386eea607b2b7d8829
     """
+
     def __init__(self, size_average=None, reduce=None, reduction: str = 'mean', factor=0.1):
         super(EvidenceRegularizer, self).__init__(size_average, reduce, reduction)
         self.factor = factor
-    
+
     def forward(self, gamma: torch.Tensor, nu: torch.Tensor, alpha: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
         """
@@ -218,17 +219,15 @@ class EvidenceRegularizer(torch.nn.modules.loss._Loss):
 
         Return:
             (Tensor) prior network regularization
-            Loss = |y - gamma|*(2*nu + alpha) * factor
-
         """
-        loss_value = torch.abs(target - gamma)*(2*nu + alpha) * self.factor
-        if self.reduction == 'mean': 
+        loss_value = torch.abs(target - gamma) * (2 * nu + alpha) * self.factor
+        if self.reduction == 'mean':
             return loss_value.mean()
         elif self.reduction == 'sum':
             return loss_value.sum()
         else:
             return loss_value
-    
+
 
 class GaussianNLL(torch.nn.modules.loss._Loss):
     """
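For reference, the x1..x4 terms in `EvidentialMarginalLikelihood.forward` (and the equivalent chained sum in `nig_nll`) implement the negative log marginal likelihood of the normal-inverse-gamma evidential model:

    \mathrm{NLL} = \tfrac{1}{2}\log\frac{\pi}{\nu}
                 - \alpha\log\bigl(2\beta(1+\nu)\bigr)
                 + \bigl(\alpha+\tfrac{1}{2}\bigr)\log\bigl(\nu(y-\gamma)^2 + 2\beta(1+\nu)\bigr)
                 + \log\frac{\Gamma(\alpha)}{\Gamma(\alpha+\tfrac{1}{2})}

A short usage sketch of the two loss modules, with made-up tensors standing in for the four outputs of an evidential head (the constructor and forward signatures match the code above; the tensor values are illustrative only):

    import torch
    from mlguess.torch.regression_losses import (
        EvidentialMarginalLikelihood,
        EvidenceRegularizer,
    )

    nll_loss = EvidentialMarginalLikelihood(reduction='mean')
    reg_loss = EvidenceRegularizer(reduction='mean', factor=0.1)

    # Stand-ins for an evidential head's outputs; in practice these are
    # produced by the network, not created directly.
    gamma = torch.zeros(8, requires_grad=True)  # predicted mean
    nu = torch.ones(8)                          # evidence (nu > 0)
    alpha = torch.full((8,), 2.0)               # shape (alpha > 1)
    beta = torch.ones(8)                        # scale (beta > 0)
    target = torch.randn(8)

    loss = nll_loss(gamma, nu, alpha, beta, target) \
        + reg_loss(gamma, nu, alpha, target)
    loss.backward()  # gradients flow back to gamma here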
@@ -237,15 +236,16 @@ class GaussianNLL(torch.nn.modules.loss._Loss):
     Reference: https://www.mit.edu/~amini/pubs/pdf/deep-evidential-regression.pdf
     Source: https://github.com/deargen/MT-ENet/tree/468822188f52e517b1ee8e386eea607b2b7d8829
     """
+
     def __init__(self, size_average=None, reduce=None, reduction: str = 'mean'):
         super(GaussianNLL, self).__init__(size_average, reduce, reduction)
-    
+
     def forward(self, input_mu: torch.Tensor, input_std: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
-        x1 = 0.5*torch.log(2*np.pi*input_std*input_std)
-        x2 = 0.5/(input_std**2)*((target - input_mu)**2)
-        
+        x1 = 0.5 * torch.log(2 * np.pi * input_std * input_std)
+        x2 = 0.5 / (input_std ** 2) * ((target - input_mu) ** 2)
+
         if self.reduction == 'mean':
             return torch.mean(x1 + x2)
         elif self.reduction == 'sum':
-            return torch.sum(x1 + x2)
\ No newline at end of file
+            return torch.sum(x1 + x2)
diff --git a/pyproject.toml b/pyproject.toml
index 7008802..67f0267 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "miles-guess"
-authors = [{name = "John Schreck, David John Gagne, Charlie Becker, Gabrielle Gantos", email = "miles@ucar.edu"}]
+authors = [{name = "John Schreck, David John Gagne, Charlie Becker, Gabrielle Gantos", email = "milescore@ucar.edu"}]
 readme = "README.md"
 license = {file = "LICENSE"}
 dynamic = ["version"]
@@ -36,12 +36,14 @@ dependencies = [
 ]
 
 [project.optional-dependencies]
-tensorflow = ["tensorflow>=2.12"]
+tensorflow = ["tensorflow==2.16.0rc0"]
 
-tensorflow_gpu = ["tensorflow[and-cuda]"]
+tensorflow_gpu = ["tensorflow[and-cuda]==2.16.0rc0"]
 
 torch = ["torch"]
 
+jax = ["jax"]
+
 [tool.setuptools]
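With the optional-dependency groups above, a backend can be pulled in at install time via standard pip extras, e.g.:

    pip install "miles-guess[jax]"   # or [tensorflow], [tensorflow_gpu], [torch]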