Trajectory Normalization to Koopman Estimators #59

Merged · 4 commits · Mar 2, 2023
10 changes: 9 additions & 1 deletion autokoopman/__init__.py
@@ -1,7 +1,15 @@
# TODO: update this
__author__ = "Ethan Lew"
__copyright__ = "Copyright 2023"
__credits__ = ["Ethan Lew", "Abdelrahman Hekal", "Kostiantyn Potomkin", "Niklas Kochdumper", "Brandon Hencey", "Stanley Bak", "Sergiy Bogomolov"]
__credits__ = [
"Ethan Lew",
"Abdelrahman Hekal",
"Kostiantyn Potomkin",
"Niklas Kochdumper",
"Brandon Hencey",
"Stanley Bak",
"Sergiy Bogomolov",
]
__license__ = "GPLv3"
__version__ = "0.21"
__maintainer__ = "Ethan Lew"
48 changes: 35 additions & 13 deletions autokoopman/autokoopman.py
@@ -97,33 +97,33 @@ def get_parameter_space(obs_type, threshold_range, rank)
)


def get_estimator(obs_type, sampling_period, dim, obs, hyperparams):
def get_estimator(obs_type, sampling_period, dim, obs, hyperparams, normalize):
"""from the myriad of user suppled switches, select the right estimator"""
if obs_type == "rff":
observables = kobs.IdentityObservable() | kobs.RFFObservable(
dim, obs, hyperparams[0]
)
return KoopmanDiscEstimator(
observables, sampling_period, dim, rank=hyperparams[1]
observables, sampling_period, dim, rank=hyperparams[1], normalize=normalize
)
elif obs_type == "quadratic":
observables = kobs.IdentityObservable() | kobs.QuadraticObservable(dim)
return KoopmanDiscEstimator(
observables, sampling_period, dim, rank=hyperparams[0]
observables, sampling_period, dim, rank=hyperparams[0], normalize=normalize
)
elif obs_type == "poly":
observables = kobs.PolynomialObservable(dim, hyperparams[0])
return KoopmanDiscEstimator(
observables, sampling_period, dim, rank=hyperparams[1]
observables, sampling_period, dim, rank=hyperparams[1], normalize=normalize
)
elif obs_type == "id":
observables = kobs.IdentityObservable()
return KoopmanDiscEstimator(
observables, sampling_period, dim, rank=hyperparams[0]
observables, sampling_period, dim, rank=hyperparams[0], normalize=normalize
)
elif isinstance(obs_type, KoopmanObservable):
return KoopmanDiscEstimator(
obs_type, sampling_period, dim, rank=hyperparams[0]
obs_type, sampling_period, dim, rank=hyperparams[0], normalize=normalize
)
else:
raise ValueError(f"unknown observables type {obs_type}")
@@ -133,6 +133,7 @@ def auto_koopman(
training_data: Union[TrajectoriesData, Sequence[np.ndarray]],
inputs_training_data: Optional[Sequence[np.ndarray]] = None,
sampling_period: Optional[float] = None,
normalize: bool = True,
opt: Union[str, HyperparameterTuner] = "monte-carlo",
max_opt_iter: int = 100,
max_epochs: int = 500,
@@ -158,6 +159,7 @@
:param training_data: training trajectories data from which to learn the system
:param inputs_training_data: optional input trajectories data from which to learn the system (this isn't needed if the training data has inputs already)
:param sampling_period: (for discrete time system) sampling period of training data
:param normalize: apply MinMax normalization to the states of the training trajectories
:param opt: hyperparameter optimizer {"grid", "monte-carlo", "bopt"}
:param max_opt_iter: maximum iterations for the tuner to use
:param max_epochs: maximum number of training epochs
@@ -215,11 +217,24 @@
# get the hyperparameter map
if obs_type in {"deep"}:
modelmap = _deep_model_map(
training_data, max_epochs, n_obs, enc_dim, n_layers, torch_device, verbose
training_data,
max_epochs,
n_obs,
enc_dim,
n_layers,
torch_device,
verbose,
normalize,
)
else:
modelmap = _edmd_model_map(
training_data, rank, obs_type, n_obs, lengthscale, sampling_period
training_data,
rank,
obs_type,
n_obs,
lengthscale,
sampling_period,
normalize,
)

# setup the tuner
@@ -239,7 +254,7 @@
gt = BayesianOptTuner(modelmap, training_data, verbose=verbose)
else:
raise ValueError(f"could not match a tuner to the string {opt}")

with hide_prints():
res = gt.tune(nattempts=max_opt_iter, scoring_func=get_scoring_func(cost_func))

@@ -266,6 +281,7 @@ def _deep_model_map(
nlayers,
torch_device,
verbose,
normalize,
) -> HyperparameterMap:
import autokoopman.estimator.deepkoopman as dk

@@ -311,7 +327,7 @@ def get_model(self, hyperparams: Sequence):


def _edmd_model_map(
training_data, rank, obs_type, n_obs, lengthscale, sampling_period
training_data, rank, obs_type, n_obs, lengthscale, sampling_period, normalize
) -> HyperparameterMap:
"""model map for eDMD based methods

@@ -346,7 +362,9 @@ def __init__(self):
super(_ModelMap, self).__init__(pspace)

def get_model(self, hyperparams: Sequence):
return get_estimator(obs_type, sampling_period, dim, n_obs, hyperparams)
return get_estimator(
obs_type, sampling_period, dim, n_obs, hyperparams, normalize
)

# get the hyperparameter map
return _ModelMap()
@@ -377,11 +395,15 @@ def _sanitize_training_data(
# convert the data to autokoopman trajectories
if isinstance(training_data, TrajectoriesData):
if not isinstance(training_data, UniformTimeTrajectoriesData):
print(f"resampling trajectories as they need to be uniform time (sampling period {sampling_period})")
print(
f"resampling trajectories as they need to be uniform time (sampling period {sampling_period})"
)
training_data = training_data.interp_uniform_time(sampling_period)
else:
if not np.isclose(training_data.sampling_period, sampling_period):
print(f"resampling trajectories because the sampling periods differ (original {training_data.sampling_period}, new {sampling_period})")
print(
f"resampling trajectories because the sampling periods differ (original {training_data.sampling_period}, new {sampling_period})"
)
training_data = training_data.interp_uniform_time(sampling_period)
else:
# figure out how to add inputs
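Taken together, the autokoopman.py changes thread a single `normalize` flag from the top-level `auto_koopman` call down to every estimator constructor. A minimal usage sketch, assuming the dictionary result format from the AutoKoopman README (the training data and hyperparameter values below are illustrative, not from this PR):

import numpy as np
from autokoopman import auto_koopman

# placeholder training data: ten 2D trajectories, 50 uniformly sampled points each
trajs = [np.cumsum(np.random.randn(50, 2), axis=0) for _ in range(10)]

experiment_results = auto_koopman(
    trajs,
    sampling_period=0.1,
    normalize=True,       # the flag introduced by this PR (defaults to True)
    obs_type="rff",
    opt="monte-carlo",
    max_opt_iter=20,
)
model = experiment_results["tuned_model"]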
40 changes: 37 additions & 3 deletions autokoopman/core/estimator.py
@@ -6,13 +6,28 @@
import abc
from typing import Optional

from sklearn.preprocessing import MinMaxScaler

import numpy as np

import autokoopman.core.system as asys
import autokoopman.core.trajectory as atraj


class TrajectoryEstimator(abc.ABC):
"""
Trajectory based estimator base class

:param normalize: apply MinMax normalization to the fit data
:param feature_range: range for MinMax scaler
"""
def __init__(self, normalize=True, feature_range=(-1, 1)) -> None:
self.normalize = normalize
if self.normalize:
self.scaler = MinMaxScaler(feature_range=feature_range)
else:
self.scaler = None

@abc.abstractmethod
def fit(self, X: atraj.TrajectoriesData) -> None:
pass
@@ -27,9 +42,13 @@ class NextStepEstimator(TrajectoryEstimator):
"""Estimator of discrete time dynamical systems

Requires that the data be uniform time

:param normalize: apply MinMax normalization to the fit data
:param feature_range: range for MinMax scaler
"""

def __init__(self) -> None:
def __init__(self, normalize=True, feature_range=(-1, 1)) -> None:
super().__init__(normalize=normalize, feature_range=feature_range)
self.names = None

@abc.abstractmethod
@@ -43,7 +62,11 @@ def fit(self, X: atraj.TrajectoriesData) -> None:
assert isinstance(
X, atraj.UniformTimeTrajectoriesData
), "X must be uniform time"
self.fit_next_step(*X.next_step_matrices)
X_, Xp_, U_ = X.next_step_matrices
if self.normalize:
X_ = self.scaler.fit_transform(X_.T).T
Xp_ = self.scaler.transform(Xp_.T).T
self.fit_next_step(X_, Xp_, U_)
self.sampling_period = X.sampling_period
self.names = X.state_names

@@ -52,8 +75,15 @@ class GradientEstimator(TrajectoryEstimator):
"""Estimator of discrete time dynamical systems

Requires that the data be uniform time

:param normalize: apply MinMax normalization to the fit data
:param feature_range: range for MinMax scaler
"""

def __init__(self, normalize=True, feature_range=(-1, 1)) -> None:
super().__init__(normalize=normalize, feature_range=feature_range)
self.names = None

@abc.abstractmethod
def fit_gradient(
self, X: np.ndarray, Y: np.ndarray, U: Optional[np.ndarray] = None
@@ -65,6 +95,10 @@ def fit(self, X: atraj.TrajectoriesData) -> None:
assert isinstance(
X, atraj.UniformTimeTrajectoriesData
), "X must be uniform time"
self.fit_gradient(*X.differentiate)
X_, Xp_, U_ = X.differentiate
if self.normalize:
X_ = self.scaler.fit_transform(X_.T).T
Xp_ = self.scaler.transform(Xp_.T).T
self.fit_gradient(X_, Xp_, U_)
self.sampling_period = X.sampling_period
self.names = X.state_names
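Note the transpose convention in both `fit` overrides: the snapshot matrices are state-dimension-by-samples, while scikit-learn's `MinMaxScaler` expects samples-by-features, hence the `fit_transform(X_.T).T` pattern. The scaler is fit only on `X_` and then reused for `Xp_`, so both matrices pass through the same affine map; the inputs `U_` are left unscaled. A standalone sketch of the convention, with placeholder data:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.random.randn(3, 100)   # 3 state variables, 100 snapshots
Xp = np.random.randn(3, 100)  # the time-shifted snapshots

scaler = MinMaxScaler(feature_range=(-1, 1))
X_s = scaler.fit_transform(X.T).T  # fit the per-state ranges on X ...
Xp_s = scaler.transform(Xp.T).T    # ... and reuse them, unfitted, for X'

# X lands exactly in [-1, 1]; Xp may spill slightly outside, because it is
# scaled with X's ranges rather than its own
assert X_s.min() >= -1.0 and X_s.max() <= 1.0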
7 changes: 3 additions & 4 deletions autokoopman/core/format.py
@@ -38,11 +38,11 @@ def _clip_list(s: Optional[Sequence], nlength=20) -> str:
class hide_prints:
"""context manager to suppress python print output

this was created to hide "ugly stuff"
this was created to hide "ugly stuff"
(e.g., GPyOpt prints)
"""

def __enter__(self, hide_warnings = True):
def __enter__(self, hide_warnings=True):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
self.hw = hide_warnings
@@ -52,11 +52,10 @@ def __enter__(self, hide_warnings = True):
if self.hw:
self.cw = warnings.catch_warnings()
self.cw.__enter__()
warnings.filterwarnings('ignore')
warnings.filterwarnings("ignore")

def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
if self.hw:
self.cw.__exit__()

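`hide_prints` is the context manager wrapped around the tuning call in autokoopman.py above. A short usage sketch:

import warnings
from autokoopman.core.format import hide_prints

with hide_prints():
    print("never reaches the terminal")   # stdout is redirected to os.devnull
    warnings.warn("suppressed")           # filtered when hide_warnings=True (the default)
print("stdout is restored after the block")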
25 changes: 21 additions & 4 deletions autokoopman/core/system.py
@@ -329,8 +329,6 @@ def names(self):
return self._names




class StepDiscreteSystem(DiscreteSystem):
def __init__(
self,
@@ -357,15 +355,18 @@ class LinearContinuousSystem(ContinuousSystem):
class KoopmanContinuousSystem(LinearContinuousSystem):
...


class KoopmanSystem:
"""
mixin class for Koopman systems
"""
def __init__(self, A, B, obs, names, dim=None):

def __init__(self, A, B, obs, names, dim=None, scaler=None):
self._A = A
self._B = B
self._has_input = B is not None and not np.any(np.array(B.shape) == 0)
self.obs = obs
self.scaler = scaler
self.dim = A.shape[0] if dim is None else dim

def evolv_func(t, x, i):
@@ -377,7 +378,21 @@ def evolv_func(t, x, i):
else:
return np.real(self._A @ obs.T).flatten()[: len(x)]

super().__init__(evolv_func, names)
def evolv_func_scale(t, x, i):
x = self.scaler.transform(np.atleast_2d(x)).flatten()
obs = (self.obs(x).flatten())[np.newaxis, :]
if self._has_input:
return self.scaler.inverse_transform(
np.real(self._A @ obs.T + self._B @ (i)[:, np.newaxis])[
: self.dim
].T
).flatten()
else:
return self.scaler.inverse_transform(
np.real(self._A @ obs.T)[: len(x)].T
).flatten()

super().__init__(evolv_func if self.scaler is None else evolv_func_scale, names)

@property
def A(self):
@@ -391,8 +406,10 @@ def B(self):
def obs_func(self):
return self.obs


class KoopmanStepDiscreteSystem(KoopmanSystem, StepDiscreteSystem):
...


class KoopmanGradientContinuousSystem(KoopmanSystem, GradientContinuousSystem):
...
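`evolv_func_scale` mirrors `evolv_func` but brackets the linear update with a scaler round trip: the state is mapped into the normalized space the operator was fit in, lifted through the observables, advanced linearly, and mapped back to the original units. A self-contained sketch of the autonomous branch (the function and argument names here are hypothetical):

import numpy as np

def step_scaled(x, A, obs, scaler, dim):
    """One evolution step in original coordinates, no inputs."""
    z = scaler.transform(np.atleast_2d(x)).flatten()   # into normalized space
    g = obs(z).flatten()[np.newaxis, :]                # lift through observables
    z_next = np.real(A @ g.T)[:dim].T                  # linear Koopman update
    return scaler.inverse_transform(z_next).flatten()  # back to original units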
2 changes: 1 addition & 1 deletion autokoopman/core/tuner.py
@@ -188,7 +188,7 @@ def generate_predictions(
(np.min(v.times), np.max(v.times)),
inputs=v.inputs,
teval=v.times,
**_kwargs
**_kwargs,
)
preds[k] = sivp_interp
return TrajectoriesData(preds)
16 changes: 10 additions & 6 deletions autokoopman/estimator/koopman.py
@@ -50,8 +50,8 @@ class KoopmanDiscEstimator(kest.NextStepEstimator):
See https://epubs.siam.org/doi/pdf/10.1137/16M1062296 for more details
"""

def __init__(self, observables, sampling_period, dim, rank):
super().__init__()
def __init__(self, observables, sampling_period, dim, rank, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.obs = observables
self.rank = int(rank)
@@ -74,7 +74,9 @@ def model(self) -> ksys.System:
"""
packs the learned linear transform into a discrete linear system
"""
return ksys.KoopmanStepDiscreteSystem(self._A, self._B, self.obs, self.names, self.dim)
return ksys.KoopmanStepDiscreteSystem(
self._A, self._B, self.obs, self.names, self.dim, self.scaler
)


class KoopmanContinuousEstimator(kest.GradientEstimator):
@@ -91,8 +93,8 @@ class KoopmanContinuousEstimator(kest.GradientEstimator):

"""

def __init__(self, observables, dim, rank):
super().__init__()
def __init__(self, observables, dim, rank, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.obs = observables
self.rank = int(rank)
@@ -115,4 +117,6 @@ def model(self) -> ksys.System:
"""
packs the learned linear transform into a continuous linear system
"""
return ksys.KoopmanGradientContinuousSystem(self._A, self._B, self.obs, self.names, self.dim)#grad_func, self.names)
return ksys.KoopmanGradientContinuousSystem(
self._A, self._B, self.obs, self.names, self.dim, self.scaler
)
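Because both estimators now forward `**kwargs` to their base classes, `normalize` and `feature_range` can also be set when constructing an estimator directly rather than through `auto_koopman`. A construction sketch (the observable choice and hyperparameter values are illustrative; the `kobs` module path follows the imports used in autokoopman.py):

import autokoopman.core.observables as kobs
from autokoopman.estimator.koopman import KoopmanDiscEstimator

observables = kobs.IdentityObservable() | kobs.QuadraticObservable(2)
estimator = KoopmanDiscEstimator(
    observables,
    sampling_period=0.1,
    dim=2,
    rank=10,
    normalize=False,  # forwarded via **kwargs to NextStepEstimator
)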