Okay, I think I have finalized everything on the pde_optimization branch before the merge. Added some tests, debugged an issue with the PDE/WeakPDE libraries when combined with the GeneralizedLibrary and ensembling functionality, cleaned up some code, and reran all the Jupyter notebooks.
Alan Kaptanoglu authored and committed on Jan 6, 2022
1 parent 5fa2f39 commit 2d1567f
Showing 21 changed files with 1,289 additions and 939 deletions.
95 changes: 49 additions & 46 deletions examples/10_PDEFIND_examples.ipynb

Large diffs are not rendered by default.

357 changes: 191 additions & 166 deletions examples/11_SSR_FROLS_examples.ipynb

Large diffs are not rendered by default.

79 changes: 42 additions & 37 deletions examples/12_weakform_SINDy_examples.ipynb

Large diffs are not rendered by default.

222 changes: 121 additions & 101 deletions examples/13_ensembling.ipynb

Large diffs are not rendered by default.

79 changes: 38 additions & 41 deletions examples/14_cavity_flow.ipynb

Large diffs are not rendered by default.

299 changes: 151 additions & 148 deletions examples/15_pysindy_lectures.ipynb

Large diffs are not rendered by default.

99 changes: 37 additions & 62 deletions examples/1_feature_overview.ipynb

Large diffs are not rendered by default.

46 changes: 26 additions & 20 deletions examples/2_introduction_to_sindy.ipynb

Large diffs are not rendered by default.

55 changes: 34 additions & 21 deletions examples/3_original_paper.ipynb

Large diffs are not rendered by default.

21 changes: 13 additions & 8 deletions examples/4_scikit_learn_compatibility.ipynb
@@ -74,13 +74,15 @@
"t_train = np.arange(0, 10, dt)\n",
"t_train_span = (t_train[0], t_train[-1])\n",
"x0_train = [-8, 8, 27]\n",
"x_train = solve_ivp(lorenz, t_train_span, x0_train, t_eval=t_train, **integrator_keywords).y.T\n",
"x_train = solve_ivp(lorenz, t_train_span, x0_train, \n",
" t_eval=t_train, **integrator_keywords).y.T\n",
"\n",
"# Evolve the Lorenz equations in time using a different initial condition\n",
"t_test = np.arange(0, 15, dt)\n",
"t_test_span = (t_test[0], t_test[-1])\n",
"x0_test = np.array([8, 7, 15])\n",
"x_test = solve_ivp(lorenz, t_test_span, x0_test, t_eval=t_test, **integrator_keywords).y.T"
"x_test = solve_ivp(lorenz, t_test_span, x0_test, \n",
" t_eval=t_test, **integrator_keywords).y.T"
]
},
{
@@ -117,10 +119,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Best parameters: {'differentiation_method__order': 2, 'feature_library': PolynomialLibrary(), 'optimizer__alpha': 0.01, 'optimizer__threshold': 0.01}\n",
"(x0)' = -9.999 x0 + 9.999 x1\n",
"(x1)' = 27.992 x0 + -0.999 x1 + -1.000 x0 x2\n",
"(x2)' = -2.666 x2 + 1.000 x0 x1\n"
"Best parameters: {'differentiation_method__order': 1, 'feature_library': PolynomialLibrary(), 'optimizer__alpha': 0.01, 'optimizer__threshold': 0.01}\n",
"(x0)' = -10.021 x0 + 9.993 x1\n",
"(x1)' = 0.227 1 + 27.601 x0 + -0.611 x1 + -0.983 x0 x2 + -0.020 x1 x2\n",
"(x2)' = 0.590 1 + 0.045 x0 + -0.018 x1 + -2.691 x2 + 0.965 x0 x1 + 0.026 x1^2\n"
]
}
],
@@ -344,7 +346,9 @@
"source": [
"from sklearn.linear_model import ElasticNet\n",
"\n",
"model = ps.SINDy(optimizer=ElasticNet(l1_ratio=0.9, fit_intercept=False), t_default=dt)\n",
"model = ps.SINDy(optimizer=ElasticNet(l1_ratio=0.9, \n",
" fit_intercept=False), \n",
" t_default=dt)\n",
"model.fit(x_train)\n",
"model.print()"
]
@@ -373,7 +377,8 @@
"from sklearn.linear_model import OrthogonalMatchingPursuit\n",
"\n",
"model = ps.SINDy(\n",
" optimizer=OrthogonalMatchingPursuit(n_nonzero_coefs=8, fit_intercept=False),\n",
" optimizer=OrthogonalMatchingPursuit(n_nonzero_coefs=8, \n",
" fit_intercept=False),\n",
" t_default=dt\n",
")\n",
"model.fit(x_train)\n",
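The changed "Best parameters" output above is produced by wrapping a SINDy model in scikit-learn's GridSearchCV, which is what this notebook demonstrates. A minimal sketch of such a search is shown below; only the parameter names are taken from the printed result, while the candidate values, the FourierLibrary alternative, and the default cross-validation splitter are illustrative assumptions. x_train and dt come from earlier cells of this notebook.

```python
import pysindy as ps
from sklearn.model_selection import GridSearchCV

# Hypothetical grid: the key names mirror the "Best parameters" output above,
# but the candidate values are illustrative.
param_grid = {
    "optimizer__threshold": [0.001, 0.01, 0.1],
    "optimizer__alpha": [0.01, 0.05, 0.1],
    "feature_library": [ps.PolynomialLibrary(), ps.FourierLibrary()],
    "differentiation_method__order": [1, 2],
}

# SINDy follows the scikit-learn estimator API, so it can be dropped straight
# into GridSearchCV; x_train and dt are defined in the cells shown above.
search = GridSearchCV(ps.SINDy(t_default=dt), param_grid)
search.fit(x_train)
print("Best parameters:", search.best_params_)
search.best_estimator_.print()
```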
7 changes: 4 additions & 3 deletions examples/5_differentiation.ipynb

Large diffs are not rendered by default.

8 changes: 5 additions & 3 deletions examples/6_deeptime_compatibility.ipynb

Large diffs are not rendered by default.

160 changes: 83 additions & 77 deletions examples/7_plasma_examples.ipynb

Large diffs are not rendered by default.

361 changes: 200 additions & 161 deletions examples/8_trapping_sindy_paper_examples.ipynb

Large diffs are not rendered by default.

49 changes: 34 additions & 15 deletions examples/9_sindypi_with_sympy.ipynb

Large diffs are not rendered by default.

15 changes: 15 additions & 0 deletions pysindy/feature_library/generalized_library.py
@@ -4,6 +4,7 @@
from sklearn.utils.validation import check_is_fitted

from .base import BaseFeatureLibrary
from .weak_pde_library import WeakPDELibrary


class GeneralizedLibrary(BaseFeatureLibrary):
@@ -105,6 +106,20 @@ def __init__(
)
if len(libraries) > 0:
self.libraries_ = libraries
weak_libraries = False
nonweak_libraries = False
for lib in self.libraries_:
if isinstance(lib, WeakPDELibrary):
weak_libraries = True
else:
nonweak_libraries = True
if weak_libraries and nonweak_libraries:
raise ValueError(
"At least one of the libraries is a weak form library, "
"and at least one of the libraries is not, which will "
"result in a nonsensical optimization problem. Please use "
"all weak form libraries or no weak form libraries."
)
else:
raise ValueError(
"Empty or nonsensical library list passed to this library."
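The new guard in GeneralizedLibrary rejects mixtures of weak-form and ordinary libraries at construction time. A minimal sketch of how this surfaces to users is below; the grid and library arguments mirror the weak-form test added in test/feature_library/test_feature_library.py later in this diff and are otherwise illustrative.

```python
import numpy as np
import pysindy as ps

# Small uniform spatiotemporal grid, mirroring the new weak-form test.
t = np.linspace(0, 10, 100)
x = np.linspace(0, 10, 100)
X, T = np.meshgrid(x, t)
XT = np.array([X, T]).T

weak_lib = ps.WeakPDELibrary(
    library_functions=[lambda x: x],
    function_names=[lambda x: x],
    derivative_order=2,
    spatiotemporal_grid=XT,
    is_uniform=True,
)

# Mixing a weak-form library with an ordinary one now fails fast, since the
# two would define inconsistent regression targets.
try:
    ps.GeneralizedLibrary([ps.PolynomialLibrary(), weak_lib])
except ValueError as err:
    print(err)
```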
27 changes: 13 additions & 14 deletions pysindy/pysindy.py
@@ -327,22 +327,24 @@ def fit(
raise ValueError("n_models must be a positive integer")
if (n_subset is not None) and n_subset <= 0:
raise ValueError("n_subset must be a positive integer")
pde_libraries = False
weak_libraries = False
if x_dot is None:
if isinstance(self.feature_library, WeakPDELibrary):
x_dot = convert_u_dot_integral(x, self.feature_library)
elif isinstance(self.feature_library, PDELibrary):
x_dot = FiniteDifference(d=1, axis=-2)._differentiate(x, t=t)
elif isinstance(self.feature_library, GeneralizedLibrary):
weak_libraries = isinstance(
np.array(self.feature_library.libraries_), WeakPDELibrary
)
if np.any(weak_libraries):
for lib in self.feature_library.libraries_:
if isinstance(lib, WeakPDELibrary):
weak_libraries = True
if isinstance(lib, PDELibrary):
pde_libraries = True
if weak_libraries:
x_dot = convert_u_dot_integral(
x, self.feature_library.libraries_[weak_libraries]
x, self.feature_library.libraries_[0]
)
elif np.any(
isinstance(np.array(self.feature_library.libraries_), PDELibrary)
):
elif pde_libraries:
x_dot = FiniteDifference(d=1, axis=-2)._differentiate(x, t=t)
if multiple_trajectories:
x, x_dot = self._process_multiple_trajectories(x, t, x_dot)
@@ -358,7 +360,9 @@
else:
if x_dot is None:
x_dot = self.differentiation_method(x, t)
else:
elif (not isinstance(self.feature_library, WeakPDELibrary)) and (
not weak_libraries
):
x_dot = validate_input(x_dot, t)

# Set ensemble variables
@@ -559,11 +563,6 @@ def predict(self, x, u=None, multiple_trajectories=False):
u = validate_control_variables(
x, u, multiple_trajectories=True, return_array=False
)
print(
x[1].shape,
u[1].shape,
self.model.predict(np.concatenate((x[1], u[1]), axis=1)).shape,
)
return [
self.model.predict(np.concatenate((xi, ui), axis=1)).reshape(
x_shapes[i]
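The rewritten block in fit replaces an isinstance check applied to np.array(self.feature_library.libraries_) with an explicit loop over the sub-libraries. The standalone snippet below (using toy stand-in classes, not the real pysindy ones) illustrates why the old check could never detect a weak-form sub-library.

```python
import numpy as np

# Toy stand-ins for the real library classes, just to show the failure mode.
class WeakPDELibrary:
    pass

class PolynomialLibrary:
    pass

libraries = [PolynomialLibrary(), WeakPDELibrary()]

# Old check: isinstance() sees the wrapping object array, not its elements,
# so it always returns False and the weak library goes undetected.
print(isinstance(np.array(libraries), WeakPDELibrary))            # False

# New approach: inspect each sub-library individually, as the loop in fit does.
print(any(isinstance(lib, WeakPDELibrary) for lib in libraries))  # True
```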
20 changes: 13 additions & 7 deletions pysindy/utils/base.py
@@ -33,8 +33,8 @@ def validate_input(x, t=T_DEFAULT):
raise ValueError("t must be positive")
# Only apply these tests if t is array-like
elif isinstance(t, np.ndarray):
if not len(t) == x.shape[0]:
raise ValueError("Length of t should match x.shape[0].")
if not len(t) == x.shape[-2]:
raise ValueError("Length of t should match x.shape[-2].")
if not np.all(t[:-1] < t[1:]):
raise ValueError("Values in t should be in strictly increasing order.")
else:
@@ -116,7 +116,7 @@ def drop_random_rows(x, x_dot, n_subset, replace, feature_library, pde_library_f
if pde_library_flag == "WeakPDE":
# Weak form needs uniform, ascending grid, so cannot replace
replace = False
s = [slice(None, None)] * len(feature_library.spatiotemporal_grid.shape)
s = [slice(None, None)] * feature_library.spatiotemporal_grid.ndim
s[-2] = 0
s[-1] = slice(None, -1)
spatial_grid = feature_library.spatiotemporal_grid[tuple(s)]
@@ -137,14 +137,20 @@ def drop_random_rows(x, x_dot, n_subset, replace, feature_library, pde_library_f
if n_subset > num_time:
n_subset = num_time
rand_inds = np.sort(choice(range(num_time), n_subset, replace=replace))
x_shaped = np.reshape(x, np.concatenate([dims, [num_time], [n_features]]))

if len(dims) > 0:
x_shaped = np.reshape(x, np.concatenate([dims, [num_time], [n_features]]))
else:
x_shaped = np.reshape(x, np.concatenate([[num_time], [n_features]]))
s0 = [slice(dim) for dim in x_shaped.shape]
s0[len(dims)] = rand_inds

x_new = np.reshape(
x_shaped[tuple(s0)], (np.product(dims) * n_subset, x.shape[1])
)
if len(dims) > 0:
x_new = np.reshape(
x_shaped[tuple(s0)], (np.product(dims) * n_subset, x.shape[1])
)
else:
x_new = np.reshape(x_shaped[tuple(s0)], (n_subset, x.shape[1]))

if pde_library_flag == "WeakPDE":
spatiotemporal_grid = feature_library.spatiotemporal_grid
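The validate_input change moves the time-length check from axis 0 to axis -2, so one check covers both ODE data shaped (n_time, n_features) and gridded PDE data whose trailing axes are (n_time, n_features); the drop_random_rows edits similarly special-case data with no spatial dimensions (len(dims) == 0). A tiny sketch with illustrative shapes:

```python
import numpy as np

# Illustrative shapes only: 64 spatial points, 100 time points, 1 feature.
n_x, n_t, n_features = 64, 100, 1
t = np.linspace(0, 1, n_t)

u_ode = np.random.randn(n_t, n_features)        # ODE-style data
u_pde = np.random.randn(n_x, n_t, n_features)   # gridded PDE-style data

# The updated check compares len(t) with the second-to-last axis, which is
# the time axis in both layouts.
assert len(t) == u_ode.shape[-2]
assert len(t) == u_pde.shape[-2]
```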
2 changes: 1 addition & 1 deletion test/conftest.py
@@ -98,7 +98,7 @@ def data_1d_random_pde():
x = np.linspace(0, 10, n)
u = np.random.randn(n, n, 1)
u_dot = FiniteDifference(axis=1)._differentiate(u, t=dt)
return x, u, u_dot
return t, x, u, u_dot


@pytest.fixture
72 changes: 71 additions & 1 deletion test/feature_library/test_feature_library.py
@@ -150,6 +150,7 @@ def test_weak_pde_library_bad_parameters(params):
"params",
[
dict(libraries=[]),
dict(libraries=[PolynomialLibrary, WeakPDELibrary]),
dict(libraries=[PolynomialLibrary, PolynomialLibrary], tensor_array=[[0, 0]]),
dict(libraries=[PolynomialLibrary, PolynomialLibrary], tensor_array=[[0, 1]]),
dict(libraries=[PolynomialLibrary, PolynomialLibrary], tensor_array=[[1, -1]]),
@@ -547,6 +548,75 @@ def test_generalized_library(data_lorenz):
assert len(model.get_feature_names()) == 29


def test_generalized_library_pde(data_1d_random_pde):
t, x, u, u_dot = data_1d_random_pde
poly_library = PolynomialLibrary(include_bias=False)
fourier_library = FourierLibrary()
library_functions = [lambda x: x, lambda x: x * x]
library_function_names = [lambda x: x, lambda x: x + x]
pde_library = PDELibrary(
library_functions=library_functions,
function_names=library_function_names,
derivative_order=2,
spatial_grid=x,
include_bias=True,
is_uniform=True,
)

# First try without tensor libraries and subset of the input variables
sindy_library = GeneralizedLibrary(
[poly_library, fourier_library, pde_library],
)
sindy_opt = STLSQ(threshold=0.25)
model = SINDy(
optimizer=sindy_opt,
feature_library=sindy_library,
)
model.fit(u, t=t)
model.print()
model.get_feature_names()
assert len(model.get_feature_names()) == 13


def test_generalized_library_weak_pde(data_1d_random_pde):
t, x, u, u_dot = data_1d_random_pde
library_functions = [lambda x: x, lambda x: x * x]
library_function_names = [lambda x: x, lambda x: x + x]
X, T = np.meshgrid(x, t)
XT = np.array([X, T]).T
weak_library1 = WeakPDELibrary(
library_functions=library_functions,
function_names=library_function_names,
derivative_order=2,
spatiotemporal_grid=XT,
include_bias=True,
is_uniform=True,
)
library_functions = [lambda x: x * x * x]
library_function_names = [lambda x: x + x + x]
weak_library2 = WeakPDELibrary(
library_functions=library_functions,
function_names=library_function_names,
derivative_order=0,
spatiotemporal_grid=XT,
is_uniform=True,
)

# First try without tensor libraries and subset of the input variables
sindy_library = GeneralizedLibrary(
[weak_library1, weak_library2],
)
sindy_opt = STLSQ(threshold=0.25)
model = SINDy(
optimizer=sindy_opt,
feature_library=sindy_library,
)
model.fit(u, t=t)
model.print()
model.get_feature_names()
assert len(model.get_feature_names()) == 10


# Helper function for testing PDE libraries
def pde_library_helper(library, u, coef_first_dim):
opt = STLSQ(normalize_columns=True, alpha=1e-10, threshold=0)
@@ -563,7 +633,7 @@ def pde_library_helper(library, u, coef_first_dim):


def test_1D_pdes(data_1d_random_pde):
spatial_grid, u, _ = data_1d_random_pde
_, spatial_grid, u, _ = data_1d_random_pde
library_functions = [lambda x: x, lambda x: x * x]
library_function_names = [lambda x: x, lambda x: x + x]
pde_lib = PDELibrary(
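The new generalized-library tests can be run in isolation; the snippet below is just one way to select them with pytest's -k filter (the path is the one shown in this diff).

```python
import pytest

# Run only the generalized-library tests added in this commit.
pytest.main([
    "test/feature_library/test_feature_library.py",
    "-k", "generalized_library",
    "-v",
])
```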
