Master 2 #104 — Merged (7 commits, Mar 20, 2024)
11 changes: 4 additions & 7 deletions smt/applications/mfk.py
@@ -764,9 +764,7 @@ def predict_variances_all_levels(self, X, is_acting=None):
         sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
         MSE[:, 0] = sigma2 * (
             # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-            1
-            - (r_t**2).sum(axis=0)
-            + (u_**2).sum(axis=0)
+            1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
         )

         # Calculate recursively kriging variance at level i
@@ -850,17 +848,16 @@ def predict_variances_all_levels(self, X, is_acting=None):
                 Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
                 MSE[:, i] = (
                     # sigma2_rho * MSE[:, i - 1]
-                    +Q_ / (2 * (self.nt_all[i] - p - q))
+                    +Q_
+                    / (2 * (self.nt_all[i] - p - q))
                     # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                     * (1 - (r_t**2).sum(axis=0))
                     + sigma2 * (u_**2).sum(axis=0)
                 )
             else:
                 MSE[:, i] = sigma2 * (
                     # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
-                    1
-                    - (r_t**2).sum(axis=0)
-                    + (u_**2).sum(axis=0)
+                    1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)
                 )  # + sigma2_rho * MSE[:, i - 1]
             if self.options["propagate_uncertainty"]:
                 MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
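For context, both hunks above are black-style reflows: the computed quantity is unchanged. Assuming SMT's usual Cholesky-based Kriging implementation (C the Cholesky factor of the correlation matrix R, r(x) the vector of correlations between x and the training points), the reflowed expression is the standard universal-kriging variance

\[
\mathrm{MSE}(x) = \hat{\sigma}^2 \left( 1 - \lVert r_t(x) \rVert^2 + \lVert u(x) \rVert^2 \right),
\qquad r_t(x) = C^{-1} r(x),
\]

where u(x) carries the generalized-least-squares correction for the regression trend. The commented-out lines preserve an earlier variant that also added the estimated noise variance self.optimal_noise_all[i].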
32 changes: 22 additions & 10 deletions smt/applications/tests/test_ego.py
@@ -1,6 +1,6 @@
 # coding: utf-8
 """
-Author: Remi Lafage <[email protected]> and Nathalie Bartoli
+Author: Remi Lafage <[email protected]>
 This package is distributed under New BSD license.
 """

@@ -985,35 +985,47 @@ def f_obj(X):
             LHS, design_space, criterion="ese", random_state=random_state
         )
         Xt = sampling(n_doe)
-
+        if ds.HAS_CONFIG_SPACE:  # results differ wrt config_space impl
+            self.assertAlmostEqual(np.sum(Xt), 24.811925491708156, delta=1e-4)
+        else:
+            self.assertAlmostEqual(np.sum(Xt), 28.568852027679586, delta=1e-4)
+        Xt = np.array(
+            [
+                [0.37454012, 1.0],
+                [0.95071431, 0.0],
+                [0.73199394, 8.0],
+                [0.59865848, 6.0],
+                [0.15601864, 7.0],
+            ]
+        )
         # To start the Bayesian optimization
         n_iter = 2  # number of iterations
-        criterion = "EI"  # infill criterion
+        criterion = "LCB"  # infill criterion
         ego = EGO(
             n_iter=n_iter,
             criterion=criterion,
             xdoe=Xt,
             surrogate=KRG(
                 design_space=design_space,
-                categorical_kernel=MixIntKernelType.CONT_RELAX,
+                categorical_kernel=MixIntKernelType.GOWER,
                 theta0=[1e-2],
-                n_start=15,
+                n_start=25,
                 corr="squar_exp",
                 hyper_opt="Cobyla",
                 print_global=False,
             ),
             verbose=False,
             enable_tunneling=False,
             random_state=random_state,
-            n_start=15,
+            n_start=25,
         )
         x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_obj)
         if ds.HAS_CONFIG_SPACE:  # results differ wrt config_space impl
-            self.assertAlmostEqual(np.sum(y_data), 5.4385331120184475, delta=1e-3)
-            self.assertAlmostEqual(np.sum(x_data), 39.711522540205394, delta=1e-3)
+            self.assertAlmostEqual(np.sum(y_data), 8.846225704750577, delta=1e-4)
+            self.assertAlmostEqual(np.sum(x_data), 41.811925504901374, delta=1e-4)
         else:
-            self.assertAlmostEqual(np.sum(y_data), 1.8911720670620835, delta=1e-6)
-            self.assertAlmostEqual(np.sum(x_data), 47.56885202767958, delta=1e-6)
+            self.assertAlmostEqual(np.sum(y_data), 7.8471910288712, delta=1e-4)
+            self.assertAlmostEqual(np.sum(x_data), 34.81192549, delta=1e-4)

     def test_ego_gek(self):
         ego, fun = self.initialize_ego_gek()
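For reference, the infill criterion in this test switches from "EI" (expected improvement) to "LCB". Assuming the usual definition (the exploration constant kappa is SMT's choice, not specified here), the lower confidence bound minimized at each infill step is

\[
\mathrm{LCB}(x) = \hat{\mu}(x) - \kappa \, \hat{\sigma}(x),
\]

with the surrogate's predictive mean and standard deviation. Unlike EI, it balances exploration against exploitation through kappa rather than through the improvement integral, which is one reason the reference sums for x_data and y_data change alongside the kernel (CONT_RELAX to GOWER) and n_start updates.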
1 change: 0 additions & 1 deletion smt/applications/tests/test_mfk_variance.py
@@ -12,7 +12,6 @@
 https://doi.org/10.1080/00401706.2014.928233
 """

-
 import numpy as np
 from smt.applications.mfk import MFK, NestedLHS
 from smt.sampling_methods import LHS
1 change: 0 additions & 1 deletion smt/applications/tests/test_mfkpls.py
@@ -7,7 +7,6 @@
 Adapted to new SMT version in march 2020 by Nathalie Bartoli
 """

-
 try:
     import matplotlib

1 change: 1 addition & 0 deletions smt/applications/tests/test_mfkplsk.py
@@ -6,6 +6,7 @@

 Adapted to new SMT version in march 2020 by Nathalie Bartoli
 """
+
 try:
     import matplotlib

5 changes: 5 additions & 0 deletions smt/surrogate_models/krg_based.py
@@ -964,6 +964,11 @@ def _reduced_likelihood_function(self, theta):
                 print("exception : ", e)
                 print(np.linalg.eig(R)[0])
                 return reduced_likelihood_function_value, par
+        if linalg.svd(R, compute_uv=False)[-1] < 1.1 * nugget:
+            warnings.warn(
+                "R is too ill conditioned. Poor combination "
+                "of regression model and observations."
+            )

         # Get generalized least squared solution
         Ft = linalg.solve_triangular(C, self.F, lower=True)
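For context, here is a standalone sketch (not SMT internals) of the guard added above: it warns when the smallest singular value of the correlation matrix R sits near the numerical nugget, i.e. R is effectively singular for the chosen kernel/data combination. The helper name and nugget default below are illustrative assumptions.

import warnings

import numpy as np
from scipy import linalg


# Hypothetical helper mirroring the added check; Kriging nuggets are typically
# on the order of 100 * machine epsilon, which is assumed here.
def warn_if_ill_conditioned(R, nugget=100.0 * np.finfo(float).eps):
    smallest_sv = linalg.svd(R, compute_uv=False)[-1]  # singular values sorted descending
    if smallest_sv < 1.1 * nugget:
        warnings.warn(
            "R is too ill conditioned. Poor combination "
            "of regression model and observations."
        )


# Two nearly coincident training points make R nearly singular:
R = np.array([[1.0, 1.0 - 1e-15], [1.0 - 1e-15, 1.0]])
warn_if_ill_conditioned(R)  # emits a UserWarning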
47 changes: 47 additions & 0 deletions smt/surrogate_models/tests/test_krg_based.py
@@ -8,6 +8,13 @@
 import numpy as np
 from smt.surrogate_models.krg_based import KrgBased

+from smt.surrogate_models import KRG
+
+
+# defining the toy example
+def target_fun(x):
+    return np.cos(5 * x)
+

 class TestKrgBased(unittest.TestCase):
     def test_theta0_default_init(self):
@@ -29,6 +36,46 @@ def test_theta0_erroneous_init(self):
         krg.set_training_values(np.array([[1, 2, 3]]), np.array([[1]]))  # erroneous
         self.assertRaises(ValueError, krg._check_param)

+    def test_almost_squar_exp(self):
+        nobs = 50  # number of observations
+        np.random.seed(0)  # a seed for reproducibility
+        xt = np.random.uniform(size=nobs)  # design points
+
+        # adding random noise to the observations
+        yt = target_fun(xt) + np.random.normal(scale=0.05, size=nobs)
+
+        # training the model with eval_noise=False
+        sm = KRG(eval_noise=False, corr="pow_exp", pow_exp_power=1.9999)
+        sm.set_training_values(xt, yt)
+
+        self.assertWarns(UserWarning, sm.train)
+
+    def test_less_almost_squar_exp(self):
+        nobs = 50  # number of observations
+        np.random.seed(0)  # a seed for reproducibility
+        xt = np.random.uniform(size=nobs)  # design points
+
+        # adding random noise to the observations
+        yt = target_fun(xt) + np.random.normal(scale=0.05, size=nobs)
+
+        # training the model with eval_noise=False
+        sm = KRG(eval_noise=False, corr="pow_exp", pow_exp_power=1.99)
+        sm.set_training_values(xt, yt)
+        sm.train()
+
+        # predictions
+        x = np.linspace(0, 1, 500).reshape(-1, 1)
+        sm.predict_values(x)  # predictive mean
+        sm.predict_variances(x)  # predictive variance
+        sm.predict_derivatives(x, 0)  # predictive derivative
+        self.assertLess(
+            np.abs(
+                sm.predict_derivatives(x[20], 0)
+                - (sm.predict_values(x[20] + 1e-6) - sm.predict_values(x[20])) / 1e-6
+            ),
+            1e-2,
+        )
+

 if __name__ == "__main__":
     unittest.main()
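A side note on what these two new tests pin down, as an illustrative sketch (the kernel form and theta below are assumptions, not SMT internals): for the power-exponential correlation r(h) = exp(-theta * |h|**p), the correlation matrix over a random design degrades in conditioning as p approaches 2, the squar_exp limit, which is why training warns at pow_exp_power=1.9999 but proceeds at 1.99.

import numpy as np

np.random.seed(0)
x = np.random.uniform(size=50)
h = np.abs(x[:, None] - x[None, :])  # pairwise distances of the design points

for p in (1.99, 1.9999):
    R = np.exp(-10.0 * h**p)  # theta = 10.0 is an arbitrary illustrative value
    s_min = np.linalg.svd(R, compute_uv=False)[-1]
    print(f"p={p}: smallest singular value of R = {s_min:.3e}")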
6 changes: 3 additions & 3 deletions smt/surrogate_models/tests/test_surrogate_model_examples.py
@@ -814,9 +814,9 @@ def df_dx(x):
         genn.options["hidden_layer_sizes"] = [6, 6]
         genn.options["alpha"] = 0.1
         genn.options["lambd"] = 0.1
-        genn.options[
-            "gamma"
-        ] = 1.0  # 1 = gradient-enhanced on, 0 = gradient-enhanced off
+        genn.options["gamma"] = (
+            1.0  # 1 = gradient-enhanced on, 0 = gradient-enhanced off
+        )
         genn.options["num_iterations"] = 1000
         genn.options["is_backtracking"] = True
         genn.options["is_normalize"] = False
6 changes: 3 additions & 3 deletions smt/utils/design_space.py
@@ -523,9 +523,9 @@ def unfold_x(

                 # The is_acting matrix is simply repeated column-wise
                 if is_acting is not None:
-                    is_acting_unfolded[
-                        :, i_x_unfold : i_x_unfold + n_dim_cat
-                    ] = np.tile(is_acting[:, [i]], (1, n_dim_cat))
+                    is_acting_unfolded[:, i_x_unfold : i_x_unfold + n_dim_cat] = (
+                        np.tile(is_acting[:, [i]], (1, n_dim_cat))
+                    )

                 i_x_unfold += n_dim_cat

6 changes: 3 additions & 3 deletions smt/utils/neural_net/model.py
@@ -223,9 +223,9 @@ def train(

                 # Compute average cost and print output
                 avg_cost = np.mean(optimizer.cost_history).squeeze()
-                self._training_history["epoch_" + str(e)][
-                    "batch_" + str(b)
-                ] = optimizer.cost_history
+                self._training_history["epoch_" + str(e)]["batch_" + str(b)] = (
+                    optimizer.cost_history
+                )

                 if not silent:
                     print(
25 changes: 14 additions & 11 deletions smt/utils/options_dictionary.py
@@ -82,25 +82,28 @@ def _assert_valid(self, name, value):
         types = self._declared_entries[name]["types"]

         if values is not None and types is not None:
-            assert value in values or isinstance(
-                value, types
-            ), "Option %s: value and type of %s are both invalid - " % (
-                name,
-                value,
-            ) + "value must be %s or type must be %s" % (
-                values,
-                types,
+            assert value in values or isinstance(value, types), (
+                "Option %s: value and type of %s are both invalid - "
+                % (
+                    name,
+                    value,
+                )
+                + "value must be %s or type must be %s"
+                % (
+                    values,
+                    types,
+                )
             )
         elif values is not None:
             assert value in values, "Option %s: value %s is invalid - must be %s" % (
                 name,
                 value,
                 values,
             )
         elif types is not None:
-            assert isinstance(
-                value, types
-            ), "Option %s: type of %s is invalid - must be %s" % (name, value, types)
+            assert isinstance(value, types), (
+                "Option %s: type of %s is invalid - must be %s" % (name, value, types)
+            )

     def update(self, dict_):
         """