Make krg_based internal optim truly reproducible
relf committed Dec 5, 2023
1 parent 966d295 commit 7a09a0e
Showing 2 changed files with 11 additions and 7 deletions.
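In short: draws from NumPy's global generator (`np.random.rand`) and a hard-coded per-call LHS seed are replaced by a single `np.random.RandomState(41)` instance created once in `_initialize`, so the multistart hyperparameter optimization draws the same sequence on every run. A minimal sketch of the shared-RandomState pattern (hypothetical class, not the actual `KrgBased` implementation):

```python
import numpy as np

class MultistartOptimizer:
    """Hypothetical illustration of the shared-RandomState pattern."""

    def __init__(self, seed=41):
        # One seeded generator, created once and reused for every draw.
        # Unlike np.random.rand(), its stream cannot be perturbed by
        # other code that happens to use NumPy's global RNG.
        self.random_state = np.random.RandomState(seed)

    def initial_guesses(self, n_start, bounds):
        lo, hi = bounds
        # Affine rescaling of uniform draws into [lo, hi].
        return lo + self.random_state.rand(n_start) * (hi - lo)

opt = MultistartOptimizer()
print(opt.initial_guesses(3, (1e-6, 20.0)))  # identical on every run
```

The point of putting the generator on the instance is that the seeded stream belongs to the model alone, so reproducibility no longer depends on what else in the process touched NumPy's global RNG.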
8 changes: 4 additions & 4 deletions smt/applications/tests/test_ego.py
@@ -1005,11 +1005,11 @@ def f_obj(X):
         )
         x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_obj)
         if ds.HAS_CONFIG_SPACE:  # results differs wrt config_space impl
-            self.assertAlmostEqual(np.sum(y_data), 2.7639515433083854, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 32.11001423996299, delta=1e-4)
+            self.assertAlmostEqual(np.sum(y_data), 0.9606415626557894, delta=1e-16)
+            self.assertAlmostEqual(np.sum(x_data), 38.23494224077761, delta=1e-16)
         else:
-            self.assertAlmostEqual(np.sum(y_data), 2.03831406306514, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 33.56885202767958, delta=1e-4)
+            self.assertAlmostEqual(np.sum(y_data), 1.8911720770059735, delta=1e-16)
+            self.assertAlmostEqual(np.sum(x_data), 47.56885202767958, delta=1e-16)

     def test_ego_gek(self):
         ego, fun = self.initialize_ego_gek()
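The reference sums change because the random draws now come from the dedicated generator, and the tolerance tightens from 1e-4 to 1e-16 because seeded runs are expected to be bit-identical. A toy illustration of why such a tight `delta` is safe once everything is seeded (standalone example, not from the test suite):

```python
import numpy as np
import unittest

class ReproducibilityDemo(unittest.TestCase):
    def test_seeded_runs_match_exactly(self):
        # Two independent runs seeded the same way produce the exact
        # same floats, so the difference is 0.0 and delta=1e-16 passes.
        run1 = np.random.RandomState(41).rand(10).sum()
        run2 = np.random.RandomState(41).rand(10).sum()
        self.assertAlmostEqual(run1, run2, delta=1e-16)

if __name__ == "__main__":
    unittest.main()
```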
10 changes: 7 additions & 3 deletions smt/surrogate_models/krg_based.py
@@ -191,6 +191,8 @@ def _initialize(self):
         self.best_iteration_fail = None
         self.nb_ill_matrix = 5
         self.is_acting_points = {}
+        # Make internal optim multistart reproducible
+        self.random_state = np.random.RandomState(41)
         supports["derivatives"] = True
         supports["variances"] = True
         supports["variance_derivatives"] = True
@@ -1741,7 +1743,7 @@ def grad_minus_reduced_likelihood_function(log10t):
                 # to theta in (0,2e1]
                 theta_bounds = self.options["theta_bounds"]
                 if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
-                    self.theta0[i] = np.random.rand()
+                    self.theta0[i] = self.random_state.rand()
                     self.theta0[i] = (
                         self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
                         + theta_bounds[0]
@@ -1770,7 +1772,7 @@ def grad_minus_reduced_likelihood_function(log10t):
             else:
                 theta_bounds = self.options["theta_bounds"]
                 log10t_bounds = np.log10(theta_bounds)
-                theta0_rand = np.random.rand(len(self.theta0))
+                theta0_rand = self.random_state.rand(len(self.theta0))
                 theta0_rand = (
                     theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
                     + log10t_bounds[0]
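The two hunks above keep the same affine rescaling of a uniform draw into the admissible interval; only the source of randomness changes (the instance generator instead of the global one). A standalone illustration of that rescaling in log10 space (names mirror the diff; the bound values are illustrative):

```python
import numpy as np

random_state = np.random.RandomState(41)

theta_bounds = np.array([1e-6, 2e1])    # hypothetical bounds on theta
log10t_bounds = np.log10(theta_bounds)  # optimize in log10 space

# Draw uniform values in [0, 1) and rescale them affinely into
# [log10t_bounds[0], log10t_bounds[1]], exactly as the diff does.
theta0_rand = random_state.rand(3)
theta0_rand = (
    theta0_rand * (log10t_bounds[1] - log10t_bounds[0]) + log10t_bounds[0]
)

print(theta0_rand)          # reproducible log10(theta) starting points
print(10.0 ** theta0_rand)  # back in theta space, inside (1e-6, 2e1]
```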
@@ -1829,7 +1831,9 @@ def grad_minus_reduced_likelihood_function(log10t):

             if self.options["n_start"] > 1:
                 sampling = LHS(
-                    xlimits=theta_limits, criterion="maximin", random_state=41
+                    xlimits=theta_limits,
+                    criterion="maximin",
+                    random_state=self.random_state,
                 )
                 theta_lhs_loops = sampling(self.options["n_start"])
                 theta_all_loops = np.vstack((theta_all_loops, theta_lhs_loops))
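Passing the shared `RandomState` instance instead of the literal seed 41 means successive LHS constructions continue one deterministic stream rather than each restarting from the same seed. A small sketch with smt's `LHS` sampler (assumes smt is installed; the bounds are made up):

```python
import numpy as np
from smt.sampling_methods import LHS

rs = np.random.RandomState(41)
xlimits = np.array([[1e-6, 20.0]])  # hypothetical theta bounds

# Samplers sharing one advancing RandomState: reproducible across
# script runs, but each new sampler continues the stream.
s1 = LHS(xlimits=xlimits, criterion="maximin", random_state=rs)
s2 = LHS(xlimits=xlimits, criterion="maximin", random_state=rs)
print(s1(5))
print(s2(5))  # should differ from s1(5), yet be identical run-to-run

# An integer seed restarts the stream at construction, so every
# sampler built this way yields the same design:
s3 = LHS(xlimits=xlimits, criterion="maximin", random_state=41)
print(s3(5))
```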
