Skip to content

Commit

Permalink
Make krg_based training reproducible (#490)
Browse files Browse the repository at this point in the history
* Make krg_based internal optim truly reproducible

* Results differ wrt platform

* Remove useless seeding

* Fix test wrt platform
  • Loading branch information
relf authored Dec 5, 2023
1 parent 966d295 commit 139cf92
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 15 deletions.
14 changes: 9 additions & 5 deletions smt/applications/tests/test_ego.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import os
import unittest
import numpy as np
from sys import argv
from sys import argv, platform

try:
import matplotlib
Expand Down Expand Up @@ -1005,11 +1005,15 @@ def f_obj(X):
)
x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_obj)
if ds.HAS_CONFIG_SPACE: # results differ wrt config_space impl
self.assertAlmostEqual(np.sum(y_data), 2.7639515433083854, delta=1e-4)
self.assertAlmostEqual(np.sum(x_data), 32.11001423996299, delta=1e-4)
if platform.startswith("linux"): # results differ wrt platform
self.assertAlmostEqual(np.sum(y_data), 1.0355815090110578, delta=1e-12)
self.assertAlmostEqual(np.sum(x_data), 38.56885202767958, delta=1e-12)
else:
self.assertAlmostEqual(np.sum(y_data), 0.9606415626557894, delta=1e-12)
self.assertAlmostEqual(np.sum(x_data), 38.23494224077761, delta=1e-12)
else:
self.assertAlmostEqual(np.sum(y_data), 2.03831406306514, delta=1e-4)
self.assertAlmostEqual(np.sum(x_data), 33.56885202767958, delta=1e-4)
self.assertAlmostEqual(np.sum(y_data), 1.8911720770059735, delta=1e-12)
self.assertAlmostEqual(np.sum(x_data), 47.56885202767958, delta=1e-12)

def test_ego_gek(self):
ego, fun = self.initialize_ego_gek()
Expand Down
10 changes: 7 additions & 3 deletions smt/surrogate_models/krg_based.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,8 @@ def _initialize(self):
self.best_iteration_fail = None
self.nb_ill_matrix = 5
self.is_acting_points = {}
# Make internal optim multistart reproducible
self.random_state = np.random.RandomState(41)
supports["derivatives"] = True
supports["variances"] = True
supports["variance_derivatives"] = True
Expand Down Expand Up @@ -1741,7 +1743,7 @@ def grad_minus_reduced_likelihood_function(log10t):
# to theta in (0,2e1]
theta_bounds = self.options["theta_bounds"]
if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
self.theta0[i] = np.random.rand()
self.theta0[i] = self.random_state.rand()
self.theta0[i] = (
self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
+ theta_bounds[0]
Expand Down Expand Up @@ -1770,7 +1772,7 @@ def grad_minus_reduced_likelihood_function(log10t):
else:
theta_bounds = self.options["theta_bounds"]
log10t_bounds = np.log10(theta_bounds)
theta0_rand = np.random.rand(len(self.theta0))
theta0_rand = self.random_state.rand(len(self.theta0))
theta0_rand = (
theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
+ log10t_bounds[0]
Expand Down Expand Up @@ -1829,7 +1831,9 @@ def grad_minus_reduced_likelihood_function(log10t):

if self.options["n_start"] > 1:
sampling = LHS(
xlimits=theta_limits, criterion="maximin", random_state=41
xlimits=theta_limits,
criterion="maximin",
random_state=self.random_state,
)
theta_lhs_loops = sampling(self.options["n_start"])
theta_all_loops = np.vstack((theta_all_loops, theta_lhs_loops))
Expand Down
13 changes: 6 additions & 7 deletions smt/tests/test_kpls_auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import numpy as np
import unittest
import inspect
from sys import platform

from collections import OrderedDict

Expand Down Expand Up @@ -35,8 +36,6 @@ def setUp(self):
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["KPLS"] = KPLS(eval_n_comp=True)

t_errors = {}
e_errors = {}
Expand All @@ -47,14 +46,16 @@ def setUp(self):
n_comp_opt["Branin"] = 2
n_comp_opt["Rosenbrock"] = 1
n_comp_opt["sphere"] = 1
n_comp_opt["exp"] = 3
if platform.startswith("linux"): # result depends on platform
n_comp_opt["exp"] = 2
else:
n_comp_opt["exp"] = 3
n_comp_opt["tanh"] = 1
n_comp_opt["cos"] = 1

self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
self.n_comp_opt = n_comp_opt
Expand All @@ -68,15 +69,13 @@ def run_test(self):

sampling = LHS(xlimits=prob.xlimits, random_state=42)

np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)

np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)

sm0 = self.sms[sname]
sm0 = KPLS(eval_n_comp=True)

sm = sm0.__class__()
sm.options = sm0.options.clone()
Expand Down

0 comments on commit 139cf92

Please sign in to comment.