diff --git a/smt/applications/tests/test_ego.py b/smt/applications/tests/test_ego.py
index 24c07da4d..71766801e 100644
--- a/smt/applications/tests/test_ego.py
+++ b/smt/applications/tests/test_ego.py
@@ -11,7 +11,7 @@
 import os
 import unittest
 import numpy as np
-from sys import argv
+from sys import argv, platform
 
 try:
     import matplotlib
@@ -1005,11 +1005,15 @@ def f_obj(X):
         )
         x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_obj)
         if ds.HAS_CONFIG_SPACE:  # results differs wrt config_space impl
-            self.assertAlmostEqual(np.sum(y_data), 2.7639515433083854, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 32.11001423996299, delta=1e-4)
+            if platform.startswith("linux"):  # results differs wrt platform
+                self.assertAlmostEqual(np.sum(y_data), 1.0355815090110578, delta=1e-12)
+                self.assertAlmostEqual(np.sum(x_data), 38.56885202767958, delta=1e-12)
+            else:
+                self.assertAlmostEqual(np.sum(y_data), 0.9606415626557894, delta=1e-12)
+                self.assertAlmostEqual(np.sum(x_data), 38.23494224077761, delta=1e-12)
         else:
-            self.assertAlmostEqual(np.sum(y_data), 2.03831406306514, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 33.56885202767958, delta=1e-4)
+            self.assertAlmostEqual(np.sum(y_data), 1.8911720770059735, delta=1e-12)
+            self.assertAlmostEqual(np.sum(x_data), 47.56885202767958, delta=1e-12)
 
     def test_ego_gek(self):
         ego, fun = self.initialize_ego_gek()
diff --git a/smt/surrogate_models/krg_based.py b/smt/surrogate_models/krg_based.py
index 98fcc2e3e..9a912d0d2 100644
--- a/smt/surrogate_models/krg_based.py
+++ b/smt/surrogate_models/krg_based.py
@@ -191,6 +191,8 @@ def _initialize(self):
         self.best_iteration_fail = None
         self.nb_ill_matrix = 5
         self.is_acting_points = {}
+        # Make internal optim multistart reproducible
+        self.random_state = np.random.RandomState(41)
         supports["derivatives"] = True
         supports["variances"] = True
         supports["variance_derivatives"] = True
@@ -1741,7 +1743,7 @@ def grad_minus_reduced_likelihood_function(log10t):
                 # to theta in (0,2e1]
                 theta_bounds = self.options["theta_bounds"]
                 if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
-                    self.theta0[i] = np.random.rand()
+                    self.theta0[i] = self.random_state.rand()
                     self.theta0[i] = (
                         self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
                         + theta_bounds[0]
@@ -1770,7 +1772,7 @@ def grad_minus_reduced_likelihood_function(log10t):
             else:
                 theta_bounds = self.options["theta_bounds"]
                 log10t_bounds = np.log10(theta_bounds)
-                theta0_rand = np.random.rand(len(self.theta0))
+                theta0_rand = self.random_state.rand(len(self.theta0))
                 theta0_rand = (
                     theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
                     + log10t_bounds[0]
@@ -1829,7 +1831,9 @@ def grad_minus_reduced_likelihood_function(log10t):
 
             if self.options["n_start"] > 1:
                 sampling = LHS(
-                    xlimits=theta_limits, criterion="maximin", random_state=41
+                    xlimits=theta_limits,
+                    criterion="maximin",
+                    random_state=self.random_state,
                 )
                 theta_lhs_loops = sampling(self.options["n_start"])
                 theta_all_loops = np.vstack((theta_all_loops, theta_lhs_loops))
diff --git a/smt/tests/test_kpls_auto.py b/smt/tests/test_kpls_auto.py
index c4213ad64..d561d415a 100644
--- a/smt/tests/test_kpls_auto.py
+++ b/smt/tests/test_kpls_auto.py
@@ -7,6 +7,7 @@
 import numpy as np
 import unittest
 import inspect
+from sys import platform
 
 from collections import OrderedDict
 
@@ -35,8 +36,6 @@ def setUp(self):
         problems["exp"] = TensorProduct(ndim=ndim, func="exp")
         problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
         problems["cos"] = TensorProduct(ndim=ndim, func="cos")
-        sms = OrderedDict()
-        sms["KPLS"] = KPLS(eval_n_comp=True)
 
         t_errors = {}
         e_errors = {}
@@ -47,14 +46,16 @@ def setUp(self):
         n_comp_opt["Branin"] = 2
         n_comp_opt["Rosenbrock"] = 1
         n_comp_opt["sphere"] = 1
-        n_comp_opt["exp"] = 3
+        if platform.startswith("linux"):  # result depends on platform
+            n_comp_opt["exp"] = 2
+        else:
+            n_comp_opt["exp"] = 3
         n_comp_opt["tanh"] = 1
         n_comp_opt["cos"] = 1
 
         self.nt = nt
         self.ne = ne
         self.problems = problems
-        self.sms = sms
         self.t_errors = t_errors
         self.e_errors = e_errors
         self.n_comp_opt = n_comp_opt
@@ -68,15 +69,13 @@ def run_test(self):
 
         sampling = LHS(xlimits=prob.xlimits, random_state=42)
 
-        np.random.seed(0)
         xt = sampling(self.nt)
         yt = prob(xt)
 
-        np.random.seed(1)
         xe = sampling(self.ne)
         ye = prob(xe)
 
-        sm0 = self.sms[sname]
+        sm0 = KPLS(eval_n_comp=True)
         sm = sm0.__class__()
         sm.options = sm0.options.clone()
 
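
Note on the krg_based.py change: replacing calls to the global np.random with a model-owned np.random.RandomState(41) is what makes the hyperparameter multistart reproducible run-to-run, without disturbing the global RNG stream that user code may depend on. Below is a minimal standalone sketch of that pattern, not SMT's actual API; the class TinyModel, the method multistart_thetas, and the log10 bounds (-6.0, 2.0) are hypothetical, while the seed 41 and the uniform-draw-to-log10-bounds scaling mirror the patch.

    import numpy as np

    class TinyModel:
        def __init__(self, seed=41):  # 41 is the seed hard-coded in the patch
            # Private RNG: reproducible draws, global np.random left untouched
            self.random_state = np.random.RandomState(seed)

        def multistart_thetas(self, n_start, log10t_bounds=(-6.0, 2.0)):
            # Same scaling scheme as the diff: uniform draws mapped into the
            # log10 theta bounds, then exponentiated back to theta space
            lo, hi = log10t_bounds
            draws = self.random_state.rand(n_start)
            return 10 ** (draws * (hi - lo) + lo)

    m1, m2 = TinyModel(), TinyModel()
    # Two fresh models produce identical restart points ...
    assert np.allclose(m1.multistart_thetas(5), m2.multistart_thetas(5))
    # ... while constructing and using them never advances np.random's
    # global state, so surrounding user code is unaffected.

Relatedly, the last krg_based.py hunk passes the RandomState object itself to LHS(random_state=...) instead of the int 41: successive LHS calls then continue one shared stream rather than each call re-seeding to an identical sequence.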