diff --git a/smt/applications/tests/test_ego.py b/smt/applications/tests/test_ego.py
index 24c07da4d..d886c7a6c 100644
--- a/smt/applications/tests/test_ego.py
+++ b/smt/applications/tests/test_ego.py
@@ -1005,11 +1005,11 @@ def f_obj(X):
         )
         x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_obj)
         if ds.HAS_CONFIG_SPACE:  # results differs wrt config_space impl
-            self.assertAlmostEqual(np.sum(y_data), 2.7639515433083854, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 32.11001423996299, delta=1e-4)
+            self.assertAlmostEqual(np.sum(y_data), 0.9606415626557894, delta=1e-16)
+            self.assertAlmostEqual(np.sum(x_data), 38.23494224077761, delta=1e-16)
         else:
-            self.assertAlmostEqual(np.sum(y_data), 2.03831406306514, delta=1e-4)
-            self.assertAlmostEqual(np.sum(x_data), 33.56885202767958, delta=1e-4)
+            self.assertAlmostEqual(np.sum(y_data), 1.8911720770059735, delta=1e-16)
+            self.assertAlmostEqual(np.sum(x_data), 47.56885202767958, delta=1e-16)
 
     def test_ego_gek(self):
         ego, fun = self.initialize_ego_gek()
diff --git a/smt/surrogate_models/krg_based.py b/smt/surrogate_models/krg_based.py
index 98fcc2e3e..9a912d0d2 100644
--- a/smt/surrogate_models/krg_based.py
+++ b/smt/surrogate_models/krg_based.py
@@ -191,6 +191,8 @@ def _initialize(self):
         self.best_iteration_fail = None
         self.nb_ill_matrix = 5
         self.is_acting_points = {}
+        # Make internal optim multistart reproducible
+        self.random_state = np.random.RandomState(41)
         supports["derivatives"] = True
         supports["variances"] = True
         supports["variance_derivatives"] = True
@@ -1741,8 +1743,8 @@ def grad_minus_reduced_likelihood_function(log10t):
             # to theta in (0,2e1]
             theta_bounds = self.options["theta_bounds"]
             if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
-                self.theta0[i] = np.random.rand()
+                self.theta0[i] = self.random_state.rand()
                 self.theta0[i] = (
                     self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
                     + theta_bounds[0]
                 )
@@ -1770,8 +1772,8 @@ def grad_minus_reduced_likelihood_function(log10t):
             else:
                 theta_bounds = self.options["theta_bounds"]
                 log10t_bounds = np.log10(theta_bounds)
-                theta0_rand = np.random.rand(len(self.theta0))
+                theta0_rand = self.random_state.rand(len(self.theta0))
                 theta0_rand = (
                     theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
                     + log10t_bounds[0]
                 )
@@ -1829,7 +1831,9 @@ def grad_minus_reduced_likelihood_function(log10t):
 
             if self.options["n_start"] > 1:
                 sampling = LHS(
-                    xlimits=theta_limits, criterion="maximin", random_state=41
+                    xlimits=theta_limits,
+                    criterion="maximin",
+                    random_state=self.random_state,
                 )
                 theta_lhs_loops = sampling(self.options["n_start"])
                 theta_all_loops = np.vstack((theta_all_loops, theta_lhs_loops))
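
A minimal sketch of the intended effect of this change, under assumed toy data and settings (illustrative only, not part of the patch): because each model instance now draws its multistart theta0 restarts from its own seeded self.random_state instead of the global NumPy state, two KRG models trained independently on the same data should recover identical hyperparameters.

import numpy as np
from smt.surrogate_models import KRG

# Toy 1D training data (made up for illustration)
xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])

thetas = []
for _ in range(2):
    sm = KRG(n_start=10, print_global=False)  # multistart likelihood optimization
    sm.set_training_values(xt, yt)
    sm.train()
    thetas.append(sm.optimal_theta)

# Before this patch the restarts drew from the global np.random state, so
# back-to-back trainings could converge to different local optima of the
# likelihood; with the per-instance seeded RandomState both runs agree.
assert np.allclose(thetas[0], thetas[1])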