Black formatting
relf committed Oct 13, 2023
1 parent fb23763 commit 0a4d15f
Showing 4 changed files with 56 additions and 38 deletions.
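The edits below are mechanical restyling, not behavior changes: current Black writes the power operator without surrounding spaces when both operands are simple (y_std ** 2 becomes y_std**2), a trailing comma inside brackets (the "magic trailing comma") forces the enclosed argument list onto one line per element, and blank lines sitting directly after a block opener (for, class, def) are dropped. A condensed before/after sketch of the first two rules, mirroring hunks from the diff below:

    # before
    self.optimal_noise = self.options["noise0"] / self.y_std ** 2
    return MixedIntegerKrigingModel(
        surrogate=surrogate, input_in_folded_space=self._work_in_folded_space,
    )

    # after
    self.optimal_noise = self.options["noise0"] / self.y_std**2
    return MixedIntegerKrigingModel(
        surrogate=surrogate,
        input_in_folded_space=self._work_in_folded_space,
    )
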
smt/applications/mfk.py (24 changes: 11 additions & 13 deletions)
@@ -318,7 +318,7 @@ def _new_train_iteration(self, lvl):
self.optimal_noise = self.options["noise0"] * np.ones(self.nt_all[lvl])
for i in range(self.nt_all[lvl]):
diff = self.y_norma[self.index_unique == i] - y_norma_unique[i]
if np.sum(diff ** 2) != 0.0:
if np.sum(diff**2) != 0.0:
self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
self.optimal_noise = self.optimal_noise / self.nt_reps
self.optimal_noise_all[lvl] = self.optimal_noise
@@ -327,7 +327,7 @@ def _new_train_iteration(self, lvl):
self.X_norma_all[lvl] = self.X_norma
self.y_norma_all[lvl] = self.y_norma
else:
self.optimal_noise = self.options["noise0"] / self.y_std ** 2
self.optimal_noise = self.options["noise0"] / self.y_std**2
self.optimal_noise_all[lvl] = self.optimal_noise

# Calculate matrix of distances D between samples
@@ -532,7 +532,6 @@ def _predict_intermediate_values(self, X, lvl, descale=True):

# Calculate recursively kriging mean and variance at level i
for i in range(1, lvl):

g = self._regression_types[self.options["rho_regr"]](X)

if self.is_continuous:
@@ -726,12 +725,12 @@ def predict_variances_all_levels(self, X, is_acting=None):
G = self.optimal_par[0]["G"]

u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
sigma2 = self.optimal_par[0]["sigma2"] / self.y_std ** 2
sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
MSE[:, 0] = sigma2 * (
# 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
- (r_t**2).sum(axis=0)
+ (u_**2).sum(axis=0)
)

# Calculate recursively kriging variance at level i
@@ -789,7 +788,6 @@ def predict_variances_all_levels(self, X, is_acting=None):
cat_kernel=self.options["categorical_kernel"],
x=X,
).reshape(n_eval, self.nt_all[i])


f = np.vstack((g.T * mu[:, i - 1], f0.T))

@@ -800,7 +798,7 @@ def predict_variances_all_levels(self, X, is_acting=None):
beta = self.optimal_par[i]["beta"]

# scaled predictor
sigma2 = self.optimal_par[i]["sigma2"] / self.y_std ** 2
sigma2 = self.optimal_par[i]["sigma2"] / self.y_std**2
q = self.q_all[i]
u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
sigma2_rho = np.dot(
@@ -818,21 +816,21 @@ def predict_variances_all_levels(self, X, is_acting=None):
# sigma2_rho * MSE[:, i - 1]
+Q_ / (2 * (self.nt_all[i] - p - q))
# * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
* (1 - (r_t ** 2).sum(axis=0))
+ sigma2 * (u_ ** 2).sum(axis=0)
* (1 - (r_t**2).sum(axis=0))
+ sigma2 * (u_**2).sum(axis=0)
)
else:
MSE[:, i] = sigma2 * (
# 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
1
- (r_t ** 2).sum(axis=0)
+ (u_ ** 2).sum(axis=0)
- (r_t**2).sum(axis=0)
+ (u_**2).sum(axis=0)
) # + sigma2_rho * MSE[:, i - 1]
if self.options["propagate_uncertainty"]:
MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]

# scaled predictor
MSE *= self.y_std ** 2
MSE *= self.y_std**2

return MSE, sigma2_rhos

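Background for the MSE expressions reformatted above (a reading of the existing code, since the commit only changes spacing, not the math): each level evaluates the standard universal-kriging prediction variance. In the notation of the surrounding code, with r_t the correlation vector back-solved against the Cholesky factor C and u_ the regression correction back-solved against G, the level-0 term before the final rescaling by y_std**2 is

    \mathrm{MSE}_0(x) = \sigma^2 \left( 1 - \mathbf{r}_t^\top \mathbf{r}_t + \mathbf{u}^\top \mathbf{u} \right),
    \qquad \mathbf{u} = G^{-\top} \left( f(x)^\top - F_t^\top \mathbf{r}_t \right)

which is exactly sigma2 * (1 - (r_t**2).sum(axis=0) + (u_**2).sum(axis=0)); the higher levels add sigma2_rho * MSE[:, i - 1] when propagate_uncertainty is set.
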
smt/applications/mixed_integer.py (12 changes: 9 additions & 3 deletions)
@@ -79,7 +79,10 @@ class MixedIntegerSurrogateModel(SurrogateModel):
"""

def __init__(
self, design_space, surrogate, input_in_folded_space=True,
self,
design_space,
surrogate,
input_in_folded_space=True,
):
"""
Parameters
@@ -177,7 +180,9 @@ class MixedIntegerKrigingModel(KrgBased):
"""

def __init__(
self, surrogate, input_in_folded_space=True,
self,
surrogate,
input_in_folded_space=True,
):
"""
Parameters
@@ -330,7 +335,8 @@ def build_kriging_model(self, surrogate):
"""
surrogate.options["design_space"] = self._design_space
return MixedIntegerKrigingModel(
surrogate=surrogate, input_in_folded_space=self._work_in_folded_space,
surrogate=surrogate,
input_in_folded_space=self._work_in_folded_space,
)

def build_surrogate_model(self, surrogate):
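For orientation, the reformatted constructors keep the same two arguments, surrogate and input_in_folded_space. A rough usage sketch, under assumed import paths and a made-up two-variable design space (none of this is taken from the commit):

    import numpy as np
    from smt.surrogate_models import KRG
    from smt.applications.mixed_integer import MixedIntegerKrigingModel
    from smt.utils.design_space import DesignSpace, FloatVariable, IntegerVariable  # assumed path

    ds = DesignSpace([FloatVariable(0.0, 1.0), IntegerVariable(0, 3)])
    model = MixedIntegerKrigingModel(
        surrogate=KRG(design_space=ds, print_global=False),
        input_in_folded_space=True,
    )
    xt = np.array([[0.1, 0.0], [0.5, 1.0], [0.9, 3.0]])  # toy folded-space DOE
    yt = np.array([1.0, 2.0, 3.0])
    model.set_training_values(xt, yt)
    model.train()
    y_pred = model.predict_values(np.array([[0.3, 2.0]]))
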
smt/applications/tests/test_mfk_mfkpls_mixed.py (3 changes: 0 additions & 3 deletions)
@@ -51,7 +51,6 @@


class TestMFKmixed(unittest.TestCase):

# useful functions (g and h)
def g_ZDT(self, x):
x0 = x[0]
@@ -400,7 +399,6 @@ def run_mfk_mixed_example(self):
# ------------------------------------------------------------------------------

for krg_method in KRG_METHODS:

if "mfk" in krg_method: # if multifi compute LF validation data
n_train_LF = 40 # training set size IF MULTIFI
sampling = MixedIntegerSamplingMethod(LHS, ds, criterion="ese")
@@ -729,7 +727,6 @@ def run_mfkpls_mixed_example(self):
# ------------------------------------------------------------------------------

for krg_method in KRG_METHODS:

if "mfk" in krg_method: # if multifi compute LF validation data
n_train_LF = 40 # training set size IF MULTIFI
sampling = MixedIntegerSamplingMethod(LHS, ds, criterion="ese")
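The two loops edited above only lose a stray blank line; for context, the MixedIntegerSamplingMethod call they rely on wraps a continuous sampler so the generated DOE respects the mixed design space. A small sketch along those lines (import paths and the design space are assumptions, as in the sketch after the mixed_integer.py section):

    from smt.sampling_methods import LHS
    from smt.applications.mixed_integer import MixedIntegerSamplingMethod
    from smt.utils.design_space import DesignSpace, FloatVariable, IntegerVariable  # assumed path

    ds = DesignSpace([FloatVariable(0.0, 1.0), IntegerVariable(0, 3)])
    sampling = MixedIntegerSamplingMethod(LHS, ds, criterion="ese")
    n_train_LF = 40  # training set size used in the multi-fidelity branch above
    xt_LF = sampling(n_train_LF)  # sampling objects are callable with the number of points
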
smt/surrogate_models/krg_based.py (55 changes: 36 additions & 19 deletions)
@@ -110,7 +110,10 @@ def _initialize(self):
declare(
"hierarchical_kernel",
MixHrcKernelType.ALG_KERNEL,
values=[MixHrcKernelType.ALG_KERNEL, MixHrcKernelType.ARC_KERNEL,],
values=[
MixHrcKernelType.ALG_KERNEL,
MixHrcKernelType.ARC_KERNEL,
],
desc="The kernel to use for mixed hierarchical inputs. Only for non continuous Kriging",
)
declare(
@@ -307,9 +310,11 @@ def _new_train(self):
self.optimal_noise = np.array(self.options["noise0"])
elif self.options["use_het_noise"]:
# hetGP works with unique design variables when noise variance are not given
(self.X_norma, index_unique, nt_reps,) = np.unique(
self.X_norma, return_inverse=True, return_counts=True, axis=0
)
(
self.X_norma,
index_unique,
nt_reps,
) = np.unique(self.X_norma, return_inverse=True, return_counts=True, axis=0)
self.nt = self.X_norma.shape[0]

# computing the mean of the output per unique design variable (see Binois et al., 2018)
@@ -320,7 +325,7 @@ def _new_train(self):
self.optimal_noise = self.options["noise0"] * np.ones(self.nt)
for i in range(self.nt):
diff = self.y_norma[index_unique == i] - y_norma_unique[i]
if np.sum(diff ** 2) != 0.0:
if np.sum(diff**2) != 0.0:
self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
self.optimal_noise = self.optimal_noise / nt_reps
self.y_norma = y_norma_unique
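The two hunks above only change ** spacing and the layout of the tuple unpacking, but they sit in the heteroscedastic-noise path referenced by the Binois et al., 2018 comment: identical design points are collapsed with np.unique, and the sample variance of the replicated outputs becomes the per-point noise estimate. A standalone sketch of that logic with made-up data (names chosen for illustration):

    import numpy as np

    X = np.array([[0.0], [0.0], [1.0], [1.0], [1.0]])  # design with replicated points
    y = np.array([1.0, 1.2, 3.0, 2.8, 3.2])

    X_unique, index_unique, nt_reps = np.unique(
        X, return_inverse=True, return_counts=True, axis=0
    )
    y_unique = np.array([y[index_unique == i].mean() for i in range(len(X_unique))])
    noise = np.zeros(len(X_unique))
    for i in range(len(X_unique)):
        diff = y[index_unique == i] - y_unique[i]
        if np.sum(diff**2) != 0.0:
            noise[i] = np.std(diff, ddof=1) ** 2  # unbiased variance of the replicates
    noise = noise / nt_reps  # variance of the per-point averaged output
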
@@ -537,7 +542,12 @@ def _matrix_data_corr(
)
else:
d = componentwise_distance(
dx, corr, nx, power, theta=None, return_derivative=False,
dx,
corr,
nx,
power,
theta=None,
return_derivative=False,
)
if cat_kernel in [MixIntKernelType.GOWER, MixIntKernelType.CONT_RELAX]:
r = _correlation_types[corr](theta, d)
@@ -793,11 +803,11 @@ def _reduced_likelihood_function(self, theta):
if self.name in ["MFK", "MFKPLS", "MFKPLSK"]:
p = self.p
q = self.q
sigma2 = (rho ** 2.0).sum(axis=0) / (self.nt - p - q)
sigma2 = (rho**2.0).sum(axis=0) / (self.nt - p - q)
reduced_likelihood_function_value = -(self.nt - p - q) * np.log10(
sigma2.sum()
) - self.nt * np.log10(detR)
par["sigma2"] = sigma2 * self.y_std ** 2.0
par["sigma2"] = sigma2 * self.y_std**2.0
par["beta"] = beta
par["gamma"] = linalg.solve_triangular(C.T, rho)
par["C"] = C
@@ -919,7 +929,7 @@ def _reduced_likelihood_gradient(self, theta):
- gamma.T.dot(dmu)
- np.dot(gamma.T, dR.dot(gamma))
)
* self.y_std ** 2.0
* self.y_std**2.0
)
dsigma_all.append(dsigma_2)

@@ -1096,7 +1106,7 @@ def _reduced_likelihood_hessian(self, theta):
dsigma2detadomega = (
(1 / self.nt)
* (sigma_arg_1 + sigma_arg_2 + sigma_arg_3 + sigma_arg_4)
* self.y_std ** 2.0
* self.y_std**2.0
)

# Compute Hessian
@@ -1419,7 +1429,7 @@ def _predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
- self._regression_types[self.options["poly"]](X_cont).T,
)
A = self.optimal_par["sigma2"]
B = 1.0 - (rt ** 2.0).sum(axis=0) + (u ** 2.0).sum(axis=0)
B = 1.0 - (rt**2.0).sum(axis=0) + (u**2.0).sum(axis=0)
MSE = np.einsum("i,j -> ji", A, B)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
@@ -1543,14 +1553,14 @@ def grad_minus_reduced_likelihood_function(theta):
else:

def minus_reduced_likelihood_function(log10t):
return -self._reduced_likelihood_function(theta=10.0 ** log10t)[0]
return -self._reduced_likelihood_function(theta=10.0**log10t)[0]

def grad_minus_reduced_likelihood_function(log10t):
log10t_2d = np.atleast_2d(log10t).T
res = (
-np.log(10.0)
* (10.0 ** log10t_2d)
* (self._reduced_likelihood_gradient(10.0 ** log10t_2d)[0])
* (10.0**log10t_2d)
* (self._reduced_likelihood_gradient(10.0**log10t_2d)[0])
)
return res
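A note on the wrapper above: the hyperparameters are optimized over t = log10(theta), so the gradient is the chain rule applied to that change of variables,

    \frac{d}{dt}\bigl[-L(10^{t})\bigr] = -\ln(10)\,10^{t}\,L'(10^{t}),

which is the -np.log(10) * (10.0**log10t_2d) * self._reduced_likelihood_gradient(10.0**log10t_2d)[0] product returned by grad_minus_reduced_likelihood_function (a restatement of the existing code, spelled out only to make the log10 reparametrization explicit).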

@@ -1649,7 +1659,10 @@ def grad_minus_reduced_likelihood_function(log10t):
[theta0, np.log10(np.array([self.noise0]).flatten())]
)
theta0_rand = np.concatenate(
[theta0_rand, np.log10(np.array([self.noise0]).flatten()),]
[
theta0_rand,
np.log10(np.array([self.noise0]).flatten()),
]
)

for i in range(len(self.noise0)):
@@ -1697,7 +1710,7 @@ def grad_minus_reduced_likelihood_function(log10t):
optimal_theta_res = optimal_theta_res_loop

elif self.options["hyper_opt"] == "TNC":
theta_all_loops = 10 ** theta_all_loops
theta_all_loops = 10**theta_all_loops
for theta0_loop in theta_all_loops:
optimal_theta_res_loop = optimize.minimize(
minus_reduced_likelihood_function,
@@ -1717,7 +1730,7 @@ def grad_minus_reduced_likelihood_function(log10t):
optimal_theta = optimal_theta_res["x"]

if self.name not in ["MGP"]:
optimal_theta = 10 ** optimal_theta
optimal_theta = 10**optimal_theta

optimal_rlf_value, optimal_par = self._reduced_likelihood_function(
theta=optimal_theta
@@ -1796,7 +1809,7 @@ def grad_minus_reduced_likelihood_function(log10t):
return best_optimal_rlf_value, best_optimal_par, best_optimal_theta

if self.options["corr"] == "squar_exp":
self.options["theta0"] = (theta * self.coeff_pls ** 2).sum(1)
self.options["theta0"] = (theta * self.coeff_pls**2).sum(1)
else:
self.options["theta0"] = (theta * np.abs(self.coeff_pls)).sum(1)

@@ -1838,7 +1851,11 @@ def _check_param(self):

n_comp = self.options["n_comp"] if "n_comp" in self.options else None
n_param = compute_n_param(
self.design_space, self.options["categorical_kernel"], d, n_comp, mat_dim,
self.design_space,
self.options["categorical_kernel"],
d,
n_comp,
mat_dim,
)

self.options["theta0"] *= np.ones(n_param)
