diff --git a/src/elexsolver/QuantileRegressionSolver.py b/src/elexsolver/QuantileRegressionSolver.py
index f8f6fd3..91665ee 100644
--- a/src/elexsolver/QuantileRegressionSolver.py
+++ b/src/elexsolver/QuantileRegressionSolver.py
@@ -118,11 +118,9 @@ def fit(
             taus = [taus]
         else:
             assert y.shape[1] == 1  # you can either have multiple taus or multiple ys
-
         coefficients_array = []
         for tau in taus:
             for y_arr in y.T:
-                y_arr = y_arr.reshape(-1,1)
                 if lambda_ > 0:
                     coefficients = self._fit_with_regularization(
                         x, y_arr, weights, tau, lambda_, regularize_intercept, n_feat_ignore_reg
diff --git a/tests/test_ols.py b/tests/test_ols.py
index 935b2e6..e399991 100644
--- a/tests/test_ols.py
+++ b/tests/test_ols.py
@@ -211,17 +211,17 @@ def test_residuals_no_weights(random_data_no_weights):
     lm.fit(x, y, fit_intercept=False)
     predictions = lm.predict(x)

-    residuals = lm.residuals(y, predictions, loo=False, center=False)
-
+    residuals = lm.residuals(x, y, K=None, center=False)
     assert residuals[0] == pytest.approx(0.885973530)
     assert residuals[-1] == pytest.approx(0.841996302)

-    residuals = lm.residuals(y, predictions, loo=True, center=False)
+    # equivalent of leave-one-out residuals
+    residuals = lm.residuals(x, y, K=100, center=False)

     assert residuals[0] == pytest.approx(0.920112164)
     assert residuals[-1] == pytest.approx(0.875896477)

-    residuals = lm.residuals(y, predictions, loo=True, center=True)
+    residuals = lm.residuals(x, y, K=100, center=True)

     assert np.sum(residuals) == pytest.approx(0)

@@ -234,17 +234,17 @@ def test_residuals_weights(random_data_weights):
     lm.fit(x, y, weights=weights, fit_intercept=False)
     predictions = lm.predict(x)

-    residuals = lm.residuals(y, predictions, loo=False, center=False)
+    residuals = lm.residuals(x, y, weights=weights, K=None, center=False)

     assert residuals[0] == pytest.approx(-1.971798590)
     assert residuals[-1] == pytest.approx(-1.373951578)

-    residuals = lm.residuals(y, predictions, loo=True, center=False)
+    residuals = lm.residuals(x, y, weights=weights, K=100, center=False)

     assert residuals[0] == pytest.approx(-1.999718445)
     assert residuals[-1] == pytest.approx(-1.438563033)

-    residuals = lm.residuals(y, predictions, loo=True, center=True)
+    residuals = lm.residuals(x, y, weights=weights, K=100, center=True)

     assert np.sum(residuals) == pytest.approx(0)

diff --git a/tests/test_quantile.py b/tests/test_quantile.py
index 792c6bd..515bdc0 100644
--- a/tests/test_quantile.py
+++ b/tests/test_quantile.py
@@ -23,7 +23,7 @@ def test_basic_median_1():
     preds = quantreg.predict(x)
     # you'd think it would be 8 instead of 7.5, but run quantreg in R to confirm
     # has to do with missing intercept
-    np.testing.assert_array_equal(preds, [[7.5, 7.5, 7.5, 15]])
+    np.testing.assert_array_equal(preds, [[7.5], [7.5], [7.5], [15]])


 def test_basic_median_2():
@@ -33,7 +33,7 @@
     y = np.asarray([3, 8, 9, 15])
     quantreg.fit(x, y, tau)
     preds = quantreg.predict(x)
-    np.testing.assert_array_equal(preds, [[8, 8, 8, 15]])
+    np.testing.assert_array_equal(preds, [[8], [8], [8], [15]])


 def test_basic_lower():
@@ -43,7 +43,7 @@
     y = np.asarray([3, 8, 9, 15])
     quantreg.fit(x, y, tau)
     preds = quantreg.predict(x)
-    np.testing.assert_array_equal(preds, [[3, 3, 3, 15]])
+    np.testing.assert_array_equal(preds, [[3], [3], [3], [15]])


 def test_basic_upper():
@@ -53,7 +53,7 @@
     y = np.asarray([3, 8, 9, 15])
     quantreg.fit(x, y, tau)
     preds = quantreg.predict(x)
-    np.testing.assert_array_equal(preds, [[9, 9, 9, 15]])
+    np.testing.assert_array_equal(preds, [[9], [9], [9], [15]])


 ######################
@@ -68,7 +68,7 @@
     y = random_data_no_weights["y"].values
     quantreg.fit(x, y, tau, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[1.57699, 6.74906, 4.40175, 4.85346, 4.51814]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[1.57699], [6.74906], [4.40175], [4.85346], [4.51814]], rtol=TOL)


 def test_random_lower(random_data_no_weights):
@@ -78,7 +78,7 @@
     y = random_data_no_weights["y"].values
     quantreg.fit(x, y, tau, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[0.17759, 6.99588, 4.18896, 4.83906, 3.22546]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[0.17759], [6.99588], [4.18896], [4.83906], [3.22546]], rtol=TOL)


 def test_random_upper(random_data_no_weights):
@@ -88,7 +88,7 @@
     y = random_data_no_weights["y"].values
     quantreg.fit(x, y, tau, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[1.85617, 6.81286, 6.05586, 5.51965, 4.19864]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[1.85617], [6.81286], [6.05586], [5.51965], [4.19864]], rtol=TOL)


 #################
@@ -103,10 +103,10 @@ def test_multiple(random_data_no_weights):
     y = random_data_no_weights["y"].values
     quantreg.fit(x, y, taus, fit_intercept=False)
     quantreg.predict(x)
-    assert len(quantreg.coefficients) == 3
-    np.testing.assert_allclose(quantreg.coefficients[0], [0.17759, 6.99588, 4.18896, 4.83906, 3.22546], rtol=TOL)
-    np.testing.assert_allclose(quantreg.coefficients[1], [1.57699, 6.74906, 4.40175, 4.85346, 4.51814], rtol=TOL)
-    np.testing.assert_allclose(quantreg.coefficients[2], [1.85617, 6.81286, 6.05586, 5.51965, 4.19864], rtol=TOL)
+    assert quantreg.coefficients.shape == (5, 3)
+    np.testing.assert_allclose(quantreg.coefficients[:,0], [0.17759, 6.99588, 4.18896, 4.83906, 3.22546], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients[:,1], [1.57699, 6.74906, 4.40175, 4.85346, 4.51814], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients[:,2], [1.85617, 6.81286, 6.05586, 5.51965, 4.19864], rtol=TOL)


 ######################
@@ -122,7 +122,7 @@ def test_basic_median_weights():
     weights = np.asarray([1, 1, 100, 3])
     quantreg.fit(x, y, tau, weights)
     preds = quantreg.predict(x)
-    np.testing.assert_array_equal(preds, [[9, 9, 9, 15]])
+    np.testing.assert_array_equal(preds, [[9], [9], [9], [15]])


 def test_random_median_weights(random_data_weights):
@@ -133,7 +133,7 @@
     weights = random_data_weights["weights"].values
     quantreg.fit(x, y, tau, weights=weights, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[1.59521, 2.17864, 4.68050, 3.10920, 9.63739]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[1.59521], [2.17864], [4.68050], [3.10920], [9.63739]], rtol=TOL)


 def test_random_lower_weights(random_data_weights):
@@ -144,7 +144,7 @@
     weights = random_data_weights["weights"].values
     quantreg.fit(x, y, tau, weights=weights, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[0.63670, 1.27028, 4.81500, 3.08055, 8.69929]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[0.63670], [1.27028], [4.81500], [3.08055], [8.69929]], rtol=TOL)


 def test_random_upper_weights(random_data_weights):
@@ -155,7 +155,7 @@
     weights = random_data_weights["weights"].values
     quantreg.fit(x, y, tau, weights=weights, fit_intercept=False)
     quantreg.predict(x)
-    np.testing.assert_allclose(quantreg.coefficients, [[3.47742, 2.07360, 4.51754, 4.15237, 9.58856]], rtol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients, [[3.47742], [2.07360], [4.51754], [4.15237], [9.58856]], rtol=TOL)


 #############################
@@ -189,12 +189,12 @@ def test_weight_normalization_same_fit(random_data_weights):
     quantreg = QuantileRegressionSolver()
     quantreg.fit(x, y, tau, weights, normalize_weights=True)
     preds = quantreg.predict(x)
-    np.testing.assert_allclose(preds, [[9, 9, 9, 15]], rtol=TOL)
+    np.testing.assert_allclose(preds, [[9], [9], [9], [15]], rtol=TOL)

     quantreg = QuantileRegressionSolver()
     quantreg.fit(x, y, tau, weights, normalize_weights=False)
     preds = quantreg.predict(x)
-    np.testing.assert_allclose(preds, [[9, 9, 9, 15]], rtol=TOL)
+    np.testing.assert_allclose(preds, [[9], [9], [9], [15]], rtol=TOL)


 ########################
@@ -205,13 +205,13 @@ def test_weight_normalization_same_fit(random_data_weights):
 def test_regularization_without_intercept(random_data_no_weights):
     tau = 0.5
     x = random_data_no_weights[["x0", "x1", "x2", "x3", "x4"]].values
-    y = random_data_no_weights["y"].values
+    y = random_data_no_weights["y"].values.flatten()

     quantreg = QuantileRegressionSolver()
     lambda_ = 1e6
     quantreg.fit(x, y, tau, lambda_=lambda_, fit_intercept=False, regularize_intercept=True)
     np.testing.assert_allclose(
-        quantreg.coefficients, [[0, 0, 0, 0, 0]], atol=TOL
+        quantreg.coefficients, [[0], [0], [0], [0], [0]], atol=TOL
     )  # using absolute tolerance since comparing to zero


@@ -225,7 +225,7 @@ def test_regularization_with_intercept(random_data_no_weights):
     lambda_ = 1e6
     quantreg.fit(x, y, tau, lambda_=lambda_, fit_intercept=True, regularize_intercept=False)
     coefficients_w_reg = quantreg.coefficients
-    np.testing.assert_allclose(quantreg.coefficients[0][1:], [0, 0, 0, 0], atol=TOL)
+    np.testing.assert_allclose(quantreg.coefficients[1:], [[0], [0], [0], [0]], atol=TOL)
     assert np.abs(coefficients_w_reg[0][0]) > TOL


@@ -239,10 +239,10 @@ def test_regularization_with_intercept_and_features(random_data_no_weights):
     lambda_ = 1e6
     quantreg.fit(x, y, tau, lambda_=lambda_, fit_intercept=True, regularize_intercept=False, n_feat_ignore_reg=2)
     coefficients_w_reg = quantreg.coefficients
-    np.testing.assert_allclose(quantreg.coefficients[0][3:], [0, 0], atol=TOL)
-    assert np.abs(coefficients_w_reg[0][0]) > TOL
-    assert np.abs(coefficients_w_reg[0][1]) > TOL
-    assert np.abs(coefficients_w_reg[0][2]) > TOL
+    np.testing.assert_allclose(quantreg.coefficients[3:], [[0], [0]], atol=TOL)
+    assert np.abs(coefficients_w_reg[0]) > TOL
+    assert np.abs(coefficients_w_reg[1]) > TOL
+    assert np.abs(coefficients_w_reg[2]) > TOL


 ########################
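For reviewers, the updated quantile tests encode a change in how results are stored: `quantreg.coefficients` is now a 2-D array with one column per tau (shape `(n_features, n_taus)`, see `test_multiple`), and `predict` returns column vectors instead of a single row. A minimal sketch of the new convention, reusing the toy data from `test_basic_median_1`; the particular list of taus is illustrative only:

```python
import numpy as np

from elexsolver.QuantileRegressionSolver import QuantileRegressionSolver

x = np.asarray([[1], [1], [1], [2]])
y = np.asarray([3, 8, 9, 15])

quantreg = QuantileRegressionSolver()
# Fitting several taus at once stores one column of coefficients per tau,
# so with a single feature and three taus the shape is (1, 3).
quantreg.fit(x, y, [0.1, 0.5, 0.9], fit_intercept=False)
assert quantreg.coefficients.shape == (1, 3)

# Predictions come back as column vectors: a tau=0.5 fit on this same data
# yields [[7.5], [7.5], [7.5], [15]] (see test_basic_median_1 above).
preds = quantreg.predict(x)
```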
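The OLS test changes likewise point at a new `residuals` interface: the tests now pass the design matrix and response (plus optional `weights`) together with a `K` argument, where `K=None` takes the place of `loo=False` and `K=100` is described in the test comment as the equivalent of leave-one-out residuals. A rough sketch of the call pattern under those assumptions; the `OLSRegressionSolver` class name and import path are inferred from the test file and are not part of this diff:

```python
import numpy as np

from elexsolver.OLSRegressionSolver import OLSRegressionSolver  # assumed import, not shown in this diff

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 5))
y = x @ rng.normal(size=(5, 1)) + rng.normal(size=(100, 1))

lm = OLSRegressionSolver()
lm.fit(x, y, fit_intercept=False)

# Plain in-sample residuals: K=None replaces the old loo=False.
residuals = lm.residuals(x, y, K=None, center=False)

# The updated tests use K=100 as the "equivalent of leave-one-out residuals";
# center=True recenters the residuals so they sum to (approximately) zero.
loo_residuals = lm.residuals(x, y, K=100, center=True)
assert np.isclose(np.sum(loo_residuals), 0.0)
```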