diff --git a/darts/tests/models/forecasting/test_regression_models.py b/darts/tests/models/forecasting/test_regression_models.py
index b2d3898ae5..307c7eac73 100644
--- a/darts/tests/models/forecasting/test_regression_models.py
+++ b/darts/tests/models/forecasting/test_regression_models.py
@@ -157,10 +157,31 @@ class NewCls(cls):
 
     return NewCls
 
 
+xgb_test_params = {
+    "n_estimators": 1,
+    "max_depth": 1,
+    "max_leaves": 1,
+    "verbose": -1,
+    "random_state": 42,
+}
+lgbm_test_params = {
+    "n_estimators": 1,
+    "max_depth": 1,
+    "num_leaves": 2,
+    "verbosity": -1,
+    "random_state": 42,
+}
+cb_test_params = {
+    "iterations": 1,
+    "depth": 1,
+    "verbose": -1,
+    "random_state": 42,
+}
+
+
 class TestRegressionModels:
     np.random.seed(42)
 
-    # default regression models
     models = [
         RandomForest,
@@ -179,10 +200,16 @@ class TestRegressionModels:
         LinearRegressionModel, likelihood="poisson", random_state=42
     )
     PoissonXGBModel = partialclass(
-        XGBModel, likelihood="poisson", random_state=42, tree_method="exact"
+        XGBModel,
+        likelihood="poisson",
+        tree_method="exact",
+        **xgb_test_params,
     )
     QuantileXGBModel = partialclass(
-        XGBModel, likelihood="quantile", random_state=42, tree_method="exact"
+        XGBModel,
+        likelihood="quantile",
+        tree_method="exact",
+        **xgb_test_params,
     )
     # targets for poisson regression must be positive, so we exclude them for some tests
     models.extend(
@@ -200,8 +227,8 @@ class TestRegressionModels:
         1e-13,  # RegressionModel
         0.8,  # QuantileLinearRegressionModel
         0.4,  # PoissonLinearRegressionModel
-        1e-01,  # PoissonXGBModel
-        0.5,  # QuantileXGBModel
+        0.75,  # PoissonXGBModel
+        0.75,  # QuantileXGBModel
     ]
     multivariate_accuracies = [
         0.3,  # RandomForest
@@ -209,8 +236,8 @@ class TestRegressionModels:
         1e-13,  # RegressionModel
         0.8,  # QuantileLinearRegressionModel
         0.4,  # PoissonLinearRegressionModel
-        0.15,  # PoissonXGBModel
-        0.4,  # QuantileXGBModel
+        0.75,  # PoissonXGBModel
+        0.75,  # QuantileXGBModel
     ]
     multivariate_multiseries_accuracies = [
         0.05,  # RandomForest
@@ -218,23 +245,26 @@ class TestRegressionModels:
         1e-13,  # RegressionModel
         0.8,  # QuantileLinearRegressionModel
         0.4,  # PoissonLinearRegressionModel
-        1e-01,  # PoissonXGBModel
-        0.4,  # QuantileXGBModel
+        0.85,  # PoissonXGBModel
+        0.65,  # QuantileXGBModel
     ]
     lgbm_w_categorical_covariates = NotImportedModule
     if lgbm_available:
+        RegularLightGBMModel = partialclass(LightGBMModel, **lgbm_test_params)
         QuantileLightGBMModel = partialclass(
             LightGBMModel,
             likelihood="quantile",
             quantiles=[0.05, 0.5, 0.95],
-            random_state=42,
+            **lgbm_test_params,
         )
         PoissonLightGBMModel = partialclass(
-            LightGBMModel, likelihood="poisson", random_state=42
+            LightGBMModel,
+            likelihood="poisson",
+            **lgbm_test_params,
         )
         models += [
-            LightGBMModel,
+            RegularLightGBMModel,
             QuantileLightGBMModel,
             PoissonLightGBMModel,
         ]
@@ -247,62 +277,67 @@ class TestRegressionModels:
             categorical_future_covariates=["fut_cov_promo_mechanism"],
             categorical_past_covariates=["past_cov_cat_dummy"],
             categorical_static_covariates=["product_id"],
+            **lgbm_test_params,
         )
         univariate_accuracies += [
-            0.3,  # LightGBMModel
-            0.5,  # QuantileLightGBMModel
-            0.4,  # PoissonLightGBMModel
+            0.75,  # LightGBMModel
+            0.75,  # QuantileLightGBMModel
+            0.75,  # PoissonLightGBMModel
         ]
         multivariate_accuracies += [
-            0.4,  # LightGBMModel
-            0.4,  # QuantileLightGBMModel
-            0.4,  # PoissonLightGBMModel
+            0.7,  # LightGBMModel
+            0.75,  # QuantileLightGBMModel
+            0.75,  # PoissonLightGBMModel
         ]
         multivariate_multiseries_accuracies += [
-            0.05,  # LightGBMModel
-            0.4,  # QuantileLightGBMModel
-            0.4,  # PoissonLightGBMModel
+            0.7,  # LightGBMModel
+            0.7,  # QuantileLightGBMModel
+            0.75,  # PoissonLightGBMModel
         ]
     if cb_available:
+        RegularCatBoostModel = partialclass(
+            CatBoostModel,
+            **cb_test_params,
+        )
         QuantileCatBoostModel = partialclass(
             CatBoostModel,
             likelihood="quantile",
             quantiles=[0.05, 0.5, 0.95],
-            random_state=42,
+            **cb_test_params,
         )
         PoissonCatBoostModel = partialclass(
             CatBoostModel,
             likelihood="poisson",
-            random_state=42,
+            **cb_test_params,
         )
         NormalCatBoostModel = partialclass(
             CatBoostModel,
             likelihood="gaussian",
-            random_state=42,
+            **cb_test_params,
        )
         models += [
-            CatBoostModel,
+            RegularCatBoostModel,
             QuantileCatBoostModel,
             PoissonCatBoostModel,
             NormalCatBoostModel,
         ]
         univariate_accuracies += [
             0.75,  # CatBoostModel
-            1e-03,  # QuantileCatBoostModel
-            1e-01,  # PoissonCatBoostModel
-            1e-05,  # NormalCatBoostModel
+            0.75,  # QuantileCatBoostModel
+            0.9,  # PoissonCatBoostModel
+            0.75,  # NormalCatBoostModel
         ]
         multivariate_accuracies += [
             0.75,  # CatBoostModel
-            1e-03,  # QuantileCatBoostModel
-            0.15,  # PoissonCatBoostModel
-            1e-05,  # NormalCatBoostModel
+            0.75,  # QuantileCatBoostModel
+            0.86,  # PoissonCatBoostModel
+            0.75,  # NormalCatBoostModel
         ]
         multivariate_multiseries_accuracies += [
             0.75,  # CatBoostModel
-            1e-03,  # QuantileCatBoostModel
-            1e-01,  # PoissonCatBoostModel
-            1e-03,  # NormalCatBoostModel
+            0.75,  # QuantileCatBoostModel
+            1.2,  # PoissonCatBoostModel
+            0.75,  # NormalCatBoostModel
         ]
 
     # dummy feature and target TimeSeries instances
@@ -1026,7 +1061,6 @@ def test_models_runnability(self, config):
         prediction = model_instance.predict(n=1)
         assert len(prediction) == 1
 
-    @pytest.mark.slow
     @pytest.mark.parametrize(
         "config",
         itertools.product(
@@ -1036,10 +1070,14 @@ def test_fit(self, config):
         # test fitting both on univariate and multivariate timeseries
         model, mode, series = config
+
+        series = series[:15]
+        sine_multivariate1 = self.sine_multivariate1[:15]
+
         # auto-regression but past_covariates does not extend enough in the future
         with pytest.raises(ValueError):
             model_instance = model(lags=4, lags_past_covariates=4, multi_models=mode)
-            model_instance.fit(series=series, past_covariates=self.sine_multivariate1)
+            model_instance.fit(series=series, past_covariates=sine_multivariate1)
             model_instance.predict(n=10)
 
         # inconsistent number of components in series Sequence[TimeSeries]
@@ -1072,19 +1110,19 @@ def test_fit(self, config):
         assert model_instance.lags.get("past") is None
 
         model_instance = model(lags=12, lags_past_covariates=12, multi_models=mode)
-        model_instance.fit(series=series, past_covariates=self.sine_multivariate1)
+        model_instance.fit(series=series, past_covariates=sine_multivariate1)
         assert len(model_instance.lags.get("past")) == 12
 
         model_instance = model(
             lags=12, lags_future_covariates=(0, 1), multi_models=mode
         )
-        model_instance.fit(series=series, future_covariates=self.sine_multivariate1)
+        model_instance.fit(series=series, future_covariates=sine_multivariate1)
         assert len(model_instance.lags.get("future")) == 1
 
         model_instance = model(
             lags=12, lags_past_covariates=[-1, -4, -6], multi_models=mode
         )
-        model_instance.fit(series=series, past_covariates=self.sine_multivariate1)
+        model_instance.fit(series=series, past_covariates=sine_multivariate1)
         assert len(model_instance.lags.get("past")) == 3
 
         model_instance = model(
@@ -1095,8 +1133,8 @@ def test_fit(self, config):
         )
         model_instance.fit(
             series=series,
-            past_covariates=self.sine_multivariate1,
-            future_covariates=self.sine_multivariate1,
+            past_covariates=sine_multivariate1,
+            future_covariates=sine_multivariate1,
         )
         assert len(model_instance.lags.get("past")) == 3
 
@@ -1289,11 +1327,11 @@ def test_multioutput_wrapper(self, config):
                 horizon=0, target_dim=1
             )
-    model_configs = [(XGBModel, {"tree_method": "exact"})]
+    model_configs = [(XGBModel, dict({"tree_method": "exact"}, **xgb_test_params))]
     if lgbm_available:
-        model_configs += [(LightGBMModel, {})]
+        model_configs += [(LightGBMModel, lgbm_test_params)]
     if cb_available:
-        model_configs += [(CatBoostModel, {})]
+        model_configs += [(CatBoostModel, cb_test_params)]
 
     @pytest.mark.parametrize(
         "config", itertools.product(model_configs, [1, 2], [True, False])
     )
@@ -2308,14 +2346,18 @@ def test_output_shift(self, config):
     @pytest.mark.parametrize(
         "config",
         itertools.product(
-            [RegressionModel, LinearRegressionModel, XGBModel]
-            + ([LightGBMModel] if lgbm_available else []),
+            [
+                (RegressionModel, {}),
+                (LinearRegressionModel, {}),
+                (XGBModel, xgb_test_params),
+            ]
+            + ([(LightGBMModel, lgbm_test_params)] if lgbm_available else []),
             [True, False],
             [1, 2],
         ),
     )
     def test_encoders(self, config):
-        model_cls, mode, ocl = config
+        (model_cls, model_kwargs), mode, ocl = config
         max_past_lag = -4
         max_future_lag = 4
         # target
@@ -2358,18 +2400,21 @@ def test_encoders(self, config):
             add_encoders=encoder_examples["past"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_fc_valid0 = model_cls(
             lags=2,
             add_encoders=encoder_examples["future"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_mixed_valid0 = model_cls(
             lags=2,
             add_encoders=encoder_examples["mixed"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
 
         # encoders will not generate covariates without lags
@@ -2384,12 +2429,14 @@ def test_encoders(self, config):
             add_encoders=encoder_examples["past"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_fc_valid0 = model_cls(
             lags_future_covariates=[-1, 0],
             add_encoders=encoder_examples["future"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_mixed_valid0 = model_cls(
             lags_past_covariates=[-2, -1],
@@ -2397,6 +2444,7 @@ def test_encoders(self, config):
             add_encoders=encoder_examples["mixed"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         # check that fit/predict works with model internal covariate requirement checks
         for model in [model_pc_valid0, model_fc_valid0, model_mixed_valid0]:
@@ -2411,6 +2459,7 @@ def test_encoders(self, config):
             add_encoders=encoder_examples["past"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_fc_valid1 = model_cls(
             lags=2,
@@ -2418,6 +2467,7 @@ def test_encoders(self, config):
             lags_future_covariates=[-1, 0],
             add_encoders=encoder_examples["future"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
         model_mixed_valid1 = model_cls(
             lags=2,
@@ -2426,6 +2476,7 @@ def test_encoders(self, config):
             lags_past_covariates=[-2, -1],
             lags_future_covariates=[-1, 0],
             add_encoders=encoder_examples["mixed"],
             multi_models=mode,
             output_chunk_length=ocl,
+            **model_kwargs,
         )
 
         for model, ex in zip(
@@ -2733,6 +2784,7 @@ def get_model_params():
             return {
                 "lags": int(period / 2),
                 "output_chunk_length": int(period / 2),
+                "verbose": -1,
             }
 
         # test case without using categorical static covariates
@@ -2785,6 +2837,7 @@ def get_model_params():
                     "past_cov_cat_dummy",
                 ],
                 categorical_static_covariates=["product_id"],
+                **lgbm_test_params,
             ),
             LightGBMModel(
                 lags=1,
                 lags_past_covariates=1,
                 output_chunk_length=1,
                 categorical_future_covariates=[
                     "fut_cov_promo_mechanism",
                     "past_cov_cat_dummy",
                 ],
                 categorical_static_covariates=["does_not_exist"],
+                **lgbm_test_params,
             ),
             LightGBMModel(
                 lags=1,
                 lags_past_covariates=1,
                 output_chunk_length=1,
                 categorical_future_covariates=["does_not_exist"],
+                **lgbm_test_params,
             ),
         ]
         if lgbm_available
@@ -3007,8 +3062,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "poisson",
-                "random_state": 42,
                 "multi_models": True,
+                **xgb_test_params,
             },
             0.6,
         ),
@@ -3018,8 +3073,8 @@ class TestProbabilisticRegressionModels:
                 "lags": 2,
                 "likelihood": "quantile",
                 "quantiles": [0.1, 0.3, 0.5, 0.7, 0.9],
-                "random_state": 42,
                 "multi_models": True,
+                **xgb_test_params,
             },
             0.4,
         ),
@@ -3031,8 +3086,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "quantile",
-                "random_state": 42,
                 "multi_models": True,
+                **lgbm_test_params,
             },
             0.4,
         ),
@@ -3042,8 +3097,8 @@ class TestProbabilisticRegressionModels:
                 "lags": 2,
                 "likelihood": "quantile",
                 "quantiles": [0.1, 0.3, 0.5, 0.7, 0.9],
-                "random_state": 42,
                 "multi_models": True,
+                **lgbm_test_params,
             },
             0.4,
         ),
@@ -3052,8 +3107,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "poisson",
-                "random_state": 42,
                 "multi_models": True,
+                **lgbm_test_params,
             },
             0.6,
         ),
@@ -3065,8 +3120,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "quantile",
-                "random_state": 42,
                 "multi_models": True,
+                **cb_test_params,
             },
             0.05,
         ),
@@ -3076,8 +3131,8 @@ class TestProbabilisticRegressionModels:
                 "lags": 2,
                 "likelihood": "quantile",
                 "quantiles": [0.1, 0.3, 0.5, 0.7, 0.9],
-                "random_state": 42,
                 "multi_models": True,
+                **cb_test_params,
             },
             0.05,
         ),
@@ -3086,8 +3141,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "poisson",
-                "random_state": 42,
                 "multi_models": True,
+                **cb_test_params,
             },
             0.6,
         ),
@@ -3096,8 +3151,8 @@ class TestProbabilisticRegressionModels:
             {
                 "lags": 2,
                 "likelihood": "gaussian",
-                "random_state": 42,
                 "multi_models": True,
+                **cb_test_params,
             },
             0.05,
         ),
@@ -3109,7 +3164,6 @@ class TestProbabilisticRegressionModels:
     constant_noisy_multivar_ts = constant_noisy_ts.stack(constant_noisy_ts)
     num_samples = 5
 
-    @pytest.mark.slow
     @pytest.mark.parametrize(
         "config", itertools.product(models_cls_kwargs_errs, [True, False])
     )
@@ -3131,7 +3185,6 @@ def test_fit_predict_determinism(self, config):
         pred3 = model.predict(n=10, num_samples=2).values()
         assert (pred2 != pred3).any()
 
-    @pytest.mark.slow
     @pytest.mark.parametrize(
         "config", itertools.product(models_cls_kwargs_errs, [True, False])
     )
@@ -3146,7 +3199,6 @@ def test_probabilistic_forecast_accuracy_univariate(self, config):
             self.constant_noisy_ts,
         )
 
-    @pytest.mark.slow
    @pytest.mark.parametrize(
        "config", itertools.product(models_cls_kwargs_errs, [True, False])
    )