diff --git a/404.html b/404.html index ebcce9da..181b0d28 100644 --- a/404.html +++ b/404.html @@ -39,7 +39,7 @@
diff --git a/CODE_OF_CONDUCT.html b/CODE_OF_CONDUCT.html index 5f6381f1..83ab7c18 100644 --- a/CODE_OF_CONDUCT.html +++ b/CODE_OF_CONDUCT.html @@ -23,7 +23,7 @@ diff --git a/LICENSE-text.html b/LICENSE-text.html index 31a43c5c..b633eaee 100644 --- a/LICENSE-text.html +++ b/LICENSE-text.html @@ -23,7 +23,7 @@ diff --git a/LICENSE.html b/LICENSE.html index 5c279cf3..4bfa6bb8 100644 --- a/LICENSE.html +++ b/LICENSE.html @@ -23,7 +23,7 @@ diff --git a/SECURITY.html b/SECURITY.html index ea4cc390..b3857387 100644 --- a/SECURITY.html +++ b/SECURITY.html @@ -23,7 +23,7 @@ diff --git a/SUPPORT.html b/SUPPORT.html index 0ac3f07e..b936db67 100644 --- a/SUPPORT.html +++ b/SUPPORT.html @@ -23,7 +23,7 @@ diff --git a/articles/back-testing-and-hyperparameter-tuning.html b/articles/back-testing-and-hyperparameter-tuning.html index 48044795..10238b75 100644 --- a/articles/back-testing-and-hyperparameter-tuning.html +++ b/articles/back-testing-and-hyperparameter-tuning.html @@ -38,7 +38,7 @@ @@ -128,19 +128,19 @@#> Loading required package: modeltime
#> Finn Submission Info
#> • Experiment Name: finnts_fcst
-#> • Run Name: get_prepped_models-20241025T005601Z
+#> • Run Name: get_prepped_models-20241025T225231Z
#>
#> ℹ Prepping Data
-#> ✔ Prepping Data [959ms]
+#> ✔ Prepping Data [1s]
#>
#> ℹ Creating Model Workflows
-#> ✔ Creating Model Workflows [104ms]
+#> ✔ Creating Model Workflows [116ms]
#>
#> ℹ Creating Model Hyperparameters
-#> ✔ Creating Model Hyperparameters [148ms]
+#> ✔ Creating Model Hyperparameters [161ms]
#>
#> ℹ Creating Train Test Splits
-#> ✔ Creating Train Test Splits [361ms]
+#> ✔ Creating Train Test Splits [383ms]
#>
#> # A tibble: 31 × 4
#> Run_Type Train_Test_ID Train_End Test_End
diff --git a/articles/best-model-selection.html b/articles/best-model-selection.html
index 20d1f188..b1dfd480 100644
--- a/articles/best-model-selection.html
+++ b/articles/best-model-selection.html
@@ -38,7 +38,7 @@
diff --git a/articles/external-regressors.html b/articles/external-regressors.html
index e053b69b..3fec3b02 100644
--- a/articles/external-regressors.html
+++ b/articles/external-regressors.html
@@ -38,7 +38,7 @@
diff --git a/articles/feature-engineering.html b/articles/feature-engineering.html
index 578febc3..aa81d89d 100644
--- a/articles/feature-engineering.html
+++ b/articles/feature-engineering.html
@@ -38,7 +38,7 @@
diff --git a/articles/feature-selection.html b/articles/feature-selection.html
index 7f2500e0..fca5d311 100644
--- a/articles/feature-selection.html
+++ b/articles/feature-selection.html
@@ -38,7 +38,7 @@
diff --git a/articles/finnts.html b/articles/finnts.html
index 9553f027..d0490c47 100644
--- a/articles/finnts.html
+++ b/articles/finnts.html
@@ -38,7 +38,7 @@
diff --git a/articles/forecast-components.html b/articles/forecast-components.html
index 5a3d3405..3556ba81 100644
--- a/articles/forecast-components.html
+++ b/articles/forecast-components.html
@@ -38,7 +38,7 @@
diff --git a/articles/hierarchical-forecasting.html b/articles/hierarchical-forecasting.html
index ebd976d8..ff1649d3 100644
--- a/articles/hierarchical-forecasting.html
+++ b/articles/hierarchical-forecasting.html
@@ -38,7 +38,7 @@
diff --git a/articles/index.html b/articles/index.html
index 0c2de624..01518c31 100644
--- a/articles/index.html
+++ b/articles/index.html
@@ -23,7 +23,7 @@
diff --git a/articles/models-used-in-finnts.html b/articles/models-used-in-finnts.html
index 6629def2..af83fa7c 100644
--- a/articles/models-used-in-finnts.html
+++ b/articles/models-used-in-finnts.html
@@ -38,7 +38,7 @@
@@ -159,7 +159,7 @@ Multistep Horizon Modelssvm-rbf
xgboost
-A multistep model optimizes for each period in a forecast horizon. Let’s take an example of a monthly data set with a forecast horizon of 3. When creating the features for the R1 recipe, finnts will create lags of 1, 2, 3, 6, 9, 12 months. Then when training a mulitstep model it will iteratively use specific features to train the model. First it will train a model on the first forecast horizon (H1), where it will use all available feature lags. Then for H2 it will use lags of 2 or more. Finally for H3 it will use lags of 3 or more. So the final model is actually a collection of multiple models that each trained on a specific horizon. This lets the model optimize for using all available data when creating the forecast. So in our example, one glmnet model actually has three separate horizon specific models under the hood.
+A multistep model optimizes for each period in a forecast horizon. Let’s take an example of a monthly data set with a forecast horizon of 3. When creating the features for the R1 recipe, finnts will create lags of 1, 2, 3, 6, 9, 12 months. Then when training a multistep model it will iteratively use specific features to train the model. First it will train a model on the first forecast horizon (H1), where it will use all available feature lags. Then for H2 it will use lags of 2 or more. Finally for H3 it will use lags of 3 or more. So the final model is actually a collection of multiple models that each trained on a specific horizon. This lets the model optimize for using all available data when creating the forecast. So in our example, one glmnet model actually has three separate horizon specific models under the hood.
A few more things to mention. If multistep_horizon
is TRUE then other multivariate models like arima-boost or prophet-xregs will not run a multistep horizon approach. Instead they will use lags that are equal to or greater than the forecast horizon. One set of hyperparameters will be chosen for each multistep model, meaning glmnet will only use one combination of final hyperparameters and apply it to each horizon model. Multistep models are not run for the R2 recipe, since it has its own way of dealing with multiple horizons. Finally if feature_selection
is turned on, it will be run for each horizon specific model, meaning for a 3 month forecast horizon the feature selection process will be run 3 times, one for each combination of features tied to a specific horizon.
diff --git a/articles/parallel-processing.html b/articles/parallel-processing.html
index 617fb954..430a506f 100644
--- a/articles/parallel-processing.html
+++ b/articles/parallel-processing.html
@@ -38,7 +38,7 @@
@@ -133,7 +133,7 @@
Local MachineWithin Azure using Spark
To leverage the full power of Finn, running within Azure is the best choice in building production ready forecasts that can easily scale. The most efficient way to run Finn is to set parallel_processing
to “spark” within forecast_time_series()
. This will run each time series in parallel across a spark compute cluster.
-Sparklyr is a great R package that allows you to run R code across a spark cluster. A user simply has to connect to a spark cluster then run Finn. Below is an example on how you can run Finn using spark on Azure Databricks. Also check out the growing R support with using spark on Azure Synapse.
+Sparklyr is a great R package that allows you to run R code across a spark cluster. A user simply has to connect to a spark cluster then run Finn. Below is an example on how you can run Finn using spark on Azure Databricks. Also check out the growing R support with using spark on Azure Synapse.
# load CRAN libraries
library(finnts)
@@ -177,7 +177,7 @@ Within Azure using Spark run_info = run_info,
return_type = "sdf"
)
-The above example runs each time series on a separate core on a spark cluster. You can also submit multiple time series where each time series runs on a separate spark executor (VM) and then leverage all of the cores on that executor to run things like hyperparameter tuning or model refitting in parallel. This creates two levels of parallelization. One at the time series level, then another when doing things like hyperparameter tuning within a specific time series. To do that set inner_parallel
to TRUE in forecast_time_series()
. Also make sure that you adjust the number of spark executor cores to 1, that ensures that only 1 time series runs on an executor at a time. Leverage the “spark.executor.cores” argument when configuring your spark connection. This can be done using sparklyr or within the cluster manager itself within the Azure resource. Use the “num_cores” argument in the “forecast_time_series” function to control how many cores should be used within an executor when running things like hyperparameter tuning.
+The above example runs each time series on a separate core on a spark cluster. You can also submit multiple time series where each time series runs on a separate spark executor (VM) and then leverage all of the cores on that executor to run things like hyperparameter tuning or model refitting in parallel. This creates two levels of parallelization. One at the time series level, then another when doing things like hyperparameter tuning within a specific time series. To do that set inner_parallel
to TRUE in forecast_time_series()
. Also make sure that you adjust the number of spark executor cores to 1, that ensures that only 1 time series runs on an executor at a time. Leverage the “spark.executor.cores” argument when configuring your spark connection. This can be done using sparklyr or within the cluster manager itself within the Azure resource. Use the “num_cores” argument in the “forecast_time_series” function to control how many cores should be used within an executor when running things like hyperparameter tuning.
forecast_time_series()
will be looking for a variable called “sc” to use when submitting tasks to the spark cluster, so make sure you use that as the variable name when connecting to spark. Also it’s important that you mount your spark session to an Azure Data Lake Storage (ADLS) account, and provide the mounted path to where you’d like your Finn results to be written to within set_run_info()
.
diff --git a/articles/tips-for-production.html b/articles/tips-for-production.html
index c77a46e6..249a05b5 100644
--- a/articles/tips-for-production.html
+++ b/articles/tips-for-production.html
@@ -38,7 +38,7 @@
diff --git a/authors.html b/authors.html
index c065cb96..810db30c 100644
--- a/authors.html
+++ b/authors.html
@@ -23,7 +23,7 @@
diff --git a/index.html b/index.html
index b482a0df..caceb6fb 100644
--- a/index.html
+++ b/index.html
@@ -40,7 +40,7 @@
diff --git a/news/index.html b/news/index.html
index e37fab1c..d110629b 100644
--- a/news/index.html
+++ b/news/index.html
@@ -23,7 +23,7 @@
@@ -94,9 +94,9 @@ Changelog
-finnts 0.4.0.9008 (DEVELOPMENT VERSION)
+finnts 0.5.0 (2024-10-25)
-Improvements
+Improvements
- Added support for hierarchical forecasting with external regressors
- Allow global models for hierarchical forecasts
- Multistep horizon forecasts for R1 recipe, listed as
multistep_horizon
within prep_data()
@@ -108,12 +108,12 @@ Improvements
-Bug Fixes
+Bug Fixes
- Error in run_type column join in final forecast output
- Error in running feature selection
diff --git a/pkgdown.yml b/pkgdown.yml
index 597fe16e..36722a71 100644
--- a/pkgdown.yml
+++ b/pkgdown.yml
@@ -13,7 +13,7 @@ articles:
models-used-in-finnts: models-used-in-finnts.html
parallel-processing: parallel-processing.html
tips-for-production: tips-for-production.html
-last_built: 2024-10-25T00:53Z
+last_built: 2024-10-25T22:49Z
urls:
reference: https://microsoft.github.io/finnts/reference
article: https://microsoft.github.io/finnts/articles
diff --git a/reference/cubist_multistep.html b/reference/cubist_multistep.html
index ebb4718f..e1d18947 100644
--- a/reference/cubist_multistep.html
+++ b/reference/cubist_multistep.html
@@ -23,7 +23,7 @@
diff --git a/reference/cubist_multistep_fit_impl.html b/reference/cubist_multistep_fit_impl.html
index 870fd4b6..6d37bca1 100644
--- a/reference/cubist_multistep_fit_impl.html
+++ b/reference/cubist_multistep_fit_impl.html
@@ -23,7 +23,7 @@
diff --git a/reference/cubist_multistep_predict_impl.html b/reference/cubist_multistep_predict_impl.html
index aff22d50..dab19f67 100644
--- a/reference/cubist_multistep_predict_impl.html
+++ b/reference/cubist_multistep_predict_impl.html
@@ -23,7 +23,7 @@
diff --git a/reference/ensemble_models.html b/reference/ensemble_models.html
index c0824440..8c92de28 100644
--- a/reference/ensemble_models.html
+++ b/reference/ensemble_models.html
@@ -23,7 +23,7 @@
@@ -161,7 +161,7 @@ Examples
run_info <- set_run_info()
#> Finn Submission Info
#> • Experiment Name: finn_fcst
-#> • Run Name: finn_fcst-20241025T005338Z
+#> • Run Name: finn_fcst-20241025T225004Z
#>
prep_data(run_info,
@@ -172,7 +172,7 @@ Examples
forecast_horizon = 3
)
#> ℹ Prepping Data
-#> ✔ Prepping Data [1.4s]
+#> ✔ Prepping Data [1.5s]
#>
prep_models(run_info,
@@ -180,25 +180,25 @@ Examples
num_hyperparameters = 2
)
#> ℹ Creating Model Workflows
-#> ✔ Creating Model Workflows [199ms]
+#> ✔ Creating Model Workflows [210ms]
#>
#> ℹ Creating Model Hyperparameters
-#> ✔ Creating Model Hyperparameters [197ms]
+#> ✔ Creating Model Hyperparameters [204ms]
#>
#> ℹ Creating Train Test Splits
-#> ✔ Creating Train Test Splits [286ms]
+#> ✔ Creating Train Test Splits [298ms]
#>
train_models(run_info,
run_global_models = FALSE
)
#> ℹ Training Individual Models
-#> ✔ Training Individual Models [10.5s]
+#> ✔ Training Individual Models [11.1s]
#>
ensemble_models(run_info)
#> ℹ Training Ensemble Models
-#> ✔ Training Ensemble Models [1.6s]
+#> ✔ Training Ensemble Models [1.7s]
#>
# }
diff --git a/reference/final_models.html b/reference/final_models.html
index 83fd62ce..f8ba92e2 100644
--- a/reference/final_models.html
+++ b/reference/final_models.html
@@ -23,7 +23,7 @@
@@ -175,7 +175,7 @@