From fc52fa969ea7a7f01d31c535717be9b5f72351e2 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 11 Sep 2024 10:31:47 -0700 Subject: [PATCH] [docs] Doc sprint (#3099) * docs sprint * youtube id * feedback --- docs/source/_toctree.yml | 26 +++--- docs/source/basic_tutorials/install.md | 27 +++--- docs/source/basic_tutorials/launch.md | 16 ++-- docs/source/basic_tutorials/migration.md | 7 +- docs/source/basic_tutorials/notebook.md | 8 +- docs/source/basic_tutorials/overview.md | 4 +- .../source/basic_tutorials/troubleshooting.md | 4 +- .../concept_guides/big_model_inference.md | 18 ++-- .../concept_guides/deferring_execution.md | 4 +- .../concept_guides/fsdp_and_deepspeed.md | 16 ++-- .../gradient_synchronization.md | 6 +- .../concept_guides/internal_mechanism.md | 8 +- .../concept_guides/low_precision_training.md | 6 +- docs/source/concept_guides/performance.md | 2 +- docs/source/concept_guides/training_tpu.md | 10 +-- docs/source/index.md | 10 +-- docs/source/package_reference/big_modeling.md | 55 +++++++++++-- docs/source/package_reference/deepspeed.md | 18 +++- docs/source/package_reference/fp8.md | 12 ++- docs/source/package_reference/fsdp.md | 10 ++- docs/source/package_reference/inference.md | 6 +- docs/source/package_reference/kwargs.md | 2 +- docs/source/package_reference/launchers.md | 4 + docs/source/package_reference/logging.md | 4 +- docs/source/package_reference/megatron_lm.md | 18 +++- docs/source/package_reference/state.md | 6 ++ .../package_reference/torch_wrappers.md | 19 ++++- docs/source/package_reference/tracking.md | 21 ++++- docs/source/package_reference/utilities.md | 2 +- docs/source/usage_guides/big_modeling.md | 82 ++++++------------- docs/source/usage_guides/checkpoint.md | 4 +- docs/source/usage_guides/ddp_comm_hook.md | 2 +- docs/source/usage_guides/deepspeed.md | 6 +- .../usage_guides/distributed_inference.md | 16 ++-- docs/source/usage_guides/explore.md | 6 +- docs/source/usage_guides/fsdp.md | 6 +- .../usage_guides/gradient_accumulation.md | 18 ++-- docs/source/usage_guides/ipex.md | 2 +- docs/source/usage_guides/local_sgd.md | 12 +-- .../usage_guides/low_precision_training.md | 10 +-- docs/source/usage_guides/megatron_lm.md | 30 +++---- .../usage_guides/model_size_estimator.md | 6 +- docs/source/usage_guides/mps.md | 2 +- docs/source/usage_guides/profiler.md | 4 +- docs/source/usage_guides/quantization.md | 8 +- docs/source/usage_guides/sagemaker.md | 23 +++--- docs/source/usage_guides/tracking.md | 4 +- docs/source/usage_guides/training_zoo.md | 8 +- 48 files changed, 344 insertions(+), 254 deletions(-) diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 14955def60d..6e6d6900400 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -16,7 +16,7 @@ - local: basic_tutorials/tpu title: TPU training - local: basic_tutorials/launch - title: Launching distributed code + title: Launching Accelerate scripts - local: basic_tutorials/notebook title: Launching distributed training from Jupyter Notebooks title: Tutorials @@ -34,7 +34,7 @@ - local: usage_guides/profiler title: Profiler - local: usage_guides/checkpoint - title: Save and load training states + title: Checkpointing - local: basic_tutorials/troubleshooting title: Troubleshoot - local: usage_guides/training_zoo @@ -53,7 +53,7 @@ - local: usage_guides/ddp_comm_hook title: DDP Communication Hooks - local: usage_guides/fsdp - title: Fully Sharded Data Parallelism + title: Fully Sharded Data Parallel - local: 
usage_guides/megatron_lm title: Megatron-LM - local: usage_guides/sagemaker @@ -73,7 +73,7 @@ title: How to guides - sections: - local: concept_guides/internal_mechanism - title: πŸ€— Accelerate's internal mechanism + title: Accelerate's internal mechanism - local: concept_guides/big_model_inference title: Loading big models into memory - local: concept_guides/performance @@ -85,23 +85,23 @@ - local: concept_guides/fsdp_and_deepspeed title: FSDP vs DeepSpeed - local: concept_guides/low_precision_training - title: How training in low-precision environments is possible (FP8) + title: Low precision training methods - local: concept_guides/training_tpu - title: TPU best practices + title: Training on TPUs title: Concepts and fundamentals - sections: - local: package_reference/accelerator title: Accelerator - local: package_reference/state - title: Stateful configuration classes + title: Stateful classes - local: package_reference/cli title: The Command Line - local: package_reference/torch_wrappers - title: Torch wrapper classes + title: DataLoaders, Optimizers, Schedulers - local: package_reference/tracking title: Experiment trackers - local: package_reference/launchers - title: Distributed launchers + title: Launchers - local: package_reference/deepspeed title: DeepSpeed utilities - local: package_reference/logging @@ -109,15 +109,15 @@ - local: package_reference/big_modeling title: Working with large models - local: package_reference/inference - title: Distributed inference with big models + title: Pipeline parallelism - local: package_reference/kwargs title: Kwargs handlers - local: package_reference/fp8 - title: FP8 Functionality + title: FP8 - local: package_reference/utilities title: Utility functions and classes - local: package_reference/megatron_lm - title: Megatron-LM Utilities + title: Megatron-LM utilities - local: package_reference/fsdp - title: Fully Sharded Data Parallelism Utilities + title: Fully Sharded Data Parallel utilities title: "Reference" diff --git a/docs/source/basic_tutorials/install.md b/docs/source/basic_tutorials/install.md index d3e59516886..b09dc119eca 100644 --- a/docs/source/basic_tutorials/install.md +++ b/docs/source/basic_tutorials/install.md @@ -13,31 +13,29 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Installation and Configuration +# Installation -Before you start, you will need to setup your environment, install the appropriate packages, and configure πŸ€— Accelerate. πŸ€— Accelerate is tested on **Python 3.8+**. +Before you start, you will need to setup your environment, install the appropriate packages, and configure Accelerate. Accelerate is tested on **Python 3.8+**. -## Installing πŸ€— Accelerate +Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below: -πŸ€— Accelerate is available on pypi and conda, as well as on GitHub. Details to install from each are below: +## pip -### pip - -To install πŸ€— Accelerate from pypi, perform: +To install Accelerate from pypi, perform: ```bash pip install accelerate ``` -### conda +## conda -πŸ€— Accelerate can also be installed with conda with: +Accelerate can also be installed with conda with: ```bash conda install -c conda-forge accelerate ``` -### Source +## Source New features are added every day that haven't been released yet. To try them out yourself, install from the GitHub repository: @@ -56,9 +54,9 @@ cd accelerate pip install -e . 
``` -## Configuring πŸ€— Accelerate +## Configuration -After installing, you need to configure πŸ€— Accelerate for how the current system is setup for training. +After installing, you need to configure Accelerate for how the current system is setup for training. To do so run the following and answer the questions prompted to you: ```bash @@ -70,7 +68,8 @@ To write a barebones configuration that doesn't include options such as DeepSpee ```bash python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')" ``` -πŸ€— Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode. + +Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode. To check that your configuration looks fine, run: @@ -99,4 +98,4 @@ An example output is shown below, which describes two GPUs on a single machine w - main_training_function: main - deepspeed_config: {} - fsdp_config: {} -``` \ No newline at end of file +``` diff --git a/docs/source/basic_tutorials/launch.md b/docs/source/basic_tutorials/launch.md index dfcab07b7b4..3dfcbfb6fa4 100644 --- a/docs/source/basic_tutorials/launch.md +++ b/docs/source/basic_tutorials/launch.md @@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Launching your πŸ€— Accelerate scripts +# Launching Accelerate scripts -In the previous tutorial, you were introduced to how to modify your current training script to use πŸ€— Accelerate. +In the previous tutorial, you were introduced to how to modify your current training script to use Accelerate. The final version of that code is shown below: ```python @@ -69,14 +69,14 @@ Next, you need to launch it with `accelerate launch`. It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking. - Otherwise πŸ€— Accelerate will use very basic defaults depending on your system setup. + Otherwise Accelerate will use very basic defaults depending on your system setup. ## Using accelerate launch -πŸ€— Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`. +Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`. This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them is. @@ -101,7 +101,7 @@ CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ... ``` You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters. -In this case, πŸ€— Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision. +In this case, Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision. Here is how you would use all GPUs and train with mixed precision disabled: ```bash @@ -129,7 +129,7 @@ accelerate launch -h - Even if you are not using πŸ€— Accelerate in your code, you can still use the launcher for starting your scripts! + Even if you are not using Accelerate in your code, you can still use the launcher for starting your scripts! 
@@ -178,7 +178,7 @@ accelerate launch {script_name.py} {--arg1} {--arg2} ... ## Custom Configurations As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations -made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for πŸ€— Accelerate. +made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for Accelerate. This cache folder is located at (with decreasing order of priority): - The content of your environment variable `HF_HOME` suffixed with `accelerate`. @@ -211,7 +211,7 @@ accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_nam ``` ## Multi-node training -Multi-node training with πŸ€—Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following: +Multi-node training with Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following: - Copy your codebase and data to all nodes. (or place them on a shared filesystem) - Setup your python packages on all nodes. diff --git a/docs/source/basic_tutorials/migration.md b/docs/source/basic_tutorials/migration.md index 8fb2c32f981..3c71ea7a053 100644 --- a/docs/source/basic_tutorials/migration.md +++ b/docs/source/basic_tutorials/migration.md @@ -220,8 +220,5 @@ To further customize where and how states are saved through [`~Accelerator.save_ Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function. - - -If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model. - - +> [!TIP] +> If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model. diff --git a/docs/source/basic_tutorials/notebook.md b/docs/source/basic_tutorials/notebook.md index 805704116f3..745d95d82b2 100644 --- a/docs/source/basic_tutorials/notebook.md +++ b/docs/source/basic_tutorials/notebook.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Launching Multi-GPU Training from a Jupyter Environment +# Launching distributed training from Jupyter Notebooks This tutorial teaches you how to fine tune a computer vision model with πŸ€— Accelerate from a Jupyter Notebook on a distributed system. 
You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training. @@ -26,13 +26,13 @@ You will also learn how to setup a few requirements needed for ensuring your env ## Configuring the Environment -Before any training can be performed, a πŸ€— Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts: +Before any training can be performed, a Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts: ```bash accelerate config ``` -However, if general defaults are fine and you are *not* running on a TPU, πŸ€—Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`]. +However, if general defaults are fine and you are *not* running on a TPU, Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`]. The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this. @@ -454,7 +454,7 @@ epoch 4: 94.71 And that's it! -Please note that [`notebook_launcher`] ignores the πŸ€— Accelerate config file, to launch based on the config use: +Please note that [`notebook_launcher`] ignores the Accelerate config file, to launch based on the config use: ```bash accelerate launch diff --git a/docs/source/basic_tutorials/overview.md b/docs/source/basic_tutorials/overview.md index 6a62e72da09..0ac665104ae 100644 --- a/docs/source/basic_tutorials/overview.md +++ b/docs/source/basic_tutorials/overview.md @@ -15,10 +15,10 @@ rendered properly in your Markdown viewer. # Overview -Welcome to the πŸ€— Accelerate tutorials! These introductory guides will help catch you up to speed on working with πŸ€— Accelerate. +Welcome to the Accelerate tutorials! These introductory guides will help catch you up to speed on working with Accelerate. You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly, and more! These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework. -If you have any questions about πŸ€— Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18). \ No newline at end of file +If you have any questions about Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18). \ No newline at end of file diff --git a/docs/source/basic_tutorials/troubleshooting.md b/docs/source/basic_tutorials/troubleshooting.md index 6752bffc692..1f76e0b8bfb 100644 --- a/docs/source/basic_tutorials/troubleshooting.md +++ b/docs/source/basic_tutorials/troubleshooting.md @@ -204,8 +204,8 @@ Vastly different GPUs within the same setup can lead to performance bottlenecks. If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help. -- Ask for help on the Hugging Face forums by posting your question in the [πŸ€— Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved! 
+- Ask for help on the Hugging Face forums by posting your question in the [Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved! - Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you. -- Create an Issue on the πŸ€— Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it. +- Create an Issue on the Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it. diff --git a/docs/source/concept_guides/big_model_inference.md b/docs/source/concept_guides/big_model_inference.md index b2d8ab038ae..27cb0430772 100644 --- a/docs/source/concept_guides/big_model_inference.md +++ b/docs/source/concept_guides/big_model_inference.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Handling big models for inference +# Loading big models into memory When loading a pre-trained model in PyTorch, the usual workflow looks like this: @@ -46,7 +46,7 @@ This API is quite new and still in its experimental stage. While we strive to pr ### Instantiating an empty model -The first tool πŸ€— Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works: +The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works: ```py from accelerate import init_empty_weights @@ -74,7 +74,7 @@ initializes an empty model with a bit more than 100B parameters. Behind the scen It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards. -πŸ€— Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. For instance, we could have a folder containing: +Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. 
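A minimal sketch of producing such a sharded checkpoint with [`~Accelerator.save_model`] (the folder name, shard size, and tiny stand-in model below are illustrative only; by default the shards are written in the safetensors format):

```py
import torch.nn as nn

from accelerate import Accelerator

accelerator = Accelerator()
# A small stand-in for a real large model: four ~1MB linear layers.
model = nn.Sequential(*[nn.Linear(500, 500) for _ in range(4)])

# Write the weights as shards of at most ~2MB each, along with an index JSON
# mapping each parameter name to the shard file that contains it.
accelerator.save_model(model, "sharded_checkpoint", max_shard_size="2MB")
```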
For instance, we could have a folder containing: ```bash first_state_dict.bin @@ -97,9 +97,9 @@ and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"l ### Loading weights -The second tool πŸ€— Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard. +The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard. -If you want to use big model inference with πŸ€— Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). +If you want to use big model inference with Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model. @@ -145,7 +145,7 @@ model = load_checkpoint_and_dispatch( ) ``` -By passing `device_map="auto"`, we tell πŸ€— Accelerate to determine automatically where to put each layer of the model depending on the available resources: +By passing `device_map="auto"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources: - first, we use the maximum space available on the GPU(s) - if we still need space, we store the remaining weights on the CPU - if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors @@ -159,7 +159,7 @@ include a residual connection of some kind. 
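To make the constraint concrete, here is a simplified sketch of such a block (an illustration only, not the actual GPT-2 `Block` implementation): because the skip connections add `x` to the submodule outputs, all of the tensors involved must live on the same device, so the whole block has to stay together.

```py
import torch.nn as nn


class Block(nn.Module):
    """Simplified residual block, only to illustrate `no_split_module_classes`."""

    def __init__(self, dim):
        super().__init__()
        self.attn = nn.Linear(dim, dim)
        self.mlp = nn.Linear(dim, dim)

    def forward(self, x):
        # These additions require `x`, `self.attn(x)`, and `self.mlp(x)` to be on
        # the same device, which is why the block must not be split across devices.
        x = x + self.attn(x)
        x = x + self.mlp(x)
        return x
```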
#### The `device_map` -You can see the `device_map` that πŸ€— Accelerate picked by accessing the `hf_device_map` attribute of your model: +You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model: ```py model.hf_device_map @@ -210,7 +210,7 @@ outputs = model.generate(x1, max_new_tokens=10, do_sample=False)[0] tokenizer.decode(outputs.cpu().squeeze()) ``` -Behind the scenes, πŸ€— Accelerate added hooks to the model, so that: +Behind the scenes, Accelerate added hooks to the model, so that: - at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works) - for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after - for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after @@ -225,7 +225,7 @@ This way, your model can run for inference even if it doesn't fit on one of the ### Designing a device map -You can let πŸ€— Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go. +You can let Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go. diff --git a/docs/source/concept_guides/deferring_execution.md b/docs/source/concept_guides/deferring_execution.md index f90b38e6a8b..451cac35f74 100644 --- a/docs/source/concept_guides/deferring_execution.md +++ b/docs/source/concept_guides/deferring_execution.md @@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Deferring Executions +# DExecuting and deferring jobs -When you run your usual script, instructions are executed in order. Using πŸ€— Accelerate to deploy your script on several +When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be faster than others. diff --git a/docs/source/concept_guides/fsdp_and_deepspeed.md b/docs/source/concept_guides/fsdp_and_deepspeed.md index 6665eb8885c..b51a3899ca6 100644 --- a/docs/source/concept_guides/fsdp_and_deepspeed.md +++ b/docs/source/concept_guides/fsdp_and_deepspeed.md @@ -13,15 +13,15 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Moving between FSDP And DeepSpeed +# FSDP vs DeepSpeed -πŸ€— Accelerate offers flexibilty of training frameworks, by integrating two extremely powerful tools for distributed training, namely [Pytorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks. +Accelerate offers flexibilty of training frameworks, by integrating two extremely powerful tools for distributed training, namely [Pytorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). 
The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks. - To switch between the frameworks, we recommend launching code πŸ€— `accelerate launch` passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch) . + To switch between the frameworks, we recommend launching code `accelerate launch` passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch) . - Example πŸ€— Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore) + Example Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore) @@ -47,7 +47,7 @@ parameters summoning | FSDP
DeepSpeed | `--fsdp_use_orig_params`<br>None | `t
parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` |
training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>
`auto` | Transparent to user -For detailed descriptions of the above, refer to [πŸ€— `Accelerate` launch documentation](../package_reference/cli#accelerate-launch). +For detailed descriptions of the above, refer to [`Accelerate` launch documentation](../package_reference/cli#accelerate-launch). @@ -94,7 +94,7 @@ FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, grad ### Prefetching FSDP allows two prefetching configurations `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch` to improve overlap of comms / computation at a cost of extra memory, see [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html). -For DeepSpeed, the prefetching will be turned on when needed, and it turns on depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc, [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); πŸ€— `accelerate` may set these hyper-params automatically if you don't set those explicitly in the deepspeed config file. +For DeepSpeed, the prefetching will be turned on when needed, and it turns on depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc, [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); `accelerate` may set these hyper-params automatically if you don't set those explicitly in the deepspeed config file. @@ -104,11 +104,11 @@ For DeepSpeed, the prefetching will be turned on when needed, and it turns on de ### Model Loading -While FSDP require an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, πŸ€— `transformers` will activate the similar feature whenever DeepSpeed Zero3 is used. +While FSDP require an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, `transformers` will activate the similar feature whenever DeepSpeed Zero3 is used. - For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, πŸ€— `accelerate` will automatically set `sync_module_states` to true. + For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true. For RAM efficient loading the weights will be loaded only in a singe rank, and thus requires `sync_module_states` to broadcast weights to other ranks. diff --git a/docs/source/concept_guides/gradient_synchronization.md b/docs/source/concept_guides/gradient_synchronization.md index d90ac29fd23..523f6a916ac 100644 --- a/docs/source/concept_guides/gradient_synchronization.md +++ b/docs/source/concept_guides/gradient_synchronization.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Gradient Synchronization +# Gradient synchronization PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system. This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints @@ -28,7 +28,7 @@ from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10, 10) ddp_model = DistributedDataParallel(model) ``` -In πŸ€— Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model. +In Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model. 
```diff + from accelerate import Accelerator @@ -90,7 +90,7 @@ for index, batch in enumerate(dataloader): optimizer.step() ``` -In πŸ€— Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), +In Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), `ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way: ```diff diff --git a/docs/source/concept_guides/internal_mechanism.md b/docs/source/concept_guides/internal_mechanism.md index 2410d882bb5..2e4eb71aa5f 100644 --- a/docs/source/concept_guides/internal_mechanism.md +++ b/docs/source/concept_guides/internal_mechanism.md @@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# πŸ€— Accelerate's internal mechanisms +# Accelerate's internal mechanisms -Internally, πŸ€— Accelerate works by first analyzing the environment in which the script is launched to determine which +Internally, Accelerate works by first analyzing the environment in which the script is launched to determine which kind of distributed setup is used, how many different processes there are and which one the current script is in. All that information is stored in the [`~AcceleratorState`]. @@ -69,10 +69,6 @@ setting the same seed in the main random number generator in all processes. - - If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`. - - For more details about the internals, see the [Internals page](package_reference/torch_wrappers). diff --git a/docs/source/concept_guides/low_precision_training.md b/docs/source/concept_guides/low_precision_training.md index 2d8df0ca4e7..e7527cce758 100644 --- a/docs/source/concept_guides/low_precision_training.md +++ b/docs/source/concept_guides/low_precision_training.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Low Precision Training Methods +# Low precision training methods The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main). @@ -36,7 +36,7 @@ MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16 `TransformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilizes their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model. 
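A minimal sketch of opting into this backend from Accelerate (assuming an FP8-capable GPU and the `transformer-engine` package are available; see the low precision usage guide for the full set of recipe options):

```py
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Request FP8 mixed precision with the TransformersEngine backend; the layer
# swapping described below happens when the model is passed to `prepare()`.
kwargs = [FP8RecipeKwargs(backend="TE")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```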
-Specifically, πŸ€— Accelerate will find and replace the following layers with `TransformersEngine` versions: +Specifically, Accelerate will find and replace the following layers with `TransformersEngine` versions: * `nn.LayerNorm` for `te.LayerNorm` * `nn.Linear` for `te.Linear` @@ -67,7 +67,7 @@ MS-AMP takes a different approach to `TransformersEngine` by providing three dif * The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally it's been shown that this will only provide a net-gain of no degraded end accuracy, increased training speed, and reduced memory as now every state is either in FP16 or FP8. -* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the πŸ€— Accelerate integration +* Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the Accelerate integration ## Combining the two diff --git a/docs/source/concept_guides/performance.md b/docs/source/concept_guides/performance.md index 8b112005365..b59bb2049a3 100644 --- a/docs/source/concept_guides/performance.md +++ b/docs/source/concept_guides/performance.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Comparing performance between different device setups +# Comparing performance across distributed setups Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for. For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate diff --git a/docs/source/concept_guides/training_tpu.md b/docs/source/concept_guides/training_tpu.md index 45c10f0384f..97f799179bd 100644 --- a/docs/source/concept_guides/training_tpu.md +++ b/docs/source/concept_guides/training_tpu.md @@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Training on TPUs with πŸ€— Accelerate +# Training on TPUs -Training on TPUs can be slightly different from training on multi-gpu, even with πŸ€— Accelerate. This guide aims to show you +Training on TPUs can be slightly different from training on multi-gpu, even with Accelerate. This guide aims to show you where you should be careful and why, as well as the best practices in general. 
## Training in a Notebook @@ -81,7 +81,7 @@ notebook_launcher(training_function) - The `notebook_launcher` will default to 8 processes if πŸ€— Accelerate has been configured for a TPU + The `notebook_launcher` will default to 8 processes if Accelerate has been configured for a TPU @@ -128,10 +128,10 @@ And finally calling the training function with: ## Mixed Precision and Global Variables -As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), πŸ€— Accelerate supports fp16 and bf16, both of which can be used on TPUs. +As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), Accelerate supports fp16 and bf16, both of which can be used on TPUs. That being said, ideally `bf16` should be utilized as it is extremely efficient to use. -There are two "layers" when using `bf16` and πŸ€— Accelerate on TPUs, at the base level and at the operation level. +There are two "layers" when using `bf16` and Accelerate on TPUs, at the base level and at the operation level. At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as: ```python diff --git a/docs/source/index.md b/docs/source/index.md index 0c08d5f60b7..28db7a0801c 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -15,7 +15,7 @@ rendered properly in your Markdown viewer. # Accelerate -πŸ€— Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable. +Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable. ```diff + from accelerate import Accelerator @@ -37,7 +37,7 @@ rendered properly in your Markdown viewer. scheduler.step() ``` -Built on `torch_xla` and `torch.distributed`, πŸ€— Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. +Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training! @@ -56,11 +56,11 @@ accelerate launch {my_script.py} diff --git a/docs/source/package_reference/big_modeling.md b/docs/source/package_reference/big_modeling.md index 98383f702d9..77dab4525ed 100644 --- a/docs/source/package_reference/big_modeling.md +++ b/docs/source/package_reference/big_modeling.md @@ -15,33 +15,78 @@ rendered properly in your Markdown viewer. 
# Working with large models -## Dispatching and Offloading Models +## Dispatch and offload + +### init_empty_weights [[autodoc]] big_modeling.init_empty_weights + +### cpu_offload + [[autodoc]] big_modeling.cpu_offload + +### cpu_offload_with_hook + [[autodoc]] big_modeling.cpu_offload_with_hook + +### disk_offload + [[autodoc]] big_modeling.disk_offload + +### dispatch_model + [[autodoc]] big_modeling.dispatch_model + +### load_checkpoint_and_dispatch + [[autodoc]] big_modeling.load_checkpoint_and_dispatch + +### load_checkpoint_in_model + [[autodoc]] big_modeling.load_checkpoint_in_model + +### infer_auto_device_map + [[autodoc]] utils.infer_auto_device_map -## Model Hooks +## Hooks -### Hook Classes +### ModelHook [[autodoc]] hooks.ModelHook + +### AlignDevicesHook + [[autodoc]] hooks.AlignDevicesHook + +### SequentialHook + [[autodoc]] hooks.SequentialHook -### Adding Hooks +## Adding Hooks + +### add_hook_to_module [[autodoc]] hooks.add_hook_to_module + +### attach_execution_device_hook + [[autodoc]] hooks.attach_execution_device_hook + +### attach_align_device_hook + [[autodoc]] hooks.attach_align_device_hook + +### attach_align_device_hook_on_blocks + [[autodoc]] hooks.attach_align_device_hook_on_blocks -### Removing Hooks +## Removing Hooks + +### remove_hook_from_module [[autodoc]] hooks.remove_hook_from_module + +### remove_hook_from_submodules + [[autodoc]] hooks.remove_hook_from_submodules \ No newline at end of file diff --git a/docs/source/package_reference/deepspeed.md b/docs/source/package_reference/deepspeed.md index 9e247f1ac40..f6deef072f8 100644 --- a/docs/source/package_reference/deepspeed.md +++ b/docs/source/package_reference/deepspeed.md @@ -13,16 +13,28 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Utilities for DeepSpeed +# DeepSpeed utilities -[[autodoc]] utils.DeepSpeedPlugin +## DeepSpeedPlugin -[[autodoc]] utils.deepspeed.DummyOptim +[[autodoc]] utils.DeepSpeedPlugin [[autodoc]] utils.deepspeed.DummyScheduler +## DeepSpeedEnginerWrapper + [[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper +## DeepSpeedOptimizerWrapper + [[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper +## DeepSpeedSchedulerWrapper + [[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper + +## DummyOptim + +[[autodoc]] utils.deepspeed.DummyOptim + +## DummyScheduler \ No newline at end of file diff --git a/docs/source/package_reference/fp8.md b/docs/source/package_reference/fp8.md index 5b16e658cac..2b3420f5d70 100644 --- a/docs/source/package_reference/fp8.md +++ b/docs/source/package_reference/fp8.md @@ -13,16 +13,26 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# FP8 Functionality +# FP8 Below are functions and classes relative to the underlying FP8 implementation +## FP8RecipeKwargs + [[autodoc]] utils.FP8RecipeKwargs +## convert_model + [[autodoc]] utils.convert_model +## has_transformer_engine_layers + [[autodoc]] utils.has_transformer_engine_layers +## contextual_fp8_autocast + [[autodoc]] utils.contextual_fp8_autocast +## apply_fp8_autowrap + [[autodoc]] utils.apply_fp8_autowrap diff --git a/docs/source/package_reference/fsdp.md b/docs/source/package_reference/fsdp.md index ca35ffc7735..b419ea3bc7c 100644 --- a/docs/source/package_reference/fsdp.md +++ b/docs/source/package_reference/fsdp.md @@ -13,12 +13,20 @@ specific language governing permissions and limitations under the License. 
rendered properly in your Markdown viewer. --> -# Utilities for Fully Sharded Data Parallelism +# Fully Sharded Data Parallel utilities + +## enable_fsdp_ram_efficient_loading [[autodoc]] utils.enable_fsdp_ram_efficient_loading +## disable_fsdp_ram_efficient_loading + [[autodoc]] utils.disable_fsdp_ram_efficient_loading +## merge_fsdp_weights + [[autodoc]] utils.merge_fsdp_weights +## FullyShardedDataParallelPlugin + [[autodoc]] utils.FullyShardedDataParallelPlugin diff --git a/docs/source/package_reference/inference.md b/docs/source/package_reference/inference.md index 4347f98441e..5e41646b94d 100644 --- a/docs/source/package_reference/inference.md +++ b/docs/source/package_reference/inference.md @@ -13,8 +13,10 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# The inference API +# Pipeline parallelism -These docs refer to the [PiPPy](https://github.com/PyTorch/PiPPy) integration. +Accelerate supports pipeline parallelism for large-scale training with the PyTorch [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html) API. + +## prepare_pippy [[autodoc]] inference.prepare_pippy diff --git a/docs/source/package_reference/kwargs.md b/docs/source/package_reference/kwargs.md index a45b6793f6f..a2ecf966c12 100644 --- a/docs/source/package_reference/kwargs.md +++ b/docs/source/package_reference/kwargs.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Kwargs Handlers +# Kwargs handlers The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects related to distributed training or mixed precision are created. diff --git a/docs/source/package_reference/launchers.md b/docs/source/package_reference/launchers.md index a0335c8cf89..7da4087c853 100644 --- a/docs/source/package_reference/launchers.md +++ b/docs/source/package_reference/launchers.md @@ -17,6 +17,10 @@ rendered properly in your Markdown viewer. Functions for launching training on distributed processes. +## notebook_launcher [[autodoc]] accelerate.notebook_launcher + +## debug_launcher + [[autodoc]] accelerate.debug_launcher \ No newline at end of file diff --git a/docs/source/package_reference/logging.md b/docs/source/package_reference/logging.md index 588913016e1..ef02060654b 100644 --- a/docs/source/package_reference/logging.md +++ b/docs/source/package_reference/logging.md @@ -13,9 +13,9 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Logging with Accelerate +# Logging Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn -how to use πŸ€— Accelerate's logger. +how to use Accelerate's logger. [[autodoc]] logging.get_logger \ No newline at end of file diff --git a/docs/source/package_reference/megatron_lm.md b/docs/source/package_reference/megatron_lm.md index d1874fdab11..cddabdde9bb 100644 --- a/docs/source/package_reference/megatron_lm.md +++ b/docs/source/package_reference/megatron_lm.md @@ -13,20 +13,36 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. 
--> -# Utilities for Megatron-LM +# Megatron-LM utilities + +## MegatronLMPlugin [[autodoc]] utils.MegatronLMPlugin +## MegatronLMDummyScheduler + [[autodoc]] utils.MegatronLMDummyScheduler +## MegatronLMDummyDataLoader + [[autodoc]] utils.MegatronLMDummyDataLoader +## AbstractTrainStep + [[autodoc]] utils.AbstractTrainStep +## GPTTrainStep + [[autodoc]] utils.GPTTrainStep +## BertTrainStep + [[autodoc]] utils.BertTrainStep +## T5TrainStep + [[autodoc]] utils.T5TrainStep +## avg_losses_across_data_parallel_group + [[autodoc]] utils.avg_losses_across_data_parallel_group diff --git a/docs/source/package_reference/state.md b/docs/source/package_reference/state.md index 56c38dd461d..2cdd94b82e0 100644 --- a/docs/source/package_reference/state.md +++ b/docs/source/package_reference/state.md @@ -21,8 +21,14 @@ instances share the same state, which is initialized on the first instantiation. These classes are immutable and store information about certain configurations or states. +## PartialState + [[autodoc]] state.PartialState +## AcceleratorState + [[autodoc]] state.AcceleratorState +## GradientState + [[autodoc]] state.GradientState \ No newline at end of file diff --git a/docs/source/package_reference/torch_wrappers.md b/docs/source/package_reference/torch_wrappers.md index 17350e3441f..84c88bca7e4 100644 --- a/docs/source/package_reference/torch_wrappers.md +++ b/docs/source/package_reference/torch_wrappers.md @@ -13,25 +13,36 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Wrapper classes for torch Dataloaders, Optimizers, and Schedulers +# DataLoaders, Optimizers, and Schedulers The internal classes Accelerate uses to prepare objects for distributed training when calling [`~Accelerator.prepare`]. -## Datasets and DataLoaders +## DataLoader utilities [[autodoc]] data_loader.prepare_data_loader [[autodoc]] data_loader.skip_first_batches +## BatchSamplerShard + [[autodoc]] data_loader.BatchSamplerShard + +## IterableDatasetShard + [[autodoc]] data_loader.IterableDatasetShard + +## DataLoaderShard + [[autodoc]] data_loader.DataLoaderShard + +## DataLoaderDispatcher + [[autodoc]] data_loader.DataLoaderDispatcher -## Optimizers +## AcceleratedOptimizer [[autodoc]] optimizer.AcceleratedOptimizer -## Schedulers +## AcceleratedScheduler [[autodoc]] scheduler.AcceleratedScheduler \ No newline at end of file diff --git a/docs/source/package_reference/tracking.md b/docs/source/package_reference/tracking.md index 6845ca4bc05..4f69e027b11 100644 --- a/docs/source/package_reference/tracking.md +++ b/docs/source/package_reference/tracking.md @@ -13,23 +13,38 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. 
--> -# Experiment Tracking +# Experiment Trackers -## The Base Tracker Class +## GeneralTracker [[autodoc]] tracking.GeneralTracker -## Integrated Trackers +## TensorBoardTracker [[autodoc]] tracking.TensorBoardTracker - __init__ + +## WandBTracker + [[autodoc]] tracking.WandBTracker - __init__ + +## CometMLTracker + [[autodoc]] tracking.CometMLTracker - __init__ + +## AimTracker + [[autodoc]] tracking.AimTracker - __init__ + +## MLflowTracker + [[autodoc]] tracking.MLflowTracker - __init__ + +## ClearMLTracker + [[autodoc]] tracking.ClearMLTracker - __init__ diff --git a/docs/source/package_reference/utilities.md b/docs/source/package_reference/utilities.md index 2b7fa2c0b4a..b1c34cb5b7a 100644 --- a/docs/source/package_reference/utilities.md +++ b/docs/source/package_reference/utilities.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Helpful Utilities +# Utility functions and classes Below are a variety of utility functions that πŸ€— Accelerate provides, broken down by use-case. diff --git a/docs/source/usage_guides/big_modeling.md b/docs/source/usage_guides/big_modeling.md index 2c95ecf18b5..27c1f9ec749 100644 --- a/docs/source/usage_guides/big_modeling.md +++ b/docs/source/usage_guides/big_modeling.md @@ -13,15 +13,15 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Handling big models for inference +# Big Model Inference -One of the biggest advancements πŸ€— Accelerate provides is the concept of [large model inference](../concept_guides/big_model_inference) wherein you can perform *inference* on models that cannot fully fit on your graphics card. +One of the biggest advancements Accelerate provides is [Big Model Inference](../concept_guides/big_model_inference), which allows you to perform inference with models that don't fully fit on your graphics card. -This tutorial will be broken down into two parts showcasing how to use both πŸ€— Accelerate and πŸ€— Transformers (a higher API-level) to make use of this idea. +This tutorial will show you how to use Big Model Inference in Accelerate and the Hugging Face ecosystem. -## Using πŸ€— Accelerate +## Accelerate -For these tutorials, we'll assume a typical workflow for loading your model in such that: +A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps or cuda). ```py import torch @@ -31,9 +31,7 @@ state_dict = torch.load(checkpoint_file) my_model.load_state_dict(state_dict) ``` -Note that here we assume that `ModelClass` is a model that takes up more video-card memory than what can fit on your device (be it `mps` or `cuda`). - -The first step is to init an empty skeleton of the model which won't take up any RAM using the [`init_empty_weights`] context manager: +With Big Model Inference, the first step is to init an empty skeleton of the model with the `init_empty_weights` context manager. This doesn't require any memory because `my_model` is "parameterless". ```py from accelerate import init_empty_weights @@ -41,22 +39,14 @@ with init_empty_weights(): my_model = ModelClass(...) ``` -With this `my_model` currently is "parameterless", hence leaving the smaller footprint than what one would normally get loading this onto the CPU directly. - -Next we need to load in the weights to our model so we can perform inference. 
- -For this we will use [`load_checkpoint_and_dispatch`], which as the name implies will load a checkpoint inside your empty model and dispatch the weights for each layer across all the devices you have available (GPU/MPS and CPU RAM). - -To determine how this `dispatch` can be performed, generally specifying `device_map="auto"` will be good enough as πŸ€— Accelerate -will attempt to fill all the space in your GPU(s), then loading them to the CPU, and finally if there is not enough RAM it will be loaded to the disk (the absolute slowest option). +Next, the weights are loaded into the model for inference. - +The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, MUSA) first before moving to the slower ones (CPU and hard drive). -For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map) +Setting `device_map="auto"` automatically fills all available space on the GPU(s) first, then the CPU, and finally, the hard drive (the absolute slowest option) if there is still not enough memory. - - -See an example below: +> [!TIP} +> Refer to the [Designing a device map](../concept_guides/big_model_inference#designing-a-device-map) guide for more details on how to design your own device map. ```py from accelerate import load_checkpoint_and_dispatch @@ -66,19 +56,11 @@ model = load_checkpoint_and_dispatch( ) ``` - - - If there are certain "chunks" of layers that shouldn't be split, you can pass them in as `no_split_module_classes`. Read more about it [here](../concept_guides/big_model_inference#loading-weights) - - +If there are certain β€œchunks” of layers that shouldn’t be split, pass them to `no_split_module_classes` (see [here](../concept_guides/big_model_inference#loading-weights) for more details). - +A models weights can also be sharded into multiple checkpoints to save memory, such as when the `state_dict` doesn't fit in memory (see [here](../concept_guides/big_model_inference#sharded-checkpoints) for more details). - Also to save on memory (such as if the `state_dict` will not fit in RAM), a model's weights can be divided and split into multiple checkpoint files. Read more about it [here](../concept_guides/big_model_inference#sharded-checkpoints) - - - -Now that the model is dispatched fully, you can perform inference as normal with the model: +Now that the model is fully dispatched, you can perform inference. ```py input = torch.randn(2,3) @@ -86,22 +68,16 @@ input = input.to("cuda") output = model(input) ``` -What will happen now is each time the input gets passed through a layer, it will be sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and then the layer is pulled back off the GPU going back down the line. While this adds some overhead to the inference being performed, through this method it is possible to run **any size model** on your system, as long as the largest layer is capable of fitting on your GPU. - - +Each time an input is passed through a layer, it is sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and the layer is removed from the GPU going back down the line. While this adds some overhead to inference, it enables you to run any size model on your system, as long as the largest layer fits on your GPU. 
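To see where each layer was placed, and therefore where these device-to-device transfers will happen, you can inspect the model's `hf_device_map` attribute, which is set during dispatch (see the concept guide):

```py
# Maps each module name to its assigned device: a GPU index, "cpu", or "disk".
print(model.hf_device_map)
```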
- Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python` - and not need `torchrun`, `accelerate launch`, etc. +Multiple GPUs, or "model parallelism", can be utilized but only one GPU will be active at any given moment. This forces the GPU to wait for the previous GPU to send it the output. You should launch your script normally with Python instead of other tools like torchrun and accelerate launch. - +> [!TIP] +> You may also be interested in *pipeline parallelism* which utilizes all available GPUs at once, instead of only having one GPU active at a time. This approach is less flexbile though. For more details, refer to the [Memory-efficient pipeline parallelism](./distributed_inference#memory-efficient-pipeline-parallelism-experimental) guide. -For a visual representation of this, check out the animation below: + - - -### Complete Example - -Below is the full example showcasing what we performed above: +Take a look at a full example of Big Model Inference below. ```py import torch @@ -119,13 +95,13 @@ input = input.to("cuda") output = model(input) ``` -## Using πŸ€— Transformers, πŸ€— Diffusers, and other πŸ€— Open Source Libraries +## Hugging Face ecosystem -Libraries that support πŸ€— Accelerate big model inference include all of the earlier logic in their `from_pretrained` constructors. +Other libraries in the Hugging Face ecosystem, like Transformers or Diffusers, supports Big Model Inference in their [`~transformers.PreTrainedModel.from_pretrained`] constructors. -These operate by specifying a string representing the model to download from the [πŸ€— Hub](https://hf.co/models) and then denoting `device_map="auto"` along with a few extra parameters. +You just need to add `device_map="auto"` in [`~transformers.PreTrainedModel.from_pretrained`] to enable Big Model Inference. -As a brief example, we will look at using `transformers` and loading in Big Science's T0pp model. +For example, load Big Sciences T0pp 11 billion parameter model with Big Model Inference. ```py from transformers import AutoModelForSeq2SeqLM @@ -133,9 +109,7 @@ from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` -After loading the model in, the initial steps from before to prepare a model have all been done and the model is fully -ready to make use of all the resources in your machine. Through these constructors, you can also save *more* memory by -specifying the precision the model is loaded into as well, through the `torch_dtype` parameter, such as: +After loading the model, the empty init and smart dispatch steps from before are executed and the model is fully ready to make use of all the resources in your machine. Through these constructors, you can also save more memory by specifying the `torch_dtype` parameter to load a model in a lower precision. ```py from transformers import AutoModelForSeq2SeqLM @@ -143,8 +117,6 @@ from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16) ``` -To learn more about this, check out the πŸ€— Transformers documentation available [here](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). 
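Note that the `torch_dtype=torch.float16` snippet above also needs `torch` itself in scope. A complete, minimal version of that call looks like this:

```py
import torch
from transformers import AutoModelForSeq2SeqLM

# load the checkpoint directly in half precision to save additional memory
model = AutoModelForSeq2SeqLM.from_pretrained(
    "bigscience/T0pp", device_map="auto", torch_dtype=torch.float16
)
```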
- -## Where to go from here +## Next steps -For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference) +For a more detailed explanation of Big Model Inference, make sure to check out the [conceptual guide](../concept_guides/big_model_inference)! diff --git a/docs/source/usage_guides/checkpoint.md b/docs/source/usage_guides/checkpoint.md index b8943b421da..9c08a454d0e 100644 --- a/docs/source/usage_guides/checkpoint.md +++ b/docs/source/usage_guides/checkpoint.md @@ -15,8 +15,8 @@ rendered properly in your Markdown viewer. # Checkpointing -When training a PyTorch model with πŸ€— Accelerate, you may often want to save and continue a state of training. Doing so requires -saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside πŸ€— Accelerate are two convenience functions to achieve this quickly: +When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires +saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convenience functions to achieve this quickly: - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state` diff --git a/docs/source/usage_guides/ddp_comm_hook.md b/docs/source/usage_guides/ddp_comm_hook.md index 9e903212af4..24f8143c189 100644 --- a/docs/source/usage_guides/ddp_comm_hook.md +++ b/docs/source/usage_guides/ddp_comm_hook.md @@ -23,7 +23,7 @@ Distributed Data Parallel (DDP) communication hooks provide a generic interface - **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware. - **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training. -In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in πŸ€— Accelerate, which can be as simple as adding just one new line of code! This demonstrates how to use DDP communication hooks to optimize gradient communication in distributed training with the πŸ€— Accelerate library. +In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in Accelerate, which can be as simple as adding just one new line of code! This demonstrates how to use DDP communication hooks to optimize gradient communication in distributed training with the Accelerate library. ## FP16 Compression Hook diff --git a/docs/source/usage_guides/deepspeed.md b/docs/source/usage_guides/deepspeed.md index ca071a7eca5..82466597bcd 100644 --- a/docs/source/usage_guides/deepspeed.md +++ b/docs/source/usage_guides/deepspeed.md @@ -33,7 +33,7 @@ DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which won't be possible on a single GPU. -πŸ€— Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options: +Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options: 1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . 
You just supply your custom config file or use our template. Most of this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. @@ -45,7 +45,7 @@ won't be possible on a single GPU. Training: -1. πŸ€— Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++. +1. Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++. Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/) ![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png) @@ -727,7 +727,7 @@ Papers: - [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209) -Finally, please, remember that πŸ€— `Accelerate` only integrates DeepSpeed, therefore if you +Finally, please, remember that `Accelerate` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues). diff --git a/docs/source/usage_guides/distributed_inference.md b/docs/source/usage_guides/distributed_inference.md index 045630bf3ab..82fdc21031d 100644 --- a/docs/source/usage_guides/distributed_inference.md +++ b/docs/source/usage_guides/distributed_inference.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Distributed Inference with πŸ€— Accelerate +# Distributed inference Distributed inference can fall into three brackets: @@ -56,13 +56,13 @@ def run_inference(rank, world_size): ``` One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious. -A user might then also think that with πŸ€— Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be +A user might then also think that with Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation)) Can it manage it? Yes. Does it add unneeded extra code however: also yes. -With πŸ€— Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`). +With Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`). This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential to be padded) for you to use right away. 
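As a bare-bones sketch of what this looks like (no model involved, just the data splitting), consider the following script launched with `accelerate launch --num_processes 2`:

```py
from accelerate import PartialState

state = PartialState()

# each process only sees its own slice of the list, e.g. with 2 processes one
# rank might receive ["a dog", "a cat"] and the other ["a frog"]
with state.split_between_processes(["a dog", "a cat", "a frog"]) as prompts:
    print(f"process {state.process_index}: {prompts}")
```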
@@ -82,7 +82,7 @@ with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: result.save(f"result_{distributed_state.process_index}.png") ``` -And then to launch the code, we can use the πŸ€— Accelerate: +And then to launch the code, we can use the Accelerate: If you have generated a config file to be used using `accelerate config`: @@ -144,7 +144,7 @@ You can find more complex examples [here](https://github.com/huggingface/acceler ## Memory-efficient pipeline parallelism (experimental) -This next part will discuss using *pipeline parallelism*. This is an **experimental** API utilizing the [PiPPy library by PyTorch](https://github.com/pytorch/PiPPy/) as a native solution. +This next part will discuss using *pipeline parallelism*. This is an **experimental** API that utilizes [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html#) as a native solution. The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`. With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository: @@ -152,14 +152,12 @@ The general idea with pipeline parallelism is: say you have 4 GPUs and a model b To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs. -Before you proceed, please make sure you have the latest pippy installed by running the following: +Before you proceed, please make sure you have the latest PyTorch version installed by running the following: ```bash -pip install torchpippy +pip install torch ``` -We require at least version 0.2.0. To confirm that you have the correct version, run `pip show torchpippy`. - Start by creating the model on the CPU: ```{python} diff --git a/docs/source/usage_guides/explore.md b/docs/source/usage_guides/explore.md index 533c4cf444f..a2b8cc0d1e1 100644 --- a/docs/source/usage_guides/explore.md +++ b/docs/source/usage_guides/explore.md @@ -13,14 +13,14 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Learning how to incorporate πŸ€— Accelerate features quickly! +# Start Here! Please use the interactive tool below to help you get started with learning about a particular -feature of πŸ€— Accelerate and how to utilize it! It will provide you with a code diff, an explanation +feature of Accelerate and how to utilize it! It will provide you with a code diff, an explanation towards what is going on, as well as provide you with some useful links to explore more within the documentation! 
-Most code examples start from the following python code before integrating πŸ€— Accelerate in some way: +Most code examples start from the following python code before integrating Accelerate in some way: ```python for batch in dataloader: diff --git a/docs/source/usage_guides/fsdp.md b/docs/source/usage_guides/fsdp.md index f315d6b5edc..a2f9c717a3f 100644 --- a/docs/source/usage_guides/fsdp.md +++ b/docs/source/usage_guides/fsdp.md @@ -79,7 +79,7 @@ Currently, `Accelerate` supports the following config through the CLI: `fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP -`fsdp_transformer_layer_cls_to_wrap`: Only applicable for πŸ€— Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer-based models. You can use the `model._no_split_modules` for πŸ€— Transformer models by answering `yes` to `Do you want to use the model's `_no_split_modules` to wrap. It will try to use `model._no_split_modules` when possible. +`fsdp_transformer_layer_cls_to_wrap`: Only applicable for Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer-based models. You can use the `model._no_split_modules` for Transformer models by answering `yes` to `Do you want to use the model's `_no_split_modules` to wrap. It will try to use `model._no_split_modules` when possible. `fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`. @@ -91,7 +91,7 @@ Currently, `Accelerate` supports the following config through the CLI: `fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP. -`fsdp_cpu_ram_efficient_loading`: Only applicable for πŸ€— Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained πŸ€— Transformers model via `from_pretrained` method. 
When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling Transformers `from_pretrained` method. When using πŸ€— Trainer API, the distributed process group is initialized when you create an instance of `TrainingArguments` class. +`fsdp_cpu_ram_efficient_loading`: Only applicable for Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling Transformers `from_pretrained` method. When using Trainer API, the distributed process group is initialized when you create an instance of `TrainingArguments` class. `fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0. @@ -187,7 +187,7 @@ accelerate merge-weights pytorch_model_fsdp_0/ output_path ## A few caveats to be aware of - In case of multiple models, pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour. -- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of πŸ€— `Transformers` library. +- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of `Transformers` library. For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation. For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code. diff --git a/docs/source/usage_guides/gradient_accumulation.md b/docs/source/usage_guides/gradient_accumulation.md index 7960e6b0e4c..3efa5798d2f 100644 --- a/docs/source/usage_guides/gradient_accumulation.md +++ b/docs/source/usage_guides/gradient_accumulation.md @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Performing gradient accumulation with πŸ€— Accelerate +# Performing gradient accumulation with Accelerate Gradient accumulation is a technique where you can train on bigger batch sizes than your machine would normally be able to fit into memory. This is done by accumulating gradients over @@ -22,7 +22,7 @@ several batches, and only stepping the optimizer after a certain number of batch While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient method for doing so and you may experience considerable slowdowns! 
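Concretely (with made-up numbers): accumulating gradients over 4 batches of 8 samples on each of 2 GPUs yields an effective batch size of 8 × 4 × 2 = 64, even though each device only ever holds 8 samples in memory at a time.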
-In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in πŸ€— Accelerate, +In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in Accelerate, which can total to adding just one new line of code! This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: @@ -47,9 +47,9 @@ for index, batch in enumerate(training_dataloader): optimizer.zero_grad() ``` -## Converting it to πŸ€— Accelerate +## Converting it to Accelerate -First the code shown earlier will be converted to utilize πŸ€— Accelerate without the special gradient accumulation helper: +First the code shown earlier will be converted to utilize Accelerate without the special gradient accumulation helper: ```diff + from accelerate import Accelerator @@ -79,9 +79,9 @@ First the code shown earlier will be converted to utilize πŸ€— Accelerate withou -## Letting πŸ€— Accelerate handle gradient accumulation +## Letting Accelerate handle gradient accumulation -All that is left now is to let πŸ€— Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number +All that is left now is to let Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]: ```diff @@ -120,7 +120,7 @@ As you can see the [`Accelerator`] is able to keep track of the batch number you Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are -training on. πŸ€— Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this. +training on. Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this. @@ -140,7 +140,7 @@ accelerator = Accelerator(..., gradient_accumulation_plugin=plugin) ## The finished code -Below is the finished implementation for performing gradient accumulation with πŸ€— Accelerate +Below is the finished implementation for performing gradient accumulation with Accelerate ```python from accelerate import Accelerator @@ -171,7 +171,7 @@ To learn more about what magic this wraps around, read the [Gradient Synchroniza ## Self-contained example -Here is a self-contained example that you can run to see gradient accumulation in action with πŸ€— Accelerate: +Here is a self-contained example that you can run to see gradient accumulation in action with Accelerate: ```python import torch diff --git a/docs/source/usage_guides/ipex.md b/docs/source/usage_guides/ipex.md index 8783bdc9bef..bbab293acd4 100644 --- a/docs/source/usage_guides/ipex.md +++ b/docs/source/usage_guides/ipex.md @@ -40,7 +40,7 @@ Check more approaches for [IPEX installation](https://intel.github.io/intel-exte ## How It Works For Training optimization in CPU -πŸ€— Accelerate has integrated [IPEX](https://github.com/intel/intel-extension-for-pytorch), all you need to do is enabling it through the config. +Accelerate has integrated [IPEX](https://github.com/intel/intel-extension-for-pytorch), all you need to do is enabling it through the config. 
**Scenario 1**: Acceleration of No distributed CPU training diff --git a/docs/source/usage_guides/local_sgd.md b/docs/source/usage_guides/local_sgd.md index 5bee411433d..3b030ae39e5 100644 --- a/docs/source/usage_guides/local_sgd.md +++ b/docs/source/usage_guides/local_sgd.md @@ -13,12 +13,12 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Using Local SGD with πŸ€— Accelerate +# Using Local SGD with Accelerate Local SGD is a technique for distributed training where gradients are not synchronized every step. Thus, each process updates its own version of the model weights and after a given number of steps these weights are synchronized by averaging across all processes. This improves communication efficiency and can lead to substantial training speed up especially when a computer lacks a faster interconnect such as NVLink. Unlike gradient accumulation (where improving communication efficiency requires increasing the effective batch size), Local SGD does not require changing a batch size or a learning rate / schedule. However, if necessary, Local SGD can be combined with gradient accumulation as well. -In this tutorial you will see how to quickly setup Local SGD πŸ€— Accelerate. Compared to a standard Accelerate setup, this requires only two extra lines of code. +In this tutorial you will see how to quickly setup Local SGD Accelerate. Compared to a standard Accelerate setup, this requires only two extra lines of code. This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: @@ -42,9 +42,9 @@ for index, batch in enumerate(training_dataloader): optimizer.zero_grad() ``` -## Converting it to πŸ€— Accelerate +## Converting it to Accelerate -First the code shown earlier will be converted to use πŸ€— Accelerate with neither a LocalSGD or a gradient accumulation helper: +First the code shown earlier will be converted to use Accelerate with neither a LocalSGD or a gradient accumulation helper: ```diff + from accelerate import Accelerator @@ -67,9 +67,9 @@ First the code shown earlier will be converted to use πŸ€— Accelerate with neit scheduler.step() ``` -## Letting πŸ€— Accelerate handle model synchronization +## Letting Accelerate handle model synchronization -All that is left now is to let πŸ€— Accelerate handle model parameter synchronization **and** the gradient accumulation for us. For simplicity let us assume we need to synchronize every 8 steps. This is +All that is left now is to let Accelerate handle model parameter synchronization **and** the gradient accumulation for us. For simplicity let us assume we need to synchronize every 8 steps. This is achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()` after every optimizer step: ```diff diff --git a/docs/source/usage_guides/low_precision_training.md b/docs/source/usage_guides/low_precision_training.md index f8f7d83df0e..80dad01525c 100644 --- a/docs/source/usage_guides/low_precision_training.md +++ b/docs/source/usage_guides/low_precision_training.md @@ -15,11 +15,11 @@ rendered properly in your Markdown viewer. # Low Precision Training Methods -πŸ€— Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine` and `MS-AMP` packages. 
This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training. +Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training. ## What training on FP8 means -To explore more of the nitty-gritty in training in FP8 with PyTorch and πŸ€— Accelerate, check out the [concept_guide](../concept_guides/low_precision_training) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance. +To explore more of the nitty-gritty in training in FP8 with PyTorch and Accelerate, check out the [concept_guide](../concept_guides/low_precision_training) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance. This is only enabled on specific NVIDIA hardware, namely: @@ -39,7 +39,7 @@ from accelerate import Accelerator accelerator = Accelerator(mixed_precision="fp8") ``` -By default, if `MS-AMP` is available in your environment, πŸ€— Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`] or clarify it in your config `yaml`/during `accelerate launch`: +By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`] or clarify it in your config `yaml`/during `accelerate launch`: ```{python} from accelerate import Accelerator @@ -67,7 +67,7 @@ fp8_config: Of the two, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level. -Currently two levels of optimization are supported in the πŸ€— Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero). +Currently two levels of optimization are supported in the Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero). * `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths. * `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory. @@ -96,7 +96,7 @@ fp8_config: TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience. 
-πŸ€— Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can lead to better performance potentially. +Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can lead to better performance potentially. To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler: diff --git a/docs/source/usage_guides/megatron_lm.md b/docs/source/usage_guides/megatron_lm.md index 06e0fdc48e6..b1c958b4d13 100644 --- a/docs/source/usage_guides/megatron_lm.md +++ b/docs/source/usage_guides/megatron_lm.md @@ -32,7 +32,7 @@ independently and in parallel by each shard followed by syncing across all GPUs In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path. For more details, please refer research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and -this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism). +this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism). b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. @@ -41,7 +41,7 @@ Layers are distributed uniformly across PP stages. For example, if a model has ` pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP, please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and -this section of πŸ€— blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism). +this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism). c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP. It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks @@ -57,7 +57,7 @@ d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footp For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory. This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs. For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion -Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of πŸ€— blog +Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of blog [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism). e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing. @@ -72,9 +72,9 @@ PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition. g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format. h. 
**Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable -tensor and pipeline parallel sizes to the beloved πŸ€— Transformers sharded checkpoints as it has great support with plethora of tools -such as πŸ€— Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. -Support is also available for converting πŸ€— Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes +tensor and pipeline parallel sizes to the beloved Transformers sharded checkpoints as it has great support with plethora of tools +such as Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. +Support is also available for converting Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes for large scale training. @@ -359,7 +359,7 @@ def main(): 2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets are available only on rank 0 of each tensor parallel group. As such, there are rank where dataloader won't be available and this requires tweaks to the training loop. Being able to do all this shows how -flexible and extensible πŸ€— Accelerate is. The changes required are as follows. +flexible and extensible Accelerate is. The changes required are as follows. a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` and pass the required dataset args to it such as `data_path`, `seq_length` etc. @@ -391,7 +391,7 @@ c. Changes to training and evaluation loops as dataloader is only available on t So, we need to iterate only if the dataloader isn't `None` else provide empty dict As such, we loop using `while` loop and break when `completed_steps` is equal to `args.max_train_steps` This is similar to the Megatron-LM setup wherein user has to provide `max_train_steps` when using Megaton-LM indexed datasets. -This displays how flexible and extensible πŸ€— Accelerate is. +This displays how flexible and extensible Accelerate is. ```python while completed_steps < args.max_train_steps: @@ -414,10 +414,10 @@ while completed_steps < args.max_train_steps: ## Utility for Checkpoint reshaping and interoperability -1. The scripts for these are present in πŸ€— Transformers library under respective models. +1. The scripts for these are present in Transformers library under respective models. Currently, it is available for GPT model [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py) -2. Below is an example of conversion of checkpoint from Megatron-LM to universal πŸ€— Transformers sharded checkpoint. +2. Below is an example of conversion of checkpoint from Megatron-LM to universal Transformers sharded checkpoint. ```bash python checkpoint_reshaping_and_interoperability.py \ --convert_checkpoint_from_megatron_to_transformers \ @@ -569,18 +569,18 @@ setting is synonymous with gradient accumulation. 7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints. -8. Below are the mapping from Megatron-LM model architectures to the the equivalent πŸ€— transformers model architectures. -Only these πŸ€— transformers model architectures are supported. +8. Below are the mapping from Megatron-LM model architectures to the the equivalent transformers model architectures. +Only these transformers model architectures are supported. a. 
Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : -πŸ€— transformers models with `megatron-bert` in config's model type, e.g., +transformers models with `megatron-bert` in config's model type, e.g., [MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert) b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : -πŸ€— transformers models with `gpt2` in config's model type, e.g., +transformers models with `gpt2` in config's model type, e.g., [OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2) c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : -πŸ€— transformers models with `t5` in config's model type, e.g., +transformers models with `t5` in config's model type, e.g., [T5](https://huggingface.co/docs/transformers/model_doc/t5) and [MT5](https://huggingface.co/docs/transformers/model_doc/mt5) diff --git a/docs/source/usage_guides/model_size_estimator.md b/docs/source/usage_guides/model_size_estimator.md index 4e95b19875e..7ce67f56a4a 100644 --- a/docs/source/usage_guides/model_size_estimator.md +++ b/docs/source/usage_guides/model_size_estimator.md @@ -13,12 +13,12 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Understanding how big of a model can fit on your machine +# Model memory estimator One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA). -To help alleviate this, πŸ€— Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will -help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the πŸ€— Hub which will +To help alleviate this, Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will +help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the Hub which will even let you post those results directly on the model repo! Currently we support searching for models that can be used in `timm` and `transformers`. diff --git a/docs/source/usage_guides/mps.md b/docs/source/usage_guides/mps.md index 8bd2912d79c..f86cae52511 100644 --- a/docs/source/usage_guides/mps.md +++ b/docs/source/usage_guides/mps.md @@ -50,5 +50,5 @@ Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details 2. Distributed setups `gloo` and `nccl` are not working with `mps` device. This means that currently only single GPU of `mps` device type can be used. -Finally, please, remember that, πŸ€— `Accelerate` only integrates MPS backend, therefore if you +Finally, please, remember that, `Accelerate` only integrates MPS backend, therefore if you have any problems or questions with regards to MPS backend usage, please, file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues). \ No newline at end of file diff --git a/docs/source/usage_guides/profiler.md b/docs/source/usage_guides/profiler.md index be32193135e..d01101f9849 100644 --- a/docs/source/usage_guides/profiler.md +++ b/docs/source/usage_guides/profiler.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. Profiler is a tool that allows the collection of performance metrics during training and inference. 
Profiler’s context manager API can be used to better understand what model operators are the most expensive, examine their input shapes and stack traces, study device kernel activity, and visualize the execution trace. It provides insights into the performance of your model, allowing you to optimize and improve it. -This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with πŸ€— Accelerate. We will cover various use cases and provide examples for each. +This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with Accelerate. We will cover various use cases and provide examples for each. ## Using profiler to analyze execution time @@ -329,6 +329,6 @@ Self CUDA time total: 4.165ms ## Conclusion and Further Information -PyTorch Profiler is a powerful tool for analyzing the performance of your models. By integrating it with πŸ€— Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them. +PyTorch Profiler is a powerful tool for analyzing the performance of your models. By integrating it with Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them. For more detailed information, refer to the [PyTorch Profiler documentation](https://pytorch.org/docs/stable/profiler.html). \ No newline at end of file diff --git a/docs/source/usage_guides/quantization.md b/docs/source/usage_guides/quantization.md index 4c60de4fa2d..073936b2b87 100644 --- a/docs/source/usage_guides/quantization.md +++ b/docs/source/usage_guides/quantization.md @@ -13,13 +13,13 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Quantization +# Model quantization ## `bitsandbytes` Integration -πŸ€— Accelerate brings `bitsandbytes` quantization to your model. You can now load any pytorch model in 8-bit or 4-bit with a few lines of code. +Accelerate brings `bitsandbytes` quantization to your model. You can now load any pytorch model in 8-bit or 4-bit with a few lines of code. -If you want to use πŸ€— Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization). +If you want to use Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization). To learn more about how the `bitsandbytes` quantization works, check out the blog posts on [8-bit quantization](https://huggingface.co/blog/hf-bitsandbytes-integration) and [4-bit quantization](https://huggingface.co/blog/4bit-transformers-bitsandbytes). @@ -127,7 +127,7 @@ device_map = { It is not possible to perform pure 8bit or 4bit training on these models. However, you can train these models by leveraging parameter efficient fine tuning methods (PEFT) and train for example adapters on top of them. Please have a look at [peft](https://github.com/huggingface/peft) library for more details. -Currently, you can't add adapters on top of any quantized model. However, with the official support of adapters with πŸ€— Transformers models, you can fine-tune quantized models. If you want to finetune a πŸ€— Transformers model , follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. 
Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit πŸ€— Transformers model. +Currently, you can't add adapters on top of any quantized model. However, with the official support of adapters with Transformers models, you can fine-tune quantized models. If you want to finetune a Transformers model , follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit Transformers model. Note that you don’t need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. Please note that `device_map=auto` should be used for inference only. diff --git a/docs/source/usage_guides/sagemaker.md b/docs/source/usage_guides/sagemaker.md index 4d7c12f4bcf..00b946c7131 100644 --- a/docs/source/usage_guides/sagemaker.md +++ b/docs/source/usage_guides/sagemaker.md @@ -23,17 +23,16 @@ make it easier than ever to train Hugging Face Transformer models in [Amazon Sag ### Setup & Installation -Before you can run your πŸ€— Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not +Before you can run your Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). -After you have your AWS Account you need to install the `sagemaker` sdk for πŸ€— Accelerate with: +After you have your AWS Account you need to install the `sagemaker` sdk for Accelerate with: ```bash pip install "accelerate[sagemaker]" --upgrade ``` -πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. πŸ€— -Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a +Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency: ``` @@ -43,25 +42,25 @@ accelerate You should also add any other dependencies you have to this `requirements.txt`. -### Configure πŸ€— Accelerate +### Configure Accelerate You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with -the πŸ€— Accelerate CLI: +the Accelerate CLI: ```bash accelerate config # In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1 ``` -πŸ€— Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit. +Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit. - πŸ€— Accelerate is not saving any of your credentials. + Accelerate is not saving any of your credentials. -### Prepare a πŸ€— Accelerate fine-tuning script +### Prepare a Accelerate fine-tuning script The training script is very similar to a training script you might run outside of SageMaker, but to save your model after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save @@ -82,7 +81,7 @@ directory. 
After training, artifacts in this directory are uploaded to S3: ### Launch Training -You can launch your training with πŸ€— Accelerate CLI with: +You can launch your training with Accelerate CLI with: ``` accelerate launch path_to_script.py --args_to_the_script @@ -159,7 +158,7 @@ use_cpu: false ### Python packages and dependencies -πŸ€— Accelerate currently uses the πŸ€— DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you +Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages will be installed before your training script is started. @@ -198,7 +197,7 @@ additional_args: max_wait: 86400 ``` -*Note: Spot Instances are subject to be terminated and training to be continued from a checkpoint. This is not handled in πŸ€— Accelerate out of the box. Contact us if you would like this feature.* +*Note: Spot Instances are subject to be terminated and training to be continued from a checkpoint. This is not handled in Accelerate out of the box. Contact us if you would like this feature.* ### Remote scripts: Use scripts located on Github diff --git a/docs/source/usage_guides/tracking.md b/docs/source/usage_guides/tracking.md index 80d69bca046..0878158a8b6 100644 --- a/docs/source/usage_guides/tracking.md +++ b/docs/source/usage_guides/tracking.md @@ -13,10 +13,10 @@ specific language governing permissions and limitations under the License. rendered properly in your Markdown viewer. --> -# Tracking +# Experiment trackers There are a large number of experiment tracking API's available, however getting them all to work with in a multi-processing environment can oftentimes be complex. -πŸ€— Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`] +Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`] ## Integrated Trackers diff --git a/docs/source/usage_guides/training_zoo.md b/docs/source/usage_guides/training_zoo.md index ab7cc072d12..3110af5786f 100644 --- a/docs/source/usage_guides/training_zoo.md +++ b/docs/source/usage_guides/training_zoo.md @@ -15,7 +15,7 @@ rendered properly in your Markdown viewer. # Example Zoo -Below contains a non-exhaustive list of tutorials and scripts showcasing πŸ€— Accelerate +Below contains a non-exhaustive list of tutorials and scripts showcasing Accelerate. ## Official Accelerate Examples: @@ -68,7 +68,7 @@ These examples showcase every feature in Accelerate at once that was shown in "F ## Integration Examples -These are tutorials from libraries that integrate with πŸ€— Accelerate: +These are tutorials from libraries that integrate with Accelerate: > Don't find your integration here? Make a PR to include it! @@ -85,7 +85,7 @@ These are tutorials from libraries that integrate with πŸ€— Accelerate: - [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage) -### πŸ€— diffusers +### Diffusers - [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) - [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) @@ -134,7 +134,7 @@ These are tutorials from libraries that integrate with πŸ€— Accelerate: ## In Science -Below contains a non-exhaustive list of papers utilizing πŸ€— Accelerate. 
+Below is a non-exhaustive list of papers utilizing Accelerate.
 
 > Don't find your paper here? Make a PR to include it!