diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 730ef3ad9e..976b4241ab 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -17,14 +17,6 @@ jobs: strategy: matrix: include: - - name: "2.3.1_cu121" - base_image: mosaicml/pytorch:2.3.1_cu121-python3.11-ubuntu20.04 - dep_groups: "[all]" - te_commit: b5a7c9f - - name: "2.3.1_cu121_aws" - base_image: mosaicml/pytorch:2.3.1_cu121-python3.11-ubuntu20.04-aws - dep_groups: "[all]" - te_commit: b5a7c9f - name: "2.4.0_cu124" base_image: mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu20.04 dep_groups: "[all]" diff --git a/.github/workflows/pr-cpu.yaml b/.github/workflows/pr-cpu.yaml index 13019a83d4..0620164bd0 100644 --- a/.github/workflows/pr-cpu.yaml +++ b/.github/workflows/pr-cpu.yaml @@ -20,9 +20,9 @@ jobs: strategy: matrix: include: - - name: "cpu-2.3.1" + - name: "cpu-2.4.0" pip_deps: "[all-cpu]" - container: mosaicml/pytorch:2.3.1_cpu-python3.11-ubuntu20.04 + container: mosaicml/pytorch:2.4.0_cpu-python3.11-ubuntu20.04 markers: "not gpu" pytest_command: "coverage run -m pytest" steps: diff --git a/.github/workflows/pr-gpu.yaml b/.github/workflows/pr-gpu.yaml index 947d78ae95..d78b9fc067 100644 --- a/.github/workflows/pr-gpu.yaml +++ b/.github/workflows/pr-gpu.yaml @@ -22,8 +22,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.3.1-1" - container: mosaicml/llm-foundry:2.3.1_cu121-latest + - name: "gpu-2.4.0-1" + container: mosaicml/llm-foundry:2.4.0_cu124-latest markers: "gpu" pip_deps: "[all]" pytest_command: "coverage run -m pytest" @@ -51,8 +51,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.3.1-2" - container: mosaicml/llm-foundry:2.3.1_cu121-latest + - name: "gpu-2.4.0-2" + container: mosaicml/llm-foundry:2.4.0_cu124-latest markers: "gpu" pip_deps: "[all]" pytest_command: "coverage run -m pytest" @@ -80,8 +80,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.3.1-4" - container: mosaicml/llm-foundry:2.3.1_cu121-latest + - name: "gpu-2.4.0-4" + container: mosaicml/llm-foundry:2.4.0_cu124-latest markers: "gpu" pip_deps: "[all]" pytest_command: "coverage run -m pytest" diff --git a/README.md b/README.md index e8a6708c5a..0fabb98653 100644 --- a/README.md +++ b/README.md @@ -107,14 +107,14 @@ Something missing? Contribute with a PR! # Hardware and Software Requirements -This codebase has been tested with PyTorch 2.2 with NVIDIA A100s and H100s. +This codebase has been tested with PyTorch 2.4 with NVIDIA A100s and H100s. This codebase may also work on systems with other devices, such as consumer NVIDIA cards and AMD cards, but we are not actively testing these systems. If you have success/failure using LLM Foundry on other systems, please let us know in a Github issue and we will update the support matrix! | Device | Torch Version | Cuda Version | Status | | -------------- | ------------- | ------------ | ---------------------------- | -| A100-40GB/80GB | 2.3.1 | 12.1 | :white_check_mark: Supported | -| H100-80GB | 2.3.1 | 12.1 | :white_check_mark: Supported | +| A100-40GB/80GB | 2.4.0 | 12.4 | :white_check_mark: Supported | +| H100-80GB | 2.4.0 | 12.4 | :white_check_mark: Supported | ## MosaicML Docker Images We highly recommend using our prebuilt Docker images. You can find them here: https://hub.docker.com/orgs/mosaicml/repositories. @@ -122,15 +122,15 @@ We highly recommend using our prebuilt Docker images. 
You can find them here: ht The `mosaicml/pytorch` images are pinned to specific PyTorch and CUDA versions, and are stable and rarely updated. The `mosaicml/llm-foundry` images are built with new tags upon every commit to the `main` branch. -You can select a specific commit hash such as `mosaicml/llm-foundry:2.3.1_cu121-36ab1ba` or take the latest one using `mosaicml/llm-foundry:2.3.1_cu121-latest`. +You can select a specific commit hash such as `mosaicml/llm-foundry:2.4.0_cu124-36ab1ba` or take the latest one using `mosaicml/llm-foundry:2.4.0_cu124-latest`. **Please Note:** The `mosaicml/llm-foundry` images do not come with the `llm-foundry` package preinstalled, just the dependencies. You will still need to `pip install llm-foundry` either from PyPi or from source. | Docker Image | Torch Version | Cuda Version | LLM Foundry dependencies installed? | | ------------------------------------------------------ | ------------- | ----------------- | ----------------------------------- | -| `mosaicml/pytorch:2.3.1_cu121-python3.11-ubuntu20.04` | 2.3.1 | 12.1 (Infiniband) | No | -| `mosaicml/llm-foundry:2.3.1_cu121-latest` | 2.3.1 | 12.1 (Infiniband) | Yes | -| `mosaicml/llm-foundry:2.3.1_cu121_aws-latest` | 2.3.1 | 12.1 (EFA) | Yes | +| `mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu20.04` | 2.4.0 | 12.4 (Infiniband) | No | +| `mosaicml/llm-foundry:2.4.0_cu124-latest` | 2.4.0 | 12.4 (Infiniband) | Yes | +| `mosaicml/llm-foundry:2.4.0_cu124_aws-latest` | 2.4.0 | 12.4 (EFA) | Yes | # Installation diff --git a/llmfoundry/models/layers/ffn.py b/llmfoundry/models/layers/ffn.py index 38ca253f80..5b5a6b1449 100644 --- a/llmfoundry/models/layers/ffn.py +++ b/llmfoundry/models/layers/ffn.py @@ -397,7 +397,6 @@ def attach_ffn_mb_args( """ ffn.experts.mlp.hidden_size = args.ffn_hidden_size ffn.experts.mlp.expert_parallel_group = expert_parallel_group - ffn.experts.mlp.weight_parallel_group = args.weight_parallel_group def get_fsdp_submesh_2d(device_mesh: DeviceMesh): diff --git a/llmfoundry/models/utils/mpt_param_count.py b/llmfoundry/models/utils/mpt_param_count.py index d7b61354c7..bd8f279ad5 100644 --- a/llmfoundry/models/utils/mpt_param_count.py +++ b/llmfoundry/models/utils/mpt_param_count.py @@ -62,13 +62,6 @@ def megablocks_n_total_params(mpt_model) -> int: # type: ignore moe_world_size = mpt_model.config.ffn_config.get('moe_world_size') - if mpt_model.config.ffn_config.get('moe_weight_parallelism', False): - # If MegaBlocks shards experts, the total sharding world size - # must be increased by the degree to which MegaBlocks shards the - # experts. 
- mb_args = mpt_model.model.transformer.mb_args - moe_world_size *= mb_args.weight_parallel_group.size() - n_total_params = 0 for module in mpt_model.modules(): if isinstance( @@ -109,9 +102,6 @@ def megablocks_n_active_params(mpt_model) -> int: # type: ignore moe_world_size = mpt_model.config.ffn_config.get('moe_world_size') local_experts = moe_num_experts / moe_world_size # if local_experts is < 1, then the expert is sharded - if mpt_model.config.ffn_config.get('moe_weight_parallelism', False): - mb_args = mpt_model.model.transformer.mb_args - local_experts /= mb_args.weight_parallel_group.size() moe_top_k = mpt_model.config.ffn_config.get('moe_top_k', 1) n_active_params = 0 diff --git a/llmfoundry/models/utils/param_init_fns.py b/llmfoundry/models/utils/param_init_fns.py index 9941c2d049..180e7b894c 100644 --- a/llmfoundry/models/utils/param_init_fns.py +++ b/llmfoundry/models/utils/param_init_fns.py @@ -484,19 +484,12 @@ def _megablocks_sparse_mlp_generic_param_init_fn_( div_is_residual (float): The value by which parameter initialization is divided if init_div_is_residual flag is enabled. """ - expert_process_group_size, rank, weight_parallel_group_size, weight_parallel_group_rank = 1, 0, 1, 0 + expert_process_group_size, rank = 1, 0 if module.expert_parallel_group is not None: expert_process_group_size = int( module.expert_parallel_group.size(), ) # type: ignore rank = int(module.expert_parallel_group.rank()) # type: ignore - if module.weight_parallel_group is not None: - weight_parallel_group_size = int( - module.weight_parallel_group.size(), - ) # type: ignore - weight_parallel_group_rank = int( - module.weight_parallel_group.rank(), - ) # type: ignore hidden_size = int(module.hidden_size) # type: ignore @@ -505,8 +498,7 @@ def _megablocks_sparse_mlp_generic_param_init_fn_( if isinstance(w1, DTensor): w1 = w1._local_tensor w1_size = list(w1.shape) # type: ignore - w1_size[ - 0] = w1_size[0] * expert_process_group_size * weight_parallel_group_size + w1_size[0] = w1_size[0] * expert_process_group_size n_exp = w1_size[0] // hidden_size _fused = (0, [(n + 1) * hidden_size for n in range(n_exp - 1)]) @@ -514,26 +506,21 @@ def _megablocks_sparse_mlp_generic_param_init_fn_( _w1 = w1.new_empty(w1_size) # type: ignore fused_param_init_helper(_w1, init_fn_, _fused) _w1_local = _w1.chunk(expert_process_group_size, dim=0)[rank] - _w1_local_slice = _w1_local.chunk(weight_parallel_group_size, - dim=0)[weight_parallel_group_rank] with torch.no_grad(): - w1.copy_(_w1_local_slice) # type: ignore + w1.copy_(_w1_local) # type: ignore # Initialize w2 w2 = module.w2 if isinstance(w2, DTensor): w2 = w2._local_tensor w2_size = list(w2.shape) # type: ignore - w2_size[ - 0] = w2_size[0] * expert_process_group_size * weight_parallel_group_size + w2_size[0] = w2_size[0] * expert_process_group_size _w2 = w2.new_empty(w2_size) # type: ignore # MegaBlocks operates on w2 as x @ w2, so needs flipped fan mode fused_param_init_helper(_w2, _flip_fan_mode(init_fn_), _fused) _w2_local = _w2.chunk(expert_process_group_size, dim=0)[rank] - _w2_local_slice = _w2_local.chunk(weight_parallel_group_size, - dim=0)[weight_parallel_group_rank] with torch.no_grad(): - w2.copy_(_w2_local_slice) # type: ignore + w2.copy_(_w2_local) # type: ignore if init_div_is_residual is not False: with torch.no_grad(): w2.div_(div_is_residual) # type: ignore @@ -567,19 +554,12 @@ def _megablocks_sparse_glu_generic_param_init_fn_( ) # Init ported from _megablocks_sparse_mlp_generic_param_init_fn_ for v1 - expert_process_group_size, rank, 
weight_parallel_group_size, weight_parallel_group_rank = 1, 0, 1, 0 + expert_process_group_size, rank = 1, 0 if module.expert_parallel_group is not None: expert_process_group_size = int( module.expert_parallel_group.size(), ) # type: ignore rank = int(module.expert_parallel_group.rank()) # type: ignore - if module.weight_parallel_group is not None: - weight_parallel_group_size = int( - module.weight_parallel_group.size(), - ) # type: ignore - weight_parallel_group_rank = int( - module.weight_parallel_group.rank(), - ) # type: ignore hidden_size = int(module.hidden_size) # type: ignore @@ -588,8 +568,7 @@ def _megablocks_sparse_glu_generic_param_init_fn_( if isinstance(v1, DTensor): v1 = v1._local_tensor v1_size = list(v1.shape) # type: ignore - v1_size[ - 0] = v1_size[0] * expert_process_group_size * weight_parallel_group_size + v1_size[0] = v1_size[0] * expert_process_group_size n_exp = v1_size[0] // hidden_size _fused = (0, [(n + 1) * hidden_size for n in range(n_exp - 1)]) @@ -597,10 +576,8 @@ def _megablocks_sparse_glu_generic_param_init_fn_( _v1 = v1.new_empty(v1_size) # type: ignore fused_param_init_helper(_v1, init_fn_, _fused) _v1_local = _v1.chunk(expert_process_group_size, dim=0)[rank] - _v1_local_slice = _v1_local.chunk(weight_parallel_group_size, - dim=0)[weight_parallel_group_rank] with torch.no_grad(): - v1.copy_(_v1_local_slice) # type: ignore + v1.copy_(_v1_local) # type: ignore def _megablocks_mlp_generic_param_init_fn_( @@ -623,41 +600,32 @@ def _megablocks_mlp_generic_param_init_fn_( div_is_residual (float): The value by which parameter initialization is divided if init_div_is_residual flag is enabled. """ - expert_process_group_size, rank, weight_parallel_group_size, w_rank = 1, 0, 1, 0 + expert_process_group_size, rank = 1, 0 if module.expert_parallel_group is not None: expert_process_group_size = int( module.expert_parallel_group.size(), ) # type: ignore rank = int(module.expert_parallel_group.rank()) # type: ignore - if module.weight_parallel_group is not None: - weight_parallel_group_size = int( - module.weight_parallel_group.size(), - ) # type: ignore - w_rank = int(module.weight_parallel_group.rank()) # type: ignore _init_fn_ = _flip_fan_mode(init_fn_) # Initialize w1 w1_size = list(module.w1.shape) # type: ignore w1_size[0] = w1_size[0] * expert_process_group_size - w1_size[1] = w1_size[1] * weight_parallel_group_size _w1 = module.w1.new_empty(w1_size) # type: ignore stacked_param_init_helper(_w1, _init_fn_, module._stack_dim) # type: ignore _w1_local = _w1.chunk(expert_process_group_size, dim=0)[rank] - _w1_local_slice = _w1_local.chunk(weight_parallel_group_size, dim=1)[w_rank] with torch.no_grad(): - module.w1.copy_(_w1_local_slice) # type: ignore + module.w1.copy_(_w1_local) # type: ignore # Initialize w2 w2_size = list(module.w2.shape) # type: ignore w2_size[0] = w2_size[0] * expert_process_group_size - w2_size[1] = w2_size[1] * weight_parallel_group_size _w2 = module.w2.new_empty(w2_size) # type: ignore stacked_param_init_helper(_w2, _init_fn_, module._stack_dim) # type: ignore _w2_local = _w2.chunk(expert_process_group_size, dim=0)[rank] - _w2_local_slice = _w2_local.chunk(weight_parallel_group_size, dim=1)[w_rank] with torch.no_grad(): - module.w2.copy_(_w2_local_slice) # type: ignore + module.w2.copy_(_w2_local) # type: ignore if init_div_is_residual is not False: with torch.no_grad(): module.w2.div_(div_is_residual) # type: ignore diff --git a/mcli/mcli-1b-eval.yaml b/mcli/mcli-1b-eval.yaml index 4fcf8b3cb9..2f48fa5ce1 100644 --- 
a/mcli/mcli-1b-eval.yaml +++ b/mcli/mcli-1b-eval.yaml @@ -9,7 +9,7 @@ integrations: command: | cd llm-foundry/scripts/ composer eval/eval.py /mnt/config/parameters.yaml -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: mpt-1b-eval compute: diff --git a/mcli/mcli-1b-max-seq-len-8k.yaml b/mcli/mcli-1b-max-seq-len-8k.yaml index fb96c576e0..bb83e2061d 100644 --- a/mcli/mcli-1b-max-seq-len-8k.yaml +++ b/mcli/mcli-1b-max-seq-len-8k.yaml @@ -17,7 +17,7 @@ command: | --out_root ./my-copy-c4 --splits train_small val_small \ --concat_tokens 8192 --tokenizer EleutherAI/gpt-neox-20b --eos_text '<|endoftext|>' composer train/train.py /mnt/config/parameters.yaml -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: mpt-1b-ctx-8k-gpus-8 compute: diff --git a/mcli/mcli-1b.yaml b/mcli/mcli-1b.yaml index 26255977f4..f371051ca0 100644 --- a/mcli/mcli-1b.yaml +++ b/mcli/mcli-1b.yaml @@ -21,7 +21,7 @@ command: | eval_loader.dataset.split=val_small \ max_duration=100ba \ eval_interval=0 -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: mpt-1b-gpus-8 compute: diff --git a/mcli/mcli-benchmark-mpt.yaml b/mcli/mcli-benchmark-mpt.yaml index 3995598fd3..b15f3b7eea 100644 --- a/mcli/mcli-benchmark-mpt.yaml +++ b/mcli/mcli-benchmark-mpt.yaml @@ -6,7 +6,7 @@ compute: # cluster: TODO # Name of the cluster to use for this run # gpu_type: a100_80gb # Type of GPU to use. We use a100_80gb in our experiments -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest integrations: - integration_type: git_repo diff --git a/mcli/mcli-convert-composer-to-hf.yaml b/mcli/mcli-convert-composer-to-hf.yaml index 7b715f6792..9c5d960a95 100644 --- a/mcli/mcli-convert-composer-to-hf.yaml +++ b/mcli/mcli-convert-composer-to-hf.yaml @@ -13,7 +13,7 @@ command: | --hf_output_path s3://bucket/folder/hf/ \ --output_precision bf16 \ -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: convert-composer-hf compute: diff --git a/mcli/mcli-hf-eval.yaml b/mcli/mcli-hf-eval.yaml index 27f5938d67..5f6b6c564f 100644 --- a/mcli/mcli-hf-eval.yaml +++ b/mcli/mcli-hf-eval.yaml @@ -16,7 +16,7 @@ gpu_num: 8 # gpu_type: # cluster: # replace with your cluster here! 
-image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest # The below is injected as a YAML file: /mnt/config/parameters.yaml parameters: diff --git a/mcli/mcli-hf-generate.yaml b/mcli/mcli-hf-generate.yaml index cb3040e4ee..dfb9763462 100644 --- a/mcli/mcli-hf-generate.yaml +++ b/mcli/mcli-hf-generate.yaml @@ -35,7 +35,7 @@ command: | "Here's a quick recipe for baking chocolate chip cookies: Start by" \ "The best 5 cities to visit in Europe are" -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: hf-generate compute: diff --git a/mcli/mcli-llama2-finetune.yaml b/mcli/mcli-llama2-finetune.yaml index 7134e6204c..32e8cddbda 100644 --- a/mcli/mcli-llama2-finetune.yaml +++ b/mcli/mcli-llama2-finetune.yaml @@ -9,7 +9,7 @@ integrations: command: | cd llm-foundry/scripts composer train/train.py /mnt/config/parameters.yaml -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest name: llama2-finetune compute: diff --git a/mcli/mcli-openai-eval.yaml b/mcli/mcli-openai-eval.yaml index cd04d89f4e..4b69827d69 100644 --- a/mcli/mcli-openai-eval.yaml +++ b/mcli/mcli-openai-eval.yaml @@ -16,7 +16,7 @@ gpu_num: # gpu_type: # cluster: # replace with your cluster here! -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest # The below is injected as a YAML file: /mnt/config/parameters.yaml parameters: diff --git a/mcli/mcli-pretokenize-oci-upload.yaml b/mcli/mcli-pretokenize-oci-upload.yaml index 5425ce9897..fafb251aee 100644 --- a/mcli/mcli-pretokenize-oci-upload.yaml +++ b/mcli/mcli-pretokenize-oci-upload.yaml @@ -1,5 +1,5 @@ name: c4-2k-pre-tokenized -image: mosaicml/llm-foundry:2.3.1_cu121-latest +image: mosaicml/llm-foundry:2.4.0_cu124-latest compute: gpus: 8 # Number of GPUs to use diff --git a/scripts/train/yamls/pretrain/testing-moe.yaml b/scripts/train/yamls/pretrain/testing-moe.yaml index e61e3e451e..ee9483ffd0 100644 --- a/scripts/train/yamls/pretrain/testing-moe.yaml +++ b/scripts/train/yamls/pretrain/testing-moe.yaml @@ -23,7 +23,6 @@ model: moe_num_experts: 4 moe_top_k: 2 moe_world_size: 1 - moe_weight_parallelism: false uniform_expert_assignment: false n_heads: 2 n_layers: 2 diff --git a/setup.py b/setup.py index 666fc945b3..d46444aa70 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ 'accelerate>=0.25,<0.34', # for HF inference `device_map` 'transformers>=4.43.2,<4.44', 'mosaicml-streaming>=0.8.1,<0.9', - 'torch>=2.3.0,<2.4.1', + 'torch>=2.4.0,<2.4.1', 'datasets>=2.19,<2.20', 'fsspec==2023.6.0', # newer version results in a bug in datasets that duplicates data 'sentencepiece==0.2.0', @@ -118,8 +118,8 @@ ] extra_deps['megablocks'] = [ - 'megablocks==0.5.1', - 'grouped-gemm==0.1.4', + 'megablocks==0.6.1', + 'grouped-gemm==0.1.6', ] extra_deps['databricks-serverless'] = { diff --git a/tests/a_scripts/inference/test_convert_composer_to_hf.py b/tests/a_scripts/inference/test_convert_composer_to_hf.py index 260988dc31..b863e1d0a8 100644 --- a/tests/a_scripts/inference/test_convert_composer_to_hf.py +++ b/tests/a_scripts/inference/test_convert_composer_to_hf.py @@ -519,7 +519,6 @@ def _get_model_and_tokenizer( 'moe_num_experts': 4, 'moe_top_k': 2, 'moe_world_size': 1, - 'moe_weight_parallelism': False, 'uniform_expert_assignment': False, }, 'max_seq_len': max_seq_len, @@ -1251,8 +1250,6 @@ def test_mptmoe_huggingface_conversion_callback( 2, 'moe_world_size': 2, - 'moe_weight_parallelism': - False, 'uniform_expert_assignment': 
True, 'mlp_impl': diff --git a/tests/models/test_mpt_gen.py b/tests/models/test_mpt_gen.py index 134ca35ec0..820da5e71f 100644 --- a/tests/models/test_mpt_gen.py +++ b/tests/models/test_mpt_gen.py @@ -190,7 +190,6 @@ def test_gen_mpt_moe( 'moe_num_experts': 4, 'moe_top_k': 2, 'moe_world_size': 1, - 'moe_weight_parallelism': False, 'uniform_expert_assignment': False, }, )
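
The most substantive change in the patch above is that MegaBlocks expert weights are now initialized and counted against the expert-parallel group only; the extra chunk over a weight-parallel group is gone. Below is a minimal, self-contained sketch of the flow that remains, mirroring `_megablocks_sparse_mlp_generic_param_init_fn_`. The function name `init_local_expert_weight`, the tensor sizes, and the stand-in `kaiming_normal_` initializer are illustrative assumptions, not part of llm-foundry.

```python
# Sketch of expert-parallel-only weight init (assumed names and sizes).
import torch


def init_local_expert_weight(
    local_w1: torch.Tensor,  # local shard: (local_experts * hidden_size, ffn_hidden_size)
    expert_parallel_world_size: int = 1,
    expert_parallel_rank: int = 0,
) -> None:
    """Initialize the full expert weight, then keep only this rank's shard."""
    # Reconstruct the global (unsharded) shape by scaling dim 0 by the
    # expert-parallel world size; there is no weight-parallel factor anymore.
    w1_size = list(local_w1.shape)
    w1_size[0] = w1_size[0] * expert_parallel_world_size

    # Initialize the full tensor so every rank derives the same global init.
    _w1 = local_w1.new_empty(w1_size)
    torch.nn.init.kaiming_normal_(_w1)  # stand-in for the configured init_fn_

    # Keep only the chunk owned by this expert-parallel rank; the second
    # chunk over a weight-parallel group has been removed.
    _w1_local = _w1.chunk(expert_parallel_world_size, dim=0)[expert_parallel_rank]
    with torch.no_grad():
        local_w1.copy_(_w1_local)


if __name__ == "__main__":
    hidden, ffn_hidden, local_experts, ep_world_size = 8, 16, 2, 2
    w1 = torch.empty(local_experts * hidden, ffn_hidden)
    init_local_expert_weight(w1, ep_world_size, expert_parallel_rank=1)
    print(w1.shape)  # torch.Size([16, 16])
```

Initializing the full, unsharded tensor first and then copying this rank's chunk keeps the global initialization identical regardless of how the experts are split across ranks, which is why the remaining code still materializes `_w1` at its global size before chunking.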