From b475f290b5bcc001def489b170aaedac315f77d6 Mon Sep 17 00:00:00 2001
From: NanoCode012
Date: Wed, 4 Oct 2023 20:40:47 +0900
Subject: [PATCH] Feat: Allow usage of native Mistral FA when no sample_packing (#669)

* Allow usage of native Mistral FA when no sample_packing

* fix: do not apply custom patch when sample_pack off

* chore: lint

* chore: pin transformers to v4.35.0.dev0

* fix: split sample_packing to separate test
---
 requirements.txt                     |   2 +-
 src/axolotl/utils/models.py          |   8 +-
 tests/e2e/test_mistral.py            |  92 ---------------------
 tests/e2e/test_mistral_samplepack.py | 118 +++++++++++++++++++++++++++
 4 files changed, 125 insertions(+), 95 deletions(-)
 create mode 100644 tests/e2e/test_mistral_samplepack.py

diff --git a/requirements.txt b/requirements.txt
index cf4ce1d832..554f2ec69a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,7 +4,7 @@ torch==2.0.1
 auto-gptq
 packaging
 peft @ git+https://github.com/huggingface/peft.git
-transformers @ git+https://github.com/huggingface/transformers.git@5e11d72d4d0939138fbabfebe9a69d2061519547
+transformers @ git+https://github.com/huggingface/transformers.git@bd6205919aad4d3a2300a39a98a642f1cc3a5348
 bitsandbytes>=0.41.1
 accelerate @ git+https://github.com/huggingface/accelerate@80da9cfb09bb3cc9f1b385cb55d6b90d025a5fd9
 deepspeed
diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 3287c0ee93..6e520bd50e 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -149,7 +149,7 @@ def load_model(
         # Note: This might overwrite previous additional_special_tokens
         tokenizer.add_special_tokens({"additional_special_tokens": [MEM_TOKEN]})
 
-    if cfg.is_mistral_derived_model and cfg.flash_attention:
+    if cfg.is_mistral_derived_model and cfg.flash_attention and cfg.sample_packing:
         from axolotl.monkeypatch.mistral_attn_hijack_flash import (
             replace_mistral_attn_with_flash_attn,
         )
@@ -200,7 +200,11 @@ def load_model(
     )
     # sample packing uses custom FA2 patch
     if cfg.flash_attention and not cfg.sample_packing:
-        if cfg.is_llama_derived_model or cfg.is_falcon_derived_model:
+        if (
+            cfg.is_llama_derived_model
+            or cfg.is_falcon_derived_model
+            or cfg.is_mistral_derived_model
+        ):
             model_kwargs["use_flash_attention_2"] = True
     try:
         if cfg.is_llama_derived_model and not cfg.trust_remote_code and not cfg.gptq:
diff --git a/tests/e2e/test_mistral.py b/tests/e2e/test_mistral.py
index 4212d36408..f3098f0583 100644
--- a/tests/e2e/test_mistral.py
+++ b/tests/e2e/test_mistral.py
@@ -71,53 +71,6 @@ def test_lora(self):
         train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (Path(output_dir) / "adapter_model.bin").exists()
 
-    def test_lora_packing(self):
-        # pylint: disable=duplicate-code
-        output_dir = tempfile.mkdtemp()
-        cfg = DictDefault(
-            {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
-                "base_model_config": "openaccess-ai-collective/tiny-mistral",
-                "flash_attention": True,
-                "sample_packing": True,
-                "sequence_len": 1024,
-                "load_in_8bit": True,
-                "adapter": "lora",
-                "lora_r": 32,
-                "lora_alpha": 64,
-                "lora_dropout": 0.05,
-                "lora_target_linear": True,
-                "val_set_size": 0.1,
-                "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
-                },
-                "datasets": [
-                    {
-                        "path": "mhenrichsen/alpaca_2k_test",
-                        "type": "alpaca",
-                    },
-                ],
-                "num_epochs": 2,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 1,
-                "output_dir": output_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
-            }
-        )
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-        assert (Path(output_dir) / "adapter_model.bin").exists()
-
     def test_ft(self):
         # pylint: disable=duplicate-code
         output_dir = tempfile.mkdtemp()
@@ -161,48 +114,3 @@ def test_ft(self):
 
         train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
         assert (Path(output_dir) / "pytorch_model.bin").exists()
-
-    def test_ft_packing(self):
-        # pylint: disable=duplicate-code
-        output_dir = tempfile.mkdtemp()
-        cfg = DictDefault(
-            {
-                "base_model": "openaccess-ai-collective/tiny-mistral",
-                "base_model_config": "openaccess-ai-collective/tiny-mistral",
-                "flash_attention": True,
-                "sample_packing": True,
-                "sequence_len": 1024,
-                "val_set_size": 0.1,
-                "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
-                },
-                "datasets": [
-                    {
-                        "path": "mhenrichsen/alpaca_2k_test",
-                        "type": "alpaca",
-                    },
-                ],
-                "num_epochs": 2,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 1,
-                "output_dir": output_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
-            }
-        )
-        if is_torch_bf16_gpu_available():
-            cfg.bf16 = True
-        else:
-            cfg.fp16 = True
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-        assert (Path(output_dir) / "pytorch_model.bin").exists()
diff --git a/tests/e2e/test_mistral_samplepack.py b/tests/e2e/test_mistral_samplepack.py
new file mode 100644
index 0000000000..623d20b0c2
--- /dev/null
+++ b/tests/e2e/test_mistral_samplepack.py
@@ -0,0 +1,118 @@
+"""
+E2E tests for Mistral with sample packing
+"""
+
+import logging
+import os
+import tempfile
+import unittest
+from pathlib import Path
+
+from transformers.utils import is_torch_bf16_gpu_available
+
+from axolotl.cli import load_datasets
+from axolotl.common.cli import TrainerCliArgs
+from axolotl.train import train
+from axolotl.utils.config import normalize_config
+from axolotl.utils.dict import DictDefault
+
+LOG = logging.getLogger("axolotl.tests.e2e")
+os.environ["WANDB_DISABLED"] = "true"
+
+
+class TestMistral(unittest.TestCase):
+    """
+    Test case for Mistral models using sample packing
+    """
+
+    def test_lora_packing(self):
+        # pylint: disable=duplicate-code
+        output_dir = tempfile.mkdtemp()
+        cfg = DictDefault(
+            {
+                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model_config": "openaccess-ai-collective/tiny-mistral",
+                "flash_attention": True,
+                "sample_packing": True,
+                "sequence_len": 1024,
+                "load_in_8bit": True,
+                "adapter": "lora",
+                "lora_r": 32,
+                "lora_alpha": 64,
+                "lora_dropout": 0.05,
+                "lora_target_linear": True,
+                "val_set_size": 0.1,
+                "special_tokens": {
+                    "unk_token": "<unk>",
+                    "bos_token": "<s>",
+                    "eos_token": "</s>",
+                },
+                "datasets": [
+                    {
+                        "path": "mhenrichsen/alpaca_2k_test",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 2,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": 1,
+                "output_dir": output_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch",
+                "lr_scheduler": "cosine",
+                "max_steps": 20,
+                "save_steps": 10,
+                "eval_steps": 10,
+            }
+        )
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(output_dir) / "adapter_model.bin").exists()
+
+    def test_ft_packing(self):
+        # pylint: disable=duplicate-code
+        output_dir = tempfile.mkdtemp()
+        cfg = DictDefault(
+            {
+                "base_model": "openaccess-ai-collective/tiny-mistral",
+                "base_model_config": "openaccess-ai-collective/tiny-mistral",
+                "flash_attention": True,
+                "sample_packing": True,
+                "sequence_len": 1024,
+                "val_set_size": 0.1,
+                "special_tokens": {
+                    "unk_token": "<unk>",
+                    "bos_token": "<s>",
+                    "eos_token": "</s>",
+                },
+                "datasets": [
+                    {
+                        "path": "mhenrichsen/alpaca_2k_test",
+                        "type": "alpaca",
+                    },
+                ],
+                "num_epochs": 2,
+                "micro_batch_size": 2,
+                "gradient_accumulation_steps": 1,
+                "output_dir": output_dir,
+                "learning_rate": 0.00001,
+                "optimizer": "adamw_torch",
+                "lr_scheduler": "cosine",
+                "max_steps": 20,
+                "save_steps": 10,
+                "eval_steps": 10,
+            }
+        )
+        if is_torch_bf16_gpu_available():
+            cfg.bf16 = True
+        else:
+            cfg.fp16 = True
+        normalize_config(cfg)
+        cli_args = TrainerCliArgs()
+        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
+
+        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
+        assert (Path(output_dir) / "pytorch_model.bin").exists()
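
Reviewer's note: the sketch below is not part of the patch and is not axolotl source; it only summarizes the attention-path selection that the src/axolotl/utils/models.py hunks above implement. The Cfg class and resolve_mistral_attention_path helper are hypothetical names introduced for illustration, while the config flags and the use_flash_attention_2 kwarg are taken from the diff.

# Hedged sketch (assumed names, logic mirrored from the models.py hunks above).
from dataclasses import dataclass


@dataclass
class Cfg:  # hypothetical stand-in for axolotl's config object
    is_mistral_derived_model: bool = True
    flash_attention: bool = True
    sample_packing: bool = False


def resolve_mistral_attention_path(cfg: Cfg) -> str:
    """Return which flash-attention path a Mistral-derived model would take."""
    if cfg.is_mistral_derived_model and cfg.flash_attention and cfg.sample_packing:
        # Sample packing still requires the custom monkeypatch
        # (replace_mistral_attn_with_flash_attn) applied before loading.
        return "custom monkeypatch"
    if cfg.is_mistral_derived_model and cfg.flash_attention and not cfg.sample_packing:
        # New in this patch: without sample packing, the model is loaded with
        # transformers' native path via use_flash_attention_2=True.
        return "native use_flash_attention_2"
    return "default attention"


if __name__ == "__main__":
    assert resolve_mistral_attention_path(Cfg(sample_packing=True)) == "custom monkeypatch"
    assert resolve_mistral_attention_path(Cfg(sample_packing=False)) == "native use_flash_attention_2"
    assert resolve_mistral_attention_path(Cfg(flash_attention=False)) == "default attention"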