Feat: Warns to add to modules_to_save when adding tokens or switching special_tokens (#787)

* Feat: Auto add to modules_to_save when adding tokens

* fix: swap to error instead of warning

* feat: add check when special_tokens differ and add test
NanoCode012 authored Dec 22, 2023
1 parent 62ba160 commit 1ffa386
Showing 4 changed files with 104 additions and 0 deletions.
14 changes: 14 additions & 0 deletions src/axolotl/utils/config.py
@@ -448,6 +448,20 @@ def validate_config(cfg):
    if cfg.neftune_noise_alpha is not None and cfg.neftune_noise_alpha <= 0.0:
        raise ValueError("neftune_noise_alpha must be > 0.0")

    if (
        cfg.adapter
        and cfg.tokens
        and (
            not cfg.lora_modules_to_save
            or not all(
                x in cfg.lora_modules_to_save for x in ["embed_tokens", "lm_head"]
            )
        )
    ):
        raise ValueError(
            "lora_modules_to_save not properly set yet adding new tokens. Please add `embed_tokens` and `lm_head` to `lora_modules_to_save`."
        )

    # TODO
    # MPT 7b
    # https://github.com/facebookresearch/bitsandbytes/issues/25
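For reference, a minimal sketch of what the new check accepts (assuming validate_config is importable from axolotl.utils.config; the values mirror the test added in tests/test_validation.py below):

from axolotl.utils.config import validate_config
from axolotl.utils.dict import DictDefault

# Adding new tokens while training a LoRA/QLoRA adapter requires both
# embedding-side modules in lora_modules_to_save; omitting either one
# makes validate_config raise ValueError.
cfg = DictDefault(
    {
        "adapter": "qlora",
        "load_in_4bit": True,
        "tokens": ["<|imstart|>"],
        "lora_modules_to_save": ["embed_tokens", "lm_head"],
    }
)

validate_config(cfg)  # passes; dropping "lm_head" or "embed_tokens" raises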
17 changes: 17 additions & 0 deletions src/axolotl/utils/models.py
@@ -136,6 +136,23 @@ def load_tokenizer(cfg):

    if cfg.special_tokens:
        for k, val in cfg.special_tokens.items():
            # if the new special token differs from the tokenizer's current one
            # and this is adapter training, make sure lora_modules_to_save is set
            if (
                (getattr(tokenizer, k) is None or getattr(tokenizer, k) != val)
                and cfg.adapter
                and (
                    not cfg.lora_modules_to_save
                    or not all(
                        x in cfg.lora_modules_to_save
                        for x in ["embed_tokens", "lm_head"]
                    )
                )
            ):
                raise ValueError(
                    "Please set lora_modules_to_save to ['embed_tokens', 'lm_head'] when using an adapter and changing the special tokens."
                )

            tokenizer.add_special_tokens(
                {k: AddedToken(val, rstrip=False, lstrip=False, normalized=False)}
            )
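Similarly, a config that changes a special token under an adapter should pass the tokenizer-side check once lora_modules_to_save is filled in. This is a sketch built from the test cases below; the passing-with-adapter variant is inferred from the guard above rather than taken verbatim from the tests:

from axolotl.utils.dict import DictDefault
from axolotl.utils.models import load_tokenizer

# Swapping the BOS token while using a LoRA adapter is allowed once both
# embed_tokens and lm_head are listed in lora_modules_to_save.
cfg = DictDefault(
    {
        "tokenizer_config": "huggyllama/llama-7b",
        "adapter": "lora",
        "special_tokens": {"bos_token": "[INST]"},
        "lora_modules_to_save": ["embed_tokens", "lm_head"],
    }
)

tokenizer = load_tokenizer(cfg)  # no ValueError; "[INST]" is added as bos_token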
36 changes: 36 additions & 0 deletions tests/test_tokenizers.py
@@ -3,6 +3,8 @@
"""
import unittest

import pytest

from axolotl.utils.dict import DictDefault
from axolotl.utils.models import load_tokenizer

@@ -31,6 +33,40 @@ def test_dont_use_fast(self):
        tokenizer = load_tokenizer(cfg)
        assert "Fast" not in tokenizer.__class__.__name__

    def test_special_tokens_modules_to_save(self):
        # setting special_tokens to new token
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        with pytest.raises(
            ValueError,
            match=r".*Please set lora_modules_to_save*",
        ):
            load_tokenizer(cfg)

        # setting special_tokens but not changing from default
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "adapter": "lora",
                "special_tokens": {"bos_token": "<s>"},
            }
        )
        load_tokenizer(cfg)

        # non-adapter setting special_tokens
        cfg = DictDefault(
            {
                "tokenizer_config": "huggyllama/llama-7b",
                "special_tokens": {"bos_token": "[INST]"},
            }
        )
        load_tokenizer(cfg)


if __name__ == "__main__":
    unittest.main()
37 changes: 37 additions & 0 deletions tests/test_validation.py
@@ -682,6 +682,43 @@ def test_warmup_step_no_conflict(self):

        validate_config(cfg)

    def test_add_tokens_adapter(self):
        cfg = DictDefault(
            {"adapter": "qlora", "load_in_4bit": True, "tokens": ["<|imstart|>"]}
        )

        with pytest.raises(
            ValueError,
            match=r".*lora_modules_to_save not properly set yet adding new tokens*",
        ):
            validate_config(cfg)

        cfg = DictDefault(
            {
                "adapter": "qlora",
                "load_in_4bit": True,
                "tokens": ["<|imstart|>"],
                "lora_modules_to_save": ["embed_tokens"],
            }
        )

        with pytest.raises(
            ValueError,
            match=r".*lora_modules_to_save not properly set yet adding new tokens*",
        ):
            validate_config(cfg)

        cfg = DictDefault(
            {
                "adapter": "qlora",
                "load_in_4bit": True,
                "tokens": ["<|imstart|>"],
                "lora_modules_to_save": ["embed_tokens", "lm_head"],
            }
        )

        validate_config(cfg)


class ValidationWandbTest(ValidationTest):
    """
