[tests / Quantization] Fix bnb test (#27145)
* fix bnb test

* link to GH issue
younesbelkada authored Oct 30, 2023
1 parent 5769949 commit 6b46677
Showing 1 changed file with 14 additions and 4 deletions.
18 changes: 14 additions & 4 deletions tests/quantization/bnb/test_mixed_int8.py
@@ -124,13 +124,13 @@ def tearDown(self):
gc.collect()
torch.cuda.empty_cache()

def test_get_keys_to_not_convert(self):
@unittest.skip("Un-skip once https://github.com/mosaicml/llm-foundry/issues/703 is resolved")
def test_get_keys_to_not_convert_trust_remote_code(self):
r"""
Test the `get_keys_to_not_convert` function.
Test the `get_keys_to_not_convert` function with `trust_remote_code` models.
"""
from accelerate import init_empty_weights

from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM
from transformers.integrations.bitsandbytes import get_keys_to_not_convert

model_id = "mosaicml/mpt-7b"
@@ -142,7 +142,17 @@ def test_get_keys_to_not_convert(self):
config, trust_remote_code=True, code_revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7"
)
self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"])
# without trust_remote_code

def test_get_keys_to_not_convert(self):
r"""
Test the `get_keys_to_not_convert` function.
"""
from accelerate import init_empty_weights

from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM
from transformers.integrations.bitsandbytes import get_keys_to_not_convert

model_id = "mosaicml/mpt-7b"
config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7")
with init_empty_weights():
model = MptForCausalLM(config)
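For reference, the retained test exercises the pattern below: `get_keys_to_not_convert` takes a model (here built on the meta device via `init_empty_weights`, so no weights are materialized) and returns the module names that should stay in full precision when the rest of the model is converted to int8. This is a minimal sketch assembled from the calls visible in the diff above; running it assumes `transformers`, `accelerate`, and Hub access, and the expected output is the one asserted in the test.

# Minimal sketch of the usage the retained test covers (assumes transformers,
# accelerate, and Hub access; revision pin copied from the diff above).
from accelerate import init_empty_weights
from transformers import AutoConfig, MptForCausalLM
from transformers.integrations.bitsandbytes import get_keys_to_not_convert

model_id = "mosaicml/mpt-7b"
config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7")

# Build the model on the meta device so no weights are downloaded or allocated;
# get_keys_to_not_convert only needs the module structure and tied-weight info.
with init_empty_weights():
    model = MptForCausalLM(config)

# Module names to keep in full precision when quantizing the rest to int8;
# for MPT this is the tied input embedding, as asserted in the test.
print(get_keys_to_not_convert(model))  # ["transformer.wte"]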
