Skip to content

Commit

Permalink
Deprecations: bump fsdp_config removal to 0.14.0; remove deprecated HuggingFaceModelWithFSDP wrapper
Browse files Browse the repository at this point in the history
  • Loading branch information
dakinggg committed Sep 25, 2024
1 parent 7cb1c08 commit 6858db9
Show file tree
Hide file tree
Showing 4 changed files with 4 additions and 109 deletions.
2 changes: 1 addition & 1 deletion llmfoundry/command_utils/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def evaluate_model(
warnings.warn(
VersionedDeprecationWarning(
'The argument fsdp_config is deprecated. Please use parallelism_config instead.',
remove_version='0.13.0',
remove_version='0.14.0',
),
)
if fsdp_config and parallelism_config:
Expand Down
2 changes: 0 additions & 2 deletions llmfoundry/models/hf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
prepare_hf_model_for_fsdp,
)
from llmfoundry.models.hf.hf_t5 import ComposerHFT5
from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithFSDP

__all__ = [
'BaseHuggingFaceModel',
Expand All @@ -18,5 +17,4 @@
'prepare_hf_causal_lm_model_for_fsdp',
'prepare_hf_enc_dec_model_for_fsdp',
'prepare_hf_model_for_fsdp',
'HuggingFaceModelWithFSDP',
]
103 changes: 0 additions & 103 deletions llmfoundry/models/hf/model_wrapper.py

This file was deleted.

6 changes: 3 additions & 3 deletions tests/models/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@

from llmfoundry import ComposerHFCausalLM
from llmfoundry.layers_registry import norms
from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithFSDP
from llmfoundry.models.hf import BaseHuggingFaceModel
from llmfoundry.models.layers import build_alibi_bias
from llmfoundry.models.layers.attention import (
check_alibi_support,
Expand Down Expand Up @@ -2560,7 +2560,7 @@ def test_hf_init(
False,
)

model = HuggingFaceModelWithFSDP(model, tokenizer)
model = BaseHuggingFaceModel(model, tokenizer)

batch = gen_random_batch(batch_size, test_cfg)

Expand Down Expand Up @@ -2609,7 +2609,7 @@ def test_head_dim_8_flash_mqa_attn(batch_size: int = 2):

mpt = MPTForCausalLM(hf_config)

model = HuggingFaceModelWithFSDP(mpt, tokenizer, shift_labels=True)
model = BaseHuggingFaceModel(mpt, tokenizer, shift_labels=True)

model = model.to(test_cfg.device)
batch = gen_random_batch(batch_size, test_cfg)
Expand Down

0 comments on commit 6858db9

Please sign in to comment.