monkeypatch for zero3 w 8bit lora
winglian committed Nov 19, 2024
1 parent 543cec0 commit 97b4529
Showing 2 changed files with 93 additions and 0 deletions.
src/axolotl/monkeypatch/modeling_zero3_int8_lora.py (84 additions, 0 deletions)
@@ -0,0 +1,84 @@
"""
fix for zero3 8-bit lora
see https://github.com/huggingface/transformers/pull/32943/files
"""
import inspect

import transformers
import transformers.modeling_utils
from accelerate.logging import get_logger

LOG = get_logger("axolotl.monkeypatch.modeling_zero3_int8_lora")

ORIGINAL_LOAD_CODE = """
            if is_fsdp_enabled() or is_deepspeed_zero3_enabled():
                module, tensor_name = get_module_from_name(model, param_name)
                value = getattr(module, tensor_name)
                param_to = "cpu"
                if is_fsdp_enabled() and not is_local_dist_rank_0():
                    param_to = "meta"
                value = type(value)(value.data.to(param_to), **value.__dict__)
                setattr(module, tensor_name, value)
"""

PATCHED_LOAD_CODE = """
            if is_fsdp_enabled() or is_deepspeed_zero3_enabled():
                module, tensor_name = get_module_from_name(model, param_name)
                value = getattr(module, tensor_name)
                param_to = "cpu"
                if is_fsdp_enabled() and not is_local_dist_rank_0():
                    param_to = "meta"
                val_kwargs = {}
                if hasattr(module, "weight") and module.weight.__class__.__name__ == "Int8Params":
                    val_kwargs["requires_grad"] = False
                value = type(value)(value.data.to(param_to), **val_kwargs, **value.__dict__)
                setattr(module, tensor_name, value)
"""


def get_modeling_state_dict_code() -> str:
    load_code = inspect.getsource(
        transformers.modeling_utils._load_state_dict_into_meta_model  # pylint: disable=protected-access
    )
    return load_code


def check_modeling_state_dict_code_is_patchable() -> bool:
    load_code = get_modeling_state_dict_code()
    return ORIGINAL_LOAD_CODE in load_code


def patch_modeling_state_dict_code():
"""
monkeypatch for fixing the meta model loader for zero3 8-bit lora
"""

load_code = get_modeling_state_dict_code()
transformers.modeling_utils._original_load_state_dict_into_meta_model = ( # pylint: disable=protected-access
load_code
)
assert (
ORIGINAL_LOAD_CODE in load_code
), "Original _load_state_dict_into_meta_model code not found"

load_code = load_code.replace(ORIGINAL_LOAD_CODE, PATCHED_LOAD_CODE)
load_code = load_code.replace(
"def _load_state_dict_into_meta_model(",
"def _fixed_load_state_dict_into_meta_model(",
1,
)

items_to_import = []
for item in dir(transformers.modeling_utils):
if item in load_code:
items_to_import.append(item)

exec( # pylint: disable=exec-used # nosec B102
"from transformers.modeling_utils import ("
+ ", ".join(x for x in items_to_import)
+ ")",
globals(),
)
exec(load_code, globals()) # pylint: disable=exec-used # nosec B102
LOG.info("patching _load_state_dict_into_meta_model", main_process_only=True)
transformers.modeling_utils._load_state_dict_into_meta_model = _fixed_load_state_dict_into_meta_model # pylint: disable=protected-access,undefined-variable # noqa: F821
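For context, the reason the patched block passes requires_grad=False when re-wrapping a bitsandbytes Int8Params value is that PyTorch refuses to build parameters that require gradients from integer tensors. A minimal illustration of that constraint (not part of the commit; it only assumes torch is installed and uses nn.Parameter as a stand-in for the Int8Params construction path):

import torch

# int8 data cannot require gradients, so re-wrapping a quantized value with the
# default requires_grad=True would raise during state-dict loading
int8_weight = torch.zeros(4, 4, dtype=torch.int8)
try:
    torch.nn.Parameter(int8_weight)  # defaults to requires_grad=True
except RuntimeError as err:
    print(err)  # "Only Tensors of floating point and complex dtype can require gradients"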
src/axolotl/utils/trainer.py (9 additions, 0 deletions)
@@ -437,6 +437,15 @@ def setup_deepspeed_env(cfg, stage=None):
        os.environ["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(stage)
        if stage == 3:
            os.environ["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = "true"
            if cfg.adapter and cfg.load_in_8bit:
                from axolotl.monkeypatch.modeling_zero3_int8_lora import (
                    patch_modeling_state_dict_code,
                )

                try:
                    patch_modeling_state_dict_code()
                except AssertionError:
                    LOG.warning("Failed to patch the meta model loading code")
    # If we don't assign this, it doesn't actually get set in the accelerate weakref
    _ = HfTrainerDeepSpeedConfig(cfg.deepspeed)
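For reference, a minimal sketch (not part of the commit) of how the helpers above could be exercised outside the trainer. check_modeling_state_dict_code_is_patchable guards against the installed transformers source having drifted from ORIGINAL_LOAD_CODE, which would otherwise surface as the AssertionError handled in the trainer code:

from axolotl.monkeypatch.modeling_zero3_int8_lora import (
    check_modeling_state_dict_code_is_patchable,
    patch_modeling_state_dict_code,
)

# apply the patch only when the upstream source still matches the expected snippet
if check_modeling_state_dict_code_is_patchable():
    patch_modeling_state_dict_code()
else:
    print("transformers source has diverged; skipping the zero3 int8 LoRA patch")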

