Skip to content

Commit

Permalink
Remove validate_quantized_dora
Browse files Browse the repository at this point in the history
DoRA with quantized layers is supported with PEFT 0.10.0
  • Loading branch information
xzuyn authored Apr 5, 2024
1 parent bf4cd67 commit f44f028
Showing 1 changed file with 0 additions and 11 deletions.
11 changes: 0 additions & 11 deletions src/axolotl/utils/config/models/input/v0_4_1/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,17 +242,6 @@ def validate_qlora(self):
raise ValueError("Require cfg.load_in_4bit to be True for qlora")
return self

@model_validator(mode="before")
@classmethod
def validate_quantized_dora(cls, data):
    """Reject configs that enable DoRA on top of quantized base weights.

    Runs on the raw input mapping (``mode="before"``), so fields are read
    with ``dict.get``. Raises ``ValueError`` when ``peft_use_dora`` is set
    together with either ``load_in_8bit`` or ``load_in_4bit``; otherwise
    returns the data unchanged.
    """
    wants_dora = data.get("peft_use_dora")
    is_quantized = data.get("load_in_8bit") or data.get("load_in_4bit")
    if wants_dora and is_quantized:
        raise ValueError(
            "`peft_use_dora` is not currently compatible with quantized weights."
        )
    return data


class ReLoRAConfig(BaseModel):
"""ReLoRA configuration subset"""
Expand Down

0 comments on commit f44f028

Please sign in to comment.