From 5aac4bc2846ac7379c0a52dd894dd1ba5499ec22 Mon Sep 17 00:00:00 2001 From: "Gal Cohen (galco)" Date: Tue, 20 Aug 2024 19:41:48 +0300 Subject: [PATCH] fix: don't change quant storage dtype in case of fsdp (#1837) * fix: don't change quant storage dtype in case of fsdp * fix black --------- Co-authored-by: Gal Cohen --- src/axolotl/utils/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py index 5ac66260a7..3e8d50f5e7 100644 --- a/src/axolotl/utils/models.py +++ b/src/axolotl/utils/models.py @@ -544,7 +544,9 @@ def load_model( "bnb_4bit_quant_type": "nf4", "bnb_4bit_quant_storage": torch.bfloat16, } - if cfg.model_config_type in ["jamba", "qwen2_moe"] and not cfg.deepspeed: + if cfg.model_config_type in ["jamba", "qwen2_moe"] and not ( + cfg.deepspeed or cfg.fsdp + ): # for some reason, this causes the loss to be off by an order of magnitude # but deepspeed needs this still in bfloat16 bnb_config["bnb_4bit_quant_storage"] = torch.float32