[Bugfix] Fix LoRA with PP (vllm-project#7292)
andoorve authored Aug 8, 2024
1 parent 48abee9 commit 6dffa4b
Showing 1 changed file with 3 additions and 0 deletions.
vllm/lora/models.py: 3 additions, 0 deletions
@@ -25,6 +25,7 @@
 from vllm.lora.utils import (from_layer, from_layer_logits_processor,
                              parse_fine_tuned_lora_name, replace_submodule)
 from vllm.model_executor.models.interfaces import SupportsLoRA
+from vllm.model_executor.models.utils import PPMissingLayer
 from vllm.utils import is_pin_memory_available
 
 logger = init_logger(__name__)
@@ -432,6 +433,8 @@ def remove_all_adapters(self):
     def _create_lora_modules(self):
         for module_name, module in self.model.named_modules(
                 remove_duplicate=False):
+            if isinstance(module, PPMissingLayer):
+                continue
            if not self._match_target_modules(module_name):
                continue
            parts = module_name.split(".")[-1]
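
For context: under pipeline parallelism (PP), layers that live on other ranks are replaced in the model with PPMissingLayer placeholders, and without the new check _create_lora_modules would iterate over those placeholders as well even though they hold no weights to wrap. The following is a minimal, self-contained sketch of the skip pattern this commit adds; the PPMissingLayer and ShardedModel classes below are simplified stand-ins for illustration, not vLLM's actual implementations.

import torch.nn as nn


class PPMissingLayer(nn.Module):
    """Stand-in for a placeholder layer hosted on another PP rank."""

    def forward(self, *args, **kwargs):
        raise RuntimeError("This layer lives on a different PP rank.")


class ShardedModel(nn.Module):
    """Toy model where one layer belongs to another PP rank."""

    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList(
            [nn.Linear(8, 8), PPMissingLayer(), nn.Linear(8, 8)])


def create_lora_modules(model: nn.Module):
    """Walk the module tree, skipping placeholders that hold no weights."""
    for module_name, module in model.named_modules(remove_duplicate=False):
        if isinstance(module, PPMissingLayer):
            # Nothing to wrap: the real weights live on another PP rank,
            # so attaching a LoRA layer here would fail.
            continue
        if isinstance(module, nn.Linear):
            print(f"would wrap {module_name} with a LoRA layer")


create_lora_modules(ShardedModel())
# Prints:
#   would wrap layers.0 with a LoRA layer
#   would wrap layers.2 with a LoRA layer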
