support chatglm4v model, and let llava support img-txt / txt calib data when bs=1 (#222)
1 parent 7cd9b09 · commit 7b5e918
Showing 4 changed files with 94 additions and 12 deletions.
@@ -0,0 +1,70 @@
from loguru import logger
from PIL import Image
from transformers import AutoConfig, AutoModelForCausalLM

from llmc.utils.registry_factory import MODEL_REGISTRY

from .chatglm import ChatGLM


@MODEL_REGISTRY
class GLM4V(ChatGLM):
    def __init__(self, config, device_map=None, use_cache=False):
        super().__init__(config, device_map, use_cache)

    def build_model(self):
        self.vlm_model_config = AutoConfig.from_pretrained(
            self.model_path, trust_remote_code=True
        )
        if not self.use_cache:
            self.vlm_model_config.use_cache = False
        logger.info(f'self.vlm_model_config : {self.vlm_model_config}')
        self.vlm_model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            config=self.vlm_model_config,
            torch_dtype=self.torch_dtype,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
        )
        self.vision_model = self.vlm_model.transformer.vision
        self.projector = self.vlm_model.transformer.vision.linear_proj
        self.model = self.vlm_model
        self.model_config = self.vlm_model_config

    def batch_process(self, img_qas, calib_or_eval='eval'):
        assert calib_or_eval == 'calib' or calib_or_eval == 'eval'
        messages = []
        answers = []
        for idx in range(len(img_qas)):
            img_path = img_qas[idx]['img']
            if img_path is not None:
                image = Image.open(img_path).convert('RGB')
                message = [
                    {
                        'role': 'user',
                        'image': image,
                        'content': img_qas[idx]['question'],
                    }
                ]
            else:
                message = [{'role': 'user', 'content': img_qas[idx]['question']}]
            messages.append(message)
            answers.append(img_qas[idx]['answer'])
        inputs = self.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_tensors='pt',
            return_dict=True,
            padding=True,
        )
        if calib_or_eval == 'calib' and self.config['calib'].get('add_answer', False):
            raise Exception(
                'glm4v not support add_answer. '
                'Maybe you can modify tokenization_chatglm.py in model path.'
            )
        if calib_or_eval == 'calib':
            logger.info(f'Calib data is:\n{inputs}')

        inputs = inputs.to(next(self.vlm_model.parameters()).dtype)
        return inputs
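For illustration, a minimal sketch of the calibration data that batch_process expects, based on the field names used above ('img', 'question', 'answer'). The file path, question/answer strings, and the construction hinted at in the comments are assumptions for this sketch, not part of the commit.

# Hypothetical calibration samples for GLM4V.batch_process (illustrative values only).
img_qas = [
    {
        'img': '/path/to/example.jpg',      # image + text sample
        'question': 'Describe this image.',
        'answer': 'A cat sitting on a sofa.',
    },
    {
        'img': None,                        # text-only sample; handled by the else branch above
        'question': 'What does quantization do?',
        'answer': 'It reduces the numeric precision of model weights.',
    },
]

# Assuming a GLM4V instance has been constructed (e.g. via MODEL_REGISTRY) and
# build_model() has been called, calibration inputs could then be produced with:
# inputs = model.batch_process(img_qas, calib_or_eval='calib')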
@@ -28,3 +28,4 @@ more_itertools
 qtorch
 einops
 qwen-vl-utils
+tiktoken