
Commit a5f0d4c

Use tiny-random-minicpmv-2_6 (#1000)
Co-authored-by: Andrei Kochin <[email protected]>
Co-authored-by: Ilya Lavrenov <[email protected]>
3 people authored Oct 21, 2024
1 parent 5dd3eae commit a5f0d4c
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions tests/python_tests/test_vlm_api.py
@@ -9,18 +9,18 @@
 from openvino_genai import VLMPipeline
 from common import get_greedy, get_image_by_link, get_beam_search, get_greedy, get_multinomial_all_parameters
 
-def get_ov_model(model_dir):
+def get_ov_model(cache):
+    model_dir = cache.mkdir("tiny-random-minicpmv-2_6")
     if (model_dir / "openvino_language_model.xml").exists():
         return model_dir
-    model_id = "openbmb/MiniCPM-V-2_6"
+    model_id = "katuni4ka/tiny-random-minicpmv-2_6"
     processor = transformers.AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
     processor.tokenizer.save_pretrained(model_dir)
     ov_tokenizer, ov_detokenizer = openvino_tokenizers.convert_tokenizer(processor.tokenizer, with_detokenizer=True)
     openvino.save_model(ov_tokenizer, model_dir / "openvino_tokenizer.xml")
     openvino.save_model(ov_detokenizer, model_dir / "openvino_detokenizer.xml")
     model = OVModelForVisualCausalLM.from_pretrained(model_id, compile=False, device="CPU", export=True, trust_remote_code=True)
-    model.config.save_pretrained(model_dir)
-    model.generation_config.save_pretrained(model_dir)
+    processor.save_pretrained(model_dir)
+    model.save_pretrained(model_dir)
     return model_dir
@@ -48,7 +48,7 @@ def test_vlm_pipeline(cache):
     def streamer(word: str) -> bool:
         return False
 
-    model_path = get_ov_model(cache.mkdir("MiniCPM-V-2_6"))
+    model_path = get_ov_model(cache)
 
     for links in image_links_for_testing:
         images = []
@@ -69,7 +69,7 @@ def streamer(word: str) -> bool:
 @pytest.mark.precommit
 @pytest.mark.nightly
 def test_vlm_get_tokenizer(cache):
-    model_path = get_ov_model(cache.mkdir("MiniCPM-V-2_6"))
+    model_path = get_ov_model(cache)
     pipe = VLMPipeline(str(model_path), "CPU")
     tokenizer = pipe.get_tokenizer()
     tokenizer.encode("")
@@ -83,7 +83,7 @@ def test_vlm_get_tokenizer(cache):
 ])
 @pytest.mark.skip("Enable after sampler are enabled")
 def test_sampling(config, cache):
-    model_path = get_ov_model(cache.mkdir("MiniCPM-V-2_6"))
+    model_path = get_ov_model(cache)
     image = get_image_by_link(image_links[0])
     pipe = VLMPipeline(str(model_path), "CPU")
     pipe.generate(prompts[0], image=image, generation_config=config)
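
The refactor makes get_ov_model own both the cache directory name and the model id, so the three call sites shrink to get_ov_model(cache) and cannot drift out of sync with the exported model. Below is a minimal sketch of how a further test in the same file would reuse the helper; it assumes pytest's built-in cache fixture (whose mkdir method returns a persistent directory under .pytest_cache), the module-level prompts and image_links lists, and the helpers already imported in test_vlm_api.py. The test name is hypothetical and not part of this commit.

import pytest
from openvino_genai import VLMPipeline
from common import get_greedy, get_image_by_link

@pytest.mark.precommit
def test_vlm_reuses_cached_export(cache):
    # The first call exports katuni4ka/tiny-random-minicpmv-2_6 into the
    # pytest cache; later calls return early because the marker file
    # openvino_language_model.xml already exists.
    model_path = get_ov_model(cache)
    assert (model_path / "openvino_language_model.xml").exists()

    pipe = VLMPipeline(str(model_path), "CPU")
    image = get_image_by_link(image_links[0])
    pipe.generate(prompts[0], image=image, generation_config=get_greedy())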

