Used optimum-cli for miniCPM conversion.
popovaan committed Oct 11, 2024
1 parent 0a72aee commit 3afe73a
Showing 5 changed files with 15 additions and 1,027 deletions.
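
The conversion referred to in the commit title relies on Hugging Face Optimum's OpenVINO exporter rather than a repository-specific script. A typical export invocation is sketched below as a hint only; the model id and output directory are illustrative assumptions, not taken from this commit:

    optimum-cli export openvino --model openbmb/MiniCPM-V-2_6 --trust-remote-code MiniCPM-V-2_6
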
4 changes: 2 additions & 2 deletions samples/python/visual_language_chat/visual_language_chat.py
@@ -55,15 +55,15 @@ def main():
 
     pipe.start_chat()
     prompt = input('question:\n')
-    pipe(prompt, image=image, generation_config=config, streamer=streamer)
+    pipe.generate(prompt, image=image, generation_config=config, streamer=streamer)
     print('\n----------')
 
     while True:
         try:
             prompt = input('question:\n')
         except EOFError:
             break
-        pipe(prompt, generation_config=config, streamer=streamer)
+        pipe.generate(prompt, generation_config=config, streamer=streamer)
         print('\n----------')
     pipe.finish_chat()
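
For readers trying the updated call pattern outside the chat loop, a minimal end-to-end sketch follows. The model directory, image file, and tensor layout are illustrative assumptions based on the sample's own helpers, not part of this diff, and exact constructor signatures may differ between openvino_genai versions:

    import numpy as np
    from PIL import Image
    from openvino import Tensor
    import openvino_genai as ov_genai

    def streamer(subword: str) -> bool:
        # Print each generated chunk as it arrives; False means "keep generating".
        print(subword, end='', flush=True)
        return False

    # Directory assumed to hold a model exported with optimum-cli (see the command above).
    pipe = ov_genai.VLMPipeline('./MiniCPM-V-2_6', 'CPU')

    config = ov_genai.GenerationConfig()
    config.max_new_tokens = 100

    # Wrap the image as an openvino.Tensor with uint8 RGB data;
    # the sample's read_image helper handles the exact layout.
    pic = Image.open('cat.jpg').convert('RGB')
    image = Tensor(np.array(pic, dtype=np.uint8))

    pipe.start_chat()
    pipe.generate('What is on the image?', image=image, generation_config=config, streamer=streamer)
    pipe.finish_chat()
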
29 changes: 1 addition & 28 deletions src/python/py_vlm_pipeline.cpp
@@ -121,6 +121,7 @@ void init_vlm_pipeline(py::module_& m) {
         .def("start_chat", &ov::genai::VLMPipeline::start_chat, py::arg("system_message") = "")
         .def("finish_chat", &ov::genai::VLMPipeline::finish_chat)
         .def("get_generation_config", &ov::genai::VLMPipeline::get_generation_config)
+        .def("set_generation_config", &ov::genai::VLMPipeline::set_generation_config)
         .def(
             "generate",
             [](ov::genai::VLMPipeline& pipe,
@@ -148,33 +148,5 @@ void init_vlm_pipeline(py::module_& m) {
             },
             py::arg("prompt"), "Input string",
             (vlm_generate_kwargs_docstring + std::string(" \n ")).c_str()
-        )
-        .def(
-            "__call__",
-            [](ov::genai::VLMPipeline& pipe,
-               const std::string& prompt,
-               const std::vector<ov::Tensor>& images,
-               const ov::genai::GenerationConfig& generation_config,
-               const utils::PyBindStreamerVariant& streamer,
-               const py::kwargs& kwargs
-            ) {
-                return call_vlm_generate(pipe, prompt, images, generation_config, streamer, kwargs);
-            },
-            py::arg("prompt"), "Input string",
-            py::arg("images"), "Input images",
-            py::arg("generation_config") = std::nullopt, "generation_config",
-            py::arg("streamer") = std::monostate(), "streamer",
-            (vlm_generate_docstring + std::string(" \n ")).c_str()
-        )
-        .def(
-            "__call__",
-            [](ov::genai::VLMPipeline& pipe,
-               const std::string& prompt,
-               const py::kwargs& kwargs
-            ) {
-                return call_vlm_generate(pipe, prompt, kwargs);
-            },
-            py::arg("prompt"), "Input string",
-            (vlm_generate_kwargs_docstring + std::string(" \n ")).c_str()
         );
 }
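
On the Python side, the net effect of this file's change is that the pipeline object is no longer callable: generate is the single generation entry point, and a default configuration can be attached through the newly bound set_generation_config instead of being passed on every call. A brief sketch, with an illustrative model path and parameter value:

    import openvino_genai as ov_genai

    pipe = ov_genai.VLMPipeline('./MiniCPM-V-2_6', 'CPU')

    # Set a default generation config once rather than passing it to each generate() call.
    config = pipe.get_generation_config()
    config.max_new_tokens = 100
    pipe.set_generation_config(config)

    result = pipe.generate('Describe the scenery.')
    # pipe('Describe the scenery.') would now raise TypeError, since the __call__ bindings above were removed.
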