
fix vlm running without image
eaidova committed Jan 20, 2025
1 parent 17d4ef7, commit b51b487
Showing 3 changed files with 15 additions and 7 deletions.
4 changes: 2 additions & 2 deletions tools/llm_bench/llm_bench_utils/model_utils.py
@@ -37,10 +37,10 @@ def get_param_from_file(args, input_key):
         if args["use_case"] != "vlm":
             raise RuntimeError("Multiple sources for benchmarking supported only for Visual Language Models")
         data_dict = {}
-        if args["media"] is None and args["image"] is None:
+        if args["media"] is None and args["images"] is None:
             log.warn("Input image is not provided. Only text generation part will be evaluated")
         else:
-            data_dict["media"] = args["media"] if args["media"] is not None else args["image"]
+            data_dict["media"] = args["media"] if args["media"] is not None else args["images"]
         if args["prompt"] is None:
             data_dict["prompt"] = "What is OpenVINO?" if data_dict["media"] is None else "Describe image"
         else:
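The fix aligns the lookup with the key the argument parser apparently populates ("images", plural); when neither media nor images is given, the benchmark now degrades to text-only generation rather than tripping over the old "image" key. A minimal sketch of the corrected control flow, with a stand-in args dict in place of the parsed llm_bench CLI arguments (keys taken from the diff):

# Stand-in for the parsed llm_bench CLI arguments; keys taken from the diff.
args = {"media": None, "images": None, "prompt": None}

data_dict = {}
if args["media"] is None and args["images"] is None:
    # Text-only fallback: "media" stays unset and only text generation is timed.
    print("Input image is not provided. Only text generation part will be evaluated")
else:
    data_dict["media"] = args["media"] if args["media"] is not None else args["images"]
if args["prompt"] is None:
    data_dict["prompt"] = "What is OpenVINO?" if data_dict.get("media") is None else "Describe image"
else:
    data_dict["prompt"] = args["prompt"]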
11 changes: 8 additions & 3 deletions tools/llm_bench/task/text_generation.py
@@ -209,15 +209,17 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data
 
     enable_prompt_permutations = not args.get("disable_prompt_permutation", False)
     if enable_prompt_permutations:
-        log.warning("Enabled input prompt permutations. It means that generation results can be vary on different steps. If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behaviour")
+        log.warning(
+            "Enabled input prompt permutations. It means that generation results can be vary on different steps. "
+            "If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behavior"
+        )
         from openvino_genai import TokenizedInputs
         import openvino as ov
 
         input_ids = input_data.input_ids.data
         input_ids[:, 0] = num + 1
         attention_mask = input_data.attention_mask
         input_data = TokenizedInputs(input_ids=ov.Tensor(input_ids), attention_mask=attention_mask)
 
     num_input_tokens = input_data.input_ids.shape[1]
     if args['batch_size'] > 1:
         out_str = '[warm-up]' if num == 0 else '[{}]'.format(num)
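This block (repeated in the streaming variant below) overwrites the first token id of the already-tokenized prompt with a per-iteration value, so each benchmarking step submits a slightly different prompt, likely to keep measurements from benefiting from prompt or prefix caching across iterations. A sketch of that step in isolation, using only the calls visible in the diff (openvino and openvino_genai installed; input_ids shaped [batch, seq_len]):

import openvino as ov
from openvino_genai import TokenizedInputs

def permute_prompt(input_data, num):
    # numpy view of the token ids; mutate the first position in every batch row
    input_ids = input_data.input_ids.data
    input_ids[:, 0] = num + 1  # iteration-dependent token makes each prompt unique
    return TokenizedInputs(input_ids=ov.Tensor(input_ids),
                           attention_mask=input_data.attention_mask)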
@@ -379,7 +381,10 @@ def run_text_generation_genai_with_stream(input_text, num, model, tokenizer, arg
     gen_config.do_sample = False
     enable_prompt_permutations = not args.get("disable_prompt_permutation", False)
     if enable_prompt_permutations:
-        log.warning("Enabled input prompt permutations. It means that generation results can be vary on different steps. If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behaviour")
+        log.warning(
+            "Enabled input prompt permutations. It means that generation results can be vary on different steps. "
+            "If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behavior"
+        )
         from openvino_genai import TokenizedInputs
         import openvino as ov
 
7 changes: 5 additions & 2 deletions tools/llm_bench/task/visual_language_generation.py
@@ -44,7 +44,7 @@ def run_visual_language_generation_optimum(
     for bs_index, in_text in enumerate(prompts):
         llm_bench_utils.output_file.output_input_text(in_text, args, model_precision, prompt_index, bs_index, proc_id)
     tok_encode_start = time.perf_counter()
-    input_data = model.preprocess_inputs(text=prompts[0], image=images[0], **processor)
+    input_data = model.preprocess_inputs(text=prompts[0], image=images[0] if images else None, **processor)
     tok_encode_end = time.perf_counter()
     tok_encode_time = (tok_encode_end - tok_encode_start) * 1000
     # Remove `token_type_ids` from inputs
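On the Optimum path the fix is a guard on the possibly empty images list before indexing. The guarded call in isolation, with model, prompts, and processor standing in for the objects from the surrounding function:

image = images[0] if images else None  # an empty list no longer raises IndexError
input_data = model.preprocess_inputs(text=prompts[0], image=image, **processor)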
@@ -211,8 +211,11 @@ def run_visual_language_generation_genai(
     gen_config.max_new_tokens = max_gen_tokens
     gen_config.num_beams = args["num_beams"]
     gen_config.do_sample = False
+    kwargs = {}
+    if len(images) >= 1:
+        kwargs["images"] = images[0]
     start = time.perf_counter()
-    generation_result = model.generate(prompts[0], images=images[0], generation_config=gen_config)
+    generation_result = model.generate(prompts[0], generation_config=gen_config, **kwargs)
     end = time.perf_counter()
     generated_text = generation_result.texts
     perf_metrics = generation_result.perf_metrics
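The GenAI pipeline presumably does not accept images=None, so instead of passing a placeholder the fix builds the keyword arguments conditionally and forwards images only when one was supplied, a common pattern for optional named arguments. A sketch under the same assumptions (model and gen_config come from the surrounding function):

kwargs = {}
if images:  # equivalent to len(images) >= 1
    kwargs["images"] = images[0]
generation_result = model.generate(prompts[0], generation_config=gen_config, **kwargs)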
