From b51b48713bbde09b0233c8caf1b6c9f506d29921 Mon Sep 17 00:00:00 2001 From: eaidova Date: Mon, 20 Jan 2025 22:07:59 +0400 Subject: [PATCH] fix vlm running without image --- tools/llm_bench/llm_bench_utils/model_utils.py | 4 ++-- tools/llm_bench/task/text_generation.py | 11 ++++++++--- tools/llm_bench/task/visual_language_generation.py | 7 +++++-- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/tools/llm_bench/llm_bench_utils/model_utils.py b/tools/llm_bench/llm_bench_utils/model_utils.py index 585d4a557c..51d77d3215 100644 --- a/tools/llm_bench/llm_bench_utils/model_utils.py +++ b/tools/llm_bench/llm_bench_utils/model_utils.py @@ -37,10 +37,10 @@ def get_param_from_file(args, input_key): if args["use_case"] != "vlm": raise RuntimeError("Multiple sources for benchmarking supported only for Visual Language Models") data_dict = {} - if args["media"] is None and args["image"] is None: + if args["media"] is None and args["images"] is None: log.warn("Input image is not provided. Only text generation part will be evaluated") else: - data_dict["media"] = args["media"] if args["media"] is not None else args["image"] + data_dict["media"] = args["media"] if args["media"] is not None else args["images"] if args["prompt"] is None: data_dict["prompt"] = "What is OpenVINO?" if data_dict["media"] is None else "Describe image" else: diff --git a/tools/llm_bench/task/text_generation.py b/tools/llm_bench/task/text_generation.py index 872fdfa2c2..f308ced75a 100644 --- a/tools/llm_bench/task/text_generation.py +++ b/tools/llm_bench/task/text_generation.py @@ -209,7 +209,10 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data enable_prompt_permutations = not args.get("disable_prompt_permutation", False) if enable_prompt_permutations: - log.warning("Enabled input prompt permutations. It means that generation results can be vary on different steps. 
If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behaviour") + log.warning( + "Enabled input prompt permutations. It means that generation results can vary on different steps. " + "If this is not expected, please specify --disable_prompt_permutation in your benchmarking command to disable this behavior" + ) from openvino_genai import TokenizedInputs import openvino as ov @@ -217,7 +220,6 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data input_ids[:, 0] = num + 1 attention_mask = input_data.attention_mask input_data = TokenizedInputs(input_ids=ov.Tensor(input_ids), attention_mask=attention_mask) - num_input_tokens = input_data.input_ids.shape[1] if args['batch_size'] > 1: out_str = '[warm-up]' if num == 0 else '[{}]'.format(num) @@ -379,7 +381,10 @@ def run_text_generation_genai_with_stream(input_text, num, model, tokenizer, arg gen_config.do_sample = False enable_prompt_permutations = not args.get("disable_prompt_permutation", False) if enable_prompt_permutations: - log.warning("Enabled input prompt permutations. It means that generation results can be vary on different steps. If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behaviour") + log.warning( + "Enabled input prompt permutations. It means that generation results can vary on different steps. 
" + "If it does not expected please specify --disable_prompr_permutation in your benchmarking command to disable this behavior" + ) from openvino_genai import TokenizedInputs import openvino as ov diff --git a/tools/llm_bench/task/visual_language_generation.py b/tools/llm_bench/task/visual_language_generation.py index a5fb0ecc0c..4eb76bef99 100644 --- a/tools/llm_bench/task/visual_language_generation.py +++ b/tools/llm_bench/task/visual_language_generation.py @@ -44,7 +44,7 @@ def run_visual_language_generation_optimum( for bs_index, in_text in enumerate(prompts): llm_bench_utils.output_file.output_input_text(in_text, args, model_precision, prompt_index, bs_index, proc_id) tok_encode_start = time.perf_counter() - input_data = model.preprocess_inputs(text=prompts[0], image=images[0], **processor) + input_data = model.preprocess_inputs(text=prompts[0], image=images[0] if images else None, **processor) tok_encode_end = time.perf_counter() tok_encode_time = (tok_encode_end - tok_encode_start) * 1000 # Remove `token_type_ids` from inputs @@ -211,8 +211,11 @@ def run_visual_language_generation_genai( gen_config.max_new_tokens = max_gen_tokens gen_config.num_beams = args["num_beams"] gen_config.do_sample = False + kwargs = {} + if len(images) >= 1: + kwargs["images"] = images[0] start = time.perf_counter() - generation_result = model.generate(prompts[0], images=images[0], generation_config=gen_config) + generation_result = model.generate(prompts[0], generation_config=gen_config) end = time.perf_counter() generated_text = generation_result.texts perf_metrics = generation_result.perf_metrics