From 5b7aab4197a492675906abdd75364123f0c14a45 Mon Sep 17 00:00:00 2001
From: Dariusz Trawinski
Date: Fri, 10 Jan 2025 08:27:11 +0100
Subject: [PATCH 1/6] lora merge

---
 demos/common/export_models/export_model.py  | 28 ++++++++++++++++++---
 demos/common/export_models/requirements.txt |  1 +
 2 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/demos/common/export_models/export_model.py b/demos/common/export_models/export_model.py
index 878fe0c5a1..268a137aa3 100644
--- a/demos/common/export_models/export_model.py
+++ b/demos/common/export_models/export_model.py
@@ -17,7 +17,10 @@
 import argparse
 import os
 from openvino_tokenizers import convert_tokenizer, connect_models
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM
+from peft import PeftModel
+#from peft.utils import PeftConfig
+import torch
 import jinja2
 import json
 import shutil
@@ -44,6 +47,7 @@ def add_common_arguments(parser):
 parser_text.add_argument('--max_num_batched_tokens', default=None, help='empty or integer. The maximum number of tokens that can be batched together.', dest='max_num_batched_tokens')
 parser_text.add_argument('--max_num_seqs', default=None, help='256 by default. The maximum number of sequences that can be processed together.', dest='max_num_seqs')
 parser_text.add_argument('--cache_size', default=10, type=int, help='cache size in GB', dest='cache_size')
+parser_text.add_argument('--adapter',action='append', help='lora adapter in HF or a local folder with the adapter', dest='adapter')
 parser_embeddings = subparsers.add_parser('embeddings', help='export model for embeddings endpoint')
 add_common_arguments(parser_embeddings)
 parser_embeddings.add_argument('--skip_normalize', default=True, action='store_false', help='Skip normalize the embeddings.', dest='normalize')
@@ -244,13 +248,31 @@ def add_servable_to_config(config_path, mediapipe_name, base_path):
         json.dump(config_data, config_file, indent=4)
     print("Added servable to config file", config_path)
 
-def export_text_generation_model(model_repository_path, source_model, model_name, precision, task_parameters, config_file_path):
+def export_text_generation_model(model_repository_path, source_model, model_name, precision, task_parameters, config_file_path, adapter):
     model_path = "./"
     if os.path.isfile(os.path.join(source_model, 'openvino_model.xml')):
         print("OV model is source folder. Skipping conversion.")
         model_path = source_model
     else: # assume HF model name or local pytorch model folder
         llm_model_path = os.path.join(model_repository_path, model_name)
+        if adapter is not None:
+            print("Loading model with adapter")
+            HFmodel = LlamaForCausalLM.from_pretrained(source_model, trust_remote_code=True)
+
+            for adapteri in adapter:
+                print("Loading adapter", adapteri)
+                #HFmodel = PeftModel.from_pretrained(HFmodel, adapteri)
+            HFmodel = HFmodel.eval()
+            HFmodel = HFmodel.to("cpu")
+            print("Exporting LLM model to /tmp/adapter_model")
+            print(HFmodel)
+            #HFmodel.merge_and_unload()
+            print(HFmodel)
+            HFmodel.save_pretrained("/tmp/adapter_model")
+            print("SAVED")
+            source_model = "/tmp/adapter_model"
+
+
         print("Exporting LLM model to ", llm_model_path)
         if not os.path.isdir(llm_model_path) or args['overwrite_models']:
             optimum_command = "optimum-cli export openvino --disable-convert-tokenizer --model {} --weight-format {} --trust-remote-code {}".format(source_model, precision, llm_model_path)
@@ -368,7 +390,7 @@ def export_rerank_model(model_repository_path, source_model, model_name, precisi
 print("template params:",template_parameters)
 
 if args['task'] == 'text_generation':
-    export_text_generation_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, args['config_file_path'])
+    export_text_generation_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, args['config_file_path'], args['adapter'])
 elif args['task'] == 'embeddings':
     export_embeddings_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, str(args['version']), args['config_file_path'])

diff --git a/demos/common/export_models/requirements.txt b/demos/common/export_models/requirements.txt
index 421ae34833..25a5d18091 100644
--- a/demos/common/export_models/requirements.txt
+++ b/demos/common/export_models/requirements.txt
@@ -9,3 +9,4 @@ sentence_transformers==3.1.1
 openai
 transformers<4.45
 einops
+peft>=0.14.0

From f77d1bdb63ebf342b6bbe063589d9e1b818b4f37 Mon Sep 17 00:00:00 2001
From: Dariusz Trawinski
Date: Sun, 12 Jan 2025 00:34:30 +0100
Subject: [PATCH 2/6] add adapter merge option

---
 demos/common/export_models/export_model.py | 38 ++++++++++------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/demos/common/export_models/export_model.py b/demos/common/export_models/export_model.py
index 268a137aa3..41eede80d5 100644
--- a/demos/common/export_models/export_model.py
+++ b/demos/common/export_models/export_model.py
@@ -255,33 +255,31 @@ def export_text_generation_model(model_repository_path, source_model, model_name
         model_path = source_model
     else: # assume HF model name or local pytorch model folder
         llm_model_path = os.path.join(model_repository_path, model_name)
-        if adapter is not None:
-            print("Loading model with adapter")
-            HFmodel = LlamaForCausalLM.from_pretrained(source_model, trust_remote_code=True)
-
-            for adapteri in adapter:
-                print("Loading adapter", adapteri)
-                #HFmodel = PeftModel.from_pretrained(HFmodel, adapteri)
-            HFmodel = HFmodel.eval()
-            HFmodel = HFmodel.to("cpu")
-            print("Exporting LLM model to /tmp/adapter_model")
-            print(HFmodel)
-            #HFmodel.merge_and_unload()
-            print(HFmodel)
-            HFmodel.save_pretrained("/tmp/adapter_model")
-            print("SAVED")
-            source_model = "/tmp/adapter_model"
-
-
-        print("Exporting LLM model to ", llm_model_path)
+        tmp_folder = None
         if not os.path.isdir(llm_model_path) or args['overwrite_models']:
-            optimum_command = "optimum-cli export openvino --disable-convert-tokenizer --model {} --weight-format {} --trust-remote-code {}".format(source_model, precision, llm_model_path)
+            if adapter is not None:
+                tmp_folder = tempfile.mkdtemp()
+                print("Loading model with adapter")
+                HFmodel = LlamaForCausalLM.from_pretrained(source_model, trust_remote_code=True)
+                for adapteri in adapter:
+                    print("Loading adapter", adapteri)
+                    HFmodel = PeftModel.from_pretrained(HFmodel, adapteri)
+                print("Merging model with adapters")
+                HFmodel = HFmodel.merge_and_unload()
+                HFmodel.save_pretrained(tmp_folder)
+                tokenizer = AutoTokenizer.from_pretrained(source_model, trust_remote_code=True)
+                tokenizer.save_pretrained(tmp_folder)
+                source_model = tmp_folder
+            print("Exporting LLM model to ", llm_model_path)
+            optimum_command = "optimum-cli export openvino --task text-generation --disable-convert-tokenizer --model {} --weight-format {} --trust-remote-code {}".format(source_model, precision, llm_model_path)
             if os.system(optimum_command):
                 raise ValueError("Failed to export llm model", source_model)
             print("Exporting tokenizer to ", llm_model_path)
             convert_tokenizer_command = "convert_tokenizer --utf8_replace_mode replace --with-detokenizer --skip-special-tokens --streaming-detokenizer -o {} {}".format(llm_model_path, source_model)
             if (os.system(convert_tokenizer_command)):
                 raise ValueError("Failed to export tokenizer model", source_model)
+            if adapter is not None:
+                shutil.rmtree(tmp_folder)
     os.makedirs(os.path.join(model_repository_path, model_name), exist_ok=True)
     gtemplate = jinja2.Environment(loader=jinja2.BaseLoader).from_string(text_generation_graph_template)
     graph_content = gtemplate.render(tokenizer_model="{}_tokenizer_model".format(model_name), embeddings_model="{}_embeddings_model".format(model_name), model_path=model_path, **task_parameters)

From f84b302b39f60fe4c723f98eab99dab97714c416 Mon Sep 17 00:00:00 2001
From: Dariusz Trawinski
Date: Tue, 14 Jan 2025 09:27:54 +0100
Subject: [PATCH 3/6] fix merging

---
 demos/common/export_models/export_model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/demos/common/export_models/export_model.py b/demos/common/export_models/export_model.py
index 41eede80d5..5aab9a3a56 100644
--- a/demos/common/export_models/export_model.py
+++ b/demos/common/export_models/export_model.py
@@ -41,7 +41,7 @@ def add_common_arguments(parser):
 subparsers = parser.add_subparsers(help='subcommand help', required=True, dest='task')
 parser_text = subparsers.add_parser('text_generation', help='export model for chat and completion endpoints')
 add_common_arguments(parser_text)
-parser_text.add_argument('--kv_cache_precision', default=None, choices=["u8"], help='u8 or empty (model default). Reduced kv cache precision to u8 lowers the cache size consumption.', dest='kv_cache_precision')
+parser_text.add_argument('--kv_cache_precision', default=None, choices=["u8", "fp32"], help='u8, fp32 or empty (model default). Reduced kv cache precision to u8 lowers the cache size consumption.', dest='kv_cache_precision')
 parser_text.add_argument('--enable_prefix_caching', action='store_true', help='This algorithm is used to cache the prompt tokens.', dest='enable_prefix_caching')
 parser_text.add_argument('--disable_dynamic_split_fuse', action='store_false', help='The maximum number of tokens that can be batched together.', dest='dynamic_split_fuse')
 parser_text.add_argument('--max_num_batched_tokens', default=None, help='empty or integer. The maximum number of tokens that can be batched together.', dest='max_num_batched_tokens')
@@ -271,7 +271,7 @@ def export_text_generation_model(model_repository_path, source_model, model_name
                 tokenizer.save_pretrained(tmp_folder)
                 source_model = tmp_folder
             print("Exporting LLM model to ", llm_model_path)
-            optimum_command = "optimum-cli export openvino --task text-generation --disable-convert-tokenizer --model {} --weight-format {} --trust-remote-code {}".format(source_model, precision, llm_model_path)
+            optimum_command = "optimum-cli export openvino --task text-generation-with-past --disable-convert-tokenizer --model {} --weight-format {} --trust-remote-code {}".format(source_model, precision, llm_model_path)
             if os.system(optimum_command):
                 raise ValueError("Failed to export llm model", source_model)
             print("Exporting tokenizer to ", llm_model_path)

From 5a3abaa1bacd704b99fc780b4210f35436c894e9 Mon Sep 17 00:00:00 2001
From: Dariusz Trawinski
Date: Wed, 15 Jan 2025 13:37:30 +0100
Subject: [PATCH 4/6] lora merge documentation

---
 demos/continuous_batching/README.md  |   4 +
 .../lora_adapters/README.md          | 151 ++++++++++++++++++
 .../lora_adapters/hf_compare_lora.py |  59 +++++++
 .../lora_adapters/merged.png         | Bin 0 -> 28083 bytes
 4 files changed, 214 insertions(+)
 create mode 100644 demos/continuous_batching/lora_adapters/README.md
 create mode 100644 demos/continuous_batching/lora_adapters/hf_compare_lora.py
 create mode 100644 demos/continuous_batching/lora_adapters/merged.png

diff --git a/demos/continuous_batching/README.md b/demos/continuous_batching/README.md
index 4bcd961c23..9136673e86 100644
--- a/demos/continuous_batching/README.md
+++ b/demos/continuous_batching/README.md
@@ -296,6 +296,10 @@ Check this simple [text generation scaling demo](https://github.com/openvinotool
 
 Check the [guide of using lm-evaluation-harness](https://github.com/openvinotoolkit/model_server/blob/main/demos/continuous_batching/accuracy/README.md)
 
+## Using LoRA adapters with LLM models
+
+Check this guide on [using LoRA adapters for text generation](./lora_adapters/README.md)
+
 ## References
 - [Chat Completions API](../../docs/model_server_rest_api_chat.md)
 - [Completions API](../../docs/model_server_rest_api_completions.md)

diff --git a/demos/continuous_batching/lora_adapters/README.md b/demos/continuous_batching/lora_adapters/README.md
new file mode 100644
index 0000000000..3e4314c187
--- /dev/null
+++ b/demos/continuous_batching/lora_adapters/README.md
@@ -0,0 +1,151 @@
+# Using LoRA adapters for text generation {#ovms_demos_continuous_batching_lora}
+
+[LoRA adapters](https://arxiv.org/pdf/2106.09685) can be used to efficiently fine-tune LLM models. There are two methods for employing the adapters for serving:
+- merging the base model with the adapters and exporting the combined final model
+- adding the adapters at runtime in the server deployment along with the base model
+
+## Merging adapters with the main model
+
+In this scenario, the base model is merged with one or more adapters. It can be done with the `peft` Python library, as shown in the sketch below. Such a merged model can then be optimized, quantized and prepared for deployment in the model server.
+
+![merged](merged.png)
+
+The clients will be calling the final shared model name.
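+
+For illustration, the sketch below shows roughly what such a merge step looks like with the `peft` API. It is not the exact export script code, and the model and adapter names are only examples:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+base_model = "meta-llama/Llama-2-7b-hf"        # example base model
+adapter = "yard1/llama-2-7b-sql-lora-test"     # example LoRA adapter
+
+model = AutoModelForCausalLM.from_pretrained(base_model)
+model = PeftModel.from_pretrained(model, adapter)  # attach the adapter
+model = model.merge_and_unload()  # fold the LoRA weights into the base weights
+model.save_pretrained("merged_model")
+AutoTokenizer.from_pretrained(base_model).save_pretrained("merged_model")
+```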
+
+Those steps can be automated using the export_model.py script, as presented below.
+
+Install Python dependencies:
+```console
+git clone https://github.com/openvinotoolkit/model_server.git
+cd model_server
+pip3 install -U -r demos/common/export_models/requirements.txt
+cd demos/continuous_batching/lora_adapters
+```
+
+Export the base model and an adapter into a merged model. When targeting CPU:
+
+```console
+python export_model.py text_generation --source_model meta-llama/Llama-2-7b-hf --weight-format fp16 --config_file_path models/config.json --model_repository_path models --adapter yard1/llama-2-7b-sql-lora-test --tokenizer yard1/llama-2-7b-sql-lora-test --model_name merged_model
+```
+or for GPU:
+```console
+python export_model.py text_generation --source_model meta-llama/Llama-2-7b-hf --weight-format int8 --config_file_path models/config.json --model_repository_path models --adapter yard1/llama-2-7b-sql-lora-test --tokenizer yard1/llama-2-7b-sql-lora-test --model_name merged_model --target_device GPU --overwrite_models
+```
+
+For comparing the results, let's also export the base model alone:
+```console
+python export_model.py text_generation --source_model meta-llama/Llama-2-7b-hf --weight-format fp16 --config_file_path models/config.json --model_repository_path models --model_name base_model
+```
+
+> **Note:** The `tokenizer` parameter is needed only when the adapter uses a different tokenizer than the base model.
+
+Such exported models can be used for deployment in serving.
+
+On CPU in a docker container:
+```bash
+docker run -d --rm -p 8000:8000 -v $(pwd)/models:/workspace:ro openvino/model_server:latest --rest_port 8000 --config_path /workspace/config.json
+```
+
+On GPU in a docker container:
+```bash
+docker run -d --rm -p 8000:8000 --device /dev/dri --group-add=$(stat -c "%g" /dev/dri/render* | head -n 1) -v $(pwd)/models:/workspace:ro openvino/model_server:latest-gpu --rest_port 8000 --config_path /workspace/config.json
+```
+
+On baremetal after installation of the binary package:
+```console
+ovms --rest_port 8000 --config_path models/config.json
+```
+
+Now, we can test the merged model from the client:
+
+```console
+curl http://localhost:8000/v3/completions -H "Content-Type: application/json" \
+-d '{
+"model": "merged_model",
+"prompt": "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]"
+}' | jq
+```
+```json
+{
+  "choices": [
+    {
+      "finish_reason": "stop",
+      "index": 0,
+      "logprobs": null,
+      "text": " Write a SQL query to answer the question based on the table schema. context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR) question: name the icao for lilongwe international airport [/assistant] SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' \n\n"
+    }
+  ],
+  "created": 1736933735,
+  "model": "merged_model",
+  "object": "text_completion",
+  "usage": {
+    "prompt_tokens": 64,
+    "completion_tokens": 82,
+    "total_tokens": 146
+  }
+}
+
+```
+The results are different when calling the base model:
+
+```console
+curl http://localhost:8000/v3/completions -H "Content-Type: application/json" \
+-d '{
+"model": "base_model",
+"prompt": "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]"
+}' | jq
+```
+```json
+{
+  "choices": [
+    {
+      "finish_reason": "length",
+      "index": 0,
+      "logprobs": null,
+      "text": "\n\n Answer: lilongwe\n\n[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for mwanza international airport [/user] [assistant]\n\n Answer: mwanza\n\n[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for namibia [/user] [assistant]\n\n Answer: namibia\n\n[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for"
+    }
+  ],
+  "created": 1736933826,
+  "model": "base_model",
+  "object": "text_completion",
+  "usage": {
+    "prompt_tokens": 64,
+    "completion_tokens": 200,
+    "total_tokens": 264
+  }
+}
+
+```
+> **Note:** The results might diverge for every call, especially with temperature > 0. Be aware that the adapter above is for testing purposes.
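+
+For reproducible comparisons, the sampling temperature can be pinned to 0 in the request, which gives greedy decoding. A sketch of the same call as above with that parameter added:
+
+```console
+curl http://localhost:8000/v3/completions -H "Content-Type: application/json" \
+-d '{
+"model": "merged_model",
+"temperature": 0,
+"prompt": "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]"
+}' | jq
+```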
+
+
+## Adding the adapters at runtime
+
+TBD
diff --git a/demos/continuous_batching/lora_adapters/hf_compare_lora.py b/demos/continuous_batching/lora_adapters/hf_compare_lora.py
new file mode 100644
index 0000000000..315d5bfe8c
--- /dev/null
+++ b/demos/continuous_batching/lora_adapters/hf_compare_lora.py
@@ -0,0 +1,59 @@
+#
+# Copyright (c) 2025 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from peft import PeftModel
+import torch
+device = "cpu"
+# Make prompts
+prompt = [
+'''"[user] Write a SQL query to answer the question based on the table schema.\n
+\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n
+\n question: Name the ICAO for lilongwe international airport [/user] [assistant]''']
+
+# Load Models
+base_model = "meta-llama/Llama-2-7b-hf"
+peft_adapter = "yard1/llama-2-7b-sql-lora-test"
+
+tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(base_model)
+
+
+def generate_base(model, prompt, tokenizer):
+    print("Generating results")
+    tokens = tokenizer(prompt, return_tensors='pt').to(device)
+    res = model.generate(**tokens, max_new_tokens=100)
+    res_sentences = [tokenizer.decode(i) for i in res]
+    print("Results:", res_sentences)
+
+def merge_models(model, adapter):
+    print("Merging model with adapter")
+    adapter_tokenizer = AutoTokenizer.from_pretrained(adapter)
+    model.resize_token_embeddings(len(adapter_tokenizer), mean_resizing=False)
+    model = PeftModel.from_pretrained(model, adapter)
+    model = model.eval()
+    model = model.to(device)
+    return model, adapter_tokenizer
+
+print("BASE MODEL")
+generate_base(model, prompt, tokenizer)
+model, adapter_tokenizer = merge_models(model, peft_adapter)
+print("MERGED MODEL")
+generate_base(model, prompt, adapter_tokenizer)
+
+
+
diff --git a/demos/continuous_batching/lora_adapters/merged.png b/demos/continuous_batching/lora_adapters/merged.png
new file mode 100644
index 0000000000000000000000000000000000000000..31cb1eb812f5ae3e8a1ae78cab8fe9672c328ea3
GIT binary patch
literal 28083
(base85-encoded PNG image data omitted)
z)JZhGF~c0GzDr7~b-%durYBTb#qZ^+;otEw?}+|8+HQO)BdQnF>Q(nO01#AIQ-;S2 zSYBlh>QFV^31+^!}txLvzznuCG*E8DvEypV{ z=9na%_W)dL_>;Uq@wU%??``Ra12F4A%1LxX!u8!hsVonIfa2*cT zJ~DAy`hffk9iyziblmRKuT_P%qtvwANvh1TM4*jsbBd1dv3x;uCFCd_XdX)2EmHKg z*Q2Ktm(%Qhs9vJLnN{niry5`<6sJ3IM0_{w8pe|$P5NkjqrolYs~=rc2s#An5D}h4 zEtb_sUrSno$@#o-=~D#r#4MrPUAO+wy<%To%5<2Pky>G=OQfPQ|I(}|5lz)W8orZV z_0<&?nJAenB!>KaVXXB`o4qMjCpaZNI+7Wcr*tV94EO{o<@xN;i}>wM)=xQwr7>Uc zN5d9YZ{0AM7v4r%5`Qfr{qrKXtb8Ci4#6ucy?qc!jdP)qvZsabre+#-#0Q^?jE@Fcw$^Eev;sz> zS9S?%(mq#pDa_0rp{NYX9`^1}^N`tU%h`2I8!s%g}1EZ^%s#{c;-jer0 zmCtZfOo6EH{krO>CY<}ZpXGL7r+>3qy>-)4FkwGkeuw;?8I3Ki z{qUML2RC5b+N9Jb#ij<@H5L-C(z|`>l~C=jE5*FV@=4baU(!U6tA}wK%a9P@;lg-o z>g&4%8Txta+b)dQY=NLupWAP4o}GNrpB)5dcU$&WJ#%jLS{p7P$C|S<^)%1TzxE;7 z(dnpA^HxvnsOv9rn}B8-dysP&dR}K=z7db!=}C8F*mqI!=$1g$6svT& zg`ofgq0pVtU7uf#uKLP$1&v<>NMtt52`5R#X3Vd6$vLW}RqS5c9_mWWgDcLiA`5BqN^!f#*)k`QE%IZYi zKaR<7Xjc3U80rW|IF(QplkU4lKHwwGwM zB%E$#&{yw@Sv3;-DFHk)5P&D-*&)QdqQ)OI`?$S80-^EJRJFUiGCJr^-E!AxcC=qF z-aYbgsRsZIPEyAVqZjz`0gxo8SG%9JVMJr43`uEhqzWs{?Iq9Rhj#kcO;U zBrE`tO#*0-8D8i1lRRRU<&nGJ6|3$2digNx%^iYsi6lAB@S=_e;YW}pLeuOhPX1BC zijaM6+mpzH{b|f@9|M%#>1-&mogA%=B{gFBEGCm6cq=PLAAE!Adgo}ZOlM^gkVUo6 z%%AtmBP(s5Rv0cmYalX8JR9`%OE8?O6^*4Y2pOkQX$ZtuJI1l4K#;Jd=m>X&S8Dq= zY(p`si;6!=3_%x?$dB>xL@ipKR801gxZeQt=J})N@mT%x%gkzaf#}1H5Ty)}dg}MV zCD*!2-K^cx87N#KYtX_4I_6JMndxqd!2;HDy}|Hyw+d7_7hI)_W5%EHd zyVKzSef7I<4!3|=Q(hj1bk+jpm;W6Ve~lTOUwAYSz^y6mX9MK*& z;@)6x5Agu0EN|U`D1uK=Lr8uXK>uNS6M=V;ECJ*v?V^3KD|rw7sAs)KMFyaY9Jj`=uy_G zN>wu_>FRV|Tz<|Joje3{LGS9ufhm}SJddBOpzxrQNRsxmk z5z76CN+FG7IS>K|^z-oz1iXV2y1>&=dnmZRef-HXc|TQy=o~zqqM6N;1iD&@o##qxFRe+(!Vl*X@squHUe|j!6ugW%v#~ns6>Y z=bK9?=btqt1Uk5bGjdWE0rn|~-%UWDHW+kc!6G;{10%B>?3;KCT4LSkDWsK_ zOMeXG*Gt`A&WeCB)KhSt@;g)D=+CLLVR^v!IgQ71%H}0(f9bhfZg6}^-W}*oX^5G+ ziIhcLSo*|(Hhsz(M>ij({d&cPamCqE!M+^z6GrRGV}qI5MAe1!2^^HI43UOn*RyM= z__4oUeFjwglXP#mp)aNm-B4ET*VN#)c&te6vCP@?uo) z%%9$_-w=6;CH~L7MVh7k%U%AzJuC$l7W{d2K)m|rk6h$eav!CTDBZ(oB1@@CeeyHF z6yKJ08u{~2&`2oGg)ElTTf%4fx`2z3cDn?+U~_rCew}x=%Rsg$NSEL#d+QM}4o(5M z$Dlo%cqN(M{GK-He7g&#L-!g|f!hip^jo_UMg6UiPoR(~9iaBeBzjvy7qUu1s-a0g zyq6jtS$}oGVPXMCTSLwP_l8jgJs&7ID+$!_kuaS3$w(R;bZ6gf_&QbFBi{R+)V5;b z_Jf2J>;eIxg?7XSLyr=BGWp_)bqnznc?aM)N~L z6PbSyhQd)N*VdZNc z&o2DY#na2{`@4)a@lic~k@K8rpwlCAK=D&l!?Mf`T6(Ry-Cv#wSp@OU<-ogQKoACo zVf3Cu-&v+l>>rk$aalOV&ah<#BqHD)R}3a< zjHy2>3I~-5Wc_M^m}3y9X?NNC*v4}Y>VUZqkOh+d>|yAk%Ix~;?60fVL+p{FTEvsO@f6f zXq^WI14%`ab;r!kstD1vX`?6#~bMrb+OQaSf0q zFgFG3%XYvQELvWGR-6>sZsYs$1P$a4W7Y?`MJpH%eA&IY;PqvD;0xX$i?%T>f3S^# zuK3M1W{qa$^^dIrL$}85z_-O_vO(%Hv1mA7 zM9Y3O_WNqb;YWh6$_{?A!ihDUvE{#=1tvPc#0I__`3U%jEWWjJ`lNtO;;?NX2NdKH zkS|QZrGe~JnX3u;_;Zxgm&T=!U#>Lv+YMfXXie3r#7!<&bU#+VuQb}g*LD30NYh8~ zhH-7#>lxs31Ke|hpK$|$OA9!#2oUAqy@818AH!$+BCFO$RE~msZ(vP{Rb<&fA{LEb zWK0zUA25Gh5pQ(J$Q8RhBFfDN9JF(<#;Xs@=f4yss#;X4MHG)#K7bf+GnJlir(4jj z4Fhk&gcHL}Z}pKwK{k-*wWU%EtN7#an-~m+TD08kW`jO*{c>Z&?W*AQh>Sxs@14c6 z{2Y^*sX|EN^c)MoJjc$jPH`-L&84yrXw12b&KV<5)EfN=$%`DFF9`p>PNj@epef#gGRJ&TgZ~mW|QZ3i#_aHN`uUKWTu=ZA&r_1TO9!SLQ z#oxDf9Mqu&?y*W6h~BopMo!D+ZW|6}gy3F}hg)-cs~Dls8J=H%EkSq$l6(FC)!GYO iy#J*EnQAO|ei@bZJw{g^u??IKVt(@U3GA^SZ~PYyb8p4~ literal 0 HcmV?d00001 From fe090158b84ca4251a9510c6a60240610996ee77 Mon Sep 17 00:00:00 2001 From: Dariusz Trawinski Date: Wed, 15 Jan 2025 13:38:10 +0100 Subject: [PATCH 5/6] export tool with lora support --- demos/common/export_models/export_model.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/demos/common/export_models/export_model.py b/demos/common/export_models/export_model.py index 5aab9a3a56..f27cd8a10b 100644 --- 
--- a/demos/common/export_models/export_model.py
+++ b/demos/common/export_models/export_model.py
@@ -48,6 +48,7 @@ def add_common_arguments(parser):
 parser_text.add_argument('--max_num_seqs', default=None, help='256 by default. The maximum number of sequences that can be processed together.', dest='max_num_seqs')
 parser_text.add_argument('--cache_size', default=10, type=int, help='cache size in GB', dest='cache_size')
 parser_text.add_argument('--adapter',action='append', help='lora adapter in HF or a local folder with the adapter', dest='adapter')
+parser_text.add_argument('--tokenizer', default=None, help='alternative tokenizer for the adapter', dest='tokenizer')
 parser_embeddings = subparsers.add_parser('embeddings', help='export model for embeddings endpoint')
 add_common_arguments(parser_embeddings)
 parser_embeddings.add_argument('--skip_normalize', default=True, action='store_false', help='Skip normalize the embeddings.', dest='normalize')
@@ -248,7 +249,7 @@ def add_servable_to_config(config_path, mediapipe_name, base_path):
         json.dump(config_data, config_file, indent=4)
     print("Added servable to config file", config_path)
 
-def export_text_generation_model(model_repository_path, source_model, model_name, precision, task_parameters, config_file_path, adapter):
+def export_text_generation_model(model_repository_path, source_model, model_name, precision, task_parameters, config_file_path, adapter, adapter_tokenizer):
     model_path = "./"
     if os.path.isfile(os.path.join(source_model, 'openvino_model.xml')):
         print("OV model is source folder. Skipping conversion.")
@@ -258,16 +259,21 @@ def export_text_generation_model(model_repository_path, source_model, model_name
         tmp_folder = None
         if not os.path.isdir(llm_model_path) or args['overwrite_models']:
             if adapter is not None:
+                if len(adapter) > 1 and adapter_tokenizer is not None:
+                    raise ValueError("Only one adapter can be used with a custom tokenizer")
+                if adapter_tokenizer is None:
+                    adapter_tokenizer = source_model
                 tmp_folder = tempfile.mkdtemp()
                 print("Loading model with adapter")
-                HFmodel = LlamaForCausalLM.from_pretrained(source_model, trust_remote_code=True)
+                HFmodel = AutoModelForCausalLM.from_pretrained(source_model, trust_remote_code=True)
                 for adapteri in adapter:
                     print("Loading adapter", adapteri)
+                    HFmodel.resize_token_embeddings(len(AutoTokenizer.from_pretrained(adapter_tokenizer)), mean_resizing=False)
                     HFmodel = PeftModel.from_pretrained(HFmodel, adapteri)
                 print("Merging model with adapters")
                 HFmodel = HFmodel.merge_and_unload()
                 HFmodel.save_pretrained(tmp_folder)
-                tokenizer = AutoTokenizer.from_pretrained(source_model, trust_remote_code=True)
+                tokenizer = AutoTokenizer.from_pretrained(adapter_tokenizer, trust_remote_code=True)
                 tokenizer.save_pretrained(tmp_folder)
                 source_model = tmp_folder
             print("Exporting LLM model to ", llm_model_path)
@@ -388,7 +394,7 @@ def export_rerank_model(model_repository_path, source_model, model_name, precisi
 print("template params:",template_parameters)
 
 if args['task'] == 'text_generation':
-    export_text_generation_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, args['config_file_path'], args['adapter'])
+    export_text_generation_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, args['config_file_path'], args['adapter'], args['tokenizer'])
 elif args['task'] == 'embeddings':
     export_embeddings_model(args['model_repository_path'], args['source_model'], args['model_name'], args['precision'], template_parameters, str(args['version']), args['config_file_path'])

From cdb62a91dc2be2022d01efdf09c19beb8848f79c Mon Sep 17 00:00:00 2001
From: Dariusz Trawinski
Date: Wed, 15 Jan 2025 23:31:47 +0100
Subject: [PATCH 6/6] update transformers

---
 demos/common/export_models/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/demos/common/export_models/requirements.txt b/demos/common/export_models/requirements.txt
index 25a5d18091..05b4b2e0b5 100644
--- a/demos/common/export_models/requirements.txt
+++ b/demos/common/export_models/requirements.txt
@@ -7,6 +7,6 @@ openvino<=2025.0.0.dev20241212
 nncf>=2.11.0
 sentence_transformers==3.1.1
 openai
-transformers<4.45
+transformers<=4.47
 einops
 peft>=0.14.0
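
Note: a hypothetical invocation exercising the new options (the adapter names below are placeholders, not real repositories). Repeated `--adapter` flags merge several adapters into the base model, while combining `--tokenizer` with more than one adapter raises the ValueError introduced in PATCH 5/6:

    python export_model.py text_generation --source_model meta-llama/Llama-2-7b-hf \
        --weight-format fp16 --model_repository_path models --config_file_path models/config.json \
        --model_name merged_multi --adapter adapter_repo_one --adapter adapter_repo_two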