# Databricks notebook source
# MAGIC %md
# MAGIC # Manage Llama-2-70b-chat-hf model with MLflow on Databricks
# MAGIC
# MAGIC [Llama 2](https://huggingface.co/meta-llama) is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. It is trained on 2T tokens and supports a context window of up to 4K tokens. [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) is the 70B fine-tuned model, optimized for dialogue use cases and converted to the Hugging Face Transformers format.
# MAGIC
# MAGIC Environment for this notebook:
# MAGIC - Runtime: 13.2 GPU ML Runtime
# MAGIC - Instance: `Standard_NC48ads_A100_v4` on Azure, `p4d.24xlarge` on AWS
# MAGIC
# MAGIC Requirements:
# MAGIC - To get access to the model on Hugging Face, visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads), accept Meta's license terms and acceptable use policy, and submit the access request form. Requests are typically processed in 1-2 days.
# COMMAND ----------
# MAGIC %pip install --upgrade "mlflow-skinny[databricks]>=2.4.1"
# MAGIC %pip install --upgrade "transformers>=4.31.0" # Llama-2-70B uses Grouped Query Attention that requires transformers>=4.31.0
# MAGIC dbutils.library.restartPython()
# COMMAND ----------
import os

# Cache Hugging Face downloads on the cluster's local disk, which has more space than the default cache location
os.environ["HF_HOME"] = "/local_disk0/hf"
os.environ["HF_DATASETS_CACHE"] = "/local_disk0/hf"
os.environ["TRANSFORMERS_CACHE"] = "/local_disk0/hf"
# COMMAND ----------
from huggingface_hub import notebook_login
# Log in to Hugging Face to get access to the model
notebook_login()
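# COMMAND ----------
# MAGIC %md
# MAGIC `notebook_login()` requires an interactive session. For scheduled jobs, a sketch of an alternative is to store a Hugging Face token in Databricks secrets and log in programmatically; the scope `llm` and key `hf_token` below are placeholder names you would create yourself. The lines are commented out so the interactive login above remains the default.
# COMMAND ----------
# from huggingface_hub import login
# login(token=dbutils.secrets.get(scope="llm", key="hf_token"))  # placeholder scope/key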
# COMMAND ----------
# MAGIC %md
# MAGIC ## Log the model to MLflow
# COMMAND ----------
# Pin the model to a specific revision commit hash for reproducibility, since the uploader may update the repository later; you can find the commit history of Llama-2-70b-chat-hf at https://huggingface.co/meta-llama/Llama-2-70b-chat-hf/commits/main
model = "meta-llama/Llama-2-70b-chat-hf"
revision = "e6152b720bd3cd67afc66e36d06893a0e1f84b48"
from huggingface_hub import snapshot_download
# If the model has been downloaded in previous cells, this will not re-download the large model files; only the remaining files in the repo are fetched
# The safetensors weights are skipped since the PyTorch-format (*.bin) weights are sufficient for this example
snapshot_location = snapshot_download(repo_id=model, revision=revision, ignore_patterns="*.safetensors*")
# COMMAND ----------
import mlflow
import torch
import transformers
# Define the prompt template to get the expected features and performance for the chat versions. See the reference code on GitHub for details: https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
# Define PythonModel to log with mlflow.pyfunc.log_model
class Llama2(mlflow.pyfunc.PythonModel):
    def load_context(self, context):
        """
        This method initializes the tokenizer and language model
        using the specified model repository.
        """
        # Initialize tokenizer and language model
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            context.artifacts['repository'], padding_side="left")
        self.model = transformers.AutoModelForCausalLM.from_pretrained(
            context.artifacts['repository'],
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            device_map="auto",
            pad_token_id=self.tokenizer.eos_token_id)
        self.model.eval()

    def _build_prompt(self, instruction):
        """
        This method generates the prompt for the model.
        """
        return f"""<s>[INST]<<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>>\n\n\n{instruction}[/INST]\n"""

    def _generate_response(self, prompt, temperature, max_new_tokens):
        """
        This method generates the prediction for a single input.
        """
        # Build the prompt
        prompt = self._build_prompt(prompt)

        # Encode the input and generate the prediction
        encoded_input = self.tokenizer.encode(prompt, return_tensors='pt').to('cuda')
        output = self.model.generate(encoded_input, do_sample=True, temperature=temperature,
                                     max_new_tokens=max_new_tokens)

        # Decode only the newly generated tokens, skipping the prompt tokens
        prompt_length = encoded_input.shape[1]
        generated_response = self.tokenizer.decode(output[0][prompt_length:], skip_special_tokens=True)

        return generated_response

    def predict(self, context, model_input):
        """
        This method generates predictions for the given input.
        """
        outputs = []
        for i in range(len(model_input)):
            prompt = model_input["prompt"][i]
            # Fall back to per-row defaults when the optional columns are absent
            temperature = model_input["temperature"][i] if "temperature" in model_input else 1.0
            max_new_tokens = model_input["max_new_tokens"][i] if "max_new_tokens" in model_input else 100
            outputs.append(self._generate_response(prompt, temperature, max_new_tokens))

        # {"candidates": [...]} is the required response format for MLflow AI Gateway -- see 07_ai_gateway for an example
        return {"candidates": outputs}
# COMMAND ----------
from mlflow.models.signature import ModelSignature
from mlflow.types import DataType, Schema, ColSpec
import pandas as pd
# Define the input and output schema
input_schema = Schema([
    ColSpec(DataType.string, "prompt"),
    ColSpec(DataType.double, "temperature", optional=True),
    ColSpec(DataType.long, "max_new_tokens", optional=True)])
output_schema = Schema([ColSpec(DataType.string)])
signature = ModelSignature(inputs=input_schema, outputs=output_schema)

# Define an input example
input_example = pd.DataFrame({
    "prompt": ["what is ML?"],
    "temperature": [0.5],
    "max_new_tokens": [100]})
# Log the model with its details such as artifacts, pip requirements and input example
# This may take about 1.7 minutes to complete
with mlflow.start_run() as run:
    mlflow.pyfunc.log_model(
        "model",
        python_model=Llama2(),
        artifacts={'repository': snapshot_location},
        pip_requirements=["torch", "transformers", "accelerate", "sentencepiece"],
        input_example=input_example,
        signature=signature,
    )
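# COMMAND ----------
# MAGIC %md
# MAGIC Optionally, you can smoke-test the logged model by loading it back from the run before registering it. A minimal sketch, commented out because loading pulls the full 70B weights, and this notebook already loads the model once from Unity Catalog below:
# COMMAND ----------
# run_model = mlflow.pyfunc.load_model(f"runs:/{run.info.run_id}/model")
# run_model.predict(pd.DataFrame({"prompt": ["What is MLflow?"], "temperature": [0.5], "max_new_tokens": [50]}))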
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register the model to Unity Catalog
# MAGIC By default, MLflow registers models in the Databricks workspace model registry. To register models in Unity Catalog instead, follow the [documentation](https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html) and set the registry URI to Databricks Unity Catalog.
# MAGIC
# MAGIC Registering a model in Unity Catalog has [several requirements](https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html#requirements), such as having Unity Catalog enabled in your workspace.
# MAGIC
# COMMAND ----------
# Configure MLflow Python client to register model in Unity Catalog
import mlflow
mlflow.set_registry_uri("databricks-uc")
# COMMAND ----------
# Register the model to Unity Catalog
# This may take about 2.2 minutes to complete

# The UC model name follows the pattern <catalog_name>.<schema_name>.<model_name>,
# corresponding to the catalog, schema, and registered model name
registered_name = "models.default.llamav2_70b_chat_model"

result = mlflow.register_model(
    "runs:/" + run.info.run_id + "/model",
    registered_name,
)
# COMMAND ----------
from mlflow import MlflowClient
client = MlflowClient()
# Point the "Champion" alias at the model version registered in the cell above
client.set_registered_model_alias(name=registered_name, alias="Champion", version=result.version)
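# COMMAND ----------
# Sanity check (assumes an MLflow version with alias APIs, i.e. >= 2.3):
# confirm which version the "Champion" alias resolves to
champion = client.get_model_version_by_alias(name=registered_name, alias="Champion")
print(f"Champion alias points to version {champion.version}")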
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load the model from Unity Catalog
# COMMAND ----------
import mlflow
import pandas as pd

mlflow.set_registry_uri("databricks-uc")
registered_name = "models.default.llamav2_70b_chat_model"

# COMMAND ----------

loaded_model = mlflow.pyfunc.load_model(f"models:/{registered_name}@Champion")
# Make a prediction using the loaded model
response = loaded_model.predict(
    {
        "prompt": ["What is ML?", "What is a large language model?"],
        "temperature": [0.1, 0.5],
        "max_new_tokens": [100, 100],
    }
)
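# COMMAND ----------
# The pyfunc returns the {"candidates": [...]} payload built in Llama2.predict above;
# print each generated text
for text in response["candidates"]:
    print(text)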