Skip to content

Commit

Permalink
Evaluation of Llama 2 on WebNLG mini
Browse files Browse the repository at this point in the history
  • Loading branch information
Rasmus authored and Rasmus committed Dec 11, 2023
1 parent ea32bd9 commit 898aa71
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 26 deletions.
42 changes: 28 additions & 14 deletions relation_extraction/evaluation/evaluation_results.json
Original file line number Diff line number Diff line change
@@ -1,13 +1,18 @@
{
"naive": {
"multilingual": {
"triples": [
{
"sentence": "Turn Me On is a 35.1 minute long album produced by Wharton Tiers that was followed by the album entitled Take it Off.",
"triples_from_solution": [
[
"Wharton_Tiers",
"producer",
"Turn_Me_On"
],
[
"Turn_Me_On",
"album",
"35.1"
"producedBy",
"Wharton_Tiers"
]
],
"expected_triples": [
Expand All @@ -32,6 +37,21 @@
{
"sentence": "The location of Trane is Swords, Dublin.",
"triples_from_solution": [
[
"Trane",
"location",
"Swords,_Dublin"
],
[
"Trane",
"location",
"Swords,_Dublin"
],
[
"Trane",
"location",
"Swords,_Dublin"
],
[
"Trane",
"location",
Expand All @@ -52,8 +72,8 @@
"triples_from_solution": [
[
"Ciudad_Ayala",
"part",
"1777539"
"location",
"Morelos"
]
],
"expected_triples": [
Expand Down Expand Up @@ -97,13 +117,7 @@
},
{
"sentence": "The 17068.8 millimeter long ALCO RS-3 has a diesel-electric transmission.",
"triples_from_solution": [
[
"ALCO_RS-3",
"transmission",
"Diesel-electric_transmission"
]
],
"triples_from_solution": [],
"expected_triples": [
[
"ALCO_RS-3",
Expand All @@ -125,9 +139,9 @@
"hit_percentage": 0.07692307692307693
},
"score": {
"precision": 0.25,
"precision": 0.14285714285714285,
"recall": 0.07692307692307693,
"F1_score": 0.11764705882352941
"F1_score": 0.1
}
}
}
24 changes: 12 additions & 12 deletions relation_extraction/multilingual/llm_messenger.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,22 +10,22 @@ def API_endpoint():
return "http://knox-proxy01.srv.aau.dk/llama-api/llama"

def send_request(request):
    """Send a chat request to the hosted Llama API behind the Knox proxy.

    Args:
        request: dict with at least "system_message", "user_message" and
            "max_tokens"; it is forwarded verbatim as the JSON body.

    Returns:
        The `requests.Response` from the proxy.
        NOTE(review): the previous local-model path returned the llama.cpp
        output dict; callers that consumed that dict likely need
        `response.json()` here — confirm against the call sites.

    Side effects:
        Reads the ACCESS_SECRET environment variable for authorization and
        performs a blocking HTTP POST to LLMMessenger.API_endpoint().
    """
    # The proxy authenticates via this custom header, not standard Bearer auth.
    headers = {"Access-Authorization": os.getenv("ACCESS_SECRET")}
    response = requests.post(
        url=LLMMessenger.API_endpoint(), json=request, headers=headers
    )
    # Bug fix: the original returned `output`, a variable that only existed in
    # the removed local-model code path, so every call raised NameError.
    return response

Expand Down

0 comments on commit 898aa71

Please sign in to comment.