diff --git a/relation_extraction/evaluation/evaluation_results.json b/relation_extraction/evaluation/evaluation_results.json index 55a5c22..575a08b 100644 --- a/relation_extraction/evaluation/evaluation_results.json +++ b/relation_extraction/evaluation/evaluation_results.json @@ -1,13 +1,18 @@ { - "naive": { + "multilingual": { "triples": [ { "sentence": "Turn Me On is a 35.1 minute long album produced by Wharton Tiers that was followed by the album entitled Take it Off.", "triples_from_solution": [ + [ + "Wharton_Tiers", + "producer", + "Turn_Me_On" + ], [ "Turn_Me_On", - "album", - "35.1" + "producedBy", + "Wharton_Tiers" ] ], "expected_triples": [ @@ -32,6 +37,21 @@ { "sentence": "The location of Trane is Swords, Dublin.", "triples_from_solution": [ + [ + "Trane", + "location", + "Swords,_Dublin" + ], + [ + "Trane", + "location", + "Swords,_Dublin" + ], + [ + "Trane", + "location", + "Swords,_Dublin" + ], [ "Trane", "location", @@ -52,8 +72,8 @@ "triples_from_solution": [ [ "Ciudad_Ayala", - "part", - "1777539" + "location", + "Morelos" ] ], "expected_triples": [ @@ -97,13 +117,7 @@ }, { "sentence": "The 17068.8 millimeter long ALCO RS-3 has a diesel-electric transmission.", - "triples_from_solution": [ - [ - "ALCO_RS-3", - "transmission", - "Diesel-electric_transmission" - ] - ], + "triples_from_solution": [], "expected_triples": [ [ "ALCO_RS-3", @@ -125,9 +139,9 @@ "hit_percentage": 0.07692307692307693 }, "score": { - "precision": 0.25, + "precision": 0.14285714285714285, "recall": 0.07692307692307693, - "F1_score": 0.11764705882352941 + "F1_score": 0.1 } } } \ No newline at end of file diff --git a/relation_extraction/multilingual/llm_messenger.py b/relation_extraction/multilingual/llm_messenger.py index 6f46079..d5981ce 100644 --- a/relation_extraction/multilingual/llm_messenger.py +++ b/relation_extraction/multilingual/llm_messenger.py @@ -10,22 +10,22 @@ def API_endpoint(): return "http://knox-proxy01.srv.aau.dk/llama-api/llama" def send_request(request): 
- # HEADERS = {"Access-Authorization": os.getenv("ACCESS_SECRET")} - # response = requests.post(url=LLMMessenger.API_endpoint(), json=request, headers=HEADERS) + HEADERS = {"Access-Authorization": os.getenv("ACCESS_SECRET")} + response = requests.post(url=LLMMessenger.API_endpoint(), json=request, headers=HEADERS) - # Put the location of to the GGUF model that you've download from HuggingFace here - model_path = "./relation_extraction/multilingual/llama-2-13b-chat.Q2_K.gguf" + # # Put the location of to the GGUF model that you've download from HuggingFace here + # model_path = "./relation_extraction/multilingual/llama-2-13b-chat.Q2_K.gguf" - # Create a llama model - model = Llama(model_path=model_path, n_ctx=4096) + # # Create a llama model + # model = Llama(model_path=model_path, n_ctx=4096) - prompt = f"""[INST] <> - {request["system_message"]} - <> - {request["user_message"]} [/INST]""" + # prompt = f"""[INST] <> + # {request["system_message"]} + # <> + # {request["user_message"]} [/INST]""" - # Run the model - output = model(prompt, max_tokens=request["max_tokens"], echo=True) + # # Run the model + # output = model(prompt, max_tokens=request["max_tokens"], echo=True) - return output + return response