This repository has been archived by the owner on Apr 18, 2024. It is now read-only.

Commit 8eca16a
track llmReqId wherever possible
patcher9 committed Feb 10, 2024
1 parent 64eb77b
Showing 3 changed files with 16 additions and 3 deletions.
src/dokumetry/anthropic.py (1 change: 1 addition & 0 deletions)
@@ -45,6 +45,7 @@ def patched_completions_create(*args, **kwargs):
     completion_tokens = llm.count_tokens(response.completion)

     data = {
+        "llmReqId": response.id,
         "environment": environment,
         "applicationName": application_name,
         "sourceLanguage": "python",
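Note: the pattern this hunk extends is the library's monkey-patch wrapper: the SDK method is swapped for a closure that times the call and forwards a telemetry dict. A minimal sketch of that shape, assuming illustrative names (patch_completions, send_data) rather than the module's real plumbing:

    import time

    def patch_completions(llm, environment, application_name, send_data):
        """Wrap llm.completions.create and report each call's metadata."""
        original_create = llm.completions.create

        def patched_completions_create(*args, **kwargs):
            start_time = time.time()
            response = original_create(*args, **kwargs)
            duration = time.time() - start_time
            send_data({
                "llmReqId": response.id,  # provider-assigned request id
                "environment": environment,
                "applicationName": application_name,
                "sourceLanguage": "python",
                "requestDuration": duration,
            })
            return response

        llm.completions.create = patched_completions_create

With the id in the payload, a backend can join each telemetry row to the provider's own logs for the same request.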
src/dokumetry/cohere.py (8 changes: 6 additions & 2 deletions)
@@ -64,7 +64,6 @@ def stream_generator():
         for event in original_generate(*args, **kwargs):
             accumulated_content += event.text
             yield event
-
         end_time = time.time()
         duration = end_time - start_time
         prompt = kwargs.get('prompt')
@@ -96,6 +95,7 @@ def stream_generator():

         for generation in response:
             data = {
+                "llmReqId": generation.id,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -168,14 +168,16 @@ def chat_generate(*args, **kwargs):
     def stream_generator():
         accumulated_content = ""
         for event in original_chat(*args, **kwargs):
+            if event.event_type == "stream-start":
+                responseId = event.generation_id
             if event.event_type == "text-generation":
                 accumulated_content += event.text
             yield event

         end_time = time.time()
         duration = end_time - start_time
         prompt = kwargs.get('message')
         data = {
+            "llmReqId": responseId,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -201,6 +203,7 @@ def stream_generator():
         model = kwargs.get('model', "command")
         prompt = kwargs.get('message')
         data = {
+            "llmReqId": response.generation_id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -239,6 +242,7 @@ def summarize_generate(*args, **kwargs):
     prompt = kwargs.get('text')

     data = {
+        "llmReqId": response.id,
         "environment": environment,
         "applicationName": application_name,
         "sourceLanguage": "python",
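Note: the chat_generate hunk above handles streaming, where there is no single response object to read an id from. Cohere's chat stream opens with a "stream-start" event that carries generation_id, so the wrapper records the id before any text-generation events arrive. A sketch of just that capture, with report standing in for the module's reporting call and response_id initialized defensively:

    def make_stream_generator(original_chat, report):
        def stream_generator(*args, **kwargs):
            accumulated_content = ""
            response_id = None  # set by the first (stream-start) event
            for event in original_chat(*args, **kwargs):
                if event.event_type == "stream-start":
                    response_id = event.generation_id
                if event.event_type == "text-generation":
                    accumulated_content += event.text
                yield event  # pass every event through to the caller
            report({"llmReqId": response_id, "response": accumulated_content})
        return stream_generator

The non-streaming paths are simpler: chat reads response.generation_id, while generate and summarize read generation.id and response.id directly, as the other hunks show.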
src/dokumetry/openai.py (10 changes: 9 additions & 1 deletion)
@@ -53,6 +53,7 @@ def stream_generator():
             if content:
                 accumulated_content += content
             yield chunk
+            responseId = chunk.id
         end_time = time.time()
         duration = end_time - start_time
         message_prompt = kwargs.get('messages', "No prompt provided")
@@ -74,6 +75,7 @@ def stream_generator():

         prompt = "\n".join(formatted_messages)
         data = {
+            "llmReqId": responseId,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -114,6 +116,7 @@ def stream_generator():
         prompt = "\n".join(formatted_messages)

         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -172,10 +175,12 @@ def stream_generator():
             if content:
                 accumulated_content += content
             yield chunk
+            responseId = chunk.id
         end_time = time.time()
         duration = end_time - start_time
         prompt = kwargs.get('prompt', "No prompt provided")
         data = {
+            "llmReqId": responseId,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -199,6 +204,7 @@ def stream_generator():
         prompt = kwargs.get('prompt', "No prompt provided")

         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -296,7 +302,7 @@ def patched_fine_tuning_create(*args, **kwargs):
         "skipResp": skip_resp,
         "requestDuration": duration,
         "model": model,
-        "finetuneJobId": response.id,
+        "llmReqId": response.id,
         "finetuneJobStatus": response.status,
     }

@@ -339,6 +345,7 @@ def patched_image_create(*args, **kwargs):

     for items in response.data:
         data = {
+            "llmReqId": response.created,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -387,6 +394,7 @@ def patched_image_create_variation(*args, **kwargs):
     for items in response.data:

         data = {
+            "llmReqId": response.created,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
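Note: in the OpenAI streaming hunks the id comes from the chunks themselves. Every chunk of one stream carries the same id, so assigning responseId = chunk.id inside the loop leaves that id bound once the stream is exhausted. A sketch of the shape, with illustrative names:

    def make_stream_generator(original_create, report):
        def stream_generator(*args, **kwargs):
            accumulated_content = ""
            response_id = None
            for chunk in original_create(*args, **kwargs):
                response_id = chunk.id  # identical on every chunk of a stream
                if chunk.choices:
                    content = chunk.choices[0].delta.content
                    if content:
                        accumulated_content += content
                yield chunk
            report({"llmReqId": response_id, "response": accumulated_content})
        return stream_generator

Two endpoints have no per-request id to track: fine-tuning reuses the job id (hence the finetuneJobId to llmReqId rename above), and the image endpoints fall back to response.created, a Unix timestamp, which is the closest available correlator but is not guaranteed unique.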
