diff --git a/src/dokumetry/anthropic.py b/src/dokumetry/anthropic.py
index bb90128..a0e3688 100644
--- a/src/dokumetry/anthropic.py
+++ b/src/dokumetry/anthropic.py
@@ -45,6 +45,7 @@ def patched_completions_create(*args, **kwargs):
 
         completion_tokens = llm.count_tokens(response.completion)
         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
diff --git a/src/dokumetry/cohere.py b/src/dokumetry/cohere.py
index 2ab8b64..733ae30 100644
--- a/src/dokumetry/cohere.py
+++ b/src/dokumetry/cohere.py
@@ -64,7 +64,6 @@ def stream_generator():
             accumulated_content = ""
             for event in original_generate(*args, **kwargs):
                 accumulated_content += event.text
                 yield event
-            end_time = time.time()
             duration = end_time - start_time
             prompt = kwargs.get('prompt')
@@ -96,6 +95,7 @@
 
         for generation in response:
             data = {
+                "llmReqId": generation.id,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -168,14 +168,16 @@ def chat_generate(*args, **kwargs):
         def stream_generator():
             accumulated_content = ""
             for event in original_chat(*args, **kwargs):
+                if event.event_type == "stream-start":
+                    responseId = event.generation_id
                 if event.event_type == "text-generation":
                     accumulated_content += event.text
                 yield event
-            end_time = time.time()
             duration = end_time - start_time
             prompt = kwargs.get('message')
 
             data = {
+                "llmReqId": responseId,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -201,6 +203,7 @@
         model = kwargs.get('model', "command")
         prompt = kwargs.get('message')
         data = {
+            "llmReqId": response.generation_id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -239,6 +242,7 @@ def summarize_generate(*args, **kwargs):
 
         prompt = kwargs.get('text')
         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
diff --git a/src/dokumetry/openai.py b/src/dokumetry/openai.py
index ac19879..a182436 100644
--- a/src/dokumetry/openai.py
+++ b/src/dokumetry/openai.py
@@ -53,6 +53,7 @@ def stream_generator():
                 if content:
                     accumulated_content += content
                 yield chunk
+                responseId = chunk.id
             end_time = time.time()
             duration = end_time - start_time
             message_prompt = kwargs.get('messages', "No prompt provided")
@@ -74,6 +75,7 @@
             prompt = "\n".join(formatted_messages)
 
             data = {
+                "llmReqId": responseId,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -114,6 +116,7 @@
         prompt = "\n".join(formatted_messages)
 
         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -172,10 +175,12 @@ def stream_generator():
                 if content:
                     accumulated_content += content
                 yield chunk
+                responseId = chunk.id
             end_time = time.time()
             duration = end_time - start_time
             prompt = kwargs.get('prompt', "No prompt provided")
             data = {
+                "llmReqId": responseId,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -199,6 +204,7 @@
 
         prompt = kwargs.get('prompt', "No prompt provided")
         data = {
+            "llmReqId": response.id,
             "environment": environment,
             "applicationName": application_name,
             "sourceLanguage": "python",
@@ -296,7 +302,7 @@ def patched_fine_tuning_create(*args, **kwargs):
             "skipResp": skip_resp,
             "requestDuration": duration,
             "model": model,
-            "finetuneJobId": response.id,
+            "llmReqId": response.id,
             "finetuneJobStatus": response.status,
         }
 
@@ -339,6 +345,7 @@ def patched_image_create(*args, **kwargs):
 
         for items in response.data:
            data = {
+                "llmReqId": response.created,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",
@@ -387,6 +394,7 @@ def patched_image_create_variation(*args, **kwargs):
 
         for items in response.data:
             data = {
+                "llmReqId": response.created,
                 "environment": environment,
                 "applicationName": application_name,
                 "sourceLanguage": "python",