Skip to content
This repository has been archived by the owner on Apr 18, 2024. It is now read-only.

Commit

Permalink
changes and testing
Browse files Browse the repository at this point in the history
  • Loading branch information
patcher9 committed Mar 16, 2024
1 parent 6e8a6e4 commit 8ccb598
Show file tree
Hide file tree
Showing 5 changed files with 126 additions and 31 deletions.
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,9 @@ keywords = ["openai", "anthropic", "claude", "cohere", "llm monitoring", "observ
[tool.poetry.dependencies]
python = "^3.7.1"
requests = "^2.26.0"
openai = "^1.13.0"
openai = "^1.1.0"
anthropic = "^0.19.0"
mistralai = "^0.1.5"

[build-system]
requires = ["poetry-core>=1.1.0"]
Expand Down
21 changes: 10 additions & 11 deletions src/dokumetry/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,16 +66,15 @@ def init(llm, doku_url, api_key, environment="default", application_name="defaul
elif hasattr(llm, 'generate') and callable(llm.generate):
init_cohere(llm, doku_url, api_key, environment, application_name, skip_resp)
return
elif hasattr(llm, 'chat') and callable(llm.chat):
if isinstance(llm, MistralClient):
init_mistral(llm, doku_url, api_key, environment, application_name, skip_resp)
elif isinstance(llm, MistralAsyncClient):
init_async_mistral(llm, doku_url, api_key, environment, application_name, skip_resp)
elif isinstance(llm, MistralClient):
init_mistral(llm, doku_url, api_key, environment, application_name, skip_resp)
return
elif hasattr(llm, 'messages') and callable(llm.messages.create):
if isinstance(llm, AsyncAnthropic):
init_async_anthropic(llm, doku_url, api_key, environment, application_name, skip_resp)
elif isinstance(llm, Anthropic):
init_anthropic(llm, doku_url, api_key, environment, application_name, skip_resp)

elif isinstance(llm, MistralAsyncClient):
init_async_mistral(llm, doku_url, api_key, environment, application_name, skip_resp)
return
elif isinstance(llm, AsyncAnthropic):
init_async_anthropic(llm, doku_url, api_key, environment, application_name, skip_resp)
return
elif isinstance(llm, Anthropic):
init_anthropic(llm, doku_url, api_key, environment, application_name, skip_resp)
return
6 changes: 2 additions & 4 deletions src/dokumetry/async_azure_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,8 +194,7 @@ async def stream_generator():
"response": accumulated_content,
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return stream_generator()
else:
Expand Down Expand Up @@ -239,8 +238,7 @@ async def stream_generator():
data["promptTokens"] = response.usage.prompt_tokens
data["totalTokens"] = response.usage.total_tokens

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response

Expand Down
28 changes: 13 additions & 15 deletions src/dokumetry/azure_openai.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# pylint: disable=duplicate-code
"""
Module for monitoring OpenAI API calls.
Module for monitoring Azure OpenAI API calls.
"""

import time
Expand All @@ -11,10 +11,10 @@
# pylint: disable=too-many-statements
def init(llm, doku_url, api_key, environment, application_name, skip_resp):
"""
Initialize OpenAI monitoring for Doku.
Initialize Azure OpenAI monitoring for Doku.
Args:
llm: The OpenAI function to be patched.
llm: The Azure OpenAI function to be patched.
doku_url (str): Doku URL.
api_key (str): Doku Authentication api_key.
environment (str): Doku environment.
Expand All @@ -29,14 +29,14 @@ def init(llm, doku_url, api_key, environment, application_name, skip_resp):

def llm_chat_completions(*args, **kwargs):
"""
Patched version of OpenAI's chat completions create method.
Patched version of Azure OpenAI's chat completions create method.
Args:
*args: Variable positional arguments.
**kwargs: Variable keyword arguments.
Returns:
OpenAIResponse: The response from OpenAI's chat completions create method.
OpenAIResponse: The response from Azure OpenAI's chat completions create method.
"""
is_streaming = kwargs.get('stream', False)
start_time = time.time()
Expand Down Expand Up @@ -154,14 +154,14 @@ def stream_generator():

def llm_completions(*args, **kwargs):
"""
Patched version of OpenAI's completions create method.
Patched version of Azure OpenAI's completions create method.
Args:
*args: Variable positional arguments.
**kwargs: Variable keyword arguments.
Returns:
OpenAIResponse: The response from OpenAI's completions create method.
OpenAIResponse: The response from Azure OpenAI's completions create method.
"""
start_time = time.time()
streaming = kwargs.get('stream', False)
Expand Down Expand Up @@ -194,8 +194,7 @@ def stream_generator():
"response": accumulated_content,
}

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return stream_generator()
else:
Expand Down Expand Up @@ -239,21 +238,20 @@ def stream_generator():
data["promptTokens"] = response.usage.prompt_tokens
data["totalTokens"] = response.usage.total_tokens

print(data)
#send_data(data, doku_url, api_key)
send_data(data, doku_url, api_key)

return response

def patched_embeddings_create(*args, **kwargs):
"""
Patched version of OpenAI's embeddings create method.
Patched version of Azure OpenAI's embeddings create method.
Args:
*args: Variable positional arguments.
**kwargs: Variable keyword arguments.
Returns:
OpenAIResponse: The response from OpenAI's embeddings create method.
OpenAIResponse: The response from Azure OpenAI's embeddings create method.
"""

start_time = time.time()
Expand Down Expand Up @@ -282,14 +280,14 @@ def patched_embeddings_create(*args, **kwargs):

def patched_image_create(*args, **kwargs):
"""
Patched version of OpenAI's images generate method.
Patched version of Azure OpenAI's images generate method.
Args:
*args: Variable positional arguments.
**kwargs: Variable keyword arguments.
Returns:
OpenAIResponse: The response from OpenAI's images generate method.
OpenAIResponse: The response from Azure OpenAI's images generate method.
"""

start_time = time.time()
Expand Down
99 changes: 99 additions & 0 deletions tests/test_azure.py.hold
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""
Azure OpenAI Test Suite

This module contains a suite of tests for OpenAI functionality using the OpenAI Python library.
It includes tests for various OpenAI API endpoints such as completions, chat completions,
embeddings creation, fine-tuning job creation, image generation, image variation creation,
and audio speech generation.

The tests are designed to cover different aspects of OpenAI's capabilities and serve as a
validation mechanism for the integration with the Doku monitoring system.

Global client and initialization are set up for the OpenAI client and Doku monitoring.

Environment Variables:
- AZURE_OPENAI_API_TOKEN: OpenAI API key for authentication.
- DOKU_URL: Doku URL for monitoring data submission.
- DOKU_TOKEN: Doku authentication api_key.

Note: Ensure the environment variables are properly set before running the tests.
"""

import os
from openai import AzureOpenAI
import dokumetry

# Global client
client = AzureOpenAI(
api_key=os.getenv("AZURE_OPENAI_API_TOKEN"),
api_version = "2024-02-01",
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
)

azure_chat_model = os.getenv("AZURE_OPENAI_CHAT_MODEL")
azure_embedding_model = os.getenv("AZURE_OPENAI_EMBEDDING_MODEL")
azure_image_model = os.getenv("AZURE_OPENAI_IMAGE_MODEL")

# Global initialization
# pylint: disable=line-too-long
dokumetry.init(llm=client, doku_url=os.getenv("DOKU_URL"), api_key=os.getenv("DOKU_TOKEN"), environment="dokumetry-testing", application_name="dokumetry-python-test", skip_resp=False)

def test_completion():
    """
    Exercise the (patched) completions endpoint.

    Raises:
        AssertionError: If the completion response object is not as expected.
    """

    response = client.completions.create(
        model=azure_chat_model,
        prompt="Hello world",
        max_tokens=100,
    )
    assert response.object == 'text_completion'

def test_chat_completion():
    """
    Exercise the (patched) chat completions endpoint.

    Raises:
        AssertionError: If the chat completion response object is not as expected.
    """

    messages = [{"role": "user", "content": "What is Grafana?"}]
    response = client.chat.completions.create(
        model=azure_chat_model,
        max_tokens=100,
        messages=messages,
    )
    assert response.object == 'chat.completion'

def test_embedding_creation():
    """
    Exercise the (patched) embeddings endpoint.

    Raises:
        AssertionError: If the embedding response object is not as expected.
    """

    response = client.embeddings.create(
        model=azure_embedding_model,
        input="The quick brown fox jumped over the lazy dog",
        encoding_format="float",
    )
    first_item = response.data[0]
    assert first_item.object == 'embedding'

def test_image_generation():
    """
    Exercise the (patched) image generation endpoint.

    Raises:
        AssertionError: If the image generation response created timestamp is not present.
    """

    image_generation_resp = client.images.generate(
        # Fix: use the image deployment, not the chat deployment. The
        # AZURE_OPENAI_IMAGE_MODEL env var was read into azure_image_model
        # above but never used; passing the chat model to images.generate
        # fails against a real Azure resource.
        model=azure_image_model,
        prompt='Generate an image for LLM Observability Dashboard',
        n=1,
    )
    assert image_generation_resp.created is not None

0 comments on commit 8ccb598

Please sign in to comment.