From 4620295ebd2917e89310492b1f1fcc3e28952fd4 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 20 Feb 2024 13:59:22 +0530 Subject: [PATCH 01/62] feat: revamp- wrapper around openai sdk --- portkey_ai/__init__.py | 32 +- portkey_ai/api_resources/__init__.py | 28 ++ portkey_ai/api_resources/apis/__init__.py | 17 + portkey_ai/api_resources/apis/assistants.py | 186 +++++++++++ .../api_resources/apis/chat_complete.py | 122 +++---- portkey_ai/api_resources/apis/complete.py | 107 ++++--- portkey_ai/api_resources/apis/embeddings.py | 66 +--- portkey_ai/api_resources/apis/images.py | 62 ++++ portkey_ai/api_resources/apis/threads.py | 298 ++++++++++++++++++ portkey_ai/api_resources/base_client.py | 2 + portkey_ai/api_resources/client.py | 32 ++ portkey_ai/api_resources/global_constants.py | 4 +- portkey_ai/api_resources/utils.py | 1 + 13 files changed, 798 insertions(+), 159 deletions(-) create mode 100644 portkey_ai/api_resources/apis/assistants.py create mode 100644 portkey_ai/api_resources/apis/images.py create mode 100644 portkey_ai/api_resources/apis/threads.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index b7ed91f3..7102130a 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -26,17 +26,33 @@ AsyncPrompts, Portkey, AsyncPortkey, + Images, + AsyncImages, + Assistants, + AsyncAssistants, + Threads, + AsyncThreads, + Messages, + AsyncMessages, + Files, + AsyncFiles, + Runs, + AsyncRuns, + Steps, + AsyncSteps ) + from portkey_ai.version import VERSION from portkey_ai.api_resources.global_constants import ( PORTKEY_BASE_URL, + PORTKEY_DEV_BASE_URL, PORTKEY_API_KEY_ENV, PORTKEY_PROXY_ENV, PORTKEY_GATEWAY_URL, ) api_key = os.environ.get(PORTKEY_API_KEY_ENV) -base_url = os.environ.get(PORTKEY_PROXY_ENV, PORTKEY_BASE_URL) +base_url = os.environ.get(PORTKEY_PROXY_ENV, PORTKEY_DEV_BASE_URL) config: Optional[Union[Mapping, str]] = None mode: Optional[Union[Modes, ModesLiteral]] = None @@ -70,4 +86,18 @@ "AsyncPrompts", "Portkey", "AsyncPortkey", + "Images", + "AsyncImages", + "Assistants", + "AsyncAssistants", + "Threads", + "AsyncThreads", + "Messages", + "AsyncMessages", + "Files", + "AsyncFiles", + "Runs", + "AsyncRuns", + "Steps", + "AsyncSteps" ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 0971a5b7..746f69d8 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -11,6 +11,20 @@ Feedback, AsyncFeedback, createHeaders, + Images, + AsyncImages, + Assistants, + AsyncAssistants, + Threads, + AsyncThreads, + Messages, + AsyncMessages, + Files, + AsyncFiles, + Runs, + AsyncRuns, + Steps, + AsyncSteps ) from .utils import ( Modes, @@ -65,4 +79,18 @@ "createHeaders", "Portkey", "AsyncPortkey", + "Images", + "AsyncImages", + "Assistants", + "AsyncAssistants", + "Threads", + "AsyncThreads", + "Messages", + "AsyncMessages", + "Files", + "AsyncFiles", + "Runs", + "AsyncRuns", + "Steps", + "AsyncSteps" ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 83545951..37ba2c7c 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -5,6 +5,9 @@ from .create_headers import createHeaders from .post import Post, AsyncPost from .embeddings import Embeddings, AsyncEmbeddings +from .images import Images, AsyncImages +from .assistants import Assistants, Files, AsyncAssistants, AsyncFiles +from .threads import Threads, Messages, Files, Runs, Steps, AsyncThreads, AsyncMessages, AsyncFiles, 
AsyncRuns, AsyncSteps
 
 __all__ = [
     "Completion",
@@ -22,4 +25,18 @@
     "AsyncPost",
     "Embeddings",
     "AsyncEmbeddings",
+    "Images",
+    "AsyncImages",
+    "Assistants",
+    "AsyncAssistants",
+    "Files",
+    "AsyncFiles",
+    "Threads",
+    "AsyncThreads",
+    "Messages",
+    "AsyncMessages",
+    "Runs",
+    "AsyncRuns",
+    "Steps",
+    "AsyncSteps",
 ]
diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py
new file mode 100644
index 00000000..25e59ffa
--- /dev/null
+++ b/portkey_ai/api_resources/apis/assistants.py
@@ -0,0 +1,186 @@
+from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
+from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient
+from portkey_ai.api_resources.client import AsyncPortkey, Portkey
+from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse
+
+
+class Assistants(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.files = Files(client)
+
+    def create(
+        self,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.create(**kwargs)
+        return response
+
+    def retrieve(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.retrieve(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+    def update(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.update(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+    def delete(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.delete(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+
+class Files(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def create(
+        self,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.files.create(**kwargs)
+        return response
+
+    def list(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.files.list(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+    def retrieve(
+        self,
+        assistant_id,
+        file_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.files.retrieve(
+            assistant_id=assistant_id, file_id=file_id, **kwargs)
+        return response
+
+    def delete(
+        self,
+        assistant_id,
+        file_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.assistants.files.delete(
+            assistant_id=assistant_id, file_id=file_id, **kwargs)
+        return response
+
+
+class AsyncAssistants(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.files = AsyncFiles(client)
+
+    async def create(
+        self,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.assistants.create(**kwargs)
+        return response
+
+    async def retrieve(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.assistants.retrieve(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+    async def update(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.assistants.update(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+    async def delete(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
response = await self.openai_client.beta.assistants.delete( + assistant_id=assistant_id, **kwargs) + return response + + +class AsyncFiles(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.beta.assistants.files.create(**kwargs) + return response + + async def list( + self, + assistant_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.beta.assistants.files.list( + assistant_id=assistant_id, **kwargs) + return response + + async def retrieve( + self, + assistant_id, + file_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.beta.assistants.files.retrieve( + assistant_id=assistant_id, file_id=file_id, **kwargs) + return response + + async def delete( + self, + assistant_id, + file_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.beta.assistants.files.delete( + assistant_id=assistant_id, file_id=file_id, **kwargs) + return response + diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 4d0bc71b..10f6eb7c 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -3,6 +3,7 @@ import json from typing import Mapping, Optional, Union, overload, Literal, List from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( PortkeyApiPaths, Message, @@ -34,8 +35,9 @@ def __init__(self, client: AsyncAPIClient) -> None: class Completions(APIResource): - def __init__(self, client: APIClient) -> None: + def __init__(self, client: Portkey) -> None: super().__init__(client) + self.openai_client = client.openai_client @overload def create( @@ -84,42 +86,49 @@ def create( def create( self, - *, - messages: Optional[List[Message]] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: - body = dict( - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - top_k=top_k, - top_p=top_p, - stream=stream, - **kwargs, - ) - - return self._post( - PortkeyApiPaths.CHAT_COMPLETE_API, - body=body, - params=None, - cast_to=ChatCompletions, - stream_cls=Stream[ChatCompletionChunk], - stream=stream, - headers={}, - ) + + if 'stream' in kwargs and kwargs['stream'] == True: + final_responses = [] + response = self.openai_client.chat.completions.create(**kwargs) + for chunk in response: + finalResponse = {} + finalResponse['id'] = chunk.id + finalResponse['object'] = chunk.object + finalResponse['created'] = chunk.created + finalResponse['model'] = chunk.model + finalResponse['choices'] = [{'index': chunk.choices[0].index, + 'delta': { + 'role': chunk.choices[0].delta.role, + 'content': chunk.choices[0].delta.content, + 'tool_calls': chunk.choices[0].delta.tool_calls + }, + 'logprobs': chunk.choices[0].logprobs, + 'finish_reason': chunk.choices[0].finish_reason}] + finalResponse['system_fingerprint'] = chunk.system_fingerprint + final_responses.append(finalResponse) + return final_responses + elif 'stream' in kwargs and kwargs['stream'] == False: + response = 
self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + response = response.text + return json.loads(response) + else: + response = self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + response = response.text + return json.loads(response) + def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) class AsyncCompletions(AsyncAPIResource): - def __init__(self, client: AsyncAPIClient) -> None: + def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) + self.openai_client = client.openai_client @overload async def create( @@ -168,34 +177,39 @@ async def create( async def create( self, - *, - messages: Optional[List[Message]] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: - body = dict( - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - top_k=top_k, - top_p=top_p, - stream=stream, - **kwargs, - ) - - return await self._post( - PortkeyApiPaths.CHAT_COMPLETE_API, - body=body, - params=None, - cast_to=ChatCompletions, - stream_cls=AsyncStream[ChatCompletionChunk], - stream=stream, - headers={}, - ) + + if 'stream' in kwargs and kwargs['stream'] == True: + final_responses = [] + response = await self.openai_client.chat.completions.create(**kwargs) + async for chunk in response: + finalResponse = {} + finalResponse['id'] = chunk.id + finalResponse['object'] = chunk.object + finalResponse['created'] = chunk.created + finalResponse['model'] = chunk.model + finalResponse['choices'] = [{'index': chunk.choices[0].index, + 'delta': { + 'role': chunk.choices[0].delta.role, + 'content': chunk.choices[0].delta.content, + 'tool_calls': chunk.choices[0].delta.tool_calls + }, + 'logprobs': chunk.choices[0].logprobs, + 'finish_reason': chunk.choices[0].finish_reason}] + finalResponse['system_fingerprint'] = chunk.system_fingerprint + final_responses.append(finalResponse) + return final_responses + elif 'stream' in kwargs and kwargs['stream'] == False: + response = await self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + response = response.text + return json.loads(response) + else: + response = await self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + response = response.text + return json.loads(response) def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index c87fbbd7..7a97658b 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,5 +1,7 @@ +import json from typing import Optional, Union, overload, Literal from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( PortkeyApiPaths, TextCompletion, @@ -11,8 +13,10 @@ class Completion(APIResource): - def __init__(self, client: APIClient) -> None: + def __init__(self, client: Portkey) -> None: super().__init__(client) + self.openai_client = client.openai_client + self.client = client @overload def create( @@ -58,38 +62,38 @@ def create( def create( self, - *, - prompt: Optional[str] = None, - stream: bool = 
False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: - body = dict( - prompt=prompt, - temperature=temperature, - max_tokens=max_tokens, - top_k=top_k, - top_p=top_p, - stream=stream, - **kwargs, - ) - return self._post( - PortkeyApiPaths.TEXT_COMPLETE_API, - body=body, - params=None, - cast_to=TextCompletion, - stream_cls=Stream[TextCompletionChunk], - stream=stream, - headers={}, - ) + + if 'stream' in kwargs and kwargs['stream'] == True: + final_responses = [] + response = self.openai_client.completions.create(**kwargs) + for chunk in response: + finalResponse = {} + finalResponse['id'] = chunk.id + finalResponse['object'] = chunk.object + finalResponse['created'] = chunk.created + finalResponse['model'] = chunk.model + finalResponse['choices'] = [{'index': chunk.choices[0].index, + 'text': chunk.choices[0].text, + 'logprobs': chunk.choices[0].logprobs, + 'finish_reason': chunk.choices[0].finish_reason}] + final_responses.append(finalResponse) + return final_responses + elif 'stream' in kwargs and kwargs['stream'] == False: + response = self.openai_client.with_raw_response.completions.create(**kwargs) + response = response.text + return json.loads(response) + else: + response = self.openai_client.with_raw_response.completions.create(**kwargs) + response = response.text + return json.loads(response) class AsyncCompletion(AsyncAPIResource): - def __init__(self, client: AsyncAPIClient) -> None: + def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) + self.openai_client = client.openai_client @overload async def create( @@ -135,30 +139,29 @@ async def create( async def create( self, - *, - prompt: Optional[str] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: - body = dict( - prompt=prompt, - temperature=temperature, - max_tokens=max_tokens, - top_k=top_k, - top_p=top_p, - stream=stream, - **kwargs, - ) - return await self._post( - PortkeyApiPaths.TEXT_COMPLETE_API, - body=body, - params=None, - cast_to=TextCompletion, - stream_cls=AsyncStream[TextCompletionChunk], - stream=stream, - headers={}, - ) + + if 'stream' in kwargs and kwargs['stream'] == True: + final_responses = [] + response = await self.openai_client.completions.create(**kwargs) + async for chunk in response: + finalResponse = {} + finalResponse['id'] = chunk.id + finalResponse['object'] = chunk.object + finalResponse['created'] = chunk.created + finalResponse['model'] = chunk.model + finalResponse['choices'] = [{'index': chunk.choices[0].index, + 'text': chunk.choices[0].text, + 'logprobs': chunk.choices[0].logprobs, + 'finish_reason': chunk.choices[0].finish_reason}] + final_responses.append(finalResponse) + return final_responses + elif 'stream' in kwargs and kwargs['stream'] == False: + response = await self.openai_client.with_raw_response.completions.create(**kwargs) + response = response.text + return json.loads(response) + else: + response = await self.openai_client.with_raw_response.completions.create(**kwargs) + response = response.text + return json.loads(response) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index dd35ed22..5eae6aa4 100644 --- a/portkey_ai/api_resources/apis/embeddings.py 
+++ b/portkey_ai/api_resources/apis/embeddings.py @@ -1,72 +1,36 @@ -from typing import Optional +import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse class Embeddings(APIResource): - def __init__(self, client: APIClient) -> None: + + def __init__(self, client: Portkey) -> None: super().__init__(client) + self.openai_client = client.openai_client def create( self, - *, - input: str, - model: Optional[str] = None, - dimensions: Optional[int] = None, - encoding_format: Optional[str] = None, - user: Optional[str] = None, **kwargs ) -> GenericResponse: - body = dict( - input=input, - model=model, - dimensions=dimensions, - encoding_format=encoding_format, - user=user, - **kwargs, - ) - - return self._post( - PortkeyApiPaths.EMBEDDING_API, - body=body, - params=None, - cast_to=GenericResponse, - stream_cls=None, - stream=False, - headers={}, - ) + + response = self.openai_client.with_raw_response.embeddings.create(**kwargs) + response = response.text + return json.loads(response) class AsyncEmbeddings(AsyncAPIResource): - def __init__(self, client: AsyncAPIClient) -> None: + def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) + self.openai_client = client.openai_client async def create( self, - *, - input: str, - model: Optional[str] = None, - dimensions: Optional[int] = None, - encoding_format: Optional[str] = None, - user: Optional[str] = None, **kwargs ) -> GenericResponse: - body = dict( - input=input, - model=model, - user=user, - dimensions=dimensions, - encoding_format=encoding_format, - **kwargs, - ) - - return await self._post( - PortkeyApiPaths.EMBEDDING_API, - body=body, - params=None, - cast_to=GenericResponse, - stream_cls=None, - stream=False, - headers={}, - ) + + response = await self.openai_client.with_raw_response.embeddings.create(**kwargs) + response = response.text + return json.loads(response) diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py new file mode 100644 index 00000000..0fae363a --- /dev/null +++ b/portkey_ai/api_resources/apis/images.py @@ -0,0 +1,62 @@ +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse + +class Images(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def generate( + self, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.images.generate(**kwargs) + return response + + def edit( + self, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.images.edit(**kwargs) + return response + + def create_variation( + self, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.images.create_variation(**kwargs) + return response + +class AsyncImages(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def generate( + self, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.images.generate(**kwargs) + return response + + async def 
edit(
+        self,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.images.edit(**kwargs)
+        return response
+
+    async def create_variation(
+        self,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.images.create_variation(**kwargs)
+        return response
\ No newline at end of file
diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py
new file mode 100644
index 00000000..f4c4e5cd
--- /dev/null
+++ b/portkey_ai/api_resources/apis/threads.py
@@ -0,0 +1,298 @@
+from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
+from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient
+from portkey_ai.api_resources.client import AsyncPortkey, Portkey
+from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse
+
+
+class Threads(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.messages = Messages(client)
+        self.runs = Runs(client)
+
+    def create(
+        self,
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.threads.create()
+        return response
+
+    def retrieve(
+        self,
+        thread_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.threads.retrieve(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    def update(
+        self,
+        thread_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.threads.update(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    def delete(
+        self,
+        thread_id,
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.threads.delete(thread_id=thread_id)
+        return response
+
+    def create_and_run(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = self.openai_client.beta.threads.create_and_run(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+
+class Messages(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.files = Files(client)
+
+    def create(self, thread_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.create(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    def list(self, thread_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.list(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    def retrieve(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.retrieve(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+    def update(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.update(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+
+class Files(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def list(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.files.list(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+    def retrieve(self, thread_id, message_id, file_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.messages.files.retrieve(
+            thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs)
+        return response
+
+
+class Runs(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.steps = Steps(client)
+
+    def create(self, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.create(**kwargs)
+        return response
+
+    def retrieve(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.retrieve(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    def list(self, thread_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.list(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    def update(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.update(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.submit_tool_outputs(
+            thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs)
+        return response
+
+    def cancel(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.cancel(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+
+class Steps(APIResource):
+    def __init__(self, client: Portkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    def list(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.steps.list(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    def retrieve(self, thread_id, run_id, step_id, **kwargs) -> GenericResponse:
+        response = self.openai_client.beta.threads.runs.steps.retrieve(
+            thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs)
+        return response
+
+
+class AsyncThreads(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.messages = AsyncMessages(client)
+        self.runs = AsyncRuns(client)
+
+    async def create(
+        self,
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.threads.create()
+        return response
+
+    async def retrieve(
+        self,
+        thread_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.threads.retrieve(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    async def update(
+        self,
+        thread_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.threads.update(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    async def delete(
+        self,
+        thread_id,
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.threads.delete(thread_id=thread_id)
+        return response
+
+    async def create_and_run(
+        self,
+        assistant_id,
+        **kwargs
+    ) -> GenericResponse:
+
+        response = await self.openai_client.beta.threads.create_and_run(
+            assistant_id=assistant_id, **kwargs)
+        return response
+
+
+class AsyncMessages(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.files = AsyncFiles(client)
+
+    async def create(self, thread_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.create(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    async def list(self, thread_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.list(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    async def retrieve(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.retrieve(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+    async def update(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.update(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+
+class AsyncFiles(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def list(self, thread_id, message_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.files.list(
+            thread_id=thread_id, message_id=message_id, **kwargs)
+        return response
+
+    async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.messages.files.retrieve(
+            thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs)
+        return response
+
+
+class AsyncRuns(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+        self.steps = AsyncSteps(client)
+
+    async def create(self, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.create(**kwargs)
+        return response
+
+    async def retrieve(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.retrieve(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    async def list(self, thread_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.list(
+            thread_id=thread_id, **kwargs)
+        return response
+
+    async def update(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.update(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    async def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.submit_tool_outputs(
+            thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs)
+        return response
+
+    async def cancel(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.cancel(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+
+class AsyncSteps(AsyncAPIResource):
+    def __init__(self, client: AsyncPortkey) -> None:
+        super().__init__(client)
+        self.openai_client = client.openai_client
+
+    async def list(self, thread_id, run_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.steps.list(
+            thread_id=thread_id, run_id=run_id, **kwargs)
+        return response
+
+    async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> GenericResponse:
+        response = await self.openai_client.beta.threads.runs.steps.retrieve(
+            thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs)
+        return response
diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py
index 60628ffe..b174e633 100644
--- a/portkey_ai/api_resources/base_client.py
+++ b/portkey_ai/api_resources/base_client.py
@@ -66,6 +66,7 @@ def __init__(
         self.kwargs = kwargs
 
         self.custom_headers = createHeaders(
+            api_key=api_key,
             virtual_key=virtual_key,
             config=config,
             provider=provider,
@@ -413,6 +414,7 @@ def __init__(
         self.kwargs = kwargs
 
         self.custom_headers
= createHeaders( + api_key=api_key, virtual_key=virtual_key, config=config, provider=provider, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d05be2b8..6fe82019 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -4,6 +4,8 @@ from portkey_ai.api_resources import apis from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from openai import AsyncOpenAI, OpenAI +from portkey_ai.api_resources.global_constants import OPEN_AI_API_KEY, PORTKEY_DEV_BASE_URL class Portkey(APIClient): completions: apis.Completion @@ -11,6 +13,13 @@ class Portkey(APIClient): generations: apis.Generations prompts: apis.Prompts embeddings: apis.Embeddings + images: apis.Images + + class beta: + def __init__(self, client:Portkey) -> None: + self.assistants = apis.Assistants(client) + self.threads = apis.Threads(client) + def __init__( self, @@ -35,12 +44,21 @@ def __init__( **kwargs, ) + self.openai_client = OpenAI( + api_key=OPEN_AI_API_KEY, + base_url=PORTKEY_DEV_BASE_URL, + default_headers= self.custom_headers + ) + + self.completions = apis.Completion(self) self.chat = apis.ChatCompletion(self) self.generations = apis.Generations(self) self.prompts = apis.Prompts(self) self.embeddings = apis.Embeddings(self) self.feedback = apis.Feedback(self) + self.images = apis.Images(self) + self.beta = self.beta(self) def copy( self, @@ -78,6 +96,12 @@ class AsyncPortkey(AsyncAPIClient): generations: apis.AsyncGenerations prompts: apis.AsyncPrompts embeddings: apis.AsyncEmbeddings + images: apis.AsyncImages + + class beta: + def __init__(self, client:AsyncPortkey) -> None: + self.assistants = apis.AsyncAssistants(client) + self.threads = apis.AsyncThreads(client) def __init__( self, @@ -102,12 +126,20 @@ def __init__( **kwargs, ) + self.openai_client = AsyncOpenAI( + api_key=OPEN_AI_API_KEY, + base_url=PORTKEY_DEV_BASE_URL, + default_headers= self.custom_headers + ) + self.completions = apis.AsyncCompletion(self) self.chat = apis.AsyncChatCompletion(self) self.generations = apis.AsyncGenerations(self) self.prompts = apis.AsyncPrompts(self) self.embeddings = apis.AsyncEmbeddings(self) self.feedback = apis.AsyncFeedback(self) + self.images = apis.AsyncImages(self) + self.beta = self.beta(self) def copy( self, diff --git a/portkey_ai/api_resources/global_constants.py b/portkey_ai/api_resources/global_constants.py index 68501dba..4d2d9e77 100644 --- a/portkey_ai/api_resources/global_constants.py +++ b/portkey_ai/api_resources/global_constants.py @@ -30,6 +30,8 @@ DEFAULT_TIMEOUT = 60 PORTKEY_HEADER_PREFIX = "x-portkey-" PORTKEY_BASE_URL = "https://api.portkey.ai/v1" -PORTKEY_GATEWAY_URL = PORTKEY_BASE_URL +PORTKEY_DEV_BASE_URL = "https://api.portkeydev.com/v1" +PORTKEY_GATEWAY_URL = PORTKEY_DEV_BASE_URL PORTKEY_API_KEY_ENV = "PORTKEY_API_KEY" PORTKEY_PROXY_ENV = "PORTKEY_PROXY" +OPEN_AI_API_KEY = "DUMMY-KEY" diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index c28a9f31..ce3209be 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -22,6 +22,7 @@ MISSING_BASE_URL, MISSING_MODE_MESSAGE, PORTKEY_BASE_URL, + PORTKEY_DEV_BASE_URL, PORTKEY_API_KEY_ENV, PORTKEY_HEADER_PREFIX, PORTKEY_PROXY_ENV, From eb9ff467d146aa9720436476d053300a79d251f6 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 22 Feb 2024 17:50:17 +0530 Subject: [PATCH 02/62] feat: raw response and with streaming for completion and chat completion --- .../api_resources/apis/chat_complete.py | 395 
+++++++++++------- portkey_ai/api_resources/apis/complete.py | 322 ++++++++------ portkey_ai/api_resources/utils.py | 6 +- 3 files changed, 445 insertions(+), 278 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 10f6eb7c..c441dca7 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -1,7 +1,8 @@ from __future__ import annotations +import asyncio import json -from typing import Mapping, Optional, Union, overload, Literal, List +from typing import Any, AsyncIterator, Generator, Iterable, Iterator, Mapping, Optional, Type, Union, cast, overload, Literal, List from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( @@ -38,91 +39,152 @@ class Completions(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - - @overload - def create( - self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: Literal[True], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Stream[ChatCompletionChunk]: - ... - - @overload + + def stream_create(self,**kwargs) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: + with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + for line in response.iter_lines(): + json_string = line.replace('data: ', '') + json_string = json_string.strip().rstrip('\n') + if json_string == '': + continue + elif json_string == '[DONE]': + break + elif json_string!= '': + json_data = json.loads(json_string) + json_data = ChatCompletionChunk(**json_data) + yield json_data + else: + return "" + + def normal_create(self, **kwargs) -> ChatCompletions: + response = self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + json_response = json.loads(response.text) + return ChatCompletions(**json_response) + def create( self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: Literal[False] = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> ChatCompletions: - ... - - @overload - def create( - self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: - ... 
- - def create( - self, - **kwargs, - ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: - + if 'stream' in kwargs and kwargs['stream'] == True: - final_responses = [] - response = self.openai_client.chat.completions.create(**kwargs) - for chunk in response: - finalResponse = {} - finalResponse['id'] = chunk.id - finalResponse['object'] = chunk.object - finalResponse['created'] = chunk.created - finalResponse['model'] = chunk.model - finalResponse['choices'] = [{'index': chunk.choices[0].index, - 'delta': { - 'role': chunk.choices[0].delta.role, - 'content': chunk.choices[0].delta.content, - 'tool_calls': chunk.choices[0].delta.tool_calls - }, - 'logprobs': chunk.choices[0].logprobs, - 'finish_reason': chunk.choices[0].finish_reason}] - finalResponse['system_fingerprint'] = chunk.system_fingerprint - final_responses.append(finalResponse) - return final_responses + return (self.stream_create(**kwargs)) elif 'stream' in kwargs and kwargs['stream'] == False: - response = self.openai_client.with_raw_response.chat.completions.create( - **kwargs) - response = response.text - return json.loads(response) + return (self.normal_create(**kwargs)) else: - response = self.openai_client.with_raw_response.chat.completions.create( - **kwargs) - response = response.text - return json.loads(response) + return (self.normal_create(**kwargs)) + - def _get_config_string(self, config: Union[Mapping, str]) -> str: - return config if isinstance(config, str) else json.dumps(config) + # def create( + # self, + # **kwargs + # ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: + # print("Res kw:", kwargs) + # if 'stream' in kwargs and kwargs['stream'] == True: + # with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + # for line in response.iter_lines(): + # json_string = line.replace('data: ', '') + # json_string = json_string.strip().rstrip('\n') + # if json_string == '': + # continue + # elif json_string == '[DONE]': + # break + # else: + # json_data = json.loads(json_string) + # json_data = ChatCompletionChunk(**json_data) + # yield json_data + # elif 'stream' in kwargs and kwargs['stream'] == False: + # response = self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # print("Res Stream:", response) + # response = response.text + # return json.loads(response) + # else: + # response = self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # print("Res:", response) + # response = response.text + # response = json.loads(response) + # response = ChatCompletions(**response) + # return response + + # @overload + # def create( + # self, + # *, + # messages: Optional[List[Message]] = None, + # config: Optional[Union[Mapping, str]] = None, + # stream: Literal[True], + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Stream[ChatCompletionChunk]: + # ... + + # @overload + # def create( + # self, + # *, + # messages: Optional[List[Message]] = None, + # config: Optional[Union[Mapping, str]] = None, + # stream: bool = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: + # ... 
+ + # def create( + # self, + # **kwargs + # ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: + + # print("Res kw:", kwargs) + # if 'stream' in kwargs and kwargs['stream'] == True: + # print("Res kwwww:", kwargs) + # with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + # for line in response.iter_lines(): + # json_string = line.replace('data: ', '') + # json_string = json_string.strip().rstrip('\n') + # if json_string == '': + # continue + # elif json_string == '[DONE]': + # break + # elif json_string!= '': + # json_data = json.loads(json_string) + # json_data = ChatCompletionChunk(**json_data) + # yield json_data + # else: + # return "" + + # if 'stream' in kwargs and kwargs['stream'] == False: + # response = self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # print("REs Stream:", response.text) + # json_response = json.loads(response.text) + # return ChatCompletions(**json_response) + # response = response.text + # return json.loads(response) + # elif 'stream' not in kwargs: + # response = self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # print("REssss:", response) + # json_response = json.loads(response.text) + # print("TYPE:", type(ChatCompletions(**json_response))) + # # response = response.text + # # return json.loads(response) + # return ChatCompletions(**json_response) + # else: + # return "Streaming not requested" + + + # def _get_config_string(self, config: Union[Mapping, str]) -> str: + # return config if isinstance(config, str) else json.dumps(config) class AsyncCompletions(AsyncAPIResource): @@ -130,86 +192,121 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - @overload - async def create( - self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: Literal[True], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> AsyncStream[ChatCompletionChunk]: - ... - - @overload + async def stream_create(self,**kwargs) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: + async with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + async for line in response.iter_lines(): + json_string = line.replace('data: ', '') + json_string = json_string.strip().rstrip('\n') + if json_string == '': + continue + elif json_string == '[DONE]': + break + elif json_string!= '': + json_data = json.loads(json_string) + json_data = ChatCompletionChunk(**json_data) + yield json_data + else: + pass + + async def normal_create(self, **kwargs) -> ChatCompletions: + response = await self.openai_client.with_raw_response.chat.completions.create( + **kwargs) + json_response = json.loads(response.text) + return ChatCompletions(**json_response) + async def create( self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: Literal[False] = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> ChatCompletions: - ... 
- - @overload - async def create( - self, - *, - messages: Optional[List[Message]] = None, - config: Optional[Union[Mapping, str]] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: - ... - - async def create( - self, - **kwargs, - ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: if 'stream' in kwargs and kwargs['stream'] == True: - final_responses = [] - response = await self.openai_client.chat.completions.create(**kwargs) - async for chunk in response: - finalResponse = {} - finalResponse['id'] = chunk.id - finalResponse['object'] = chunk.object - finalResponse['created'] = chunk.created - finalResponse['model'] = chunk.model - finalResponse['choices'] = [{'index': chunk.choices[0].index, - 'delta': { - 'role': chunk.choices[0].delta.role, - 'content': chunk.choices[0].delta.content, - 'tool_calls': chunk.choices[0].delta.tool_calls - }, - 'logprobs': chunk.choices[0].logprobs, - 'finish_reason': chunk.choices[0].finish_reason}] - finalResponse['system_fingerprint'] = chunk.system_fingerprint - final_responses.append(finalResponse) - return final_responses + return (self.stream_create(**kwargs)) elif 'stream' in kwargs and kwargs['stream'] == False: - response = await self.openai_client.with_raw_response.chat.completions.create( - **kwargs) - response = response.text - return json.loads(response) + return await (self.normal_create(**kwargs)) else: - response = await self.openai_client.with_raw_response.chat.completions.create( - **kwargs) - response = response.text - return json.loads(response) + return await (self.normal_create(**kwargs)) + + + # @overload + # async def create( + # self, + # *, + # messages: Optional[List[Message]] = None, + # config: Optional[Union[Mapping, str]] = None, + # stream: Literal[True], + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> AsyncStream[ChatCompletionChunk]: + # ... + + # @overload + # async def create( + # self, + # *, + # messages: Optional[List[Message]] = None, + # config: Optional[Union[Mapping, str]] = None, + # stream: Literal[False] = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> ChatCompletions: + # ... + + # @overload + # async def create( + # self, + # *, + # messages: Optional[List[Message]] = None, + # config: Optional[Union[Mapping, str]] = None, + # stream: bool = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: + # ... 
+ + # async def create( + # self, + # **kwargs, + # ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: + + # if 'stream' in kwargs and kwargs['stream'] == True: + # final_responses = [] + # response = await self.openai_client.chat.completions.create(**kwargs) + # async for chunk in response: + # finalResponse = {} + # finalResponse['id'] = chunk.id + # finalResponse['object'] = chunk.object + # finalResponse['created'] = chunk.created + # finalResponse['model'] = chunk.model + # finalResponse['choices'] = [{'index': chunk.choices[0].index, + # 'delta': { + # 'role': chunk.choices[0].delta.role, + # 'content': chunk.choices[0].delta.content, + # 'tool_calls': chunk.choices[0].delta.tool_calls + # }, + # 'logprobs': chunk.choices[0].logprobs, + # 'finish_reason': chunk.choices[0].finish_reason}] + # finalResponse['system_fingerprint'] = chunk.system_fingerprint + # final_responses.append(finalResponse) + # return final_responses + # elif 'stream' in kwargs and kwargs['stream'] == False: + # response = await self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # response = response.text + # return json.loads(response) + # else: + # response = await self.openai_client.with_raw_response.chat.completions.create( + # **kwargs) + # response = response.text + # return json.loads(response) def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 7a97658b..b907585c 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,5 +1,5 @@ import json -from typing import Optional, Union, overload, Literal +from typing import AsyncIterator, Iterator, Optional, Union, overload, Literal from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( @@ -18,150 +18,218 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.client = client - @overload + def stream_create(self,**kwargs) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: + with self.openai_client.with_streaming_response.completions.create(**kwargs) as response: + for line in response.iter_lines(): + json_string = line.replace('data: ', '') + json_string = json_string.strip().rstrip('\n') + if json_string == '': + continue + elif json_string == '[DONE]': + break + elif json_string!= '': + json_data = json.loads(json_string) + json_data = TextCompletionChunk(**json_data) + yield json_data + else: + return "" + + def normal_create(self, **kwargs) -> TextCompletion: + response = self.openai_client.with_raw_response.completions.create( + **kwargs) + json_response = json.loads(response.text) + return TextCompletion(**json_response) + def create( self, - *, - prompt: Optional[str] = None, - stream: Literal[True], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Stream[TextCompletionChunk]: - ... - - @overload - def create( - self, - *, - prompt: Optional[str] = None, - stream: Literal[False] = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> TextCompletion: - ... 
- - @overload - def create( - self, - *, - prompt: Optional[str] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: - ... - - def create( - self, - **kwargs, - ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: if 'stream' in kwargs and kwargs['stream'] == True: - final_responses = [] - response = self.openai_client.completions.create(**kwargs) - for chunk in response: - finalResponse = {} - finalResponse['id'] = chunk.id - finalResponse['object'] = chunk.object - finalResponse['created'] = chunk.created - finalResponse['model'] = chunk.model - finalResponse['choices'] = [{'index': chunk.choices[0].index, - 'text': chunk.choices[0].text, - 'logprobs': chunk.choices[0].logprobs, - 'finish_reason': chunk.choices[0].finish_reason}] - final_responses.append(finalResponse) - return final_responses + return (self.stream_create(**kwargs)) elif 'stream' in kwargs and kwargs['stream'] == False: - response = self.openai_client.with_raw_response.completions.create(**kwargs) - response = response.text - return json.loads(response) + return (self.normal_create(**kwargs)) else: - response = self.openai_client.with_raw_response.completions.create(**kwargs) - response = response.text - return json.loads(response) + return (self.normal_create(**kwargs)) + + # @overload + # def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: Literal[True], + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Stream[TextCompletionChunk]: + # ... + + # @overload + # def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: Literal[False] = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> TextCompletion: + # ... + + # @overload + # def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: bool = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: + # ... 
+ + # def create( + # self, + # **kwargs, + # ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: + + # if 'stream' in kwargs and kwargs['stream'] == True: + # final_responses = [] + # response = self.openai_client.completions.create(**kwargs) + # for chunk in response: + # finalResponse = {} + # finalResponse['id'] = chunk.id + # finalResponse['object'] = chunk.object + # finalResponse['created'] = chunk.created + # finalResponse['model'] = chunk.model + # finalResponse['choices'] = [{'index': chunk.choices[0].index, + # 'text': chunk.choices[0].text, + # 'logprobs': chunk.choices[0].logprobs, + # 'finish_reason': chunk.choices[0].finish_reason}] + # final_responses.append(finalResponse) + # return final_responses + # elif 'stream' in kwargs and kwargs['stream'] == False: + # response = self.openai_client.with_raw_response.completions.create(**kwargs) + # response = response + # return json.loads(response) + # else: + # response = self.openai_client.with_raw_response.completions.create(**kwargs) + # response = response.text + # return json.loads(response) class AsyncCompletion(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - - @overload + + async def stream_create(self,**kwargs) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: + async with self.openai_client.with_streaming_response.completions.create(**kwargs) as response: + async for line in response.iter_lines(): + json_string = line.replace('data: ', '') + json_string = json_string.strip().rstrip('\n') + if json_string == '': + continue + elif json_string == '[DONE]': + break + elif json_string!= '': + json_data = json.loads(json_string) + json_data = TextCompletionChunk(**json_data) + yield json_data + else: + pass + + async def normal_create(self, **kwargs) -> TextCompletion: + response = await self.openai_client.with_raw_response.completions.create( + **kwargs) + json_response = json.loads(response.text) + return TextCompletion(**json_response) + async def create( self, - *, - prompt: Optional[str] = None, - stream: Literal[True], - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> AsyncStream[TextCompletionChunk]: - ... - - @overload - async def create( - self, - *, - prompt: Optional[str] = None, - stream: Literal[False] = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, **kwargs, ) -> TextCompletion: - ... - - @overload - async def create( - self, - *, - prompt: Optional[str] = None, - stream: bool = False, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_k: Optional[int] = None, - top_p: Optional[float] = None, - **kwargs, - ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: - ... 
- - async def create( - self, - **kwargs, - ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: - + if 'stream' in kwargs and kwargs['stream'] == True: - final_responses = [] - response = await self.openai_client.completions.create(**kwargs) - async for chunk in response: - finalResponse = {} - finalResponse['id'] = chunk.id - finalResponse['object'] = chunk.object - finalResponse['created'] = chunk.created - finalResponse['model'] = chunk.model - finalResponse['choices'] = [{'index': chunk.choices[0].index, - 'text': chunk.choices[0].text, - 'logprobs': chunk.choices[0].logprobs, - 'finish_reason': chunk.choices[0].finish_reason}] - final_responses.append(finalResponse) - return final_responses + return (self.stream_create(**kwargs)) elif 'stream' in kwargs and kwargs['stream'] == False: - response = await self.openai_client.with_raw_response.completions.create(**kwargs) - response = response.text - return json.loads(response) + return await (self.normal_create(**kwargs)) else: - response = await self.openai_client.with_raw_response.completions.create(**kwargs) - response = response.text - return json.loads(response) + return await (self.normal_create(**kwargs)) + + # @overload + # async def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: Literal[True], + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> AsyncStream[TextCompletionChunk]: + # ... + + # @overload + # async def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: Literal[False] = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> TextCompletion: + # ... + + # @overload + # async def create( + # self, + # *, + # prompt: Optional[str] = None, + # stream: bool = False, + # temperature: Optional[float] = None, + # max_tokens: Optional[int] = None, + # top_k: Optional[int] = None, + # top_p: Optional[float] = None, + # **kwargs, + # ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: + # ... 
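The async path mirrors this; a sketch under the same placeholder assumptions — note that awaiting create() with stream=True resolves to the stream_create() async generator, not a finished completion:

    import asyncio
    from portkey_ai import AsyncPortkey

    async def main() -> None:
        portkey = AsyncPortkey(api_key="PORTKEY_API_KEY")  # placeholder key

        # stream=False: the await runs normal_create() to completion.
        completion = await portkey.completions.create(
            model="gpt-3.5-turbo-instruct", prompt="Say hello", max_tokens=16
        )
        print(completion.choices[0].text)

        # stream=True: the await hands back an async generator of chunks.
        stream = await portkey.completions.create(
            model="gpt-3.5-turbo-instruct", prompt="Say hello", max_tokens=16, stream=True
        )
        async for chunk in stream:
            print(chunk.choices[0].text, end="")

    asyncio.run(main())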
+ + # async def create( + # self, + # **kwargs, + # ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: + + # if 'stream' in kwargs and kwargs['stream'] == True: + # final_responses = [] + # response = await self.openai_client.completions.create(**kwargs) + # async for chunk in response: + # finalResponse = {} + # finalResponse['id'] = chunk.id + # finalResponse['object'] = chunk.object + # finalResponse['created'] = chunk.created + # finalResponse['model'] = chunk.model + # finalResponse['choices'] = [{'index': chunk.choices[0].index, + # 'text': chunk.choices[0].text, + # 'logprobs': chunk.choices[0].logprobs, + # 'finish_reason': chunk.choices[0].finish_reason}] + # final_responses.append(finalResponse) + # return final_responses + # elif 'stream' in kwargs and kwargs['stream'] == False: + # response = await self.openai_client.with_raw_response.completions.create(**kwargs) + # response = response.text + # return json.loads(response) + # else: + # response = await self.openai_client.with_raw_response.completions.create(**kwargs) + # response = response.text + # return json.loads(response) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index ce3209be..87b70924 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -396,7 +396,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: class TextChoice(BaseModel, extra="allow"): index: Optional[int] = None text: Optional[str] = None - logprobs: Any + logprobs: Optional[Any] = None finish_reason: Optional[str] = None def __str__(self): @@ -433,12 +433,14 @@ def get_headers(self) -> Optional[Dict[str, str]]: # Models for text completion stream + class TextCompletionChunk(BaseModel, extra="allow"): id: Optional[str] = None object: Optional[str] = None created: Optional[int] = None model: Optional[str] = None - choices: Union[List[TextChoice], Dict[Any, Any]] = {} + provider: Optional[str] = None + choices: List[TextChoice] def __str__(self): return json.dumps(self.dict(), indent=4) From c2aaa175ccc5cd2b043ddbd4ce2dae0f38cd2833 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 23 Feb 2024 12:04:03 +0530 Subject: [PATCH 03/62] feat: added files route support --- portkey_ai/__init__.py | 4 + portkey_ai/api_resources/__init__.py | 4 + portkey_ai/api_resources/apis/__init__.py | 3 + portkey_ai/api_resources/apis/mainFiles.py | 112 +++++++++++++++++++++ portkey_ai/api_resources/client.py | 4 + 5 files changed, 127 insertions(+) create mode 100644 portkey_ai/api_resources/apis/mainFiles.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 7102130a..11a411d0 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -34,6 +34,8 @@ AsyncThreads, Messages, AsyncMessages, + MainFiles, + AsyncMainFiles, Files, AsyncFiles, Runs, @@ -94,6 +96,8 @@ "AsyncThreads", "Messages", "AsyncMessages", + "MainFiles", + "AsyncMainFiles", "Files", "AsyncFiles", "Runs", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 746f69d8..d6ce1265 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -19,6 +19,8 @@ AsyncThreads, Messages, AsyncMessages, + MainFiles, + AsyncMainFiles, Files, AsyncFiles, Runs, @@ -87,6 +89,8 @@ "AsyncThreads", "Messages", "AsyncMessages", + "MainFiles", + "AsyncMainFiles", "Files", "AsyncFiles", "Runs", diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 37ba2c7c..4c211bfb 100644 --- 
a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -8,6 +8,7 @@ from .images import Images, AsyncImages from .assistants import Assistants, Files, AsyncAssistants, AsyncFiles from .threads import Threads, Messages, Files, Runs, Steps, AsyncThreads, AsyncMessages, AsyncFiles, AsyncRuns, AsyncSteps +from .mainFiles import MainFiles, AsyncMainFiles __all__ = [ "Completion", @@ -29,6 +30,8 @@ "AsyncImages", "Assistants", "AsyncAssistants", + "MainFiles", + "AsyncMainFiles", "Files", "AsyncFiles", "Threads", diff --git a/portkey_ai/api_resources/apis/mainFiles.py b/portkey_ai/api_resources/apis/mainFiles.py new file mode 100644 index 00000000..b45d36d1 --- /dev/null +++ b/portkey_ai/api_resources/apis/mainFiles.py @@ -0,0 +1,112 @@ +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse + + +class MainFiles(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + file, + purpose, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.files.create(file=file, purpose=purpose, **kwargs) + return response + + def list( + self, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.files.list(**kwargs) + return response + + def retrieve( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.files.retrieve( + file_id=file_id, **kwargs) + return response + + def delete( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.files.delete( + file_id=file_id, **kwargs) + return response + + def retrieveContent( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = self.openai_client.files.retrieveContent( + file_id=file_id, **kwargs) + return response + + +class AsyncMainFiles(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + file, + purpose, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.files.create(file=file, purpose=purpose, **kwargs) + return response + + async def list( + self, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.files.list(**kwargs) + return response + + async def retrieve( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.files.retrieve( + file_id=file_id, **kwargs) + return response + + async def delete( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.files.delete( + file_id=file_id, **kwargs) + return response + + async def retrieveContent( + self, + file_id, + **kwargs + ) -> GenericResponse: + + response = await self.openai_client.files.retrieveContent( + file_id=file_id, **kwargs) + return response \ No newline at end of file diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 6fe82019..98322abd 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -14,6 +14,7 @@ class Portkey(APIClient): prompts: apis.Prompts embeddings: apis.Embeddings images: apis.Images + files: apis.MainFiles class beta: def __init__(self, client:Portkey) -> None: @@ -58,6 
+59,7 @@ def __init__( self.embeddings = apis.Embeddings(self) self.feedback = apis.Feedback(self) self.images = apis.Images(self) + self.files = apis.MainFiles(self) self.beta = self.beta(self) def copy( @@ -97,6 +99,7 @@ class AsyncPortkey(AsyncAPIClient): prompts: apis.AsyncPrompts embeddings: apis.AsyncEmbeddings images: apis.AsyncImages + files: apis.AsyncMainFiles class beta: def __init__(self, client:AsyncPortkey) -> None: @@ -139,6 +142,7 @@ def __init__( self.embeddings = apis.AsyncEmbeddings(self) self.feedback = apis.AsyncFeedback(self) self.images = apis.AsyncImages(self) + self.files = apis.AsyncMainFiles(self) self.beta = self.beta(self) def copy( From fcc42ad7a937a56db23b2fe80501ac67f76ede20 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Fri, 23 Feb 2024 19:46:45 +0530 Subject: [PATCH 04/62] fix: linting issues fixed --- portkey_ai/__init__.py | 17 +- portkey_ai/api_resources/__init__.py | 16 +- portkey_ai/api_resources/apis/__init__.py | 23 +- portkey_ai/api_resources/apis/assistants.py | 161 ++++-------- .../api_resources/apis/chat_complete.py | 124 +++++----- portkey_ai/api_resources/apis/complete.py | 117 ++++----- portkey_ai/api_resources/apis/embeddings.py | 28 +-- portkey_ai/api_resources/apis/images.py | 51 ++-- portkey_ai/api_resources/apis/mainFiles.py | 97 ++------ portkey_ai/api_resources/apis/threads.py | 229 +++++++++--------- portkey_ai/api_resources/client.py | 20 +- portkey_ai/api_resources/utils.py | 24 +- 12 files changed, 406 insertions(+), 501 deletions(-) diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 11a411d0..f2d92ebd 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -36,17 +36,18 @@ AsyncMessages, MainFiles, AsyncMainFiles, - Files, - AsyncFiles, + ThreadFiles, + AsyncThreadFiles, + AssistantFiles, + AsyncAssistantFiles, Runs, AsyncRuns, Steps, - AsyncSteps + AsyncSteps, ) from portkey_ai.version import VERSION from portkey_ai.api_resources.global_constants import ( - PORTKEY_BASE_URL, PORTKEY_DEV_BASE_URL, PORTKEY_API_KEY_ENV, PORTKEY_PROXY_ENV, @@ -98,10 +99,12 @@ "AsyncMessages", "MainFiles", "AsyncMainFiles", - "Files", - "AsyncFiles", + "ThreadFiles", + "AsyncThreadFiles", + "AssistantFiles", + "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", - "AsyncSteps" + "AsyncSteps", ] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index d6ce1265..05260264 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -21,12 +21,14 @@ AsyncMessages, MainFiles, AsyncMainFiles, - Files, - AsyncFiles, + ThreadFiles, + AsyncThreadFiles, + AssistantFiles, + AsyncAssistantFiles, Runs, AsyncRuns, Steps, - AsyncSteps + AsyncSteps, ) from .utils import ( Modes, @@ -91,10 +93,12 @@ "AsyncMessages", "MainFiles", "AsyncMainFiles", - "Files", - "AsyncFiles", + "ThreadFiles", + "AsyncThreadFiles", + "AssistantFiles", + "AsyncAssistantFiles", "Runs", "AsyncRuns", "Steps", - "AsyncSteps" + "AsyncSteps", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 4c211bfb..5b4c2c4c 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -6,8 +6,19 @@ from .post import Post, AsyncPost from .embeddings import Embeddings, AsyncEmbeddings from .images import Images, AsyncImages -from .assistants import Assistants, Files, AsyncAssistants, AsyncFiles -from .threads import Threads, Messages, Files, Runs, Steps, AsyncThreads, AsyncMessages, AsyncFiles, AsyncRuns, 
AsyncSteps +from .assistants import Assistants, AssistantFiles, AsyncAssistants, AsyncAssistantFiles +from .threads import ( + Threads, + Messages, + ThreadFiles, + Runs, + Steps, + AsyncThreads, + AsyncMessages, + AsyncThreadFiles, + AsyncRuns, + AsyncSteps, +) from .mainFiles import MainFiles, AsyncMainFiles __all__ = [ @@ -32,8 +43,10 @@ "AsyncAssistants", "MainFiles", "AsyncMainFiles", - "Files", - "AsyncFiles", + "AssistantFiles", + "ThreadFiles", + "AsyncAssistantFiles", + "AsyncThreadFiles", "Threads", "AsyncThreads", "Messages", @@ -41,5 +54,5 @@ "Runs", "AsyncRuns", "Steps", - "AsyncSteps" + "AsyncSteps", ] diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 25e59ffa..3cf2347c 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -1,186 +1,117 @@ +from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse class Assistants(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = Files(client) - - def create( - self, - **kwargs - ) -> GenericResponse: + self.files = AssistantFiles(client) + def create(self, **kwargs) -> Any: response = self.openai_client.beta.assistants.create(**kwargs) return response - def retrieve( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + def retrieve(self, assistant_id, **kwargs) -> Any: response = self.openai_client.beta.assistants.retrieve( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - - def update( - self, - assistant_id, - **kwargs - ) -> GenericResponse: + def update(self, assistant_id, **kwargs) -> Any: response = self.openai_client.beta.assistants.update( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - - def delete( - self, - assistant_id, - **kwargs - ) -> GenericResponse: + def delete(self, assistant_id, **kwargs) -> Any: response = self.openai_client.beta.assistants.delete( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response -class Files(APIResource): +class AssistantFiles(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create( - self, - **kwargs - ) -> GenericResponse: - + def create(self, **kwargs) -> Any: response = self.openai_client.beta.assistants.files.create(**kwargs) return response - def list( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + def list(self, assistant_id, **kwargs) -> Any: response = self.openai_client.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - def retrieve( - self, - assistant_id, - file_id, - **kwargs - ) -> GenericResponse: - + def retrieve(self, assistant_id, file_id, **kwargs) -> Any: response = self.openai_client.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) return response - - def delete( - self, - assistant_id, - file_id, - **kwargs - ) -> GenericResponse: + def delete(self, assistant_id, file_id, **kwargs) -> Any: 
response = self.openai_client.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) return response + class AsyncAssistants(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = Files(client) - - async def create( - self, - **kwargs - ) -> GenericResponse: + self.files = AsyncAssistantFiles(client) + async def create(self, **kwargs) -> Any: response = await self.openai_client.beta.assistants.create(**kwargs) return response - async def retrieve( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + async def retrieve(self, assistant_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.retrieve( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - - async def update( - self, - assistant_id, - **kwargs - ) -> GenericResponse: + async def update(self, assistant_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.update( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - - async def delete( - self, - assistant_id, - **kwargs - ) -> GenericResponse: + async def delete(self, assistant_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.delete( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response -class AsyncFiles(AsyncAPIResource): +class AsyncAssistantFiles(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create( - self, - **kwargs - ) -> GenericResponse: - + async def create(self, **kwargs) -> Any: response = await self.openai_client.beta.assistants.files.create(**kwargs) return response - async def list( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + async def list(self, assistant_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - async def retrieve( - self, - assistant_id, - file_id, - **kwargs - ) -> GenericResponse: - + async def retrieve(self, assistant_id, file_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) return response - - async def delete( - self, - assistant_id, - file_id, - **kwargs - ) -> GenericResponse: + async def delete(self, assistant_id, file_id, **kwargs) -> Any: response = await self.openai_client.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) return response - diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index c441dca7..38e0439f 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -1,18 +1,18 @@ from __future__ import annotations -import asyncio import json -from typing import Any, AsyncIterator, Generator, Iterable, Iterator, Mapping, Optional, Type, Union, cast, overload, Literal, List -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from typing import ( + AsyncIterator, + Iterator, + Mapping, + Union, +) from 
portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( - PortkeyApiPaths, - Message, ChatCompletionChunk, ChatCompletions, ) -from portkey_ai.api_resources.streaming import AsyncStream, Stream from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource @@ -22,7 +22,7 @@ class ChatCompletion(APIResource): completions: Completions - def __init__(self, client: APIClient) -> None: + def __init__(self, client: Portkey) -> None: super().__init__(client) self.completions = Completions(client) @@ -30,7 +30,7 @@ def __init__(self, client: APIClient) -> None: class AsyncChatCompletion(AsyncAPIResource): completions: AsyncCompletions - def __init__(self, client: AsyncAPIClient) -> None: + def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.completions = AsyncCompletions(client) @@ -39,42 +39,44 @@ class Completions(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - - def stream_create(self,**kwargs) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: - with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + + def stream_create( + self, **kwargs + ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: + with self.openai_client.with_streaming_response.chat.completions.create( + **kwargs + ) as response: for line in response.iter_lines(): - json_string = line.replace('data: ', '') - json_string = json_string.strip().rstrip('\n') - if json_string == '': + json_string = line.replace("data: ", "") + json_string = json_string.strip().rstrip("\n") + if json_string == "": continue - elif json_string == '[DONE]': + elif json_string == "[DONE]": break - elif json_string!= '': + elif json_string != "": json_data = json.loads(json_string) json_data = ChatCompletionChunk(**json_data) yield json_data else: return "" - + def normal_create(self, **kwargs) -> ChatCompletions: response = self.openai_client.with_raw_response.chat.completions.create( - **kwargs) + **kwargs + ) json_response = json.loads(response.text) return ChatCompletions(**json_response) - + def create( self, **kwargs, ) -> ChatCompletions: - - if 'stream' in kwargs and kwargs['stream'] == True: - return (self.stream_create(**kwargs)) - elif 'stream' in kwargs and kwargs['stream'] == False: - return (self.normal_create(**kwargs)) + if "stream" in kwargs and kwargs["stream"] is True: + return self.stream_create(**kwargs) # type: ignore + elif "stream" in kwargs and kwargs["stream"] is False: + return self.normal_create(**kwargs) else: - return (self.normal_create(**kwargs)) - - + return self.normal_create(**kwargs) # def create( # self, @@ -82,7 +84,8 @@ def create( # ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: # print("Res kw:", kwargs) # if 'stream' in kwargs and kwargs['stream'] == True: - # with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + # with self.openai_client.with_streaming_response.chat. 
+ # completions.create(**kwargs) as response: # for line in response.iter_lines(): # json_string = line.replace('data: ', '') # json_string = json_string.strip().rstrip('\n') @@ -108,7 +111,7 @@ def create( # response = json.loads(response) # response = ChatCompletions(**response) # return response - + # @overload # def create( # self, @@ -143,11 +146,12 @@ def create( # self, # **kwargs # ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: - + # print("Res kw:", kwargs) # if 'stream' in kwargs and kwargs['stream'] == True: # print("Res kwwww:", kwargs) - # with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + # with self.openai_client.with_streaming_response. + # chat.completions.create(**kwargs) as response: # for line in response.iter_lines(): # json_string = line.replace('data: ', '') # json_string = json_string.strip().rstrip('\n') @@ -182,7 +186,6 @@ def create( # else: # return "Streaming not requested" - # def _get_config_string(self, config: Union[Mapping, str]) -> str: # return config if isinstance(config, str) else json.dumps(config) @@ -192,40 +195,43 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def stream_create(self,**kwargs) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: - async with self.openai_client.with_streaming_response.chat.completions.create(**kwargs) as response: + async def stream_create( + self, **kwargs + ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: # type: ignore + async with self.openai_client.with_streaming_response.chat.completions.create( + **kwargs + ) as response: async for line in response.iter_lines(): - json_string = line.replace('data: ', '') - json_string = json_string.strip().rstrip('\n') - if json_string == '': + json_string = line.replace("data: ", "") + json_string = json_string.strip().rstrip("\n") + if json_string == "": continue - elif json_string == '[DONE]': + elif json_string == "[DONE]": break - elif json_string!= '': + elif json_string != "": json_data = json.loads(json_string) json_data = ChatCompletionChunk(**json_data) yield json_data else: pass - + async def normal_create(self, **kwargs) -> ChatCompletions: response = await self.openai_client.with_raw_response.chat.completions.create( - **kwargs) + **kwargs + ) json_response = json.loads(response.text) return ChatCompletions(**json_response) - + async def create( self, **kwargs, ) -> ChatCompletions: - - if 'stream' in kwargs and kwargs['stream'] == True: - return (self.stream_create(**kwargs)) - elif 'stream' in kwargs and kwargs['stream'] == False: - return await (self.normal_create(**kwargs)) + if "stream" in kwargs and kwargs["stream"] is True: + return self.stream_create(**kwargs) # type: ignore + elif "stream" in kwargs and kwargs["stream"] is False: + return await self.normal_create(**kwargs) else: - return await (self.normal_create(**kwargs)) - + return await self.normal_create(**kwargs) # @overload # async def create( @@ -276,7 +282,7 @@ async def create( # self, # **kwargs, # ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: - + # if 'stream' in kwargs and kwargs['stream'] == True: # final_responses = [] # response = await self.openai_client.chat.completions.create(**kwargs) @@ -286,24 +292,26 @@ async def create( # finalResponse['object'] = chunk.object # finalResponse['created'] = chunk.created # finalResponse['model'] = chunk.model - # finalResponse['choices'] = [{'index': 
chunk.choices[0].index, - # 'delta': { - # 'role': chunk.choices[0].delta.role, - # 'content': chunk.choices[0].delta.content, - # 'tool_calls': chunk.choices[0].delta.tool_calls - # }, + # finalResponse['choices'] = + # [{'index': chunk.choices[0].index, + # 'delta': { + # 'role': chunk.choices[0].delta.role, + # 'content': chunk.choices[0].delta.content, + # 'tool_calls': chunk.choices[0].delta.tool_calls }, # 'logprobs': chunk.choices[0].logprobs, # 'finish_reason': chunk.choices[0].finish_reason}] # finalResponse['system_fingerprint'] = chunk.system_fingerprint # final_responses.append(finalResponse) # return final_responses # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = await self.openai_client.with_raw_response.chat.completions.create( + # response = await self.openai_client.with_raw_response. + # chat.completions.create( # **kwargs) # response = response.text # return json.loads(response) # else: - # response = await self.openai_client.with_raw_response.chat.completions.create( + # response = await self.openai_client.with_raw_response. + # chat.completions.create( # **kwargs) # response = response.text # return json.loads(response) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index b907585c..6735562a 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,14 +1,11 @@ import json -from typing import AsyncIterator, Iterator, Optional, Union, overload, Literal -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient +from typing import Any, AsyncIterator, Iterator, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( - PortkeyApiPaths, TextCompletion, TextCompletionChunk, ) -from portkey_ai.api_resources.streaming import AsyncStream, Stream from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource @@ -18,39 +15,41 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.client = client - def stream_create(self,**kwargs) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: - with self.openai_client.with_streaming_response.completions.create(**kwargs) as response: + def stream_create( + self, **kwargs + ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: + with self.openai_client.with_streaming_response.completions.create( + **kwargs + ) as response: for line in response.iter_lines(): - json_string = line.replace('data: ', '') - json_string = json_string.strip().rstrip('\n') - if json_string == '': + json_string = line.replace("data: ", "") + json_string = json_string.strip().rstrip("\n") + if json_string == "": continue - elif json_string == '[DONE]': + elif json_string == "[DONE]": break - elif json_string!= '': + elif json_string != "": json_data = json.loads(json_string) json_data = TextCompletionChunk(**json_data) yield json_data else: return "" - + def normal_create(self, **kwargs) -> TextCompletion: - response = self.openai_client.with_raw_response.completions.create( - **kwargs) + response = self.openai_client.with_raw_response.completions.create(**kwargs) json_response = json.loads(response.text) return TextCompletion(**json_response) - + def create( self, **kwargs, ) -> TextCompletion: - - if 'stream' in kwargs and kwargs['stream'] == True: - return (self.stream_create(**kwargs)) - elif 'stream' in kwargs and kwargs['stream'] == False: - return (self.normal_create(**kwargs)) + if "stream" in 
kwargs and kwargs["stream"] is True: + return self.stream_create(**kwargs) # type: ignore + elif "stream" in kwargs and kwargs["stream"] is False: + return self.normal_create(**kwargs) else: - return (self.normal_create(**kwargs)) + return self.normal_create(**kwargs) # @overload # def create( @@ -98,7 +97,7 @@ def create( # self, # **kwargs, # ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: - + # if 'stream' in kwargs and kwargs['stream'] == True: # final_responses = [] # response = self.openai_client.completions.create(**kwargs) @@ -108,18 +107,21 @@ def create( # finalResponse['object'] = chunk.object # finalResponse['created'] = chunk.created # finalResponse['model'] = chunk.model - # finalResponse['choices'] = [{'index': chunk.choices[0].index, - # 'text': chunk.choices[0].text, - # 'logprobs': chunk.choices[0].logprobs, - # 'finish_reason': chunk.choices[0].finish_reason}] - # final_responses.append(finalResponse) + # finalResponse['choices'] = + # [{'index': chunk.choices[0].index, + # 'text': chunk.choices[0].text, + # 'logprobs': chunk.choices[0].logprobs, + # 'finish_reason': chunk.choices[0].finish_reason}] + # final_responses.append(finalResponse) # return final_responses # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = self.openai_client.with_raw_response.completions.create(**kwargs) + # response = self.openai_client.with_raw_response. + # completions.create(**kwargs) # response = response # return json.loads(response) # else: - # response = self.openai_client.with_raw_response.completions.create(**kwargs) + # response = self.openai_client.with_raw_response. + # completions.create(**kwargs) # response = response.text # return json.loads(response) @@ -128,40 +130,44 @@ class AsyncCompletion(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - - async def stream_create(self,**kwargs) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: - async with self.openai_client.with_streaming_response.completions.create(**kwargs) as response: + + async def stream_create( + self, **kwargs + ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: + async with self.openai_client.with_streaming_response.completions.create( + **kwargs + ) as response: async for line in response.iter_lines(): - json_string = line.replace('data: ', '') - json_string = json_string.strip().rstrip('\n') - if json_string == '': + json_string = line.replace("data: ", "") + json_string = json_string.strip().rstrip("\n") + if json_string == "": continue - elif json_string == '[DONE]': + elif json_string == "[DONE]": break - elif json_string!= '': + elif json_string != "": json_data = json.loads(json_string) json_data = TextCompletionChunk(**json_data) yield json_data else: pass - + async def normal_create(self, **kwargs) -> TextCompletion: response = await self.openai_client.with_raw_response.completions.create( - **kwargs) + **kwargs + ) json_response = json.loads(response.text) return TextCompletion(**json_response) - + async def create( self, **kwargs, - ) -> TextCompletion: - - if 'stream' in kwargs and kwargs['stream'] == True: - return (self.stream_create(**kwargs)) - elif 'stream' in kwargs and kwargs['stream'] == False: - return await (self.normal_create(**kwargs)) + ) -> Any: + if "stream" in kwargs and kwargs["stream"] is True: + return self.stream_create(**kwargs) # type: ignore + elif "stream" in kwargs and kwargs["stream"] is False: + return await self.normal_create(**kwargs) 
else: - return await (self.normal_create(**kwargs)) + return await self.normal_create(**kwargs) # @overload # async def create( @@ -219,17 +225,20 @@ async def create( # finalResponse['object'] = chunk.object # finalResponse['created'] = chunk.created # finalResponse['model'] = chunk.model - # finalResponse['choices'] = [{'index': chunk.choices[0].index, - # 'text': chunk.choices[0].text, - # 'logprobs': chunk.choices[0].logprobs, - # 'finish_reason': chunk.choices[0].finish_reason}] - # final_responses.append(finalResponse) + # finalResponse['choices'] = + # [{'index': chunk.choices[0].index, + # 'text': chunk.choices[0].text, + # 'logprobs': chunk.choices[0].logprobs, + # 'finish_reason': chunk.choices[0].finish_reason}] + # final_responses.append(finalResponse) # return final_responses # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = await self.openai_client.with_raw_response.completions.create(**kwargs) + # response = await self.openai_client.with_raw_response. + # completions.create(**kwargs) # response = response.text # return json.loads(response) # else: - # response = await self.openai_client.with_raw_response.completions.create(**kwargs) + # response = await self.openai_client.with_raw_response. + # completions.create(**kwargs) # response = response.text # return json.loads(response) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 5eae6aa4..5834e004 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -1,24 +1,18 @@ import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse +from portkey_ai.api_resources.utils import GenericResponse class Embeddings(APIResource): - def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create( - self, - **kwargs - ) -> GenericResponse: - + def create(self, **kwargs) -> GenericResponse: response = self.openai_client.with_raw_response.embeddings.create(**kwargs) - response = response.text - return json.loads(response) + response_text = response.text + return json.loads(response_text) class AsyncEmbeddings(AsyncAPIResource): @@ -26,11 +20,9 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create( - self, - **kwargs - ) -> GenericResponse: - - response = await self.openai_client.with_raw_response.embeddings.create(**kwargs) - response = response.text - return json.loads(response) + async def create(self, **kwargs) -> GenericResponse: + response = await self.openai_client.with_raw_response.embeddings.create( + **kwargs + ) + response_text = response.text + return json.loads(response_text) diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index 0fae363a..2df4737c 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,62 +1,39 @@ +from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import 
PortkeyApiPaths, GenericResponse + class Images(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def generate( - self, - **kwargs - ) -> GenericResponse: - + def generate(self, **kwargs) -> Any: response = self.openai_client.images.generate(**kwargs) return response - - def edit( - self, - **kwargs - ) -> GenericResponse: - + + def edit(self, **kwargs) -> Any: response = self.openai_client.images.edit(**kwargs) return response - - def create_variation( - self, - **kwargs - ) -> GenericResponse: + def create_variation(self, **kwargs) -> Any: response = self.openai_client.images.create_variation(**kwargs) return response - + + class AsyncImages(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def generate( - self, - **kwargs - ) -> GenericResponse: - + async def generate(self, **kwargs) -> Any: response = await self.openai_client.images.generate(**kwargs) return response - - async def edit( - self, - **kwargs - ) -> GenericResponse: - + + async def edit(self, **kwargs) -> Any: response = await self.openai_client.images.edit(**kwargs) return response - - async def create_variation( - self, - **kwargs - ) -> GenericResponse: - + + async def create_variation(self, **kwargs) -> Any: response = await self.openai_client.images.create_variation(**kwargs) - return response \ No newline at end of file + return response diff --git a/portkey_ai/api_resources/apis/mainFiles.py b/portkey_ai/api_resources/apis/mainFiles.py index b45d36d1..72abef90 100644 --- a/portkey_ai/api_resources/apis/mainFiles.py +++ b/portkey_ai/api_resources/apis/mainFiles.py @@ -1,7 +1,6 @@ +from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse class MainFiles(APIResource): @@ -9,52 +8,24 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create( - self, - file, - purpose, - **kwargs - ) -> GenericResponse: - + def create(self, file, purpose, **kwargs) -> Any: response = self.openai_client.files.create(file=file, purpose=purpose, **kwargs) return response - def list( - self, - **kwargs - ) -> GenericResponse: - + def list(self, **kwargs) -> Any: response = self.openai_client.files.list(**kwargs) return response - - def retrieve( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = self.openai_client.files.retrieve( - file_id=file_id, **kwargs) + def retrieve(self, file_id, **kwargs) -> Any: + response = self.openai_client.files.retrieve(file_id=file_id, **kwargs) return response - - def delete( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = self.openai_client.files.delete( - file_id=file_id, **kwargs) + def delete(self, file_id, **kwargs) -> Any: + response = self.openai_client.files.delete(file_id=file_id, **kwargs) return response - - def retrieveContent( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = self.openai_client.files.retrieveContent( - file_id=file_id, **kwargs) + def retrieveContent(self, file_id, **kwargs) -> Any: + response = self.openai_client.files.content(file_id=file_id, **kwargs) return response @@ -63,50 +34,24 @@ def __init__(self, client: 
AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create( - self, - file, - purpose, - **kwargs - ) -> GenericResponse: - - response = await self.openai_client.files.create(file=file, purpose=purpose, **kwargs) + async def create(self, file, purpose, **kwargs) -> Any: + response = await self.openai_client.files.create( + file=file, purpose=purpose, **kwargs + ) return response - async def list( - self, - **kwargs - ) -> GenericResponse: - + async def list(self, **kwargs) -> Any: response = await self.openai_client.files.list(**kwargs) return response - - async def retrieve( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = await self.openai_client.files.retrieve( - file_id=file_id, **kwargs) + async def retrieve(self, file_id, **kwargs) -> Any: + response = await self.openai_client.files.retrieve(file_id=file_id, **kwargs) return response - - async def delete( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = await self.openai_client.files.delete( - file_id=file_id, **kwargs) + async def delete(self, file_id, **kwargs) -> Any: + response = await self.openai_client.files.delete(file_id=file_id, **kwargs) return response - - async def retrieveContent( - self, - file_id, - **kwargs - ) -> GenericResponse: - response = await self.openai_client.files.retrieveContent( - file_id=file_id, **kwargs) - return response \ No newline at end of file + async def retrieveContent(self, file_id, **kwargs) -> Any: + response = await self.openai_client.files.content(file_id=file_id, **kwargs) + return response diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index f4c4e5cd..66cded63 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,7 +1,6 @@ +from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import PortkeyApiPaths, GenericResponse class Threads(APIResource): @@ -13,146 +12,142 @@ def __init__(self, client: Portkey) -> None: def create( self, - ) -> GenericResponse: - + ) -> Any: response = self.openai_client.beta.threads.create() return response - def retrieve( - self, - thread_id, - **kwargs - ) -> GenericResponse: - + def retrieve(self, thread_id, **kwargs) -> Any: response = self.openai_client.beta.threads.retrieve( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - def update( - self, - thread_id, - **kwargs - ) -> GenericResponse: - - response = self.openai_client.beta.threads.update( - thread_id=thread_id, **kwargs) + def update(self, thread_id, **kwargs) -> Any: + response = self.openai_client.beta.threads.update(thread_id=thread_id, **kwargs) return response def delete( self, thread_id, - ) -> GenericResponse: - + ) -> Any: response = self.openai_client.beta.threads.delete(thread_id=thread_id) return response - def create_and_run( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + def create_and_run(self, assistant_id, **kwargs) -> Any: response = self.openai_client.beta.threads.create_and_run( - assistant_id == assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response - class Messages(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - 
self.files = Files(client) + self.files = ThreadFiles(client) - def create(self, thread_id, **kwargs) -> GenericResponse: + def create(self, thread_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.create( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - def list(self, thread_id, **kwargs) -> GenericResponse: + def list(self, thread_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.list( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - def retrieve(self, thread_id, message_id, **kwargs) -> GenericResponse: + def retrieve(self, thread_id, message_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.retrieve( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response - def update(self, thread_id, message_id, **kwargs) -> GenericResponse: + def update(self, thread_id, message_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.update( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response - -class Files(APIResource): +class ThreadFiles(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def list(self, thread_id, message_id, **kwargs) -> GenericResponse: + def list(self, thread_id, message_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response - def retrieve(self, thread_id, message_id, file_id, **kwargs) -> Any: response = self.openai_client.beta.threads.messages.files.retrieve( - thread_id=thread_id, message_id=message_id, file_id=file_id ** kwargs) + thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs + ) return response - class Runs(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.steps = Steps(client) - def create(self, **kwargs) -> GenericResponse: + def create(self, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.create(**kwargs) return response - def retrieve(self, thread_id, run_id, **kwargs) -> GenericResponse: + def retrieve(self, thread_id, run_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.retrieve( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - def list(self, thread_id, **kwargs) -> GenericResponse: + def list(self, thread_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.list( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - def update(self, thread_id, run_id, **kwargs) -> GenericResponse: + def update(self, thread_id, run_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.update( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.submit_tool_outputs( - thread_id=thread_id, run_id=run_id,
tool_outputs=tool_outputs, **kwargs) + thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs + ) return response - def cancel(self, thread_id, run_id, **kwargs) -> GenericResponse: + def cancel(self, thread_id, run_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.cancel( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response + class Steps(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def list(self, thread_id, run_id, **kwargs) -> GenericResponse: + def list(self, thread_id, run_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.steps.list( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - def retrieve(self, thread_id, run_id, step_id, **kwargs) -> GenericResponse: + def retrieve(self, thread_id, run_id, step_id, **kwargs) -> Any: response = self.openai_client.beta.threads.runs.steps.retrieve( - thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs) + thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs + ) return response + class AsyncThreads(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -162,137 +157,141 @@ def __init__(self, client: AsyncPortkey) -> None: async def create( self, - ) -> GenericResponse: - + ) -> Any: response = await self.openai_client.beta.threads.create() return response - async def retrieve( - self, - thread_id, - **kwargs - ) -> GenericResponse: - + async def retrieve(self, thread_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.retrieve( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - async def update( - self, - thread_id, - **kwargs - ) -> GenericResponse: - + async def update(self, thread_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.update( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response async def delete( self, thread_id, - ) -> GenericResponse: - + ) -> Any: response = await self.openai_client.beta.threads.delete(thread_id=thread_id) return response - async def create_and_run( - self, - assistant_id, - **kwargs - ) -> GenericResponse: - + async def create_and_run(self, assistant_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.create_and_run( - assistant_id == assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) return response + class AsyncMessages(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - self.files = AsyncFiles(client) + self.files = AsyncThreadFiles(client) - async def create(self, thread_id, **kwargs) -> GenericResponse: + async def create(self, thread_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.messages.create( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - async def list(self, thread_id, **kwargs) -> GenericResponse: + async def list(self, thread_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.messages.list( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - async def retrieve(self, thread_id, message_id, **kwargs) -> GenericResponse: + async def retrieve(self, thread_id, message_id, **kwargs) -> Any: response = await
self.openai_client.beta.threads.messages.retrieve( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response - async def update(self, thread_id, message_id, **kwargs) -> GenericResponse: + async def update(self, thread_id, message_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.messages.update( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response -class AsyncFiles(AsyncAPIResource): + +class AsyncThreadFiles(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def list(self, thread_id, message_id, **kwargs) -> GenericResponse: + async def list(self, thread_id, message_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.messages.files.list( - thread_id=thread_id, message_id=message_id, **kwargs) + thread_id=thread_id, message_id=message_id, **kwargs + ) return response - async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.messages.files.retrieve( - thread_id=thread_id, message_id=message_id, file_id=file_id ** kwargs) + thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs + ) return response + class AsyncRuns(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.steps = AsyncSteps(client) - async def create(self, **kwargs) -> GenericResponse: + async def create(self, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.create(**kwargs) return response - async def retrieve(self, thread_id, run_id, **kwargs) -> GenericResponse: + async def retrieve(self, thread_id, run_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.retrieve( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - async def list(self, thread_id, **kwargs) -> GenericResponse: + async def list(self, thread_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.list( - thread_id=thread_id, **kwargs) + thread_id=thread_id, **kwargs + ) return response - async def update(self, thread_id, run_id, **kwargs) -> GenericResponse: + async def update(self, thread_id, run_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.update( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - async def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> GenericResponse: + async def submit_tool_outputs( + self, thread_id, tool_outputs, run_id, **kwargs + ) -> Any: response = await self.openai_client.beta.threads.runs.submit_tool_outputs( - thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs) + thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs + ) return response - async def cancel(self, thread_id, run_id, **kwargs) -> GenericResponse: + async def cancel(self, thread_id, run_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.cancel( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response + class AsyncSteps(AsyncAPIResource): def __init__(self, client:
AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def list(self, thread_id, run_id, **kwargs) -> GenericResponse: + async def list(self, thread_id, run_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.steps.list( - thread_id=thread_id, run_id=run_id, **kwargs) + thread_id=thread_id, run_id=run_id, **kwargs + ) return response - async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> GenericResponse: + async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> Any: response = await self.openai_client.beta.threads.runs.steps.retrieve( - thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs) + thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs + ) return response - diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 98322abd..d9fe9eee 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -5,7 +5,11 @@ from portkey_ai.api_resources.base_client import APIClient, AsyncAPIClient from openai import AsyncOpenAI, OpenAI -from portkey_ai.api_resources.global_constants import OPEN_AI_API_KEY, PORTKEY_DEV_BASE_URL +from portkey_ai.api_resources.global_constants import ( + OPEN_AI_API_KEY, + PORTKEY_DEV_BASE_URL, +) + class Portkey(APIClient): completions: apis.Completion @@ -17,11 +21,10 @@ class Portkey(APIClient): files: apis.MainFiles class beta: - def __init__(self, client:Portkey) -> None: + def __init__(self, client: Portkey) -> None: self.assistants = apis.Assistants(client) self.threads = apis.Threads(client) - def __init__( self, *, @@ -48,9 +51,8 @@ def __init__( self.openai_client = OpenAI( api_key=OPEN_AI_API_KEY, base_url=PORTKEY_DEV_BASE_URL, - default_headers= self.custom_headers + default_headers=self.custom_headers, ) - self.completions = apis.Completion(self) self.chat = apis.ChatCompletion(self) @@ -60,7 +62,7 @@ def __init__( self.feedback = apis.Feedback(self) self.images = apis.Images(self) self.files = apis.MainFiles(self) - self.beta = self.beta(self) + self.beta = self.beta(self) # type: ignore def copy( self, @@ -102,7 +104,7 @@ class AsyncPortkey(AsyncAPIClient): files: apis.AsyncMainFiles class beta: - def __init__(self, client:AsyncPortkey) -> None: + def __init__(self, client: AsyncPortkey) -> None: self.assistants = apis.AsyncAssistants(client) self.threads = apis.AsyncThreads(client) @@ -132,7 +134,7 @@ def __init__( self.openai_client = AsyncOpenAI( api_key=OPEN_AI_API_KEY, base_url=PORTKEY_DEV_BASE_URL, - default_headers= self.custom_headers + default_headers=self.custom_headers, ) self.completions = apis.AsyncCompletion(self) @@ -143,7 +145,7 @@ def __init__( self.feedback = apis.AsyncFeedback(self) self.images = apis.AsyncImages(self) self.files = apis.AsyncMainFiles(self) - self.beta = self.beta(self) + self.beta = self.beta(self) # type: ignore def copy( self, diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 87b70924..a2982bc1 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -22,7 +22,6 @@ MISSING_BASE_URL, MISSING_MODE_MESSAGE, PORTKEY_BASE_URL, - PORTKEY_DEV_BASE_URL, PORTKEY_API_KEY_ENV, PORTKEY_HEADER_PREFIX, PORTKEY_PROXY_ENV, @@ -434,6 +433,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: # Models for text completion stream + class TextCompletionChunk(BaseModel, extra="allow"): id: Optional[str] = None object: Optional[str] = None @@ -584,3 +584,25 @@ def parse_headers(headers:
Optional[httpx.Headers]) -> dict: _headers[k] = v return _headers + + +class FileDeleteResponse(BaseModel): + id: str + + deleted: bool + + object: Literal["assistant.file.deleted"] + + +class AssistantFile(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + assistant_id: str + """The assistant ID that the file is attached to.""" + + created_at: int + """The Unix timestamp (in seconds) for when the assistant file was created.""" + + object: Literal["assistant.file"] + """The object type, which is always `assistant.file`.""" From 7904026272f12c1b163c27b488cdfdff56b05510 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 24 Feb 2024 19:43:47 +0530 Subject: [PATCH 05/62] feat: added test cases for newly added routes --- setup.cfg | 1 + .../anthropic_n_openai.json | 28 ++ .../anyscale_n_openai.json | 28 ++ .../azure_n_openai.json | 28 ++ .../cohere_n_openai.json | 27 ++ .../loadbalance_with_two_apikeys.json | 15 ++ .../single_provider/single_provider.json | 4 + .../single_provider_with_vk_retry_cache.json | 13 + .../single_with_basic_config.json | 3 + .../anthropic_n_openai.json | 6 +- .../anyscale_n_openai.json | 6 +- .../azure_n_openai.json | 6 +- .../cohere_n_openai.json | 6 +- .../loadbalance_with_two_apikeys.json | 4 +- .../single_provider/single_provider.json | 2 +- .../single_provider_with_vk_retry_cache.json | 2 +- .../single_with_basic_config.json | 2 +- .../anthropic_n_openai.json | 6 +- .../anyscale_n_openai.json | 6 +- .../azure_n_openai.json | 6 +- .../cohere_n_openai.json | 6 +- .../loadbalance_with_two_apikeys.json | 4 +- .../single_provider/single_provider.json | 2 +- .../single_provider_with_vk_retry_cache.json | 2 +- .../single_with_basic_config.json | 2 +- .../anthropic_n_openai.json | 28 ++ .../anyscale_n_openai.json | 28 ++ .../azure_n_openai.json | 28 ++ .../cohere_n_openai.json | 27 ++ .../loadbalance_with_two_apikeys.json | 15 ++ .../single_provider/single_provider.json | 4 + .../single_provider_with_vk_retry_cache.json | 13 + .../single_with_basic_config.json | 3 + .../anthropic_n_openai.json | 28 ++ .../anyscale_n_openai.json | 28 ++ .../azure_n_openai.json | 28 ++ .../cohere_n_openai.json | 27 ++ .../loadbalance_with_two_apikeys.json | 15 ++ .../single_provider/single_provider.json | 4 + .../single_provider_with_vk_retry_cache.json | 13 + .../single_with_basic_config.json | 3 + tests/models.json | 25 +- tests/test_assistants.py | 104 +++++++ tests/test_async_images.py | 254 ++++++++++++++++++ tests/test_images.py | 249 +++++++++++++++++ tests/test_threads.py | 94 +++++++ 46 files changed, 1189 insertions(+), 44 deletions(-) create mode 100644 tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json create mode 100644 tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json create mode 100644 tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json create mode 100644 tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json create mode 100644 tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json create mode 100644 tests/configs/assistants/single_provider/single_provider.json create mode 100644 tests/configs/assistants/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json create mode 100644 tests/configs/assistants/single_with_basic_config/single_with_basic_config.json create mode 100644 tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json create mode 100644 
tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json create mode 100644 tests/configs/images/loadbalance_and_fallback/azure_n_openai.json create mode 100644 tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json create mode 100644 tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json create mode 100644 tests/configs/images/single_provider/single_provider.json create mode 100644 tests/configs/images/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json create mode 100644 tests/configs/images/single_with_basic_config/single_with_basic_config.json create mode 100644 tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json create mode 100644 tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json create mode 100644 tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json create mode 100644 tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json create mode 100644 tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json create mode 100644 tests/configs/threads/single_provider/single_provider.json create mode 100644 tests/configs/threads/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json create mode 100644 tests/configs/threads/single_with_basic_config/single_with_basic_config.json create mode 100644 tests/test_assistants.py create mode 100644 tests/test_async_images.py create mode 100644 tests/test_images.py create mode 100644 tests/test_threads.py diff --git a/setup.cfg b/setup.cfg index 00798242..2fdb60e0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,6 +42,7 @@ dev = python-dotenv==1.0.0 ruff==0.0.292 pytest-asyncio==0.23.5 + openai==1.12.0 [mypy] ignore_missing_imports = true diff --git a/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json new file mode 100644 index 00000000..2c5c4a25 --- /dev/null +++ b/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anthropic-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json new file mode 100644 index 00000000..2c90ddac --- /dev/null +++ b/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anyscale-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json new file mode 100644 index 00000000..440c2591 --- /dev/null +++ b/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + 
"provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "azure-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json new file mode 100644 index 00000000..1e697928 --- /dev/null +++ b/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json @@ -0,0 +1,27 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "cohere-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json new file mode 100644 index 00000000..06973872 --- /dev/null +++ b/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -0,0 +1,15 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "provider": "anthropic", + "virtual_key": "anthropic-virtual-key" + } + ] +} \ No newline at end of file diff --git a/tests/configs/assistants/single_provider/single_provider.json b/tests/configs/assistants/single_provider/single_provider.json new file mode 100644 index 00000000..7c4ed82a --- /dev/null +++ b/tests/configs/assistants/single_provider/single_provider.json @@ -0,0 +1,4 @@ +{ + "provider": "openai", + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/assistants/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/assistants/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 00000000..52281ce7 --- /dev/null +++ b/tests/configs/assistants/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/assistants/single_with_basic_config/single_with_basic_config.json b/tests/configs/assistants/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/assistants/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json index 4f2b3396..2c5c4a25 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "vdanthropic-87ad2c" + 
"virtual_key": "anthropic-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json index e23100fb..2c90ddac 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "vdanyscale-354c5b" + "virtual_key": "anyscale-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json index dc7eaf32..440c2591 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "azure-api-key-993da0" + "virtual_key": "azure-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json index 91216c2b..1e697928 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json @@ -4,7 +4,7 @@ }, "targets": [ { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -16,10 +16,10 @@ }, "targets": [ { - "virtual_key": "vdcohere-1402b0" + "virtual_key": "cohere-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json index 7e8392d3..06973872 100644 --- a/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ b/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -5,11 +5,11 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "provider": "anthropic", - "virtual_key": "vdanthropic-87ad2c" + "virtual_key": "anthropic-virtual-key" } ] } \ No newline at end of file diff --git a/tests/configs/chat_completions/single_provider/single_provider.json b/tests/configs/chat_completions/single_provider/single_provider.json index 713c9374..7c4ed82a 100644 --- a/tests/configs/chat_completions/single_provider/single_provider.json +++ b/tests/configs/chat_completions/single_provider/single_provider.json @@ -1,4 +1,4 @@ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json 
b/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json index 12a13b21..52281ce7 100644 --- a/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json +++ b/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -1,5 +1,5 @@ { - "virtual_key": "vdkey-ff9e7c", + "virtual_key": "openai-virtual-key", "cache": { "mode": "semantic", "max_age": 60 diff --git a/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json b/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json index 5f27cb16..9471258a 100644 --- a/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json +++ b/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json @@ -1,3 +1,3 @@ { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json index 4f2b3396..2c5c4a25 100644 --- a/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "vdanthropic-87ad2c" + "virtual_key": "anthropic-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json index e23100fb..2c90ddac 100644 --- a/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "vdanyscale-354c5b" + "virtual_key": "anyscale-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json index dc7eaf32..440c2591 100644 --- a/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json @@ -5,7 +5,7 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "strategy": { @@ -17,10 +17,10 @@ }, "targets": [ { - "virtual_key": "azure-api-key-993da0" + "virtual_key": "azure-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json index 91216c2b..1e697928 100644 --- a/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json @@ -4,7 +4,7 @@ }, "targets": [ { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { 
"strategy": { @@ -16,10 +16,10 @@ }, "targets": [ { - "virtual_key": "vdcohere-1402b0" + "virtual_key": "cohere-virtual-key" }, { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } ] } diff --git a/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json index 7e8392d3..06973872 100644 --- a/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ b/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -5,11 +5,11 @@ "targets": [ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" }, { "provider": "anthropic", - "virtual_key": "vdanthropic-87ad2c" + "virtual_key": "anthropic-virtual-key" } ] } \ No newline at end of file diff --git a/tests/configs/completions/single_provider/single_provider.json b/tests/configs/completions/single_provider/single_provider.json index 713c9374..7c4ed82a 100644 --- a/tests/configs/completions/single_provider/single_provider.json +++ b/tests/configs/completions/single_provider/single_provider.json @@ -1,4 +1,4 @@ { "provider": "openai", - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json index 12a13b21..52281ce7 100644 --- a/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json +++ b/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -1,5 +1,5 @@ { - "virtual_key": "vdkey-ff9e7c", + "virtual_key": "openai-virtual-key", "cache": { "mode": "semantic", "max_age": 60 diff --git a/tests/configs/completions/single_with_basic_config/single_with_basic_config.json b/tests/configs/completions/single_with_basic_config/single_with_basic_config.json index 5f27cb16..9471258a 100644 --- a/tests/configs/completions/single_with_basic_config/single_with_basic_config.json +++ b/tests/configs/completions/single_with_basic_config/single_with_basic_config.json @@ -1,3 +1,3 @@ { - "virtual_key": "vdkey-ff9e7c" + "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json new file mode 100644 index 00000000..2c5c4a25 --- /dev/null +++ b/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anthropic-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json new file mode 100644 index 00000000..2c90ddac --- /dev/null +++ b/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": 
"openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anyscale-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json new file mode 100644 index 00000000..440c2591 --- /dev/null +++ b/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "azure-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json new file mode 100644 index 00000000..1e697928 --- /dev/null +++ b/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json @@ -0,0 +1,27 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "cohere-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json new file mode 100644 index 00000000..06973872 --- /dev/null +++ b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -0,0 +1,15 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "provider": "anthropic", + "virtual_key": "anthropic-virtual-key" + } + ] +} \ No newline at end of file diff --git a/tests/configs/images/single_provider/single_provider.json b/tests/configs/images/single_provider/single_provider.json new file mode 100644 index 00000000..7c4ed82a --- /dev/null +++ b/tests/configs/images/single_provider/single_provider.json @@ -0,0 +1,4 @@ +{ + "provider": "openai", + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/images/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/images/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 00000000..52281ce7 --- /dev/null +++ b/tests/configs/images/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/images/single_with_basic_config/single_with_basic_config.json b/tests/configs/images/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/images/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git 
a/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json new file mode 100644 index 00000000..2c5c4a25 --- /dev/null +++ b/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anthropic-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json new file mode 100644 index 00000000..2c90ddac --- /dev/null +++ b/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "anyscale-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json new file mode 100644 index 00000000..440c2591 --- /dev/null +++ b/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json @@ -0,0 +1,28 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "azure-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json new file mode 100644 index 00000000..1e697928 --- /dev/null +++ b/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json @@ -0,0 +1,27 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "virtual_key": "openai-virtual-key" + }, + { + "strategy": { + "mode": "fallback", + "on_status_codes": [ + 429, + 241 + ] + }, + "targets": [ + { + "virtual_key": "cohere-virtual-key" + }, + { + "virtual_key": "openai-virtual-key" + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json new file mode 100644 index 00000000..06973872 --- /dev/null +++ b/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -0,0 +1,15 @@ +{ + "strategy": { + "mode": "loadbalance" + }, + "targets": [ + { + "provider": "openai", + "virtual_key": "openai-virtual-key" + }, + { + "provider": "anthropic", + "virtual_key": "anthropic-virtual-key" + } + ] +} \ No newline at end of file diff --git a/tests/configs/threads/single_provider/single_provider.json b/tests/configs/threads/single_provider/single_provider.json new file mode 100644 index 00000000..7c4ed82a --- /dev/null +++ b/tests/configs/threads/single_provider/single_provider.json @@ -0,0 +1,4 @@ +{ + 
"provider": "openai", + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/configs/threads/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/threads/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json new file mode 100644 index 00000000..52281ce7 --- /dev/null +++ b/tests/configs/threads/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -0,0 +1,13 @@ +{ + "virtual_key": "openai-virtual-key", + "cache": { + "mode": "semantic", + "max_age": 60 + }, + "retry": { + "attempts": 5, + "on_status_codes": [ + 429 + ] + } +} \ No newline at end of file diff --git a/tests/configs/threads/single_with_basic_config/single_with_basic_config.json b/tests/configs/threads/single_with_basic_config/single_with_basic_config.json new file mode 100644 index 00000000..9471258a --- /dev/null +++ b/tests/configs/threads/single_with_basic_config/single_with_basic_config.json @@ -0,0 +1,3 @@ +{ + "virtual_key": "openai-virtual-key" +} \ No newline at end of file diff --git a/tests/models.json b/tests/models.json index c1f99861..dc433493 100644 --- a/tests/models.json +++ b/tests/models.json @@ -17,6 +17,10 @@ ], "text": [ "gpt-3.5-turbo-instruct" + ], + "image":[ + "dall-e-3", + "dall-e-2" ] }, "anyscale": { @@ -34,24 +38,21 @@ "meta-llama/Llama-2-70b-chat-hf", "codellama/CodeLlama-34b-Instruct-hf", "mistralai/Mistral-7B-Instruct-v0.1" + ], + "image":[ + "dall-e-3", + "dall-e-2" ] }, "anthropic": { "env_variable": "ANTHROPIC_API_KEY", "chat": [ "claude-instant-1.2", - "claude-1", - "claude-1-100k", - "claude-instant-1", - "claude-instant-1-100k", "claude-1.3", - "claude-1.3-100k", "claude-1.2", "claude-1.0", "claude-instant-1.1", - "claude-instant-1.1-100k", - "claude-instant-1.0", - "claude-2" + "claude-instant-1.0" ], "text": [ "claude-instant-1.2", @@ -67,7 +68,9 @@ "claude-instant-1.1-100k", "claude-instant-1.0", "claude-2" - ] + ], + "image":["dall-e-3", + "dall-e-2"] }, "cohere": { "env_variable": "COHERE_API_KEY", @@ -84,6 +87,8 @@ "embed-multilingual-v3.0", "embed-multilingual-light-v3.0" ], - "text": [] + "text": [], + "image":["dall-e-3", + "dall-e-2"] } } \ No newline at end of file diff --git a/tests/test_assistants.py b/tests/test_assistants.py new file mode 100644 index 00000000..bf2b402a --- /dev/null +++ b/tests/test_assistants.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +import os +from typing import Any, Dict, List, cast +from uuid import uuid4 + +import pytest +from os import walk + +from portkey_ai import Portkey +from openai.pagination import SyncCursorPage, AsyncCursorPage +from openai.types.beta import ( + Assistant, + AssistantDeleted, +) +import inspect +from tests.utils import read_json_file + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/assistants" + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestAssistants: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # 
-------------------------- + # Test-1 + + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"Bearer {auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + assistant = portkey.beta.assistants.create( + model=model, + ) + print(assistant) + + + # -------------------------- + # Test-3 + + t3_params = [] + for k, v in models.items(): + for i in v["chat"]: + t3_params.append((client, k, os.environ.get(v["env_variable"]), i)) + + @pytest.mark.parametrize("client, provider, auth, model", t3_params) + def test_method_all_params( + self, client: Any, provider: str, auth: str, model + ) -> None: + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"Bearer {auth}", + trace_id=str(uuid4()), + metadata=metadata, + ) + assistant = portkey.beta.assistants.create( + model=model, + description="string", + file_ids=["file-m9QiEaDT9Le28LydiUTsUwDv"], + instructions="You are a personal math tutor. Write and run code to answer math questions.", + metadata=metadata, + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + ) + print(assistant) \ No newline at end of file diff --git a/tests/test_async_images.py b/tests/test_async_images.py new file mode 100644 index 00000000..e0219856 --- /dev/null +++ b/tests/test_async_images.py @@ -0,0 +1,254 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import AsyncPortkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") +CONFIGS_PATH = "./tests/configs/images" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestImages: + client = AsyncPortkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["image"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + @pytest.mark.asyncio + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + async def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"Bearer {auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + await portkey.images.generate( + model=model, + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + # -------------------------- + # 
Test-2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t2_params) + async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + await portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t3_params) + async def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. Make a new cache entry + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + await portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + await portkey_2.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + # -------------------------- + # Test-4 + t4_params = [] + for i in get_configs(f"{CONFIGS_PATH}/loadbalance_with_two_apikeys"): + t4_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t4_params) + async def test_method_loadbalance_with_two_apikeys( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + # virtual_key=virtual_api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + image = await portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + print(image.data) + + # -------------------------- + # Test-5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/loadbalance_and_fallback"): + t5_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t5_params) + async def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + config=config, + ) + + image = await portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + 
size="1024x1024" + ) + + print(image.data) + + # -------------------------- + # Test-7 + t7_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider"): + t7_params.append((client, i)) + + @pytest.mark.asyncio + @pytest.mark.parametrize("client, config", t7_params) + async def test_method_all_params(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + config=config, + ) + + image = await portkey.images.generate( + prompt="A cute baby sea otter", + model="dall-e-3", + n=1, + quality="standard", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + + print(image.data) \ No newline at end of file diff --git a/tests/test_images.py b/tests/test_images.py new file mode 100644 index 00000000..1727e8e8 --- /dev/null +++ b/tests/test_images.py @@ -0,0 +1,249 @@ +from __future__ import annotations +import inspect + +import os +from os import walk +from typing import Any, Dict, List +import pytest +from uuid import uuid4 +from portkey_ai import Portkey +from time import sleep +from dotenv import load_dotenv +from .utils import read_json_file + + +load_dotenv(override=True) + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/images" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + + +class TestImages: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # -------------------------- + # Test-1 + t1_params = [] + t = [] + for k, v in models.items(): + for i in v["image"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) + + t1_params.extend(t) + + @pytest.mark.parametrize("client, provider, auth, model", t1_params) + def test_method_single_with_vk_and_provider( + self, client: Any, provider: str, auth: str, model + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + provider=f"{provider}", + Authorization=f"Bearer {auth}", + trace_id=str(uuid4()), + metadata=self.get_metadata(), + ) + + portkey.images.generate( + model=model, + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + # -------------------------- + # Test -2 + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t2_params) + def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + + # -------------------------- + # Test-3 + t3_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"): + t3_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t3_params) + def test_method_single_provider_with_vk_retry_cache( + self, client: Any, config: Dict + ) -> None: + # 1. 
Make a new cache entry + # 2. Make a cache hit and see if the response contains the data. + random_id = str(uuid4()) + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + # Sleeping for the cache to reflect across the workers. The cache has an + # eventual consistency and not immediate consistency. + sleep(20) + portkey_2 = client( + base_url=base_url, + api_key=api_key, + trace_id=random_id, + virtual_key=virtual_api_key, + metadata=metadata, + config=config, + ) + + portkey_2.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + # -------------------------- + # Test-4 + t4_params = [] + for i in get_configs(f"{CONFIGS_PATH}/loadbalance_with_two_apikeys"): + t4_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t4_params) + def test_method_loadbalance_with_two_apikeys( + self, client: Any, config: Dict + ) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + # virtual_key=virtual_api_key, + trace_id=str(uuid4()), + metadata=self.get_metadata(), + config=config, + ) + + image = portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + print(image.data) + + # -------------------------- + # Test-5 + t5_params = [] + for i in get_configs(f"{CONFIGS_PATH}/loadbalance_and_fallback"): + t5_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t5_params) + def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + config=config, + ) + + image = portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + print(image.data) + + # -------------------------- + # Test-6 + t6_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider"): + t6_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t6_params) + def test_method_single_provider(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + config=config, + ) + + image = portkey.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + + print(image.data) + + # -------------------------- + # Test-7 + t7_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_provider"): + t7_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t7_params) + def test_method_all_params(self, client: Any, config: Dict) -> None: + portkey = client( + base_url=base_url, + api_key=api_key, + trace_id=str(uuid4()), + config=config, + ) + + image = portkey.images.generate( + prompt="A cute baby sea otter", + model="dall-e-3", + n=1, + quality="standard", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + + print(image.data) \ No newline at end of file diff --git a/tests/test_threads.py b/tests/test_threads.py new file mode 100644 index 00000000..2cd81e60 --- /dev/null +++ b/tests/test_threads.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import os +from typing import Any, Dict, List, cast +from uuid import uuid4 + +import pytest + +from portkey_ai import Portkey +from openai.pagination 
import SyncCursorPage, AsyncCursorPage +from openai.types.beta import ( + Assistant, + AssistantDeleted, +) +import inspect +from tests.utils import read_json_file + +base_url = os.environ.get("PORTKEY_BASE_URL") +api_key = os.environ.get("PORTKEY_API_KEY") +virtual_api_key = os.environ.get("OPENAI_VIRTUAL_KEY") + +CONFIGS_PATH = "./tests/configs/threads" + + +def get_configs(folder_path) -> List[Dict[str, Any]]: + config_files = [] + for dirpath, _, file_names in os.walk(folder_path): + for f in file_names: + config_files.append(read_json_file(os.path.join(dirpath, f))) + + return config_files + +class TestThreads: + client = Portkey + parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) + models = read_json_file("./tests/models.json") + + def get_metadata(self): + return { + "case": "testing", + "function": inspect.currentframe().f_back.f_code.co_name, + "random_id": str(uuid4()), + } + + # # -------------------------- + # # Test-1 + + # t1_params = [] + # t = [] + # for k, v in models.items(): + # for i in v["chat"]: + # t.append((client, k, os.environ.get(v["env_variable"]), i)) + + # t1_params.extend(t) + + # @pytest.mark.parametrize("client, provider, auth, model", t1_params) + # def test_method_single_with_vk_and_provider( + # self, client: Any, provider: str, auth: str, model + # ) -> None: + # metadata = self.get_metadata() + # portkey = client( + # base_url=base_url, + # api_key=api_key, + # provider=f"{provider}", + # Authorization=f"Bearer {auth}", + # trace_id=str(uuid4()), + # metadata=metadata, + # ) + # thread = portkey.beta.threads.retrieve(thread_id="thread_6dWkyyEFNNI8pQw8YEObrlna") + # print(thread) + + + # -------------------------- + # Test-1 + + t2_params = [] + for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): + t2_params.append((client, i)) + + @pytest.mark.parametrize("client, config", t2_params) + def test_method_single_with_vk_and_provider( + self, client: Any, config: Dict + ) -> None: + metadata = self.get_metadata() + portkey = client( + base_url=base_url, + api_key=api_key, + config=config, + trace_id=str(uuid4()), + metadata=metadata, + ) + thread = portkey.beta.threads.create() + print(thread) From 24e98e65eb7228ccd28d4c965242557ce35c4539 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Mon, 26 Feb 2024 11:33:48 +0530 Subject: [PATCH 06/62] fix: linting issues --- tests/test_assistants.py | 16 +++++-------- tests/test_async_images.py | 47 ++++++++++++-------------------------- tests/test_images.py | 38 +++++++----------------------- tests/test_threads.py | 12 ++++------ 4 files changed, 33 insertions(+), 80 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index bf2b402a..edd07d2e 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -1,18 +1,13 @@ from __future__ import annotations import os -from typing import Any, Dict, List, cast +from typing import Any, Dict, List from uuid import uuid4 import pytest from os import walk from portkey_ai import Portkey -from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta import ( - Assistant, - AssistantDeleted, -) import inspect from tests.utils import read_json_file @@ -22,6 +17,7 @@ CONFIGS_PATH = "./tests/configs/assistants" + def get_configs(folder_path) -> List[Dict[str, Any]]: config_files = [] for dirpath, _, file_names in walk(folder_path): @@ -71,10 +67,9 @@ def test_method_single_with_vk_and_provider( ) print(assistant) 
- # -------------------------- # Test-3 - + # -------------------------- # Test-3 + t3_params = [] for k, v in models.items(): for i in v["chat"]: t3_params.append((client, k, os.environ.get(v["env_variable"]), i)) @@ -96,9 +91,10 @@ def test_method_all_params( model=model, description="string", file_ids=["file-m9QiEaDT9Le28LydiUTsUwDv"], - instructions="You are a personal math tutor. Write and run code to answer math questions.", + instructions="You are a personal math tutor. " + + "Write and run code to answer math questions.", metadata=metadata, name="Math Tutor", tools=[{"type": "code_interpreter"}], ) - print(assistant) \ No newline at end of file + print(assistant) diff --git a/tests/test_async_images.py b/tests/test_async_images.py index e0219856..8ef5b89a 100644 --- a/tests/test_async_images.py +++ b/tests/test_async_images.py @@ -50,6 +50,7 @@ def get_metadata(self): t.append((client, k, os.environ.get(v["env_variable"]), i)) t1_params.extend(t) + @pytest.mark.asyncio @pytest.mark.parametrize("client, provider, auth, model", t1_params) async def test_method_single_with_vk_and_provider( @@ -65,10 +66,7 @@ async def test_method_single_with_vk_and_provider( ) await portkey.images.generate( - model=model, - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model=model, prompt="A cute baby sea otter", n=1, size="1024x1024" ) # -------------------------- @@ -79,7 +77,9 @@ async def test_method_single_with_vk_and_provider( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t2_params) - async def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None: + async def test_method_single_with_basic_config( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -89,12 +89,8 @@ async def test_method_single_with_basic_config( ) await portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - # -------------------------- # Test-3 @@ -121,10 +117,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) await portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) # Sleeping for the cache to reflect across the workers. The cache has an # eventual consistency and not immediate consistency.
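The retry-and-cache tests being reflowed here all follow the same write-then-read pattern, which is easier to see outside the diff. A condensed sketch, assuming the suite's PORTKEY_BASE_URL, PORTKEY_API_KEY, and OPENAI_VIRTUAL_KEY environment variables and an inline copy of the single_provider_with_vk_retry_cache fixture (the fixture file additionally pins a virtual-key slug inside the config):

import os
from time import sleep
from uuid import uuid4

from portkey_ai import Portkey

# Inline equivalent of single_provider_with_vk_retry_cache.json.
config = {
    "cache": {"mode": "semantic", "max_age": 60},
    "retry": {"attempts": 5, "on_status_codes": [429]},
}
trace_id = str(uuid4())  # shared by both requests so they land on one trace

portkey = Portkey(
    base_url=os.environ["PORTKEY_BASE_URL"],
    api_key=os.environ["PORTKEY_API_KEY"],
    virtual_key=os.environ["OPENAI_VIRTUAL_KEY"],
    trace_id=trace_id,
    config=config,
)
portkey.images.generate(
    model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024"
)

# The gateway cache is eventually consistent across workers, so pause
# before issuing the second, ideally cache-served, request.
sleep(20)

portkey_2 = Portkey(
    base_url=os.environ["PORTKEY_BASE_URL"],
    api_key=os.environ["PORTKEY_API_KEY"],
    virtual_key=os.environ["OPENAI_VIRTUAL_KEY"],
    trace_id=trace_id,
    config=config,
)
portkey_2.images.generate(
    model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024"
)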
@@ -139,10 +132,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) await portkey_2.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) # -------------------------- @@ -166,10 +156,7 @@ async def test_method_loadbalance_with_two_apikeys( ) image = await portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -182,7 +169,9 @@ async def test_method_loadbalance_with_two_apikeys( @pytest.mark.asyncio @pytest.mark.parametrize("client, config", t5_params) - async def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> None: + async def test_method_loadbalance_and_fallback( + self, client: Any, config: Dict + ) -> None: portkey = client( base_url=base_url, api_key=api_key, @@ -191,10 +180,7 @@ async def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) ) image = await portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -216,10 +202,7 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: ) image = await portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -251,4 +234,4 @@ async def test_method_all_params(self, client: Any, config: Dict) -> None: user="user-1234", ) - print(image.data) \ No newline at end of file + print(image.data) diff --git a/tests/test_images.py b/tests/test_images.py index 1727e8e8..e3797ef8 100644 --- a/tests/test_images.py +++ b/tests/test_images.py @@ -66,10 +66,7 @@ def test_method_single_with_vk_and_provider( ) portkey.images.generate( - model=model, - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model=model, prompt="A cute baby sea otter", n=1, size="1024x1024" ) # -------------------------- @@ -89,13 +86,9 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non ) portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - # -------------------------- # Test-3 t3_params = [] @@ -120,10 +113,7 @@ def test_method_single_provider_with_vk_retry_cache( ) portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) # Sleeping for the cache to reflect across the workers. The cache has an # eventual consistency and not immediate consistency. 
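The loadbalance and fallback fixtures exercised by these tests are plain gateway configs, so the same shape can be passed inline instead of being read from disk. A minimal sketch of the nested strategy used by the anthropic_n_openai fixture, assuming the placeholder virtual-key slugs from the test configs:

import os
from uuid import uuid4

from portkey_ai import Portkey

# Mirrors tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json:
# traffic is load-balanced across two targets, and the second target is itself
# a fallback chain that fires on the listed status codes.
config = {
    "strategy": {"mode": "loadbalance"},
    "targets": [
        {"provider": "openai", "virtual_key": "openai-virtual-key"},
        {
            "strategy": {"mode": "fallback", "on_status_codes": [429, 241]},
            "targets": [
                {"virtual_key": "anthropic-virtual-key"},
                {"virtual_key": "openai-virtual-key"},
            ],
        },
    ],
}

portkey = Portkey(
    base_url=os.environ["PORTKEY_BASE_URL"],
    api_key=os.environ["PORTKEY_API_KEY"],
    trace_id=str(uuid4()),
    config=config,
)
image = portkey.images.generate(
    model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024"
)
print(image.data)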
@@ -138,10 +128,7 @@ def test_method_single_provider_with_vk_retry_cache( ) portkey_2.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) # -------------------------- @@ -164,10 +151,7 @@ def test_method_loadbalance_with_two_apikeys( ) image = portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -188,10 +172,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non ) image = portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -212,10 +193,7 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: ) image = portkey.images.generate( - model="dall-e-3", - prompt="A cute baby sea otter", - n=1, - size="1024x1024" + model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) print(image.data) @@ -246,4 +224,4 @@ def test_method_all_params(self, client: Any, config: Dict) -> None: user="user-1234", ) - print(image.data) \ No newline at end of file + print(image.data) diff --git a/tests/test_threads.py b/tests/test_threads.py index 2cd81e60..c65eb41a 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -1,17 +1,12 @@ from __future__ import annotations import os -from typing import Any, Dict, List, cast +from typing import Any, Dict, List from uuid import uuid4 import pytest from portkey_ai import Portkey -from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta import ( - Assistant, - AssistantDeleted, -) import inspect from tests.utils import read_json_file @@ -30,6 +25,7 @@ def get_configs(folder_path) -> List[Dict[str, Any]]: return config_files + class TestThreads: client = Portkey parametrize = pytest.mark.parametrize("client", [client], ids=["strict"]) @@ -66,10 +62,10 @@ def get_metadata(self): # trace_id=str(uuid4()), # metadata=metadata, # ) - # thread = portkey.beta.threads.retrieve(thread_id="thread_6dWkyyEFNNI8pQw8YEObrlna") + # thread = + # portkey.beta.threads.retrieve(thread_id="thread_6dWkyyEFNNI8pQw8YEObrlna") # print(thread) - # -------------------------- # Test-1 From d0c25cba34198d145ba2bfc30cb86ebf3b659d14 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 2 Mar 2024 14:12:24 +0530 Subject: [PATCH 07/62] fix: removed comments --- .../api_resources/apis/chat_complete.py | 193 ------------------ portkey_ai/api_resources/apis/complete.py | 148 -------------- 2 files changed, 341 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 38e0439f..d529a649 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -78,116 +78,6 @@ def create( else: return self.normal_create(**kwargs) - # def create( - # self, - # **kwargs - # ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: - # print("Res kw:", kwargs) - # if 'stream' in kwargs and kwargs['stream'] == True: - # with self.openai_client.with_streaming_response.chat. 
- # completions.create(**kwargs) as response: - # for line in response.iter_lines(): - # json_string = line.replace('data: ', '') - # json_string = json_string.strip().rstrip('\n') - # if json_string == '': - # continue - # elif json_string == '[DONE]': - # break - # else: - # json_data = json.loads(json_string) - # json_data = ChatCompletionChunk(**json_data) - # yield json_data - # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = self.openai_client.with_raw_response.chat.completions.create( - # **kwargs) - # print("Res Stream:", response) - # response = response.text - # return json.loads(response) - # else: - # response = self.openai_client.with_raw_response.chat.completions.create( - # **kwargs) - # print("Res:", response) - # response = response.text - # response = json.loads(response) - # response = ChatCompletions(**response) - # return response - - # @overload - # def create( - # self, - # *, - # messages: Optional[List[Message]] = None, - # config: Optional[Union[Mapping, str]] = None, - # stream: Literal[True], - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Stream[ChatCompletionChunk]: - # ... - - # @overload - # def create( - # self, - # *, - # messages: Optional[List[Message]] = None, - # config: Optional[Union[Mapping, str]] = None, - # stream: bool = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: - # ... - - # def create( - # self, - # **kwargs - # ) -> Union[ChatCompletions, Stream[ChatCompletionChunk]]: - - # print("Res kw:", kwargs) - # if 'stream' in kwargs and kwargs['stream'] == True: - # print("Res kwwww:", kwargs) - # with self.openai_client.with_streaming_response. 
- # chat.completions.create(**kwargs) as response: - # for line in response.iter_lines(): - # json_string = line.replace('data: ', '') - # json_string = json_string.strip().rstrip('\n') - # if json_string == '': - # continue - # elif json_string == '[DONE]': - # break - # elif json_string!= '': - # json_data = json.loads(json_string) - # json_data = ChatCompletionChunk(**json_data) - # yield json_data - # else: - # return "" - - # if 'stream' in kwargs and kwargs['stream'] == False: - # response = self.openai_client.with_raw_response.chat.completions.create( - # **kwargs) - # print("REs Stream:", response.text) - # json_response = json.loads(response.text) - # return ChatCompletions(**json_response) - # response = response.text - # return json.loads(response) - # elif 'stream' not in kwargs: - # response = self.openai_client.with_raw_response.chat.completions.create( - # **kwargs) - # print("REssss:", response) - # json_response = json.loads(response.text) - # print("TYPE:", type(ChatCompletions(**json_response))) - # # response = response.text - # # return json.loads(response) - # return ChatCompletions(**json_response) - # else: - # return "Streaming not requested" - - # def _get_config_string(self, config: Union[Mapping, str]) -> str: - # return config if isinstance(config, str) else json.dumps(config) class AsyncCompletions(AsyncAPIResource): @@ -233,88 +123,5 @@ async def create( else: return await self.normal_create(**kwargs) - # @overload - # async def create( - # self, - # *, - # messages: Optional[List[Message]] = None, - # config: Optional[Union[Mapping, str]] = None, - # stream: Literal[True], - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> AsyncStream[ChatCompletionChunk]: - # ... - - # @overload - # async def create( - # self, - # *, - # messages: Optional[List[Message]] = None, - # config: Optional[Union[Mapping, str]] = None, - # stream: Literal[False] = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> ChatCompletions: - # ... - - # @overload - # async def create( - # self, - # *, - # messages: Optional[List[Message]] = None, - # config: Optional[Union[Mapping, str]] = None, - # stream: bool = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: - # ... 
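The stubs deleted here are @overload declarations: they give a type checker a way to narrow create's return type from the stream flag, while one runtime implementation serves both paths. A minimal standalone sketch of the pattern, using simplified stand-in types rather than the SDK's real ones:

    from typing import Iterator, Literal, Union, overload

    @overload
    def create(*, stream: Literal[True], **kwargs) -> Iterator[str]: ...

    @overload
    def create(*, stream: Literal[False] = False, **kwargs) -> str: ...

    def create(*, stream: bool = False, **kwargs) -> Union[str, Iterator[str]]:
        # One runtime body behind both typed signatures.
        if stream:
            return iter(["chunk-1", "chunk-2"])  # placeholder streamed chunks
        return "full response"  # placeholder complete response

Dropping the stubs trades that static narrowing for a smaller surface area, which is the choice this patch makes.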
- - # async def create( - # self, - # **kwargs, - # ) -> Union[ChatCompletions, AsyncStream[ChatCompletionChunk]]: - - # if 'stream' in kwargs and kwargs['stream'] == True: - # final_responses = [] - # response = await self.openai_client.chat.completions.create(**kwargs) - # async for chunk in response: - # finalResponse = {} - # finalResponse['id'] = chunk.id - # finalResponse['object'] = chunk.object - # finalResponse['created'] = chunk.created - # finalResponse['model'] = chunk.model - # finalResponse['choices'] = - # [{'index': chunk.choices[0].index, - # 'delta': { - # 'role': chunk.choices[0].delta.role, - # 'content': chunk.choices[0].delta.content, - # 'tool_calls': chunk.choices[0].delta.tool_calls }, - # 'logprobs': chunk.choices[0].logprobs, - # 'finish_reason': chunk.choices[0].finish_reason}] - # finalResponse['system_fingerprint'] = chunk.system_fingerprint - # final_responses.append(finalResponse) - # return final_responses - # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = await self.openai_client.with_raw_response. - # chat.completions.create( - # **kwargs) - # response = response.text - # return json.loads(response) - # else: - # response = await self.openai_client.with_raw_response. - # chat.completions.create( - # **kwargs) - # response = response.text - # return json.loads(response) - def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 6735562a..588460a1 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -51,80 +51,6 @@ def create( else: return self.normal_create(**kwargs) - # @overload - # def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: Literal[True], - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Stream[TextCompletionChunk]: - # ... - - # @overload - # def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: Literal[False] = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> TextCompletion: - # ... - - # @overload - # def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: bool = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: - # ... 
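The commented-out bodies removed below took an accumulate-then-return approach: drain the streamed chunks, flatten each into a plain dict, and hand back the whole list at the end rather than yielding chunks as they arrive. A standalone sketch of that shape with stand-in chunk objects (the dataclass is illustrative, not the SDK's type):

    from dataclasses import dataclass
    from typing import Any, Dict, Iterable, List

    @dataclass
    class Chunk:
        id: str
        text: str

    def collect_stream(chunks: Iterable[Chunk]) -> List[Dict[str, Any]]:
        # Drain the stream eagerly, flattening each chunk into a dict.
        return [{"id": c.id, "text": c.text} for c in chunks]

    collected = collect_stream([Chunk("cmpl-1", "Hel"), Chunk("cmpl-1", "lo")])

The surviving stream_create path yields parsed chunks lazily instead, which is why these blocks can be deleted wholesale.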
- - # def create( - # self, - # **kwargs, - # ) -> Union[TextCompletion, Stream[TextCompletionChunk]]: - - # if 'stream' in kwargs and kwargs['stream'] == True: - # final_responses = [] - # response = self.openai_client.completions.create(**kwargs) - # for chunk in response: - # finalResponse = {} - # finalResponse['id'] = chunk.id - # finalResponse['object'] = chunk.object - # finalResponse['created'] = chunk.created - # finalResponse['model'] = chunk.model - # finalResponse['choices'] = - # [{'index': chunk.choices[0].index, - # 'text': chunk.choices[0].text, - # 'logprobs': chunk.choices[0].logprobs, - # 'finish_reason': chunk.choices[0].finish_reason}] - # final_responses.append(finalResponse) - # return final_responses - # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = self.openai_client.with_raw_response. - # completions.create(**kwargs) - # response = response - # return json.loads(response) - # else: - # response = self.openai_client.with_raw_response. - # completions.create(**kwargs) - # response = response.text - # return json.loads(response) - class AsyncCompletion(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -168,77 +94,3 @@ async def create( return await self.normal_create(**kwargs) else: return await self.normal_create(**kwargs) - - # @overload - # async def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: Literal[True], - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> AsyncStream[TextCompletionChunk]: - # ... - - # @overload - # async def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: Literal[False] = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> TextCompletion: - # ... - - # @overload - # async def create( - # self, - # *, - # prompt: Optional[str] = None, - # stream: bool = False, - # temperature: Optional[float] = None, - # max_tokens: Optional[int] = None, - # top_k: Optional[int] = None, - # top_p: Optional[float] = None, - # **kwargs, - # ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: - # ... - - # async def create( - # self, - # **kwargs, - # ) -> Union[TextCompletion, AsyncStream[TextCompletionChunk]]: - - # if 'stream' in kwargs and kwargs['stream'] == True: - # final_responses = [] - # response = await self.openai_client.completions.create(**kwargs) - # async for chunk in response: - # finalResponse = {} - # finalResponse['id'] = chunk.id - # finalResponse['object'] = chunk.object - # finalResponse['created'] = chunk.created - # finalResponse['model'] = chunk.model - # finalResponse['choices'] = - # [{'index': chunk.choices[0].index, - # 'text': chunk.choices[0].text, - # 'logprobs': chunk.choices[0].logprobs, - # 'finish_reason': chunk.choices[0].finish_reason}] - # final_responses.append(finalResponse) - # return final_responses - # elif 'stream' in kwargs and kwargs['stream'] == False: - # response = await self.openai_client.with_raw_response. - # completions.create(**kwargs) - # response = response.text - # return json.loads(response) - # else: - # response = await self.openai_client.with_raw_response. 
- # completions.create(**kwargs) - # response = response.text - # return json.loads(response) From b18f0a19b65fe45a2c15f22bfddb4611f53b3c01 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 2 Mar 2024 14:13:10 +0530 Subject: [PATCH 08/62] fix: linting fixed --- portkey_ai/api_resources/apis/chat_complete.py | 1 - 1 file changed, 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index d529a649..bda4d02b 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -79,7 +79,6 @@ def create( return self.normal_create(**kwargs) - class AsyncCompletions(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) From 70d76628a752f5e1955b347edce8c37d8e37698c Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 5 Mar 2024 21:18:24 +0530 Subject: [PATCH 09/62] feat: added models method + versioning for openai sdk --- portkey_ai/__init__.py | 8 +++-- portkey_ai/api_resources/__init__.py | 4 +++ portkey_ai/api_resources/apis/__init__.py | 3 ++ portkey_ai/api_resources/apis/models.py | 38 ++++++++++++++++++++ portkey_ai/api_resources/client.py | 10 ++++-- portkey_ai/api_resources/global_constants.py | 3 +- setup.cfg | 2 +- 7 files changed, 60 insertions(+), 8 deletions(-) create mode 100644 portkey_ai/api_resources/apis/models.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index f2d92ebd..6bd807a5 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -36,6 +36,8 @@ AsyncMessages, MainFiles, AsyncMainFiles, + Models, + AsyncModels, ThreadFiles, AsyncThreadFiles, AssistantFiles, @@ -48,14 +50,14 @@ from portkey_ai.version import VERSION from portkey_ai.api_resources.global_constants import ( - PORTKEY_DEV_BASE_URL, + PORTKEY_BASE_URL, PORTKEY_API_KEY_ENV, PORTKEY_PROXY_ENV, PORTKEY_GATEWAY_URL, ) api_key = os.environ.get(PORTKEY_API_KEY_ENV) -base_url = os.environ.get(PORTKEY_PROXY_ENV, PORTKEY_DEV_BASE_URL) +base_url = os.environ.get(PORTKEY_PROXY_ENV, PORTKEY_BASE_URL) config: Optional[Union[Mapping, str]] = None mode: Optional[Union[Modes, ModesLiteral]] = None @@ -99,6 +101,8 @@ "AsyncMessages", "MainFiles", "AsyncMainFiles", + "Models", + "AsyncModels" "ThreadFiles", "AsyncThreadFiles", "AssistantFiles", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 05260264..d4c19e0a 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -21,6 +21,8 @@ AsyncMessages, MainFiles, AsyncMainFiles, + Models, + AsyncModels, ThreadFiles, AsyncThreadFiles, AssistantFiles, @@ -93,6 +95,8 @@ "AsyncMessages", "MainFiles", "AsyncMainFiles", + "Models", + "AsyncModels", "ThreadFiles", "AsyncThreadFiles", "AssistantFiles", diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 5b4c2c4c..ebdf0fb0 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -20,6 +20,7 @@ AsyncSteps, ) from .mainFiles import MainFiles, AsyncMainFiles +from .models import Models, AsyncModels __all__ = [ "Completion", @@ -43,6 +44,8 @@ "AsyncAssistants", "MainFiles", "AsyncMainFiles", + "Models", + "AsyncModels", "AssistantFiles", "ThreadFiles", "AsyncAssistantFiles", diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py new file mode 100644 index 00000000..c7cc0942 --- /dev/null +++ b/portkey_ai/api_resources/apis/models.py @@ -0,0 
+1,38 @@ +from typing import Any +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey + + +class Models(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def list(self, **kwargs) -> Any: + response = self.openai_client.models.list(**kwargs) + return response + + def retrieve(self, model, **kwargs) -> Any: + response = self.openai_client.models.retrieve(model=model, **kwargs) + return response + + def delete(self, model, **kwargs) -> Any: + response = self.openai_client.models.delete(model=model, **kwargs) + return response + +class AsyncModels(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def list(self, **kwargs) -> Any: + response = await self.openai_client.models.list(**kwargs) + return response + + async def retrieve(self, model, **kwargs) -> Any: + response = await self.openai_client.models.retrieve(model=model, **kwargs) + return response + + async def delete(self, model, **kwargs) -> Any: + response = await self.openai_client.models.delete(model=model, **kwargs) + return response \ No newline at end of file diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index d9fe9eee..98f30c9b 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -7,7 +7,7 @@ from openai import AsyncOpenAI, OpenAI from portkey_ai.api_resources.global_constants import ( OPEN_AI_API_KEY, - PORTKEY_DEV_BASE_URL, + PORTKEY_BASE_URL, ) @@ -19,6 +19,7 @@ class Portkey(APIClient): embeddings: apis.Embeddings images: apis.Images files: apis.MainFiles + models: apis.Models class beta: def __init__(self, client: Portkey) -> None: @@ -50,7 +51,7 @@ def __init__( self.openai_client = OpenAI( api_key=OPEN_AI_API_KEY, - base_url=PORTKEY_DEV_BASE_URL, + base_url=PORTKEY_BASE_URL, default_headers=self.custom_headers, ) @@ -62,6 +63,7 @@ def __init__( self.feedback = apis.Feedback(self) self.images = apis.Images(self) self.files = apis.MainFiles(self) + self.models = apis.Models(self) self.beta = self.beta(self) # type: ignore def copy( @@ -102,6 +104,7 @@ class AsyncPortkey(AsyncAPIClient): embeddings: apis.AsyncEmbeddings images: apis.AsyncImages files: apis.AsyncMainFiles + models: apis.AsyncModels class beta: def __init__(self, client: AsyncPortkey) -> None: @@ -133,7 +136,7 @@ def __init__( self.openai_client = AsyncOpenAI( api_key=OPEN_AI_API_KEY, - base_url=PORTKEY_DEV_BASE_URL, + base_url=PORTKEY_BASE_URL, default_headers=self.custom_headers, ) @@ -145,6 +148,7 @@ def __init__( self.feedback = apis.AsyncFeedback(self) self.images = apis.AsyncImages(self) self.files = apis.AsyncMainFiles(self) + self.models = apis.AsyncModels(self) self.beta = self.beta(self) # type: ignore def copy( diff --git a/portkey_ai/api_resources/global_constants.py b/portkey_ai/api_resources/global_constants.py index 4d2d9e77..7556fb18 100644 --- a/portkey_ai/api_resources/global_constants.py +++ b/portkey_ai/api_resources/global_constants.py @@ -30,8 +30,7 @@ DEFAULT_TIMEOUT = 60 PORTKEY_HEADER_PREFIX = "x-portkey-" PORTKEY_BASE_URL = "https://api.portkey.ai/v1" -PORTKEY_DEV_BASE_URL = "https://api.portkeydev.com/v1" -PORTKEY_GATEWAY_URL = PORTKEY_DEV_BASE_URL +PORTKEY_GATEWAY_URL = PORTKEY_BASE_URL PORTKEY_API_KEY_ENV = "PORTKEY_API_KEY" PORTKEY_PROXY_ENV = "PORTKEY_PROXY" 
OPEN_AI_API_KEY = "DUMMY-KEY" diff --git a/setup.cfg b/setup.cfg index 2fdb60e0..94185fca 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ dev = python-dotenv==1.0.0 ruff==0.0.292 pytest-asyncio==0.23.5 - openai==1.12.0 + openai>=1.12.0,<1.12.9 [mypy] ignore_missing_imports = true From 2cec8e1070354a177d4780f09f4915a81eba7dd3 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Tue, 5 Mar 2024 21:38:33 +0530 Subject: [PATCH 10/62] fix: linting issues --- portkey_ai/__init__.py | 2 +- portkey_ai/api_resources/apis/models.py | 5 +++-- portkey_ai/api_resources/global_constants.py | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 6bd807a5..2ed8ac2a 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -102,7 +102,7 @@ "MainFiles", "AsyncMainFiles", "Models", - "AsyncModels" + "AsyncModels", "ThreadFiles", "AsyncThreadFiles", "AssistantFiles", diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index c7cc0942..1f33ef3e 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -19,7 +19,8 @@ def retrieve(self, model, **kwargs) -> Any: def delete(self, model, **kwargs) -> Any: response = self.openai_client.models.delete(model=model, **kwargs) return response - + + class AsyncModels(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) @@ -35,4 +36,4 @@ async def retrieve(self, model, **kwargs) -> Any: async def delete(self, model, **kwargs) -> Any: response = await self.openai_client.models.delete(model=model, **kwargs) - return response \ No newline at end of file + return response diff --git a/portkey_ai/api_resources/global_constants.py b/portkey_ai/api_resources/global_constants.py index 7556fb18..ad6fdf15 100644 --- a/portkey_ai/api_resources/global_constants.py +++ b/portkey_ai/api_resources/global_constants.py @@ -30,7 +30,7 @@ DEFAULT_TIMEOUT = 60 PORTKEY_HEADER_PREFIX = "x-portkey-" PORTKEY_BASE_URL = "https://api.portkey.ai/v1" -PORTKEY_GATEWAY_URL = PORTKEY_BASE_URL +PORTKEY_GATEWAY_URL = PORTKEY_BASE_URL PORTKEY_API_KEY_ENV = "PORTKEY_API_KEY" PORTKEY_PROXY_ENV = "PORTKEY_PROXY" OPEN_AI_API_KEY = "DUMMY-KEY" From ac61c7d236b0cfa6bdc3bee99a75a7aac5f4b794 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 7 Mar 2024 14:10:37 +0530 Subject: [PATCH 11/62] feat: suggestion for DevEx --- .../api_resources/apis/chat_complete.py | 37 +++++++++++------- portkey_ai/api_resources/apis/complete.py | 38 +++++++++++-------- portkey_ai/api_resources/apis/embeddings.py | 10 +++-- portkey_ai/api_resources/apis/images.py | 28 ++++++++------ 4 files changed, 68 insertions(+), 45 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index bda4d02b..95117e80 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -2,9 +2,12 @@ import json from typing import ( + Any, AsyncIterator, + Iterable, Iterator, Mapping, + Optional, Union, ) from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -41,10 +44,10 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client def stream_create( - self, **kwargs + self, model, messages, **kwargs ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: with self.openai_client.with_streaming_response.chat.completions.create( - **kwargs + model=model, messages=messages, **kwargs ) as 
response: for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -60,23 +63,26 @@ def stream_create( else: return "" - def normal_create(self, **kwargs) -> ChatCompletions: + def normal_create(self, model, messages, **kwargs) -> ChatCompletions: response = self.openai_client.with_raw_response.chat.completions.create( - **kwargs + model=model, messages=messages, **kwargs ) json_response = json.loads(response.text) return ChatCompletions(**json_response) def create( self, + *, + model: Optional[str] = None, + messages: Iterable[Any], **kwargs, ) -> ChatCompletions: if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(**kwargs) # type: ignore + return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: - return self.normal_create(**kwargs) + return self.normal_create(model=model, messages=messages, **kwargs) else: - return self.normal_create(**kwargs) + return self.normal_create(model=model, messages=messages, **kwargs) class AsyncCompletions(AsyncAPIResource): @@ -85,10 +91,10 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def stream_create( - self, **kwargs + self, model, messages, **kwargs ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: # type: ignore async with self.openai_client.with_streaming_response.chat.completions.create( - **kwargs + model=model, messages=messages, **kwargs ) as response: async for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -104,23 +110,26 @@ async def stream_create( else: pass - async def normal_create(self, **kwargs) -> ChatCompletions: + async def normal_create(self, model, messages, **kwargs) -> ChatCompletions: response = await self.openai_client.with_raw_response.chat.completions.create( - **kwargs + model=model, messages=messages, **kwargs ) json_response = json.loads(response.text) return ChatCompletions(**json_response) async def create( self, + *, + model: Optional[str] = None, + messages: Iterable[Any], **kwargs, ) -> ChatCompletions: if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(**kwargs) # type: ignore + return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: - return await self.normal_create(**kwargs) + return await self.normal_create(model=model, messages=messages, **kwargs) else: - return await self.normal_create(**kwargs) + return await self.normal_create(model=model, messages=messages, **kwargs) def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 588460a1..e58bf175 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,5 +1,5 @@ import json -from typing import Any, AsyncIterator, Iterator, Union +from typing import Any, AsyncIterator, Iterator, Optional, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ( TextCompletion, @@ -16,10 +16,10 @@ def __init__(self, client: Portkey) -> None: self.client = client def stream_create( - self, **kwargs + self, model, prompt, **kwargs ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( - **kwargs 
+ model=model, prompt=prompt, **kwargs ) as response: for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -35,21 +35,26 @@ def stream_create( else: return "" - def normal_create(self, **kwargs) -> TextCompletion: - response = self.openai_client.with_raw_response.completions.create(**kwargs) + def normal_create(self, model, prompt, **kwargs) -> TextCompletion: + response = self.openai_client.with_raw_response.completions.create( + model=model, prompt=prompt, **kwargs + ) json_response = json.loads(response.text) return TextCompletion(**json_response) def create( self, + *, + model: Optional[str] = None, + prompt: Optional[str] = None, **kwargs, ) -> TextCompletion: if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(**kwargs) # type: ignore + return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: - return self.normal_create(**kwargs) + return self.normal_create(model=model, prompt=prompt, **kwargs) else: - return self.normal_create(**kwargs) + return self.normal_create(model=model, prompt=prompt, **kwargs) class AsyncCompletion(AsyncAPIResource): @@ -58,10 +63,10 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def stream_create( - self, **kwargs + self, model, prompt, **kwargs ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: async with self.openai_client.with_streaming_response.completions.create( - **kwargs + model=model, prompt=prompt, **kwargs ) as response: async for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -77,20 +82,23 @@ async def stream_create( else: pass - async def normal_create(self, **kwargs) -> TextCompletion: + async def normal_create(self, model, prompt, **kwargs) -> TextCompletion: response = await self.openai_client.with_raw_response.completions.create( - **kwargs + model=model, prompt=prompt, **kwargs ) json_response = json.loads(response.text) return TextCompletion(**json_response) async def create( self, + *, + model: Optional[str] = None, + prompt: Optional[str] = None, **kwargs, ) -> Any: if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(**kwargs) # type: ignore + return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: - return await self.normal_create(**kwargs) + return await self.normal_create(model=model, prompt=prompt, **kwargs) else: - return await self.normal_create(**kwargs) + return await self.normal_create(model=model, prompt=prompt, **kwargs) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 5834e004..38cfa5c8 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -9,8 +9,10 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create(self, **kwargs) -> GenericResponse: - response = self.openai_client.with_raw_response.embeddings.create(**kwargs) + def create(self, *, input: str, model: str, **kwargs) -> GenericResponse: + response = self.openai_client.with_raw_response.embeddings.create( + input=input, model=model, **kwargs + ) response_text = response.text return json.loads(response_text) @@ -20,9 +22,9 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def 
create(self, **kwargs) -> GenericResponse:
+    async def create(self, *, input: str, model: str, **kwargs) -> GenericResponse:
         response = await self.openai_client.with_raw_response.embeddings.create(
-            **kwargs
+            input=input, model=model, **kwargs
         )
         response_text = response.text
         return json.loads(response_text)
diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py
index 2df4737c..e74ab69c 100644
--- a/portkey_ai/api_resources/apis/images.py
+++ b/portkey_ai/api_resources/apis/images.py
@@ -8,16 +8,16 @@ def __init__(self, client: Portkey) -> None:
         super().__init__(client)
         self.openai_client = client.openai_client
 
-    def generate(self, **kwargs) -> Any:
-        response = self.openai_client.images.generate(**kwargs)
+    def generate(self, prompt: str, **kwargs) -> Any:
+        response = self.openai_client.images.generate(prompt=prompt, **kwargs)
         return response
 
-    def edit(self, **kwargs) -> Any:
-        response = self.openai_client.images.edit(**kwargs)
+    def edit(self, prompt: str, image, **kwargs) -> Any:
+        response = self.openai_client.images.edit(prompt=prompt, image=image, **kwargs)
         return response
 
-    def create_variation(self, **kwargs) -> Any:
-        response = self.openai_client.images.create_variation(**kwargs)
+    def create_variation(self, image, **kwargs) -> Any:
+        response = self.openai_client.images.create_variation(image=image, **kwargs)
         return response
 
 
@@ -26,14 +26,18 @@ def __init__(self, client: AsyncPortkey) -> None:
         super().__init__(client)
         self.openai_client = client.openai_client
 
-    async def generate(self, **kwargs) -> Any:
-        response = await self.openai_client.images.generate(**kwargs)
+    async def generate(self, prompt: str, **kwargs) -> Any:
+        response = await self.openai_client.images.generate(prompt=prompt, **kwargs)
         return response
 
-    async def edit(self, **kwargs) -> Any:
-        response = await self.openai_client.images.edit(**kwargs)
+    async def edit(self, prompt: str, image, **kwargs) -> Any:
+        response = await self.openai_client.images.edit(
+            prompt=prompt, image=image, **kwargs
+        )
         return response
 
-    async def create_variation(self, **kwargs) -> Any:
-        response = await self.openai_client.images.create_variation(**kwargs)
+    async def create_variation(self, image, **kwargs) -> Any:
+        response = await self.openai_client.images.create_variation(
+            image=image, **kwargs
+        )
         return response

From 7c41ac9ecda333d598f6eb9287d4894e93ce8c32 Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Sat, 9 Mar 2024 08:03:40 +0530
Subject: [PATCH 12/62] fix: removed image test models from anyscale tests

---
 tests/models.json | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tests/models.json b/tests/models.json
index dc433493..4441f43a 100644
--- a/tests/models.json
+++ b/tests/models.json
@@ -39,10 +39,7 @@
         "codellama/CodeLlama-34b-Instruct-hf",
         "mistralai/Mistral-7B-Instruct-v0.1"
     ],
-    "image":[
-        "dall-e-3",
-        "dall-e-2"
-    ]
+    "image":[]
 },
 "anthropic": {
     "env_variable": "ANTHROPIC_API_KEY",

From f7eb4c564b04f37dc125049becc38b636ad3551a Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Sat, 9 Mar 2024 08:05:33 +0530
Subject: [PATCH 13/62] fix: removed image test models from cohere, anthropic tests

---
 tests/models.json | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/models.json b/tests/models.json
index 4441f43a..f63a806e 100644
--- a/tests/models.json
+++ b/tests/models.json
@@ -66,8 +66,7 @@
         "claude-instant-1.0",
         "claude-2"
     ],
-    "image":["dall-e-3",
-        "dall-e-2"]
+    "image":[]
 },
 "cohere": {
     "env_variable": "COHERE_API_KEY",
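models.json maps each provider to the env var holding its key plus per-capability model lists, so emptying the image list is what drops a provider from the image tests without touching test code. A sketch of how a parametrize helper could be derived from that file, skipping providers whose image list is empty (the helper name and path are assumptions for illustration):

    import json
    import os

    def image_params(path="tests/models.json"):
        with open(path) as f:
            models = json.load(f)
        params = []
        for provider, spec in models.items():
            for model in spec.get("image", []):  # empty list: provider is skipped
                params.append((provider, os.environ.get(spec["env_variable"]), model))
        return params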
@@ -85,7 +84,6 @@ "embed-multilingual-light-v3.0" ], "text": [], - "image":["dall-e-3", - "dall-e-2"] + "image":[] } } \ No newline at end of file From efa01099384d871b185bf1b39cc69f94c1a6977f Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 9 Mar 2024 08:12:03 +0530 Subject: [PATCH 14/62] fix: removed unsupported providers test cases --- .../anthropic_n_openai.json | 28 ------------------- .../anyscale_n_openai.json | 28 ------------------- .../azure_n_openai.json | 28 ------------------- .../cohere_n_openai.json | 27 ------------------ .../loadbalance_with_two_apikeys.json | 15 ---------- .../anthropic_n_openai.json | 28 ------------------- .../anyscale_n_openai.json | 28 ------------------- .../azure_n_openai.json | 28 ------------------- .../cohere_n_openai.json | 27 ------------------ .../loadbalance_with_two_apikeys.json | 15 ---------- .../anthropic_n_openai.json | 28 ------------------- .../anyscale_n_openai.json | 28 ------------------- .../azure_n_openai.json | 28 ------------------- .../cohere_n_openai.json | 27 ------------------ .../loadbalance_with_two_apikeys.json | 15 ---------- 15 files changed, 378 deletions(-) delete mode 100644 tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json delete mode 100644 tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json delete mode 100644 tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json delete mode 100644 tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json delete mode 100644 tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json delete mode 100644 tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json delete mode 100644 tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json delete mode 100644 tests/configs/images/loadbalance_and_fallback/azure_n_openai.json delete mode 100644 tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json delete mode 100644 tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json delete mode 100644 tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json delete mode 100644 tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json delete mode 100644 tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json delete mode 100644 tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json delete mode 100644 tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json diff --git a/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json deleted file mode 100644 index 2c5c4a25..00000000 --- a/tests/configs/assistants/loadbalance_and_fallback/anthropic_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "anthropic-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json deleted file mode 100644 index 2c90ddac..00000000 --- a/tests/configs/assistants/loadbalance_and_fallback/anyscale_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - 
"strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "anyscale-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json deleted file mode 100644 index 440c2591..00000000 --- a/tests/configs/assistants/loadbalance_and_fallback/azure_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "azure-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json deleted file mode 100644 index 1e697928..00000000 --- a/tests/configs/assistants/loadbalance_and_fallback/cohere_n_openai.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "cohere-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json deleted file mode 100644 index 06973872..00000000 --- a/tests/configs/assistants/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "provider": "anthropic", - "virtual_key": "anthropic-virtual-key" - } - ] -} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json deleted file mode 100644 index 2c5c4a25..00000000 --- a/tests/configs/images/loadbalance_and_fallback/anthropic_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "anthropic-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json deleted file mode 100644 index 2c90ddac..00000000 --- a/tests/configs/images/loadbalance_and_fallback/anyscale_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - 
"targets": [ - { - "virtual_key": "anyscale-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json deleted file mode 100644 index 440c2591..00000000 --- a/tests/configs/images/loadbalance_and_fallback/azure_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "azure-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json deleted file mode 100644 index 1e697928..00000000 --- a/tests/configs/images/loadbalance_and_fallback/cohere_n_openai.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "cohere-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json deleted file mode 100644 index 06973872..00000000 --- a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "provider": "anthropic", - "virtual_key": "anthropic-virtual-key" - } - ] -} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json deleted file mode 100644 index 2c5c4a25..00000000 --- a/tests/configs/threads/loadbalance_and_fallback/anthropic_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "anthropic-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json deleted file mode 100644 index 2c90ddac..00000000 --- a/tests/configs/threads/loadbalance_and_fallback/anyscale_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "anyscale-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json 
b/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json deleted file mode 100644 index 440c2591..00000000 --- a/tests/configs/threads/loadbalance_and_fallback/azure_n_openai.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "azure-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json deleted file mode 100644 index 1e697928..00000000 --- a/tests/configs/threads/loadbalance_and_fallback/cohere_n_openai.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "virtual_key": "openai-virtual-key" - }, - { - "strategy": { - "mode": "fallback", - "on_status_codes": [ - 429, - 241 - ] - }, - "targets": [ - { - "virtual_key": "cohere-virtual-key" - }, - { - "virtual_key": "openai-virtual-key" - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json deleted file mode 100644 index 06973872..00000000 --- a/tests/configs/threads/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "strategy": { - "mode": "loadbalance" - }, - "targets": [ - { - "provider": "openai", - "virtual_key": "openai-virtual-key" - }, - { - "provider": "anthropic", - "virtual_key": "anthropic-virtual-key" - } - ] -} \ No newline at end of file From 30eeda1f94e25dae263880a7323d9697323e2e10 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 9 Mar 2024 08:20:57 +0530 Subject: [PATCH 15/62] fix: removed comments --- tests/test_threads.py | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/tests/test_threads.py b/tests/test_threads.py index c65eb41a..5bc3e58b 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -38,37 +38,6 @@ def get_metadata(self): "random_id": str(uuid4()), } - # # -------------------------- - # # Test-1 - - # t1_params = [] - # t = [] - # for k, v in models.items(): - # for i in v["chat"]: - # t.append((client, k, os.environ.get(v["env_variable"]), i)) - - # t1_params.extend(t) - - # @pytest.mark.parametrize("client, provider, auth, model", t1_params) - # def test_method_single_with_vk_and_provider( - # self, client: Any, provider: str, auth: str, model - # ) -> None: - # metadata = self.get_metadata() - # portkey = client( - # base_url=base_url, - # api_key=api_key, - # provider=f"{provider}", - # Authorization=f"Bearer {auth}", - # trace_id=str(uuid4()), - # metadata=metadata, - # ) - # thread = - # portkey.beta.threads.retrieve(thread_id="thread_6dWkyyEFNNI8pQw8YEObrlna") - # print(thread) - - # -------------------------- - # Test-1 - t2_params = [] for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): t2_params.append((client, i)) From c59e17418618c95f7b47593a8fc341266fe5b354 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 9 Mar 2024 08:31:42 +0530 Subject: [PATCH 16/62] fix: following snake_case --- portkey_ai/api_resources/apis/__init__.py | 2 +- portkey_ai/api_resources/apis/{mainFiles.py => main_files.py} | 0 2 files changed, 1 
insertion(+), 1 deletion(-) rename portkey_ai/api_resources/apis/{mainFiles.py => main_files.py} (100%) diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index ebdf0fb0..e064e3c6 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -19,7 +19,7 @@ AsyncRuns, AsyncSteps, ) -from .mainFiles import MainFiles, AsyncMainFiles +from .main_files import MainFiles, AsyncMainFiles from .models import Models, AsyncModels __all__ = [ diff --git a/portkey_ai/api_resources/apis/mainFiles.py b/portkey_ai/api_resources/apis/main_files.py similarity index 100% rename from portkey_ai/api_resources/apis/mainFiles.py rename to portkey_ai/api_resources/apis/main_files.py From e2d266c8da5df4b993398ccd540be244bc362cf7 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 9 Mar 2024 10:24:29 +0530 Subject: [PATCH 17/62] fix: support for openai 1.x.x till 1.13.0 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 94185fca..3e32b70e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ dev = python-dotenv==1.0.0 ruff==0.0.292 pytest-asyncio==0.23.5 - openai>=1.12.0,<1.12.9 + openai>=1.0.0,<=1.13.0 [mypy] ignore_missing_imports = true From 38a68d9a4494ed5b6a1f4a54860df9b147517951 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 9 Mar 2024 10:57:57 +0530 Subject: [PATCH 18/62] fix: threads and assistants test cases fixed --- tests/test_assistants.py | 2 +- tests/test_threads.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index edd07d2e..ef7711fc 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -74,7 +74,7 @@ def test_method_single_with_vk_and_provider( for i in get_configs(f"{CONFIGS_PATH}/single_provider"): t3_params.append((client, i)) - @pytest.mark.parametrize("client, provider, auth, model", t3_params) + @pytest.mark.parametrize("client, config", t3_params) def test_method_all_params( self, client: Any, provider: str, auth: str, model ) -> None: diff --git a/tests/test_threads.py b/tests/test_threads.py index 5bc3e58b..a8a074f2 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -42,7 +42,7 @@ def get_metadata(self): for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): t2_params.append((client, i)) - @pytest.mark.parametrize("client, provider, auth, model", t2_params) + @pytest.mark.parametrize("client, config", t2_params) def test_method_single_with_vk_and_provider( self, client: Any, provider: str, auth: str, model ) -> None: From 354a10e9569d7c5b804123831329aa3cf844daa0 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 13:35:35 +0530 Subject: [PATCH 19/62] feat: get_headers() for embeddings + x-portkey-headers for all the headers --- portkey_ai/api_resources/apis/embeddings.py | 13 +++++++++---- portkey_ai/api_resources/base_client.py | 13 +++++++------ portkey_ai/api_resources/client.py | 4 ++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 38cfa5c8..fa88977d 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -13,8 +13,11 @@ def create(self, *, input: str, model: str, **kwargs) -> GenericResponse: response = self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs ) - response_text = response.text - return 
json.loads(response_text) + + data = GenericResponse(**json.loads(response.text)) + data._headers = response.headers + + return data class AsyncEmbeddings(AsyncAPIResource): @@ -26,5 +29,7 @@ async def create(self, *, input: str, model: str, **kwargs) -> GenericResponse: response = await self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs ) - response_text = response.text - return json.loads(response_text) + data = GenericResponse(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index b174e633..b40b4c98 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -74,6 +74,8 @@ def __init__( metadata=metadata, **kwargs, ) + + self.allHeaders = self._build_headers(Options.construct()) self._client = httpx.Client( base_url=self.base_url, headers={ @@ -224,13 +226,12 @@ def _default_headers(self) -> Mapping[str, str]: f"{PORTKEY_HEADER_PREFIX}runtime-version": platform.python_version(), } - def _build_headers(self, options: Options) -> httpx.Headers: + def _build_headers(self, options: Options) -> Dict[str, Any]: option_headers = options.headers or {} headers_dict = self._merge_mappings( self._default_headers, option_headers, self.custom_headers ) - headers = httpx.Headers(headers_dict) - return headers + return headers_dict def _merge_mappings( self, @@ -423,6 +424,7 @@ def __init__( **kwargs, ) + self.allHeaders = self._build_headers(Options.construct()) self._client = AsyncHttpxClientWrapper( base_url=self.base_url, headers={ @@ -573,13 +575,12 @@ def _default_headers(self) -> Mapping[str, str]: f"{PORTKEY_HEADER_PREFIX}runtime-version": platform.python_version(), } - def _build_headers(self, options: Options) -> httpx.Headers: + def _build_headers(self, options: Options) -> Dict[str, Any]: option_headers = options.headers or {} headers_dict = self._merge_mappings( self._default_headers, option_headers, self.custom_headers ) - headers = httpx.Headers(headers_dict) - return headers + return headers_dict def _merge_mappings( self, diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index 98f30c9b..de7ab879 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -52,7 +52,7 @@ def __init__( self.openai_client = OpenAI( api_key=OPEN_AI_API_KEY, base_url=PORTKEY_BASE_URL, - default_headers=self.custom_headers, + default_headers=self.allHeaders, ) self.completions = apis.Completion(self) @@ -137,7 +137,7 @@ def __init__( self.openai_client = AsyncOpenAI( api_key=OPEN_AI_API_KEY, base_url=PORTKEY_BASE_URL, - default_headers=self.custom_headers, + default_headers=self.allHeaders, ) self.completions = apis.AsyncCompletion(self) From ab20abb14ed66e17793767c4df4180d0d76753be Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 13:54:02 +0530 Subject: [PATCH 20/62] feat: get_headers for complete and chat_complete --- portkey_ai/api_resources/apis/chat_complete.py | 10 ++++++---- portkey_ai/api_resources/apis/complete.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 95117e80..931a1ecf 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -67,8 +67,9 @@ def normal_create(self, model, messages, **kwargs) -> ChatCompletions: 
response = self.openai_client.with_raw_response.chat.completions.create( model=model, messages=messages, **kwargs ) - json_response = json.loads(response.text) - return ChatCompletions(**json_response) + data = ChatCompletions(**json.loads(response.text)) + data._headers = response.headers + return data def create( self, @@ -114,8 +115,9 @@ async def normal_create(self, model, messages, **kwargs) -> ChatCompletions: response = await self.openai_client.with_raw_response.chat.completions.create( model=model, messages=messages, **kwargs ) - json_response = json.loads(response.text) - return ChatCompletions(**json_response) + data = ChatCompletions(**json.loads(response.text)) + data._headers = response.headers + return data async def create( self, diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index e58bf175..37aab280 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -39,8 +39,9 @@ def normal_create(self, model, prompt, **kwargs) -> TextCompletion: response = self.openai_client.with_raw_response.completions.create( model=model, prompt=prompt, **kwargs ) - json_response = json.loads(response.text) - return TextCompletion(**json_response) + data = TextCompletion(**json.loads(response.text)) + data._headers = response.headers + return data def create( self, @@ -86,8 +87,9 @@ async def normal_create(self, model, prompt, **kwargs) -> TextCompletion: response = await self.openai_client.with_raw_response.completions.create( model=model, prompt=prompt, **kwargs ) - json_response = json.loads(response.text) - return TextCompletion(**json_response) + data = TextCompletion(**json.loads(response.text)) + data._headers = response.headers + return data async def create( self, From 6d44dd0e74008aa5b19b34fe9b49f8dc6a6a2339 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 14:16:40 +0530 Subject: [PATCH 21/62] feat: get_headers for images --- portkey_ai/api_resources/apis/images.py | 57 +++++++++++++++++-------- portkey_ai/api_resources/utils.py | 12 ++++++ 2 files changed, 51 insertions(+), 18 deletions(-) diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index e74ab69c..400866eb 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,6 +1,9 @@ +import json from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import ImageResponse + class Images(APIResource): @@ -8,17 +11,26 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def generate(self, prompt: str, **kwargs) -> Any: - response = self.openai_client.images.generate(prompt=prompt, **kwargs) - return response + def generate(self, prompt: str, **kwargs) -> ImageResponse: + response = self.openai_client.with_raw_response.images.generate(prompt=prompt, **kwargs) + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers + + return data + + def edit(self, prompt: str, image, **kwargs) -> ImageResponse: + response = self.openai_client.with_raw_response.images.edit(prompt=prompt, image=image, **kwargs) + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers + + return data - def edit(self, prompt: str, image, **kwargs) -> Any: - response = 
self.openai_client.images.edit(prompt=prompt, image=image, **kwargs) - return response + def create_variation(self, image, **kwargs) -> ImageResponse: + response = self.openai_client.with_raw_response.images.create_variation(image=image, **kwargs) + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers - def create_variation(self, image, **kwargs) -> Any: - response = self.openai_client.images.create_variation(image=image, **kwargs) - return response + return data class AsyncImages(AsyncAPIResource): @@ -26,18 +38,27 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def generate(self, prompt: str, **kwargs) -> Any: - response = await self.openai_client.images.generate(prompt=prompt, **kwargs) - return response + async def generate(self, prompt: str, **kwargs) -> ImageResponse: + response = await self.openai_client.with_raw_response.images.generate(prompt=prompt, **kwargs) + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers - async def edit(self, prompt: str, image, **kwargs) -> Any: - response = await self.openai_client.images.edit( + return data + + async def edit(self, prompt: str, image, **kwargs) -> ImageResponse: + response = await self.openai_client.with_raw_response.images.edit( prompt=prompt, image=image, **kwargs ) - return response + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers + + return data - async def create_variation(self, image, **kwargs) -> Any: - response = await self.openai_client.images.create_variation( + async def create_variation(self, image, **kwargs) -> ImageResponse: + response = await self.openai_client.with_raw_response.images.create_variation( image=image, **kwargs ) - return response + data = ImageResponse(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index a2982bc1..918e7994 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -465,6 +465,18 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) +class ImageResponse(BaseModel, extra="allow"): + created: int + + data: List[Any] + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" From ebfe2e6b5cd3f277293587106460e54fcf1f6dbb Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 16:09:48 +0530 Subject: [PATCH 22/62] feat: get_headers for models and files --- portkey_ai/api_resources/apis/main_files.py | 88 ++++++++++++++------- portkey_ai/api_resources/apis/models.py | 54 ++++++++----- portkey_ai/api_resources/utils.py | 81 +++++++++++++++++++ 3 files changed, 175 insertions(+), 48 deletions(-) diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index 72abef90..abaa48e7 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -1,6 +1,8 @@ +import json from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from 
portkey_ai.api_resources.utils import FileDeleteResponse, FileDeleted, FileObject, FileList class MainFiles(APIResource): @@ -8,23 +10,39 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create(self, file, purpose, **kwargs) -> Any: - response = self.openai_client.files.create(file=file, purpose=purpose, **kwargs) - return response + def create(self, file, purpose, **kwargs) -> FileObject: + response = self.openai_client.with_raw_response.files.create(file=file, purpose=purpose, **kwargs) + data = FileObject(**json.loads(response.text)) + data._headers = response.headers - def list(self, **kwargs) -> Any: - response = self.openai_client.files.list(**kwargs) - return response + return data - def retrieve(self, file_id, **kwargs) -> Any: - response = self.openai_client.files.retrieve(file_id=file_id, **kwargs) - return response + def list(self, **kwargs) -> FileList: + response = self.openai_client.with_raw_response.files.list(**kwargs) + data = FileList(**json.loads(response.text)) + data._headers = response.headers - def delete(self, file_id, **kwargs) -> Any: - response = self.openai_client.files.delete(file_id=file_id, **kwargs) - return response + return data + + def retrieve(self, file_id, **kwargs) -> FileObject: + response = self.openai_client.with_raw_response.files.retrieve(file_id=file_id, **kwargs) + data = FileObject(**json.loads(response.text)) + data._headers = response.headers - def retrieveContent(self, file_id, **kwargs) -> Any: + return data + + def delete(self, file_id, **kwargs) -> FileDeleted: + response = self.openai_client.with_raw_response.files.delete(file_id=file_id, **kwargs) + data = FileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + def content(self, file_id, **kwargs) -> Any: + response = self.openai_client.files.content(file_id=file_id, **kwargs) + return response + + def retrieve_content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id, **kwargs) return response @@ -34,24 +52,38 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create(self, file, purpose, **kwargs) -> Any: - response = await self.openai_client.files.create( - file=file, purpose=purpose, **kwargs - ) - return response + async def create(self, file, purpose, **kwargs) -> FileObject: + response = await self.openai_client.with_raw_response.files.create(file=file, purpose=purpose, **kwargs) + data = FileObject(**json.loads(response.text)) + data._headers = response.headers - async def list(self, **kwargs) -> Any: - response = await self.openai_client.files.list(**kwargs) - return response + return data - async def retrieve(self, file_id, **kwargs) -> Any: - response = await self.openai_client.files.retrieve(file_id=file_id, **kwargs) - return response + async def list(self, **kwargs) -> FileList: + response = await self.openai_client.with_raw_response.files.list(**kwargs) + data = FileList(**json.loads(response.text)) + data._headers = response.headers - async def delete(self, file_id, **kwargs) -> Any: - response = await self.openai_client.files.delete(file_id=file_id, **kwargs) - return response + return data + + async def retrieve(self, file_id, **kwargs) -> FileObject: + response = await self.openai_client.with_raw_response.files.retrieve(file_id=file_id, **kwargs) + data = FileObject(**json.loads(response.text)) + data._headers = response.headers + + 
return data - async def retrieveContent(self, file_id, **kwargs) -> Any: + async def delete(self, file_id, **kwargs) -> FileDeleted: + response = await self.openai_client.with_raw_response.files.delete(file_id=file_id, **kwargs) + data = FileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def content(self, file_id, **kwargs) -> Any: + response = await self.openai_client.files.content(file_id=file_id, **kwargs) + return response + + async def retrieve_content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id, **kwargs) return response diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index 1f33ef3e..68e2dca5 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -1,6 +1,8 @@ +import json from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import Model, ModelDeleted, ModelList class Models(APIResource): @@ -8,17 +10,23 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def list(self, **kwargs) -> Any: - response = self.openai_client.models.list(**kwargs) - return response + def list(self, **kwargs) -> ModelList: + response = self.openai_client.with_raw_response.models.list(**kwargs) + data = ModelList(**json.loads(response.text)) + data._headers = response.headers + return data - def retrieve(self, model, **kwargs) -> Any: - response = self.openai_client.models.retrieve(model=model, **kwargs) - return response + def retrieve(self, model, **kwargs) -> Model: + response = self.openai_client.with_raw_response.models.retrieve(model=model, **kwargs) + data = Model(**json.loads(response.text)) + data._headers = response.headers + return data - def delete(self, model, **kwargs) -> Any: - response = self.openai_client.models.delete(model=model, **kwargs) - return response + def delete(self, model, **kwargs) -> ModelDeleted: + response = self.openai_client.with_raw_response.models.delete(model=model, **kwargs) + data = ModelDeleted(**json.loads(response.text)) + data._headers = response.headers + return data class AsyncModels(AsyncAPIResource): @@ -26,14 +34,20 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def list(self, **kwargs) -> Any: - response = await self.openai_client.models.list(**kwargs) - return response - - async def retrieve(self, model, **kwargs) -> Any: - response = await self.openai_client.models.retrieve(model=model, **kwargs) - return response - - async def delete(self, model, **kwargs) -> Any: - response = await self.openai_client.models.delete(model=model, **kwargs) - return response + async def list(self, **kwargs) -> ModelList: + response = await self.openai_client.with_raw_response.models.list(**kwargs) + data = ModelList(**json.loads(response.text)) + data._headers = response.headers + return data + + async def retrieve(self, model, **kwargs) -> Model: + response = await self.openai_client.with_raw_response.models.retrieve(model=model, **kwargs) + data = Model(**json.loads(response.text)) + data._headers = response.headers + return data + + async def delete(self, model, **kwargs) -> ModelDeleted: + response = await self.openai_client.with_raw_response.models.delete(model=model, **kwargs) 
+ data = ModelDeleted(**json.loads(response.text)) + data._headers = response.headers + return data diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 918e7994..4235cf52 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -467,8 +467,78 @@ def get_headers(self) -> Optional[Dict[str, str]]: class ImageResponse(BaseModel, extra="allow"): created: int + data: List[Any] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class FileObject(BaseModel): + id: Optional[str] + bytes: Optional[int] + created_at: Optional[int] + filename: Optional[str] + object: Optional[Literal["file"]] + purpose: Optional[Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"]] + status: Optional[Literal["uploaded", "processed", "error"]] + status_details: Optional[str] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class FileList(BaseModel): + object: str data: List[Any] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class FileDeleted(BaseModel): + id: Optional[str] + deleted: Optional[bool] + object: Optional[Literal["file"]] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class ModelDeleted(BaseModel): + id: str + deleted: bool + object: str + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class Model(BaseModel): + id: str + created: int + object: Literal["model"] + owned_by: str + _headers: Optional[httpx.Headers] = None def __str__(self): del self._headers @@ -476,7 +546,18 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + +class ModelList(BaseModel): + object: str + data: List[Any] + _headers: Optional[httpx.Headers] = None + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" From 37dd886ac2f057aba0b4a2251853ec162883519f Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 16:11:18 +0530 Subject: [PATCH 23/62] fix: linting issues --- portkey_ai/api_resources/apis/images.py | 18 +++++++---- portkey_ai/api_resources/apis/main_files.py | 36 +++++++++++++++------ portkey_ai/api_resources/apis/models.py | 17 +++++++--- portkey_ai/api_resources/utils.py | 18 ++++++++--- 4 files changed, 64 insertions(+), 25 deletions(-) diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index 400866eb..c019bcbf 100644 --- a/portkey_ai/api_resources/apis/images.py +++ 
b/portkey_ai/api_resources/apis/images.py @@ -1,32 +1,36 @@ import json -from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import ImageResponse - class Images(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client def generate(self, prompt: str, **kwargs) -> ImageResponse: - response = self.openai_client.with_raw_response.images.generate(prompt=prompt, **kwargs) + response = self.openai_client.with_raw_response.images.generate( + prompt=prompt, **kwargs + ) data = ImageResponse(**json.loads(response.text)) data._headers = response.headers return data def edit(self, prompt: str, image, **kwargs) -> ImageResponse: - response = self.openai_client.with_raw_response.images.edit(prompt=prompt, image=image, **kwargs) + response = self.openai_client.with_raw_response.images.edit( + prompt=prompt, image=image, **kwargs + ) data = ImageResponse(**json.loads(response.text)) data._headers = response.headers return data def create_variation(self, image, **kwargs) -> ImageResponse: - response = self.openai_client.with_raw_response.images.create_variation(image=image, **kwargs) + response = self.openai_client.with_raw_response.images.create_variation( + image=image, **kwargs + ) data = ImageResponse(**json.loads(response.text)) data._headers = response.headers @@ -39,7 +43,9 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def generate(self, prompt: str, **kwargs) -> ImageResponse: - response = await self.openai_client.with_raw_response.images.generate(prompt=prompt, **kwargs) + response = await self.openai_client.with_raw_response.images.generate( + prompt=prompt, **kwargs + ) data = ImageResponse(**json.loads(response.text)) data._headers = response.headers diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index abaa48e7..2311b23d 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -2,7 +2,11 @@ from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import FileDeleteResponse, FileDeleted, FileObject, FileList +from portkey_ai.api_resources.utils import ( + FileDeleted, + FileObject, + FileList, +) class MainFiles(APIResource): @@ -11,7 +15,9 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client def create(self, file, purpose, **kwargs) -> FileObject: - response = self.openai_client.with_raw_response.files.create(file=file, purpose=purpose, **kwargs) + response = self.openai_client.with_raw_response.files.create( + file=file, purpose=purpose, **kwargs + ) data = FileObject(**json.loads(response.text)) data._headers = response.headers @@ -25,23 +31,27 @@ def list(self, **kwargs) -> FileList: return data def retrieve(self, file_id, **kwargs) -> FileObject: - response = self.openai_client.with_raw_response.files.retrieve(file_id=file_id, **kwargs) + response = self.openai_client.with_raw_response.files.retrieve( + file_id=file_id, **kwargs + ) data = FileObject(**json.loads(response.text)) data._headers = response.headers return data def delete(self, file_id, **kwargs) -> FileDeleted: - response = 
self.openai_client.with_raw_response.files.delete(file_id=file_id, **kwargs) + response = self.openai_client.with_raw_response.files.delete( + file_id=file_id, **kwargs + ) data = FileDeleted(**json.loads(response.text)) data._headers = response.headers return data - + def content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id, **kwargs) return response - + def retrieve_content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id, **kwargs) return response @@ -53,7 +63,9 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def create(self, file, purpose, **kwargs) -> FileObject: - response = await self.openai_client.with_raw_response.files.create(file=file, purpose=purpose, **kwargs) + response = await self.openai_client.with_raw_response.files.create( + file=file, purpose=purpose, **kwargs + ) data = FileObject(**json.loads(response.text)) data._headers = response.headers @@ -67,14 +79,18 @@ async def list(self, **kwargs) -> FileList: return data async def retrieve(self, file_id, **kwargs) -> FileObject: - response = await self.openai_client.with_raw_response.files.retrieve(file_id=file_id, **kwargs) + response = await self.openai_client.with_raw_response.files.retrieve( + file_id=file_id, **kwargs + ) data = FileObject(**json.loads(response.text)) data._headers = response.headers return data async def delete(self, file_id, **kwargs) -> FileDeleted: - response = await self.openai_client.with_raw_response.files.delete(file_id=file_id, **kwargs) + response = await self.openai_client.with_raw_response.files.delete( + file_id=file_id, **kwargs + ) data = FileDeleted(**json.loads(response.text)) data._headers = response.headers @@ -83,7 +99,7 @@ async def delete(self, file_id, **kwargs) -> FileDeleted: async def content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id, **kwargs) return response - + async def retrieve_content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id, **kwargs) return response diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index 68e2dca5..e5707c9c 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -1,5 +1,4 @@ import json -from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import Model, ModelDeleted, ModelList @@ -17,13 +16,17 @@ def list(self, **kwargs) -> ModelList: return data def retrieve(self, model, **kwargs) -> Model: - response = self.openai_client.with_raw_response.models.retrieve(model=model, **kwargs) + response = self.openai_client.with_raw_response.models.retrieve( + model=model, **kwargs + ) data = Model(**json.loads(response.text)) data._headers = response.headers return data def delete(self, model, **kwargs) -> ModelDeleted: - response = self.openai_client.with_raw_response.models.delete(model=model, **kwargs) + response = self.openai_client.with_raw_response.models.delete( + model=model, **kwargs + ) data = ModelDeleted(**json.loads(response.text)) data._headers = response.headers return data @@ -41,13 +44,17 @@ async def list(self, **kwargs) -> ModelList: return data async def retrieve(self, model, **kwargs) -> Model: - response = await 
self.openai_client.with_raw_response.models.retrieve(model=model, **kwargs) + response = await self.openai_client.with_raw_response.models.retrieve( + model=model, **kwargs + ) data = Model(**json.loads(response.text)) data._headers = response.headers return data async def delete(self, model, **kwargs) -> ModelDeleted: - response = await self.openai_client.with_raw_response.models.delete(model=model, **kwargs) + response = await self.openai_client.with_raw_response.models.delete( + model=model, **kwargs + ) data = ModelDeleted(**json.loads(response.text)) data._headers = response.headers return data diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 4235cf52..62fc0fda 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -465,6 +465,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class ImageResponse(BaseModel, extra="allow"): created: int data: List[Any] @@ -477,24 +478,28 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class FileObject(BaseModel): id: Optional[str] bytes: Optional[int] created_at: Optional[int] filename: Optional[str] object: Optional[Literal["file"]] - purpose: Optional[Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"]] + purpose: Optional[ + Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"] + ] status: Optional[Literal["uploaded", "processed", "error"]] status_details: Optional[str] = None _headers: Optional[httpx.Headers] = None - + def __str__(self): del self._headers return json.dumps(self.dict(), indent=4) def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class FileList(BaseModel): object: str data: List[Any] @@ -507,6 +512,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class FileDeleted(BaseModel): id: Optional[str] deleted: Optional[bool] @@ -520,6 +526,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class ModelDeleted(BaseModel): id: str deleted: bool @@ -533,6 +540,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class Model(BaseModel): id: str created: int @@ -546,7 +554,8 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class ModelList(BaseModel): object: str data: List[Any] @@ -559,6 +568,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" if provider is None: From 7e4c5dbd95f591346a9ba1601a4542aa35367404 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 17:17:30 +0530 Subject: [PATCH 24/62] feat: get_headers for assistants and assistants files --- portkey_ai/api_resources/apis/assistants.py | 188 +++++++++++++------- portkey_ai/api_resources/utils.py | 97 ++++++++-- 2 files changed, 207 insertions(+), 78 deletions(-) diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 3cf2347c..5e14471d 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -1,6 +1,8 @@ +import json from typing 
import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import Assistant, AssistantDeleted, AssistantFile, AssistantFileDeleted, AssistantFileList, AssistantList class Assistants(APIResource): @@ -9,27 +11,48 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.files = AssistantFiles(client) - def create(self, **kwargs) -> Any: - response = self.openai_client.beta.assistants.create(**kwargs) - return response + def create(self, **kwargs) -> Assistant: + response = self.openai_client.with_raw_response.beta.assistants.create( + **kwargs) + data = Assistant(**json.loads(response.text)) + data._headers = response.headers - def retrieve(self, assistant_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.retrieve( + return data + + def retrieve(self, assistant_id, **kwargs) -> Assistant: + response = self.openai_client.with_raw_response.beta.assistants.retrieve( assistant_id=assistant_id, **kwargs ) - return response + data = Assistant(**json.loads(response.text)) + data._headers = response.headers + + return data - def update(self, assistant_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.update( + def update(self, assistant_id, **kwargs) -> Assistant: + response = self.openai_client.with_raw_response.beta.assistants.update( assistant_id=assistant_id, **kwargs ) - return response + data = Assistant(**json.loads(response.text)) + data._headers = response.headers + + return data + + def list(self, **kwargs) -> AssistantList: + response = self.openai_client.with_raw_response.beta.assistants.list( + **kwargs) + data = AssistantList(**json.loads(response.text)) + data._headers = response.headers - def delete(self, assistant_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.delete( + return data + + def delete(self, assistant_id, **kwargs) -> AssistantDeleted: + response = self.openai_client.with_raw_response.beta.assistants.delete( assistant_id=assistant_id, **kwargs ) - return response + data = AssistantDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data class AssistantFiles(APIResource): @@ -37,27 +60,37 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create(self, **kwargs) -> Any: - response = self.openai_client.beta.assistants.files.create(**kwargs) - return response + def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: + response = self.openai_client.with_raw_response.beta.assistants.files.create( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFile(**json.loads(response.text)) + data._headers = response.headers - def list(self, assistant_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - return response + return data - def retrieve(self, assistant_id, file_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - return response + def list(self, assistant_id, **kwargs) -> AssistantFileList: + response = self.openai_client.with_raw_response.beta.assistants.files.list( + assistant_id=assistant_id, **kwargs) + data = AssistantFileList(**json.loads(response.text)) + data._headers = response.headers + + return data - def delete(self, assistant_id, 
file_id, **kwargs) -> Any: - response = self.openai_client.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - return response + def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: + response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: + response = self.openai_client.with_raw_response.beta.assistants.files.delete( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data class AsyncAssistants(AsyncAPIResource): @@ -66,27 +99,46 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client self.files = AsyncAssistantFiles(client) - async def create(self, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.create(**kwargs) - return response + async def create(self, **kwargs) -> Assistant: + response = await self.openai_client.with_raw_response.beta.assistants.create(**kwargs) + data = Assistant(**json.loads(response.text)) + data._headers = response.headers - async def retrieve(self, assistant_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.retrieve( + return data + + async def retrieve(self, assistant_id, **kwargs) -> Assistant: + response = await self.openai_client.with_raw_response.beta.assistants.retrieve( assistant_id=assistant_id, **kwargs ) - return response + data = Assistant(**json.loads(response.text)) + data._headers = response.headers + + return data - async def update(self, assistant_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.update( + async def update(self, assistant_id, **kwargs) -> Assistant: + response = await self.openai_client.with_raw_response.beta.assistants.update( assistant_id=assistant_id, **kwargs ) - return response + data = Assistant(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def list(self, **kwargs) -> AssistantList: + response = await self.openai_client.with_raw_response.beta.assistants.list(**kwargs) + data = AssistantList(**json.loads(response.text)) + data._headers = response.headers - async def delete(self, assistant_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.delete( + return data + + async def delete(self, assistant_id, **kwargs) -> AssistantDeleted: + response = await self.openai_client.with_raw_response.beta.assistants.delete( assistant_id=assistant_id, **kwargs ) - return response + data = AssistantDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data class AsyncAssistantFiles(AsyncAPIResource): @@ -94,24 +146,34 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create(self, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.files.create(**kwargs) - return response - - async def list(self, assistant_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs - ) - return response - - async def retrieve(self, assistant_id, file_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.files.retrieve( - 
assistant_id=assistant_id, file_id=file_id, **kwargs - ) - return response - - async def delete(self, assistant_id, file_id, **kwargs) -> Any: - response = await self.openai_client.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs - ) - return response + async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: + response = await self.openai_client.with_raw_response.beta.assistants.files.create( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def list(self, assistant_id, **kwargs) -> AssistantFileList: + response = await self.openai_client.with_raw_response.beta.assistants.files.list( + assistant_id=assistant_id, **kwargs) + data = AssistantFileList(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: + response = await self.openai_client.with_raw_response.beta.assistants.files.retrieve( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFile(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: + response = await self.openai_client.with_raw_response.beta.assistants.files.delete( + assistant_id=assistant_id, file_id=file_id, **kwargs) + data = AssistantFileDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 62fc0fda..32e584f9 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -568,6 +568,51 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) +class Assistant(BaseModel): + id: Optional[str] + created_at: Optional[int] + description: Optional[str] = None + file_ids: Optional[List[str]] + instructions: Optional[str] = None + metadata: Optional[object] = None + model: Optional[str] + name: Optional[str] = None + object: Optional[str] + tools: Optional[List[Any]] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class AssistantList(BaseModel): + object: Optional[str] + data: Optional[List[Any]] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class AssistantDeleted(BaseModel): + id: Optional[str] + object: Optional[str] + deleted: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" @@ -689,23 +734,45 @@ def parse_headers(headers: Optional[httpx.Headers]) -> dict: return _headers -class FileDeleteResponse(BaseModel): - id: str - - deleted: bool - - object: Literal["assistant.file.deleted"] +class AssistantFileDeleted(BaseModel): + id: Optional[str] + deleted: Optional[bool] + object: Optional[str] + 
_headers: Optional[httpx.Headers] = None + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + class AssistantFile(BaseModel): - id: str - """The identifier, which can be referenced in API endpoints.""" - - assistant_id: str - """The assistant ID that the file is attached to.""" + id: Optional[str] + assistant_id: Optional[str] + created_at: Optional[int] + object: Optional[str] + _headers: Optional[httpx.Headers] = None - created_at: int - """The Unix timestamp (in seconds) for when the assistant file was created.""" + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class AssistantFileList(BaseModel): + object: Optional[str] + data: Optional[List[Any]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None - object: Literal["assistant.file"] - """The object type, which is always `assistant.file`.""" + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) \ No newline at end of file From 25e5cbf8faf9962c1d4af430d7be24d6c986a92c Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 17:18:32 +0530 Subject: [PATCH 25/62] fix: linting issues --- portkey_ai/api_resources/apis/assistants.py | 68 ++++++++++++++------- portkey_ai/api_resources/utils.py | 18 +++--- 2 files changed, 57 insertions(+), 29 deletions(-) diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 5e14471d..98639622 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -1,8 +1,14 @@ import json -from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import Assistant, AssistantDeleted, AssistantFile, AssistantFileDeleted, AssistantFileList, AssistantList +from portkey_ai.api_resources.utils import ( + Assistant, + AssistantDeleted, + AssistantFile, + AssistantFileDeleted, + AssistantFileList, + AssistantList, +) class Assistants(APIResource): @@ -12,8 +18,7 @@ def __init__(self, client: Portkey) -> None: self.files = AssistantFiles(client) def create(self, **kwargs) -> Assistant: - response = self.openai_client.with_raw_response.beta.assistants.create( - **kwargs) + response = self.openai_client.with_raw_response.beta.assistants.create(**kwargs) data = Assistant(**json.loads(response.text)) data._headers = response.headers @@ -38,8 +43,7 @@ def update(self, assistant_id, **kwargs) -> Assistant: return data def list(self, **kwargs) -> AssistantList: - response = self.openai_client.with_raw_response.beta.assistants.list( - **kwargs) + response = self.openai_client.with_raw_response.beta.assistants.list(**kwargs) data = AssistantList(**json.loads(response.text)) data._headers = response.headers @@ -62,7 +66,8 @@ def __init__(self, client: Portkey) -> None: def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: response = self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) data = 
AssistantFile(**json.loads(response.text)) data._headers = response.headers @@ -70,15 +75,17 @@ def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: def list(self, assistant_id, **kwargs) -> AssistantFileList: response = self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs) + assistant_id=assistant_id, **kwargs + ) data = AssistantFileList(**json.loads(response.text)) data._headers = response.headers - + return data def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: response = self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) data = AssistantFile(**json.loads(response.text)) data._headers = response.headers @@ -86,7 +93,8 @@ def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: response = self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs) + assistant_id=assistant_id, file_id=file_id, **kwargs + ) data = AssistantFileDeleted(**json.loads(response.text)) data._headers = response.headers @@ -100,7 +108,9 @@ def __init__(self, client: AsyncPortkey) -> None: self.files = AsyncAssistantFiles(client) async def create(self, **kwargs) -> Assistant: - response = await self.openai_client.with_raw_response.beta.assistants.create(**kwargs) + response = await self.openai_client.with_raw_response.beta.assistants.create( + **kwargs + ) data = Assistant(**json.loads(response.text)) data._headers = response.headers @@ -125,7 +135,9 @@ async def update(self, assistant_id, **kwargs) -> Assistant: return data async def list(self, **kwargs) -> AssistantList: - response = await self.openai_client.with_raw_response.beta.assistants.list(**kwargs) + response = await self.openai_client.with_raw_response.beta.assistants.list( + **kwargs + ) data = AssistantList(**json.loads(response.text)) data._headers = response.headers @@ -147,32 +159,44 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def create(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = await self.openai_client.with_raw_response.beta.assistants.files.create( - assistant_id=assistant_id, file_id=file_id, **kwargs) + response = ( + await self.openai_client.with_raw_response.beta.assistants.files.create( + assistant_id=assistant_id, file_id=file_id, **kwargs + ) + ) data = AssistantFile(**json.loads(response.text)) data._headers = response.headers return data async def list(self, assistant_id, **kwargs) -> AssistantFileList: - response = await self.openai_client.with_raw_response.beta.assistants.files.list( - assistant_id=assistant_id, **kwargs) + response = ( + await self.openai_client.with_raw_response.beta.assistants.files.list( + assistant_id=assistant_id, **kwargs + ) + ) data = AssistantFileList(**json.loads(response.text)) data._headers = response.headers - + return data async def retrieve(self, assistant_id, file_id, **kwargs) -> AssistantFile: - response = await self.openai_client.with_raw_response.beta.assistants.files.retrieve( - assistant_id=assistant_id, file_id=file_id, **kwargs) + response = ( + await self.openai_client.with_raw_response.beta.assistants.files.retrieve( + assistant_id=assistant_id, file_id=file_id, **kwargs + ) + ) data = AssistantFile(**json.loads(response.text)) data._headers = 
response.headers return data async def delete(self, assistant_id, file_id, **kwargs) -> AssistantFileDeleted: - response = await self.openai_client.with_raw_response.beta.assistants.files.delete( - assistant_id=assistant_id, file_id=file_id, **kwargs) + response = ( + await self.openai_client.with_raw_response.beta.assistants.files.delete( + assistant_id=assistant_id, file_id=file_id, **kwargs + ) + ) data = AssistantFileDeleted(**json.loads(response.text)) data._headers = response.headers diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 32e584f9..a57fce2e 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -568,6 +568,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class Assistant(BaseModel): id: Optional[str] created_at: Optional[int] @@ -588,6 +589,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class AssistantList(BaseModel): object: Optional[str] data: Optional[List[Any]] @@ -599,7 +601,8 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class AssistantDeleted(BaseModel): id: Optional[str] object: Optional[str] @@ -743,10 +746,10 @@ class AssistantFileDeleted(BaseModel): def __str__(self): del self._headers return json.dumps(self.dict(), indent=4) - + def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class AssistantFile(BaseModel): id: Optional[str] @@ -758,10 +761,11 @@ class AssistantFile(BaseModel): def __str__(self): del self._headers return json.dumps(self.dict(), indent=4) - + def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + + class AssistantFileList(BaseModel): object: Optional[str] data: Optional[List[Any]] @@ -773,6 +777,6 @@ class AssistantFileList(BaseModel): def __str__(self): del self._headers return json.dumps(self.dict(), indent=4) - + def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) \ No newline at end of file + return parse_headers(self._headers) From b1027275ca6cc0320fc466980bfb37f3e3eb29d9 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 13 Mar 2024 19:50:47 +0530 Subject: [PATCH 26/62] feat: get_headers for threads and following sub methods --- portkey_ai/api_resources/apis/threads.py | 449 ++++++++++++++++------- portkey_ai/api_resources/utils.py | 194 +++++++++- 2 files changed, 496 insertions(+), 147 deletions(-) diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 66cded63..610cf54c 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,6 +1,18 @@ -from typing import Any +import json + from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.utils import ( + MessageList, + Run, + RunList, + RunStep, + RunStepList, + Thread, + ThreadDeleted, + ThreadMessage, + ThreadMessageFileRetrieve, +) class Threads(APIResource): @@ -12,32 +24,50 @@ def __init__(self, client: Portkey) -> None: def create( self, - ) -> Any: - response = self.openai_client.beta.threads.create() - return response + ) -> Thread: + response = self.openai_client.with_raw_response.beta.threads.create() + data = Thread(**json.loads(response.text)) + data._headers = response.headers + 
+ return data + + def retrieve(self, thread_id, **kwargs) -> Thread: + response = self.openai_client.with_raw_response.beta.threads.retrieve( + thread_id=thread_id, **kwargs + ) + data = Thread(**json.loads(response.text)) + data._headers = response.headers + + return data - def retrieve(self, thread_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.retrieve( + def update(self, thread_id, **kwargs) -> Thread: + response = self.openai_client.with_raw_response.beta.threads.update( thread_id=thread_id, **kwargs ) - return response + data = Thread(**json.loads(response.text)) + data._headers = response.headers - def update(self, thread_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.update(thread_id=thread_id, **kwargs) - return response + return data def delete( self, thread_id, - ) -> Any: - response = self.openai_client.beta.threads.delete(thread_id=thread_id) - return response + ) -> ThreadDeleted: + response = self.openai_client.with_raw_response.beta.threads.delete( + thread_id=thread_id + ) + data = ThreadDeleted(**json.loads(response.text)) + data._headers = response.headers + + return data - def create_and_run(self, assistant_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.create_and_run( + def create_and_run(self, assistant_id, **kwargs) -> Run: + response = self.openai_client.with_raw_response.beta.threads.create_and_run( assistant_id=assistant_id, **kwargs ) - return response + data = Run(**json.loads(response.text)) + data._headers = response.headers + return data class Messages(APIResource): @@ -46,29 +76,38 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.files = ThreadFiles(client) - def create(self, thread_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.messages.create( + def create(self, thread_id, **kwargs) -> ThreadMessage: + response = self.openai_client.with_raw_response.beta.threads.messages.create( thread_id=thread_id, **kwargs ) - return response + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + + return data - def list(self, thread_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.messages.list( + def list(self, thread_id, **kwargs) -> MessageList: + response = self.openai_client.with_raw_response.beta.threads.messages.list( thread_id=thread_id, **kwargs ) - return response + data = MessageList(**json.loads(response.text)) + data._headers = response.headers + return data - def retrieve(self, thread_id, message_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.messages.retrieve( + def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage: + response = self.openai_client.with_raw_response.beta.threads.messages.retrieve( thread_id=thread_id, message_id=message_id, **kwargs ) - return response + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data - def update(self, thread_id, message_id, **kwargs) -> Any: - response = self.openai_client.beta.threads.messages.update( + def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: + response = self.openai_client.with_raw_response.beta.threads.messages.update( thread_id=thread_id, message_id=message_id, **kwargs ) - return response + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data class ThreadFiles(APIResource): @@ -76,17 +115,29 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = 
client.openai_client
 
-    def list(self, thread_id, message_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.messages.files.list(
-            thread_id=thread_id, message_id=message_id, **kwargs
+    def list(self, thread_id, message_id, **kwargs) -> MessageList:
+        response = (
+            self.openai_client.with_raw_response.beta.threads.messages.files.list(
+                thread_id=thread_id, message_id=message_id, **kwargs
+            )
         )
-        return response
-
-    def retrieve(self, thread_id, message_id, file_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.messages.files.retrieve(
-            thread_id=thread_id, message_id=message_id, file_id=file_id**kwargs
+        data = MessageList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    def retrieve(
+        self, thread_id, message_id, file_id, **kwargs
+    ) -> ThreadMessageFileRetrieve:
+        response = (
+            self.openai_client.with_raw_response.beta.threads.messages.files.retrieve(
+                thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs
+            )
         )
-        return response
+        data = ThreadMessageFileRetrieve(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
 
 class Runs(APIResource):
@@ -95,39 +146,61 @@ def __init__(self, client: Portkey) -> None:
         self.openai_client = client.openai_client
         self.steps = Steps(client)
 
-    def create(self, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.create(**kwargs)
-        return response
+    def create(self, **kwargs) -> Run:
+        response = self.openai_client.with_raw_response.beta.threads.runs.create(
+            **kwargs
+        )
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    def retrieve(self, thread_id, run_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.retrieve(
+    def retrieve(self, thread_id, run_id, **kwargs) -> Run:
+        response = self.openai_client.with_raw_response.beta.threads.runs.retrieve(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    def list(self, thread_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.list(
+    def list(self, thread_id, **kwargs) -> RunList:
+        response = self.openai_client.with_raw_response.beta.threads.runs.list(
             thread_id=thread_id, **kwargs
         )
-        return response
+        data = RunList(**json.loads(response.text))
+        data._headers = response.headers
 
-    def update(self, thread_id, run_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.update(
+        return data
+
+    def update(self, thread_id, run_id, **kwargs) -> Run:
+        response = self.openai_client.with_raw_response.beta.threads.runs.update(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.submit_tool_outputs(
-            thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs
+    def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> Run:
+        response = (
+            self.openai_client.with_raw_response.beta.threads.runs.submit_tool_outputs(
+                thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs
+            )
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    def cancel(self, thread_id, run_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.cancel(
+    def cancel(self, thread_id, run_id, **kwargs) -> Run:
+        response = self.openai_client.with_raw_response.beta.threads.runs.cancel(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
 
 class Steps(APIResource):
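With the whole Runs surface now routed through with_raw_response, the gateway headers ride along on every parsed object. Below is a minimal sketch of the intended call pattern; the API key, thread ID, and assistant ID are placeholders, and it assumes these resources are mounted on the Portkey client the same way they are on the OpenAI SDK.

```python
# Hedged sketch, not part of the patch: assumes an openai-style mounting
# (client.beta.threads.runs) and placeholder credentials/IDs.
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # hypothetical thread ID
    assistant_id="asst_abc123",  # hypothetical assistant ID
)
print(run.id)             # parsed Run fields behave as before
print(run.get_headers())  # new: Portkey gateway headers from the raw response
```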
@@ -135,17 +208,25 @@ def __init__(self, client: Portkey) -> None:
         super().__init__(client)
         self.openai_client = client.openai_client
 
-    def list(self, thread_id, run_id, **kwargs) -> Any:
-        reponse = self.openai_client.beta.threads.runs.steps.list(
+    def list(self, thread_id, run_id, **kwargs) -> RunStepList:
+        response = self.openai_client.with_raw_response.beta.threads.runs.steps.list(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return reponse
+        data = RunStepList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    def retrieve(self, thread_id, run_id, step_id, **kwargs) -> Any:
-        response = self.openai_client.beta.threads.runs.steps.retrieve(
-            thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs
+    def retrieve(self, thread_id, run_id, step_id, **kwargs) -> RunStep:
+        response = (
+            self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve(
+                thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs
+            )
         )
-        return response
+        data = RunStep(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
 
 class AsyncThreads(AsyncAPIResource):
@@ -157,34 +238,52 @@ def __init__(self, client: AsyncPortkey) -> None:
 
     async def create(
         self,
-    ) -> Any:
-        response = await self.openai_client.beta.threads.create()
-        return response
+    ) -> Thread:
+        response = await self.openai_client.with_raw_response.beta.threads.create()
+        data = Thread(**json.loads(response.text))
+        data._headers = response.headers
 
-    async def retrieve(self, thread_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.retrieve(
+        return data
+
+    async def retrieve(self, thread_id, **kwargs) -> Thread:
+        response = await self.openai_client.with_raw_response.beta.threads.retrieve(
             thread_id=thread_id, **kwargs
         )
-        return response
+        data = Thread(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    async def update(self, thread_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.update(
+    async def update(self, thread_id, **kwargs) -> Thread:
+        response = await self.openai_client.with_raw_response.beta.threads.update(
             thread_id=thread_id, **kwargs
         )
-        return response
+        data = Thread(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
     async def delete(
         self,
         thread_id,
-    ) -> Any:
-        response = await self.openai_client.beta.threads.delete(thread_id=thread_id)
-        return response
+    ) -> ThreadDeleted:
+        response = await self.openai_client.with_raw_response.beta.threads.delete(
+            thread_id=thread_id
+        )
+        data = ThreadDeleted(**json.loads(response.text))
+        data._headers = response.headers
 
-    async def create_and_run(self, assistant_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.create_and_run(
-            assistant_id=assistant_id, **kwargs
+        return data
+
+    async def create_and_run(self, assistant_id, **kwargs) -> Run:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.create_and_run(
+                assistant_id=assistant_id, **kwargs
+            )
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+        return data
 
 
 class AsyncMessages(AsyncAPIResource):
@@ -193,29 +292,46 @@ def __init__(self, client: AsyncPortkey) -> None:
         self.openai_client = client.openai_client
         self.files = AsyncThreadFiles(client)
 
-    async def create(self, thread_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.create(
-            thread_id=thread_id, **kwargs
+    async def create(self, thread_id, **kwargs) -> ThreadMessage:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.messages.create(
+                thread_id=thread_id, **kwargs
+            )
         )
-        return response
+        data = ThreadMessage(**json.loads(response.text))
+        data._headers = response.headers
 
-    async def list(self, thread_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.list(
-            thread_id=thread_id, **kwargs
-        )
-        return response
+        return data
 
-    async def retrieve(self, thread_id, message_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.retrieve(
-            thread_id=thread_id, message_id=message_id, **kwargs
+    async def list(self, thread_id, **kwargs) -> MessageList:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.messages.list(
+                thread_id=thread_id, **kwargs
+            )
         )
-        return response
-
-    async def update(self, thread_id, message_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.update(
-            thread_id=thread_id, message_id=message_id, **kwargs
+        data = MessageList(**json.loads(response.text))
+        data._headers = response.headers
+        return data
+
+    async def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.messages.retrieve(
+                thread_id=thread_id, message_id=message_id, **kwargs
+            )
         )
-        return response
+        data = ThreadMessage(**json.loads(response.text))
+        data._headers = response.headers
+        return data
+
+    async def update(self, thread_id, message_id, **kwargs) -> ThreadMessage:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.messages.update(
+                thread_id=thread_id, message_id=message_id, **kwargs
+            )
+        )
+        data = ThreadMessage(**json.loads(response.text))
+        data._headers = response.headers
+        return data
 
 
 class AsyncThreadFiles(AsyncAPIResource):
@@ -223,17 +339,38 @@ def __init__(self, client: AsyncPortkey) -> None:
         super().__init__(client)
         self.openai_client = client.openai_client
 
-    async def list(self, thread_id, message_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.files.list(
-            thread_id=thread_id, message_id=message_id, **kwargs
-        )
-        return response
-
-    async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.messages.files.retrieve(
-            thread_id=thread_id, message_id=message_id, file_id=file_id**kwargs
+    async def list(self, thread_id, message_id, **kwargs) -> MessageList:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.messages.files.list(
+                thread_id=thread_id, message_id=message_id, **kwargs
+            )
         )
-        return response
+        data = MessageList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
+
+    async def retrieve(
+        self, thread_id, message_id, file_id, **kwargs
+    ) -> ThreadMessageFileRetrieve:
+        # fmt: off
+        response = await self.openai_client\
+            .with_raw_response\
+            .beta\
+            .threads\
+            .messages\
+            .files\
+            .retrieve(
+                thread_id=thread_id,
+                message_id=message_id,
+                file_id=file_id,
+                **kwargs
+            )
+        # fmt: on
+        data = ThreadMessageFileRetrieve(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
 
 class AsyncRuns(AsyncAPIResource):
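The async classes mirror the sync ones one-to-one, so the same header passthrough is available under await. A short sketch under the same assumptions (mounting path, placeholder key and IDs):

```python
# Hedged sketch, not part of the patch: AsyncPortkey counterpart of the
# usage above; the mounting path and IDs are assumptions.
import asyncio

from portkey_ai import AsyncPortkey


async def main() -> None:
    client = AsyncPortkey(api_key="PORTKEY_API_KEY")  # placeholder key
    messages = await client.beta.threads.messages.list(thread_id="thread_abc123")
    print(messages.get_headers())  # headers survive on the parsed MessageList


asyncio.run(main())
```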
@@ -242,41 +379,73 @@ def __init__(self, client: AsyncPortkey) -> None:
         self.openai_client = client.openai_client
         self.steps = AsyncSteps(client)
 
-    async def create(self, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.runs.create(**kwargs)
-        return response
+    async def create(self, **kwargs) -> Run:
+        response = await self.openai_client.with_raw_response.beta.threads.runs.create(
+            **kwargs
+        )
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
 
-    async def retrieve(self, thread_id, run_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.runs.retrieve(
-            thread_id=thread_id, run_id=run_id, **kwargs
+        return data
+
+    async def retrieve(self, thread_id, run_id, **kwargs) -> Run:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.runs.retrieve(
+                thread_id=thread_id, run_id=run_id, **kwargs
+            )
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    async def list(self, thread_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.runs.list(
+    async def list(self, thread_id, **kwargs) -> RunList:
+        response = await self.openai_client.with_raw_response.beta.threads.runs.list(
             thread_id=thread_id, **kwargs
         )
-        return response
+        data = RunList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    async def update(self, thread_id, run_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.runs.update(
+    async def update(self, thread_id, run_id, **kwargs) -> Run:
+        response = await self.openai_client.with_raw_response.beta.threads.runs.update(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
     async def submit_tool_outputs(
         self, thread_id, tool_outputs, run_id, **kwargs
-    ) -> Any:
-        response = await self.openai_client.beta.threads.runs.submit_tool_outputs(
-            thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs
+    ) -> Run:
+        # fmt: off
+        response = await self.openai_client\
+            .with_raw_response\
+            .beta\
+            .threads\
+            .runs\
+            .submit_tool_outputs(
+                thread_id=thread_id,
+                run_id=run_id,
+                tool_outputs=tool_outputs,
+                **kwargs
         )
-        return response
+        # fmt: on
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    async def cancel(self, thread_id, run_id, **kwargs) -> Any:
-        response = await self.openai_client.beta.threads.runs.cancel(
+    async def cancel(self, thread_id, run_id, **kwargs) -> Run:
+        response = await self.openai_client.with_raw_response.beta.threads.runs.cancel(
             thread_id=thread_id, run_id=run_id, **kwargs
         )
-        return response
+        data = Run(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
 
 class AsyncSteps(AsyncAPIResource):
@@ -284,14 +453,24 @@ def __init__(self, client: AsyncPortkey) -> None:
         super().__init__(client)
         self.openai_client = client.openai_client
 
-    async def list(self, thread_id, run_id, **kwargs) -> Any:
-        reponse = await self.openai_client.beta.threads.runs.steps.list(
-            thread_id=thread_id, run_id=run_id, **kwargs
+    async def list(self, thread_id, run_id, **kwargs) -> RunStepList:
+        response = (
+            await self.openai_client.with_raw_response.beta.threads.runs.steps.list(
+                thread_id=thread_id, run_id=run_id, **kwargs
+            )
        )
-        return reponse
+        data = RunStepList(**json.loads(response.text))
+        data._headers = response.headers
+
+        return data
 
-    
async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> Any: - response = await self.openai_client.beta.threads.runs.steps.retrieve( - thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs + async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> RunStep: + response = ( + await self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve( + thread_id=thread_id, run_id=run_id, step_id=step_id, **kwargs + ) ) - return response + data = RunStep(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index a57fce2e..28b4e3dd 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -479,7 +479,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class FileObject(BaseModel): +class FileObject(BaseModel, extra="allow"): id: Optional[str] bytes: Optional[int] created_at: Optional[int] @@ -500,7 +500,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class FileList(BaseModel): +class FileList(BaseModel, extra="allow"): object: str data: List[Any] _headers: Optional[httpx.Headers] = None @@ -513,7 +513,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class FileDeleted(BaseModel): +class FileDeleted(BaseModel, extra="allow"): id: Optional[str] deleted: Optional[bool] object: Optional[Literal["file"]] @@ -527,7 +527,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class ModelDeleted(BaseModel): +class ModelDeleted(BaseModel, extra="allow"): id: str deleted: bool object: str @@ -541,7 +541,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class Model(BaseModel): +class Model(BaseModel, extra="allow"): id: str created: int object: Literal["model"] @@ -556,7 +556,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class ModelList(BaseModel): +class ModelList(BaseModel, extra="allow"): object: str data: List[Any] _headers: Optional[httpx.Headers] = None @@ -569,7 +569,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class Assistant(BaseModel): +class Assistant(BaseModel, extra="allow"): id: Optional[str] created_at: Optional[int] description: Optional[str] = None @@ -590,7 +590,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantList(BaseModel): +class AssistantList(BaseModel, extra="allow"): object: Optional[str] data: Optional[List[Any]] _headers: Optional[httpx.Headers] = None @@ -603,7 +603,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantDeleted(BaseModel): +class AssistantDeleted(BaseModel, extra="allow"): id: Optional[str] object: Optional[str] deleted: Optional[bool] @@ -737,7 +737,7 @@ def parse_headers(headers: Optional[httpx.Headers]) -> dict: return _headers -class AssistantFileDeleted(BaseModel): +class AssistantFileDeleted(BaseModel, extra="allow"): id: Optional[str] deleted: Optional[bool] object: Optional[str] @@ -751,7 +751,7 @@ def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantFile(BaseModel): +class AssistantFile(BaseModel, extra="allow"): id: Optional[str] assistant_id: Optional[str] created_at: Optional[int] @@ -766,7 +766,177 @@ def get_headers(self) -> 
Optional[Dict[str, str]]: return parse_headers(self._headers) -class AssistantFileList(BaseModel): +class AssistantFileList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[Any]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Thread(BaseModel, extra="allow"): + id: Optional[str] + created_at: Optional[int] + metadata: Optional[object] = None + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ThreadDeleted(BaseModel, extra="allow"): + id: Optional[str] + object: Optional[str] + deleted: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class Run(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] + cancelled_at: Optional[int] = None + completed_at: Optional[int] = None + created_at: Optional[int] + expires_at: Optional[int] + failed_at: Optional[int] = None + file_ids: Optional[List[str]] = None + instructions: Optional[str] + last_error: Optional[Any] = None + metadata: Optional[object] = None + model: Optional[str] + object: Optional[str] + required_action: Optional[str] = None + started_at: Optional[int] = None + status: Optional[str] + thread_id: Optional[str] + tools: Optional[List[Any]] = None + usage: Optional[Any] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ThreadMessage(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] = None + content: Optional[List[Any]] + created_at: Optional[int] + file_ids: Optional[List[str]] + metadata: Optional[object] = None + object: Optional[str] + role: Optional[str] + run_id: Optional[str] = None + thread_id: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class MessageList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[Any]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ThreadMessageFileRetrieve(BaseModel, extra="allow"): + id: Optional[str] + object: Optional[str] + created_at: Optional[int] + message_id: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[Any]] + first_id: Optional[str] + 
last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunStep(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] + cancelled_at: Optional[int] = None + completed_at: Optional[int] = None + created_at: Optional[int] + expired_at: Optional[int] = None + failed_at: Optional[int] = None + last_error: Optional[Any] = None + metadata: Optional[object] = None + object: Optional[str] + run_id: Optional[str] + status: Optional[str] + step_details: Optional[Any] + thread_id: Optional[str] + type: Optional[str] + usage: Optional[Any] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunStepList(BaseModel, extra="allow"): object: Optional[str] data: Optional[List[Any]] first_id: Optional[str] From 42181382ccdb99f1619b28cb7e5408206bbe6a14 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 15:45:45 +0530 Subject: [PATCH 27/62] fix: test cases for assistants and threads --- tests/test_assistants.py | 6 +++--- tests/test_threads.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index ef7711fc..16498c9c 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -74,15 +74,15 @@ def test_method_single_with_vk_and_provider( for i in get_configs(f"{CONFIGS_PATH}/single_provider"): t3_params.append((client, i)) - @pytest.mark.parametrize("client, config", t3_params) + @pytest.mark.parametrize("client, virtual_key", t3_params) def test_method_all_params( - self, client: Any, provider: str, auth: str, model + self, client: Any, auth: str, model, virtual_key:str ) -> None: metadata = self.get_metadata() portkey = client( base_url=base_url, api_key=api_key, - provider=f"{provider}", + virtual_key=virtual_key, Authorization=f"Bearer {auth}", trace_id=str(uuid4()), metadata=metadata, diff --git a/tests/test_threads.py b/tests/test_threads.py index a8a074f2..1e189bd0 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -42,15 +42,15 @@ def get_metadata(self): for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): t2_params.append((client, i)) - @pytest.mark.parametrize("client, config", t2_params) + @pytest.mark.parametrize("client, virtual_key", t2_params) def test_method_single_with_vk_and_provider( - self, client: Any, provider: str, auth: str, model + self, client: Any, auth: str, virtual_key:str ) -> None: metadata = self.get_metadata() portkey = client( base_url=base_url, api_key=api_key, - provider=f"{provider}", + virtual_key=virtual_key, Authorization=f"Bearer {auth}", trace_id=str(uuid4()), metadata=metadata, From f877ebe6cfa5a4fa20594ccbdecffd0a0d3c88bc Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 15:50:05 +0530 Subject: [PATCH 28/62] fix: linting issues --- tests/test_assistants.py | 2 +- tests/test_threads.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index 16498c9c..660aff98 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -76,7 +76,7 @@ def test_method_single_with_vk_and_provider( @pytest.mark.parametrize("client, 
virtual_key", t3_params) def test_method_all_params( - self, client: Any, auth: str, model, virtual_key:str + self, client: Any, auth: str, model, virtual_key: str ) -> None: metadata = self.get_metadata() portkey = client( diff --git a/tests/test_threads.py b/tests/test_threads.py index 1e189bd0..4656bc55 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -44,7 +44,7 @@ def get_metadata(self): @pytest.mark.parametrize("client, virtual_key", t2_params) def test_method_single_with_vk_and_provider( - self, client: Any, auth: str, virtual_key:str + self, client: Any, auth: str, virtual_key: str ) -> None: metadata = self.get_metadata() portkey = client( From 2c768791b6838f1be5b9223e0cafacbd9258251f Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 16:05:34 +0530 Subject: [PATCH 29/62] fix: openai specific test cases --- tests/test_assistants.py | 7 ++++--- tests/test_async_images.py | 7 ++++--- tests/test_images.py | 7 ++++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index 660aff98..c1a1e13c 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -45,10 +45,11 @@ def get_metadata(self): t1_params = [] t = [] for k, v in models.items(): - for i in v["chat"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["chat"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) - t1_params.extend(t) + t1_params.extend(t) @pytest.mark.parametrize("client, provider, auth, model", t1_params) def test_method_single_with_vk_and_provider( diff --git a/tests/test_async_images.py b/tests/test_async_images.py index 8ef5b89a..c63d9fc0 100644 --- a/tests/test_async_images.py +++ b/tests/test_async_images.py @@ -46,10 +46,11 @@ def get_metadata(self): t1_params = [] t = [] for k, v in models.items(): - for i in v["image"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["image"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) - t1_params.extend(t) + t1_params.extend(t) @pytest.mark.asyncio @pytest.mark.parametrize("client, provider, auth, model", t1_params) diff --git a/tests/test_images.py b/tests/test_images.py index e3797ef8..01e25781 100644 --- a/tests/test_images.py +++ b/tests/test_images.py @@ -47,10 +47,11 @@ def get_metadata(self): t1_params = [] t = [] for k, v in models.items(): - for i in v["image"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) + if k == "openai": + for i in v["image"]: + t.append((client, k, os.environ.get(v["env_variable"]), i)) - t1_params.extend(t) + t1_params.extend(t) @pytest.mark.parametrize("client, provider, auth, model", t1_params) def test_method_single_with_vk_and_provider( From b8476be9b27f52573b3c5a88b4603ab92a5d32da Mon Sep 17 00:00:00 2001 From: visargD Date: Sat, 16 Mar 2024 17:13:46 +0530 Subject: [PATCH 30/62] chore: update models json with latest provider models --- tests/models.json | 69 +++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 41 deletions(-) diff --git a/tests/models.json b/tests/models.json index f63a806e..c195a987 100644 --- a/tests/models.json +++ b/tests/models.json @@ -2,18 +2,17 @@ "openai": { "env_variable": "OPENAI_API_KEY", "chat": [ - "gpt-4-32k-0613", - "gpt-3.5-turbo-0613", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", "gpt-4-1106-preview", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-16k", + "gpt-4-vision-preview", "gpt-4", - "gpt-4-0314", + "gpt-4-0613", 
"gpt-4-32k", - "gpt-4-32k-0314", - "gpt-3.5-turbo-0301", + "gpt-4-32k-0613", "gpt-3.5-turbo", - "gpt-4-0613" + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-1106" ], "text": [ "gpt-3.5-turbo-instruct" @@ -29,42 +28,32 @@ "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf", "meta-llama/Llama-2-70b-chat-hf", - "codellama/CodeLlama-34b-Instruct-hf", - "mistralai/Mistral-7B-Instruct-v0.1" + "mistralai/Mistral-7B-Instruct-v0.1", + "mistralai/Mixtral-8x7B-Instruct-v0.1" ], "text": [ "meta-llama/Llama-2-7b-chat-hf", "meta-llama/Llama-2-13b-chat-hf", "meta-llama/Llama-2-70b-chat-hf", - "codellama/CodeLlama-34b-Instruct-hf", - "mistralai/Mistral-7B-Instruct-v0.1" + "mistralai/Mistral-7B-Instruct-v0.1", + "mistralai/Mixtral-8x7B-Instruct-v0.1" ], "image":[] }, "anthropic": { "env_variable": "ANTHROPIC_API_KEY", "chat": [ - "claude-instant-1.2", - "claude-1.3", - "claude-1.2", - "claude-1.0", - "claude-instant-1.1", - "claude-instant-1.0" + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-2.0", + "claude-instant-1.2" ], "text": [ - "claude-instant-1.2", - "claude-1", - "claude-1-100k", - "claude-instant-1", - "claude-instant-1-100k", - "claude-1.3", - "claude-1.3-100k", - "claude-1.2", - "claude-1.0", - "claude-instant-1.1", - "claude-instant-1.1-100k", - "claude-instant-1.0", - "claude-2" + "claude-2.1", + "claude-2.0", + "claude-instant-1.2" ], "image":[] }, @@ -72,18 +61,16 @@ "env_variable": "COHERE_API_KEY", "chat": [ "command-light", + "command-light-nightly", + "command", + "command-nightly" + ], + "text": [ + "command-light", + "command-light-nightly", "command", - "base-light", - "base", - "embed-english-v2.0", - "embed-english-light-v2.0", - "embed-multilingual-v2.0", - "embed-english-v3.0", - "embed-english-light-v3.0", - "embed-multilingual-v3.0", - "embed-multilingual-light-v3.0" + "command-nightly" ], - "text": [], "image":[] } } \ No newline at end of file From 9ae35af7464a96d3a4deb791591d0b149db027f8 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 18:17:28 +0530 Subject: [PATCH 31/62] feat: default models --- portkey_ai/api_resources/apis/chat_complete.py | 4 ++-- portkey_ai/api_resources/apis/complete.py | 4 ++-- portkey_ai/api_resources/apis/embeddings.py | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 931a1ecf..9b72df1d 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -74,7 +74,7 @@ def normal_create(self, model, messages, **kwargs) -> ChatCompletions: def create( self, *, - model: Optional[str] = None, + model: Optional[str] = "portkey-default", messages: Iterable[Any], **kwargs, ) -> ChatCompletions: @@ -122,7 +122,7 @@ async def normal_create(self, model, messages, **kwargs) -> ChatCompletions: async def create( self, *, - model: Optional[str] = None, + model: Optional[str] = "portkey-default", messages: Iterable[Any], **kwargs, ) -> ChatCompletions: diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 37aab280..911af7c9 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -46,7 +46,7 @@ def normal_create(self, model, prompt, **kwargs) -> TextCompletion: def create( self, *, - model: Optional[str] = None, + model: Optional[str] = "portkey-default", prompt: Optional[str] = None, **kwargs, ) -> 
TextCompletion: @@ -94,7 +94,7 @@ async def normal_create(self, model, prompt, **kwargs) -> TextCompletion: async def create( self, *, - model: Optional[str] = None, + model: Optional[str] = "portkey-default-model", prompt: Optional[str] = None, **kwargs, ) -> Any: diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index fa88977d..30d05f95 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -1,4 +1,5 @@ import json +from typing import Optional from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.utils import GenericResponse @@ -9,7 +10,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create(self, *, input: str, model: str, **kwargs) -> GenericResponse: + def create(self, *, input: str, model: Optional[str] = "portkey-default", **kwargs) -> GenericResponse: response = self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs ) @@ -25,7 +26,7 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create(self, *, input: str, model: str, **kwargs) -> GenericResponse: + async def create(self, *, input: str, model: Optional[str] = "portkey-default", **kwargs) -> GenericResponse: response = await self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs ) From c84bdcf6315d91a7b11e82118cfaf92a2727c248 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 18:20:21 +0530 Subject: [PATCH 32/62] fix: linting issues --- portkey_ai/api_resources/apis/embeddings.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 30d05f95..36f0f06f 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -10,9 +10,11 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def create(self, *, input: str, model: Optional[str] = "portkey-default", **kwargs) -> GenericResponse: + def create( + self, *, input: str, model: Optional[str] = "portkey-default", **kwargs + ) -> GenericResponse: response = self.openai_client.with_raw_response.embeddings.create( - input=input, model=model, **kwargs + input=input, model=model, **kwargs # type: ignore ) data = GenericResponse(**json.loads(response.text)) @@ -26,9 +28,11 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def create(self, *, input: str, model: Optional[str] = "portkey-default", **kwargs) -> GenericResponse: + async def create( + self, *, input: str, model: Optional[str] = "portkey-default", **kwargs + ) -> GenericResponse: response = await self.openai_client.with_raw_response.embeddings.create( - input=input, model=model, **kwargs + input=input, model=model, **kwargs # type: ignore ) data = GenericResponse(**json.loads(response.text)) data._headers = response.headers From 2c5fbead1b40ae7c463aba3d92a9a32a7789cb30 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Sat, 16 Mar 2024 18:24:13 +0530 Subject: [PATCH 33/62] fix: default model name typo --- portkey_ai/api_resources/apis/complete.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 911af7c9..0ab4cbb6 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -94,7 +94,7 @@ async def normal_create(self, model, prompt, **kwargs) -> TextCompletion: async def create( self, *, - model: Optional[str] = "portkey-default-model", + model: Optional[str] = "portkey-default", prompt: Optional[str] = None, **kwargs, ) -> Any: From 3aeef46b059a077813cbd502c74e0532f41e97c7 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Mon, 18 Mar 2024 13:29:21 +0530 Subject: [PATCH 34/62] fix: base_url --- portkey_ai/api_resources/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index de7ab879..c0d200c5 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -51,7 +51,7 @@ def __init__( self.openai_client = OpenAI( api_key=OPEN_AI_API_KEY, - base_url=PORTKEY_BASE_URL, + base_url=self.base_url, default_headers=self.allHeaders, ) @@ -136,7 +136,7 @@ def __init__( self.openai_client = AsyncOpenAI( api_key=OPEN_AI_API_KEY, - base_url=PORTKEY_BASE_URL, + base_url=self.base_url, default_headers=self.allHeaders, ) From 691f751ad069e3532b8e8f985f4f1ad2c5c54261 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Mon, 18 Mar 2024 13:30:12 +0530 Subject: [PATCH 35/62] fix: linting issue --- portkey_ai/api_resources/client.py | 1 - 1 file changed, 1 deletion(-) diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index c0d200c5..c60d5c86 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -7,7 +7,6 @@ from openai import AsyncOpenAI, OpenAI from portkey_ai.api_resources.global_constants import ( OPEN_AI_API_KEY, - PORTKEY_BASE_URL, ) From 9a4ff45ad580e8e86f8e8b030b387525bd8d7614 Mon Sep 17 00:00:00 2001 From: visargD Date: Mon, 18 Mar 2024 18:11:04 +0530 Subject: [PATCH 36/62] chore: allow 1.0 to 2.0 openai package version --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 3e32b70e..896df0b4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ dev = python-dotenv==1.0.0 ruff==0.0.292 pytest-asyncio==0.23.5 - openai>=1.0.0,<=1.13.0 + openai>=1.0,<2.0 [mypy] ignore_missing_imports = true From 58bfc50e4ebd4a8143f4783a3e3aa9d95d752f16 Mon Sep 17 00:00:00 2001 From: visargD Date: Mon, 18 Mar 2024 18:14:36 +0530 Subject: [PATCH 37/62] chore: update openai test model list --- tests/models.json | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/models.json b/tests/models.json index c195a987..b8af6331 100644 --- a/tests/models.json +++ b/tests/models.json @@ -8,8 +8,6 @@ "gpt-4-vision-preview", "gpt-4", "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-1106" From 014a6dd1c234f666142a5e0b2f6ef35523af500a Mon Sep 17 00:00:00 2001 From: visargD Date: Mon, 18 Mar 2024 18:15:59 +0530 Subject: [PATCH 38/62] chore: update test case configs with override models and remove provider name --- .../single_provider/single_provider.json | 1 - .../anthropic_n_openai.json | 16 ++++++++++++---- .../anyscale_n_openai.json | 16 ++++++++++++---- .../loadbalance_and_fallback/azure_n_openai.json | 16 ++++++++++++---- .../cohere_n_openai.json | 15 ++++++++++++--- .../loadbalance_with_two_apikeys.json | 12 ++++++++---- 
.../single_provider/single_provider.json | 6 ++++-- .../single_provider_with_vk_retry_cache.json | 3 +++ .../single_with_basic_config.json | 5 ++++- .../anthropic_n_openai.json | 16 ++++++++++++---- .../anyscale_n_openai.json | 16 ++++++++++++---- .../loadbalance_and_fallback/azure_n_openai.json | 16 ++++++++++++---- .../cohere_n_openai.json | 15 ++++++++++++--- .../loadbalance_with_two_apikeys.json | 12 ++++++++---- .../single_provider/single_provider.json | 6 ++++-- .../single_provider_with_vk_retry_cache.json | 3 +++ .../single_with_basic_config.json | 5 ++++- .../images/single_provider/single_provider.json | 1 - .../threads/single_provider/single_provider.json | 1 - 19 files changed, 134 insertions(+), 47 deletions(-) diff --git a/tests/configs/assistants/single_provider/single_provider.json b/tests/configs/assistants/single_provider/single_provider.json index 7c4ed82a..9471258a 100644 --- a/tests/configs/assistants/single_provider/single_provider.json +++ b/tests/configs/assistants/single_provider/single_provider.json @@ -1,4 +1,3 @@ { - "provider": "openai", "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json index 2c5c4a25..a941fddb 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/anthropic_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } }, { "strategy": { @@ -17,10 +19,16 @@ }, "targets": [ { - "virtual_key": "anthropic-virtual-key" + "virtual_key": "anthropic-virtual-key", + "override_params": { + "model": "claude-2.1" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json index 2c90ddac..d4a1ca94 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/anyscale_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } }, { "strategy": { @@ -17,10 +19,16 @@ }, "targets": [ { - "virtual_key": "anyscale-virtual-key" + "virtual_key": "anyscale-virtual-key", + "override_params": { + "model": "mistralai/Mistral-7B-Instruct-v0.1" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json index 440c2591..3a6d1c70 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/azure_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } }, { "strategy": { @@ -17,10 
+19,16 @@ }, "targets": [ { - "virtual_key": "azure-virtual-key" + "virtual_key": "azure-virtual-key", + "override_params": { + "model": "gpt-35-turbo" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } ] } diff --git a/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json index 1e697928..28bfae9d 100644 --- a/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json +++ b/tests/configs/chat_completions/loadbalance_and_fallback/cohere_n_openai.json @@ -4,7 +4,10 @@ }, "targets": [ { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } }, { "strategy": { @@ -16,10 +19,16 @@ }, "targets": [ { - "virtual_key": "cohere-virtual-key" + "virtual_key": "cohere-virtual-key", + "override_params": { + "model": "command" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } ] } diff --git a/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json index 06973872..08dbed81 100644 --- a/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ b/tests/configs/chat_completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -4,12 +4,16 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } }, { - "provider": "anthropic", - "virtual_key": "anthropic-virtual-key" + "virtual_key": "anthropic-virtual-key", + "override_params": { + "model": "claude-2.1" + } } ] } \ No newline at end of file diff --git a/tests/configs/chat_completions/single_provider/single_provider.json b/tests/configs/chat_completions/single_provider/single_provider.json index 7c4ed82a..155422e4 100644 --- a/tests/configs/chat_completions/single_provider/single_provider.json +++ b/tests/configs/chat_completions/single_provider/single_provider.json @@ -1,4 +1,6 @@ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } \ No newline at end of file diff --git a/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json index 52281ce7..5adda115 100644 --- a/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json +++ b/tests/configs/chat_completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -1,5 +1,8 @@ { "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + }, "cache": { "mode": "semantic", "max_age": 60 diff --git a/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json b/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json index 9471258a..155422e4 100644 --- a/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json +++ 
b/tests/configs/chat_completions/single_with_basic_config/single_with_basic_config.json @@ -1,3 +1,6 @@ { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo" + } } \ No newline at end of file diff --git a/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json index 2c5c4a25..0c6b973c 100644 --- a/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/anthropic_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { "strategy": { @@ -17,10 +19,16 @@ }, "targets": [ { - "virtual_key": "anthropic-virtual-key" + "virtual_key": "anthropic-virtual-key", + "override_params": { + "model": "claude-2.1" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json index 2c90ddac..38c4e5cb 100644 --- a/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/anyscale_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { "strategy": { @@ -17,10 +19,16 @@ }, "targets": [ { - "virtual_key": "anyscale-virtual-key" + "virtual_key": "anyscale-virtual-key", + "override_params": { + "model": "meta-llama/Llama-2-7b-chat-hf" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json index 440c2591..64fa75b2 100644 --- a/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/azure_n_openai.json @@ -4,8 +4,10 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { "strategy": { @@ -17,10 +19,16 @@ }, "targets": [ { - "virtual_key": "azure-virtual-key" + "virtual_key": "azure-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } ] } diff --git a/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json b/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json index 1e697928..91b2d8bc 100644 --- a/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json +++ b/tests/configs/completions/loadbalance_and_fallback/cohere_n_openai.json @@ -4,7 +4,10 @@ }, "targets": [ { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { "strategy": { @@ -16,10 +19,16 @@ }, "targets": [ { - 
"virtual_key": "cohere-virtual-key" + "virtual_key": "cohere-virtual-key", + "override_params": { + "model": "command" + } }, { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } ] } diff --git a/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json index 06973872..dc5a8453 100644 --- a/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json +++ b/tests/configs/completions/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json @@ -4,12 +4,16 @@ }, "targets": [ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } }, { - "provider": "anthropic", - "virtual_key": "anthropic-virtual-key" + "virtual_key": "anthropic-virtual-key", + "override_params": { + "model": "claude-2.1" + } } ] } \ No newline at end of file diff --git a/tests/configs/completions/single_provider/single_provider.json b/tests/configs/completions/single_provider/single_provider.json index 7c4ed82a..26a6f0d8 100644 --- a/tests/configs/completions/single_provider/single_provider.json +++ b/tests/configs/completions/single_provider/single_provider.json @@ -1,4 +1,6 @@ { - "provider": "openai", - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } \ No newline at end of file diff --git a/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json b/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json index 52281ce7..d25999d1 100644 --- a/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json +++ b/tests/configs/completions/single_provider_with_vk_retry_cache/single_provider_with_vk_retry_cache.json @@ -1,5 +1,8 @@ { "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + }, "cache": { "mode": "semantic", "max_age": 60 diff --git a/tests/configs/completions/single_with_basic_config/single_with_basic_config.json b/tests/configs/completions/single_with_basic_config/single_with_basic_config.json index 9471258a..26a6f0d8 100644 --- a/tests/configs/completions/single_with_basic_config/single_with_basic_config.json +++ b/tests/configs/completions/single_with_basic_config/single_with_basic_config.json @@ -1,3 +1,6 @@ { - "virtual_key": "openai-virtual-key" + "virtual_key": "openai-virtual-key", + "override_params": { + "model": "gpt-3.5-turbo-instruct" + } } \ No newline at end of file diff --git a/tests/configs/images/single_provider/single_provider.json b/tests/configs/images/single_provider/single_provider.json index 7c4ed82a..9471258a 100644 --- a/tests/configs/images/single_provider/single_provider.json +++ b/tests/configs/images/single_provider/single_provider.json @@ -1,4 +1,3 @@ { - "provider": "openai", "virtual_key": "openai-virtual-key" } \ No newline at end of file diff --git a/tests/configs/threads/single_provider/single_provider.json b/tests/configs/threads/single_provider/single_provider.json index 7c4ed82a..9471258a 100644 --- a/tests/configs/threads/single_provider/single_provider.json +++ b/tests/configs/threads/single_provider/single_provider.json @@ -1,4 +1,3 @@ { - "provider": "openai", 
"virtual_key": "openai-virtual-key" } \ No newline at end of file From 16bbae69f17deb33f538d7be479d1ebfc2b9d076 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Mon, 18 Mar 2024 18:23:53 +0530 Subject: [PATCH 39/62] fix: Bearer in authorization headers --- portkey_ai/api_resources/apis/create_headers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 84a682f9..47105475 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -21,7 +21,7 @@ def json(self) -> Mapping: if k.lower() != "authorization": headers[get_portkey_header(k)] = str(v) else: - headers[k] = str(v) + headers[k] = str('Bearer ' + v) return headers From be24c0f6a1b754a9de06ad5ffa64c0c2a53288a9 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Mon, 18 Mar 2024 18:47:20 +0530 Subject: [PATCH 40/62] fix: linting issues --- portkey_ai/api_resources/apis/create_headers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/portkey_ai/api_resources/apis/create_headers.py b/portkey_ai/api_resources/apis/create_headers.py index 47105475..12f51021 100644 --- a/portkey_ai/api_resources/apis/create_headers.py +++ b/portkey_ai/api_resources/apis/create_headers.py @@ -21,7 +21,7 @@ def json(self) -> Mapping: if k.lower() != "authorization": headers[get_portkey_header(k)] = str(v) else: - headers[k] = str('Bearer ' + v) + headers[k] = str("Bearer " + v) return headers From 0e526cf61632ee13cb0bde036c8a5707f4f91887 Mon Sep 17 00:00:00 2001 From: visargD Date: Tue, 19 Mar 2024 17:36:35 +0530 Subject: [PATCH 41/62] chore: add stream chunk test util functions --- tests/utils.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/utils.py b/tests/utils.py index 1f9484b1..37a532b6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -4,3 +4,13 @@ def read_json_file(path: str) -> Dict[str, Any]: return json.load(open(path, "r")) + +def check_chat_streaming_chunk(chunk) -> bool: + stop_reason = chunk.choices[0].finish_reason + if type(stop_reason) is str: + return chunk.choices[0].delta == {} + else: + return type(chunk.choices[0].delta.content) is str + +def check_text_streaming_chunk(chunk) -> bool: + return type(chunk.choices[0].text) is str From 617574bc9b8324430e0fcdaf83ee5595035749c9 Mon Sep 17 00:00:00 2001 From: visargD Date: Tue, 19 Mar 2024 17:37:54 +0530 Subject: [PATCH 42/62] chore: remove bearer from test cases authorization param --- tests/test_assistants.py | 4 ++-- tests/test_async_images.py | 2 +- tests/test_images.py | 2 +- tests/test_threads.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index c1a1e13c..aebdd20a 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -59,7 +59,7 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) @@ -84,7 +84,7 @@ def test_method_all_params( base_url=base_url, api_key=api_key, virtual_key=virtual_key, - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=metadata, ) diff --git a/tests/test_async_images.py b/tests/test_async_images.py index c63d9fc0..2680b84b 100644 --- a/tests/test_async_images.py +++ b/tests/test_async_images.py @@ -61,7 +61,7 @@ async def 
test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) diff --git a/tests/test_images.py b/tests/test_images.py index 01e25781..13ae5886 100644 --- a/tests/test_images.py +++ b/tests/test_images.py @@ -61,7 +61,7 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) diff --git a/tests/test_threads.py b/tests/test_threads.py index 4656bc55..e5bee08d 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -51,7 +51,7 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, virtual_key=virtual_key, - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=metadata, ) From aac9ad771ad19a0b680c05fdae9a6992ecd8aeb9 Mon Sep 17 00:00:00 2001 From: visargD Date: Wed, 20 Mar 2024 12:38:20 +0530 Subject: [PATCH 43/62] chore: formatting changes --- tests/utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/utils.py b/tests/utils.py index 37a532b6..372a9b05 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -5,12 +5,14 @@ def read_json_file(path: str) -> Dict[str, Any]: return json.load(open(path, "r")) + def check_chat_streaming_chunk(chunk) -> bool: stop_reason = chunk.choices[0].finish_reason if type(stop_reason) is str: return chunk.choices[0].delta == {} else: return type(chunk.choices[0].delta.content) is str - + + def check_text_streaming_chunk(chunk) -> bool: return type(chunk.choices[0].text) is str From 6b96afcad83f2fb6b41b645c237e6db32c174f0f Mon Sep 17 00:00:00 2001 From: visargD Date: Wed, 20 Mar 2024 12:39:47 +0530 Subject: [PATCH 44/62] chore: add strict test cases to check response values and types --- tests/test_assistants.py | 93 ++++++++++++++++++++----------- tests/test_async_chat_complete.py | 54 ++++++++++-------- tests/test_async_complete.py | 60 ++++++++++++-------- tests/test_async_images.py | 21 ++++--- tests/test_chat_complete.py | 54 ++++++++++-------- tests/test_complete.py | 58 +++++++++++-------- tests/test_images.py | 20 ++++--- tests/test_threads.py | 33 +++++++++-- 8 files changed, 247 insertions(+), 146 deletions(-) diff --git a/tests/test_assistants.py b/tests/test_assistants.py index aebdd20a..295ac91a 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -39,55 +39,55 @@ def get_metadata(self): "random_id": str(uuid4()), } - # -------------------------- - # Test-1 - - t1_params = [] - t = [] - for k, v in models.items(): - if k == "openai": - for i in v["chat"]: - t.append((client, k, os.environ.get(v["env_variable"]), i)) - - t1_params.extend(t) - - @pytest.mark.parametrize("client, provider, auth, model", t1_params) - def test_method_single_with_vk_and_provider( - self, client: Any, provider: str, auth: str, model - ) -> None: - portkey = client( - base_url=base_url, - api_key=api_key, - provider=f"{provider}", - Authorization=f"{auth}", - trace_id=str(uuid4()), - metadata=self.get_metadata(), - ) - assistant = portkey.beta.assistants.create( - model=model, - ) - print(assistant) + # # -------------------------- + # # Test-1 + + # t1_params = [] + # t = [] + # for k, v in models.items(): + # if k == "openai": + # for i in v["chat"]: + # if "vision" not in i: + # t.append((client, k, os.environ.get(v["env_variable"]), 
i)) + + # t1_params.extend(t) + + # @pytest.mark.parametrize("client, provider, auth, model", t1_params) + # def test_method_single_with_vk_and_provider( + # self, client: Any, provider: str, auth: str, model + # ) -> None: + # portkey = client( + # base_url=base_url, + # api_key=api_key, + # provider=f"{provider}", + # Authorization=f"{auth}", + # trace_id=str(uuid4()), + # metadata=self.get_metadata(), + # ) + # assistant = portkey.beta.assistants.create( + # model=model, + # ) + # print(assistant) # -------------------------- # Test-3 t3_params = [] for i in get_configs(f"{CONFIGS_PATH}/single_provider"): - t3_params.append((client, i)) + t3_params.append((client, i["virtual_key"])) @pytest.mark.parametrize("client, virtual_key", t3_params) - def test_method_all_params( - self, client: Any, auth: str, model, virtual_key: str - ) -> None: + def test_method_all_params(self, client: Any, virtual_key: str) -> None: metadata = self.get_metadata() + model = "gpt-4" portkey = client( base_url=base_url, api_key=api_key, virtual_key=virtual_key, - Authorization=f"{auth}", trace_id=str(uuid4()), metadata=metadata, ) + assistant = portkey.beta.assistants.create( model=model, description="string", @@ -98,4 +98,29 @@ def test_method_all_params( name="Math Tutor", tools=[{"type": "code_interpreter"}], ) - print(assistant) + + assert type(assistant.id) is str + assert assistant.object == "assistant" + assert assistant.model == model + assert assistant.name == "Math Tutor" + assert assistant.tools[0].type == "code_interpreter" + + update_assistant = portkey.beta.assistants.update( + assistant.id, description="updated string" + ) + + assert update_assistant.description == "updated string" + + retrieve_assistant = portkey.beta.assistants.retrieve(assistant.id) + + assert retrieve_assistant.id == assistant.id + assert retrieve_assistant.object == "assistant" + assert retrieve_assistant.model == model + assert retrieve_assistant.name == "Math Tutor" + assert retrieve_assistant.tools[0].type == "code_interpreter" + + delete_assistant = portkey.beta.assistants.delete(assistant.id) + + assert delete_assistant.id == assistant.id + assert delete_assistant.object == "assistant.deleted" + assert delete_assistant.deleted == True diff --git a/tests/test_async_chat_complete.py b/tests/test_async_chat_complete.py index 7ed67393..ac82cdd4 100644 --- a/tests/test_async_chat_complete.py +++ b/tests/test_async_chat_complete.py @@ -9,7 +9,7 @@ from portkey_ai import AsyncPortkey from time import sleep from dotenv import load_dotenv -from .utils import read_json_file +from .utils import read_json_file, check_chat_streaming_chunk load_dotenv(override=True) @@ -59,17 +59,19 @@ async def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - await portkey.chat.completions.create( + completion = await portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model=model, max_tokens=245, ) + assert type(completion.choices[0].message.content) is str + # -------------------------- # Test -2 t2_params = [] @@ -113,15 +115,12 @@ async def test_method_single_with_basic_config( config=config, ) - await portkey.chat.completions.create( + completion = await portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", ) - # print(completion.choices) - # assert("True", "True") - - # 
assert_matches_type(TextCompletion, completion, path=["response"]) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-3 @@ -163,10 +162,11 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.chat.completions.create( + cached_completion = portkey_2.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", ) + assert type(cached_completion.choices[0].message.content) is str # -------------------------- # Test-4 @@ -192,7 +192,7 @@ async def test_method_loadbalance_with_two_apikeys( messages=[{"role": "user", "content": "Say this is a test"}], max_tokens=245 ) - print(completion.choices) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-5 @@ -221,7 +221,7 @@ async def test_method_loadbalance_and_fallback( ], ) - print(completion.choices) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-6 @@ -244,7 +244,7 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: model="gpt-3.5-turbo", ) - print(completion.choices) + assert type(completion.choices[0].message.content) is str class TestChatCompletionsStreaming: @@ -278,18 +278,21 @@ async def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - await portkey.chat.completions.create( + completion = await portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model=model, max_tokens=245, stream=True, ) + async for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True + # -------------------------- # Test -2 t2_params = [] @@ -333,16 +336,14 @@ async def test_method_single_with_basic_config( config=config, ) - await portkey.chat.completions.create( + completion = await portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", stream=True, ) - # print(completion.choices) - # assert("True", "True") - - # assert_matches_type(TextCompletion, completion, path=["response"]) + async for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-3 @@ -385,12 +386,15 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.chat.completions.create( + cached_completion = portkey_2.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", stream=True, ) + async for chunk in cached_completion: + assert check_chat_streaming_chunk(chunk) == True + # -------------------------- # Test-4 t4_params = [] @@ -417,7 +421,8 @@ async def test_method_loadbalance_with_two_apikeys( stream=True, ) - print(completion) + async for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-5 @@ -446,8 +451,8 @@ async def test_method_loadbalance_and_fallback( ], stream=True, ) - - print(completion) + async for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-6 @@ -471,4 +476,5 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: stream=True, ) - print(completion) + async for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True diff --git 
a/tests/test_async_complete.py b/tests/test_async_complete.py index 0a64e9bb..6a2471d0 100644 --- a/tests/test_async_complete.py +++ b/tests/test_async_complete.py @@ -9,7 +9,7 @@ from portkey_ai import AsyncPortkey from time import sleep from dotenv import load_dotenv -from .utils import read_json_file +from .utils import read_json_file, check_text_streaming_chunk load_dotenv(override=True) @@ -59,17 +59,19 @@ async def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - await portkey.completions.create( + completion = await portkey.completions.create( prompt="Say this is a test", model=model, max_tokens=245, ) + assert type(completion.choices[0].text) is str + # -------------------------- # Test -2 t2_params = [] @@ -112,14 +114,11 @@ async def test_method_single_with_basic_config( config=config, ) - await portkey.completions.create( + completion = await portkey.completions.create( prompt="Say this is a test", ) - # print(completion.choices) - # assert("True", "True") - - # assert_matches_type(TextCompletion, completion, path=["response"]) + assert type(completion.choices[0].text) is str # -------------------------- # Test-3 @@ -160,7 +159,11 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - await portkey_2.completions.create(prompt="Say this is a test") + cached_completion = await portkey_2.completions.create( + prompt="Say this is a test" + ) + + assert type(cached_completion.choices[0].text) is str # -------------------------- # Test-4 @@ -186,7 +189,7 @@ async def test_method_loadbalance_with_two_apikeys( prompt="Say this is a test", max_tokens=245 ) - print(completion.choices) + assert type(completion.choices[0].text) is str # -------------------------- # Test-5 @@ -210,7 +213,7 @@ async def test_method_loadbalance_and_fallback( prompt="Say this is just a loadbalance and fallback test test" ) - print(completion.choices) + assert type(completion.choices[0].text) is str # -------------------------- # Test-6 @@ -232,7 +235,7 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: prompt="Say this is a test", ) - print(completion.choices) + assert type(completion.choices[0].text) is str class TestChatCompletionsStreaming: @@ -266,15 +269,18 @@ async def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - await portkey.completions.create( + completion = await portkey.completions.create( prompt="Say this is a test", model=model, max_tokens=245, stream=True ) + async for chunk in completion: + assert check_text_streaming_chunk(chunk) == True + # -------------------------- # Test -2 t2_params = [] @@ -317,12 +323,12 @@ async def test_method_single_with_basic_config( config=config, ) - await portkey.completions.create(prompt="Say this is a test", stream=True) - - # print(completion.choices) - # assert("True", "True") + completion = await portkey.completions.create( + prompt="Say this is a test", stream=True + ) - # assert_matches_type(TextCompletion, completion, path=["response"]) + async for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-3 @@ -361,7 +367,12 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - await 
portkey_2.completions.create(prompt="Say this is a test", stream=True) + cached_completion = await portkey_2.completions.create( + prompt="Say this is a test", stream=True + ) + + async for chunk in cached_completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-4 @@ -387,7 +398,8 @@ async def test_method_loadbalance_with_two_apikeys( prompt="Say this is a test", max_tokens=245, stream=True ) - print(completion) + async for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-5 @@ -411,7 +423,8 @@ async def test_method_loadbalance_and_fallback( prompt="Say this is just a loadbalance and fallback test test", stream=True ) - print(completion) + async for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-6 @@ -433,4 +446,5 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: prompt="Say this is a test", stream=True ) - print(completion) + async for chunk in completion: + assert check_text_streaming_chunk(chunk) == True diff --git a/tests/test_async_images.py b/tests/test_async_images.py index 2680b84b..a8413865 100644 --- a/tests/test_async_images.py +++ b/tests/test_async_images.py @@ -66,10 +66,12 @@ async def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - await portkey.images.generate( + generation = await portkey.images.generate( model=model, prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(generation.data[0].url) is str + # -------------------------- # Test -2 t2_params = [] @@ -89,10 +91,12 @@ async def test_method_single_with_basic_config( config=config, ) - await portkey.images.generate( + generation = await portkey.images.generate( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(generation.data[0].url) is str + # -------------------------- # Test-3 t3_params = [] @@ -120,6 +124,7 @@ async def test_method_single_provider_with_vk_retry_cache( await portkey.images.generate( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) + # Sleeping for the cache to reflect across the workers. The cache has an # eventual consistency and not immediate consistency. 
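
A fixed pause is the simplest way to ride out that propagation window, though it is also the slowest and most brittle. A minimal sketch of a bounded poll instead, assuming only some cheap zero-argument probe callable that returns True once the cached answer appears; probe and wait_for_cache are illustrative helpers, not part of this suite:

from time import sleep

def wait_for_cache(probe, attempts=10, interval=2.0):
    # Re-check a cheap cached request instead of sleeping a fixed 20 seconds,
    # giving up after roughly attempts * interval seconds.
    for _ in range(attempts):
        if probe():
            return True
        sleep(interval)
    return False

With such a helper, the fixed sleep(20) immediately below would collapse to a single bounded wait_for_cache(probe) call.
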
sleep(20) @@ -132,10 +137,12 @@ async def test_method_single_provider_with_vk_retry_cache( config=config, ) - await portkey_2.images.generate( + cached_generation = await portkey_2.images.generate( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(cached_generation.data[0].url) is str + # -------------------------- # Test-4 t4_params = [] @@ -160,7 +167,7 @@ async def test_method_loadbalance_with_two_apikeys( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-5 @@ -184,7 +191,7 @@ async def test_method_loadbalance_and_fallback( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-6 @@ -206,7 +213,7 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-7 @@ -235,4 +242,4 @@ async def test_method_all_params(self, client: Any, config: Dict) -> None: user="user-1234", ) - print(image.data) + assert type(image.data[0].url) is str diff --git a/tests/test_chat_complete.py b/tests/test_chat_complete.py index 8d08ee29..a244b012 100644 --- a/tests/test_chat_complete.py +++ b/tests/test_chat_complete.py @@ -9,7 +9,7 @@ from portkey_ai import Portkey from time import sleep from dotenv import load_dotenv -from .utils import read_json_file +from .utils import read_json_file, check_chat_streaming_chunk load_dotenv(override=True) @@ -58,17 +58,19 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - portkey.chat.completions.create( + completion = portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model=model, max_tokens=245, ) + assert type(completion.choices[0].message.content) is str + # -------------------------- # Test -2 t2_params = [] @@ -109,15 +111,12 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non config=config, ) - portkey.chat.completions.create( + completion = portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", ) - # print(completion.choices) - # assert("True", "True") - - # assert_matches_type(TextCompletion, completion, path=["response"]) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-3 @@ -158,11 +157,13 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.chat.completions.create( + cached_completion = portkey_2.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", ) + assert type(cached_completion.choices[0].message.content) is str + # -------------------------- # Test-4 t4_params = [] @@ -186,7 +187,7 @@ def test_method_loadbalance_with_two_apikeys( messages=[{"role": "user", "content": "Say this is a test"}], max_tokens=245 ) - print(completion.choices) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-5 @@ -212,7 +213,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non ], ) - 
print(completion.choices) + assert type(completion.choices[0].message.content) is str # -------------------------- # Test-6 @@ -234,7 +235,7 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: model="gpt-3.5-turbo", ) - print(completion.choices) + assert type(completion.choices[0].message.content) is str class TestChatCompletionsStreaming: @@ -267,18 +268,21 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - portkey.chat.completions.create( + completion = portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model=model, max_tokens=245, stream=True, ) + for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True + # -------------------------- # Test -2 t2_params = [] @@ -319,16 +323,14 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non config=config, ) - portkey.chat.completions.create( + completion = portkey.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", stream=True, ) - # print(completion.choices) - # assert("True", "True") - - # assert_matches_type(TextCompletion, completion, path=["response"]) + for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-3 @@ -370,12 +372,15 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.chat.completions.create( + cache_completion = portkey_2.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", stream=True, ) + for chunk in cache_completion: + assert check_chat_streaming_chunk(chunk) == True + # -------------------------- # Test-4 t4_params = [] @@ -401,7 +406,8 @@ def test_method_loadbalance_with_two_apikeys( stream=True, ) - print(completion) + for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-5 @@ -428,7 +434,8 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non stream=True, ) - print(completion) + for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True # -------------------------- # Test-6 @@ -451,4 +458,5 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: stream=True, ) - print(completion) + for chunk in completion: + assert check_chat_streaming_chunk(chunk) == True diff --git a/tests/test_complete.py b/tests/test_complete.py index 527defba..d47104dc 100644 --- a/tests/test_complete.py +++ b/tests/test_complete.py @@ -9,7 +9,7 @@ from portkey_ai import Portkey from time import sleep from dotenv import load_dotenv -from .utils import read_json_file +from .utils import read_json_file, check_text_streaming_chunk load_dotenv(override=True) @@ -58,17 +58,19 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - portkey.completions.create( + completion = portkey.completions.create( prompt="Say this is a test", model=model, max_tokens=245, ) + assert type(completion.choices[0].text) is str + # -------------------------- # Test -2 t2_params = [] @@ -108,14 +110,11 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non 
config=config, ) - portkey.completions.create( + completion = portkey.completions.create( prompt="Say this is a test", ) - # print(completion.choices) - # assert("True", "True") - - # assert_matches_type(TextCompletion, completion, path=["response"]) + assert type(completion.choices[0].text) is str # -------------------------- # Test-3 @@ -155,7 +154,8 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.completions.create(prompt="Say this is a test") + cached_completion = portkey_2.completions.create(prompt="Say this is a test") + assert type(cached_completion.choices[0].text) is str # -------------------------- # Test-4 @@ -179,8 +179,7 @@ def test_method_loadbalance_with_two_apikeys( completion = portkey.completions.create( prompt="Say this is a test", max_tokens=245 ) - - print(completion.choices) + assert type(completion.choices[0].text) is str # -------------------------- # Test-5 @@ -201,7 +200,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non prompt="Say this is just a loadbalance and fallback test test" ) - print(completion.choices) + assert type(completion.choices[0].text) is str # -------------------------- # Test-6 @@ -222,7 +221,7 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: prompt="Say this is a test", ) - print(completion.choices) + assert type(completion.choices[0].text) is str class TestChatCompletionsStreaming: @@ -255,15 +254,18 @@ def test_method_single_with_vk_and_provider( base_url=base_url, api_key=api_key, provider=f"{provider}", - Authorization=f"Bearer {auth}", + Authorization=f"{auth}", trace_id=str(uuid4()), metadata=self.get_metadata(), ) - portkey.completions.create( + completion = portkey.completions.create( prompt="Say this is a test", model=model, max_tokens=245, stream=True ) + for chunk in completion: + assert check_text_streaming_chunk(chunk) == True + # -------------------------- # Test -2 t2_params = [] @@ -303,12 +305,12 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non config=config, ) - portkey.completions.create(prompt="Say this is a test", stream=True) - - # print(completion.choices) - # assert("True", "True") + completion = portkey.completions.create( + prompt="Say this is a test", stream=True + ) - # assert_matches_type(TextCompletion, completion, path=["response"]) + for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-3 @@ -346,7 +348,12 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.completions.create(prompt="Say this is a test", stream=True) + cached_completion = portkey_2.completions.create( + prompt="Say this is a test", stream=True + ) + + for chunk in cached_completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-4 @@ -371,7 +378,8 @@ def test_method_loadbalance_with_two_apikeys( prompt="Say this is a test", max_tokens=245, stream=True ) - print(completion) + for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-5 @@ -392,7 +400,8 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non prompt="Say this is just a loadbalance and fallback test test", stream=True ) - print(completion) + for chunk in completion: + assert check_text_streaming_chunk(chunk) == True # -------------------------- # Test-6 @@ -413,4 +422,5 @@ def test_method_single_provider(self, client: Any, config: Dict) -> 
None: prompt="Say this is a test", stream=True ) - print(completion) + for chunk in completion: + assert check_text_streaming_chunk(chunk) == True diff --git a/tests/test_images.py b/tests/test_images.py index 13ae5886..4f6dc400 100644 --- a/tests/test_images.py +++ b/tests/test_images.py @@ -66,10 +66,12 @@ def test_method_single_with_vk_and_provider( metadata=self.get_metadata(), ) - portkey.images.generate( + generation = portkey.images.generate( model=model, prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(generation.data[0].url) is str + # -------------------------- # Test -2 t2_params = [] @@ -86,10 +88,12 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non config=config, ) - portkey.images.generate( + generation = portkey.images.generate( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(generation.data[0].url) is str + # -------------------------- # Test-3 t3_params = [] @@ -128,10 +132,12 @@ def test_method_single_provider_with_vk_retry_cache( config=config, ) - portkey_2.images.generate( + cached_generation = portkey_2.images.generate( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) + assert type(cached_generation.data[0].url) is str + # -------------------------- # Test-4 t4_params = [] @@ -155,7 +161,7 @@ def test_method_loadbalance_with_two_apikeys( model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-5 @@ -176,7 +182,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-6 @@ -197,7 +203,7 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024" ) - print(image.data) + assert type(image.data[0].url) is str # -------------------------- # Test-7 @@ -225,4 +231,4 @@ def test_method_all_params(self, client: Any, config: Dict) -> None: user="user-1234", ) - print(image.data) + assert type(image.data[0].url) is str diff --git a/tests/test_threads.py b/tests/test_threads.py index e5bee08d..cdd455e9 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -40,20 +40,45 @@ def get_metadata(self): t2_params = [] for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"): - t2_params.append((client, i)) + t2_params.append((client, i["virtual_key"])) @pytest.mark.parametrize("client, virtual_key", t2_params) def test_method_single_with_vk_and_provider( - self, client: Any, auth: str, virtual_key: str + self, client: Any, virtual_key: str ) -> None: metadata = self.get_metadata() portkey = client( base_url=base_url, api_key=api_key, virtual_key=virtual_key, - Authorization=f"{auth}", trace_id=str(uuid4()), metadata=metadata, ) thread = portkey.beta.threads.create() - print(thread) + + assert type(thread.id) is str + assert thread.object == "thread" + + retrieve_thread = portkey.beta.threads.retrieve(thread.id) + + assert retrieve_thread.id == thread.id + assert retrieve_thread.object == "thread" + assert type(retrieve_thread.metadata) is dict + + update_thread = portkey.beta.threads.update( + thread.id, + metadata={ + "modified": "true", + }, + ) + + assert update_thread.id == thread.id + assert update_thread.object == "thread" + assert 
type(update_thread.metadata) is dict + assert update_thread.metadata["modified"] == "true" + + delete_thread = portkey.beta.threads.delete(thread.id) + + assert delete_thread.id == thread.id + assert delete_thread.object == "thread.deleted" + assert delete_thread.deleted == True From 44a1d71939284a3cb62713acb5dc3224eb31bd97 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 20 Mar 2024 16:26:57 +0530 Subject: [PATCH 45/62] feat: types for each route --- portkey_ai/__init__.py | 6 +- portkey_ai/api_resources/__init__.py | 4 - portkey_ai/api_resources/apis/assistants.py | 9 +- .../api_resources/apis/chat_complete.py | 6 +- portkey_ai/api_resources/apis/complete.py | 11 +- portkey_ai/api_resources/apis/embeddings.py | 10 +- portkey_ai/api_resources/apis/images.py | 26 +- portkey_ai/api_resources/apis/main_files.py | 6 +- portkey_ai/api_resources/apis/models.py | 3 +- portkey_ai/api_resources/apis/threads.py | 24 +- portkey_ai/api_resources/types/__init__.py | 0 .../api_resources/types/assistant_type.py | 139 +++++++ .../api_resources/types/chat_complete_type.py | 90 +++++ .../api_resources/types/complete_type.py | 52 +++ .../api_resources/types/embeddings_type.py | 44 +++ portkey_ai/api_resources/types/image_type.py | 33 ++ .../api_resources/types/main_file_type.py | 72 ++++ portkey_ai/api_resources/types/models_type.py | 60 +++ .../types/thread_message_type.py | 122 ++++++ .../api_resources/types/thread_run_type.py | 252 ++++++++++++ portkey_ai/api_resources/types/thread_type.py | 37 ++ portkey_ai/api_resources/utils.py | 367 ------------------ 22 files changed, 937 insertions(+), 436 deletions(-) create mode 100644 portkey_ai/api_resources/types/__init__.py create mode 100644 portkey_ai/api_resources/types/assistant_type.py create mode 100644 portkey_ai/api_resources/types/chat_complete_type.py create mode 100644 portkey_ai/api_resources/types/complete_type.py create mode 100644 portkey_ai/api_resources/types/embeddings_type.py create mode 100644 portkey_ai/api_resources/types/image_type.py create mode 100644 portkey_ai/api_resources/types/main_file_type.py create mode 100644 portkey_ai/api_resources/types/models_type.py create mode 100644 portkey_ai/api_resources/types/thread_message_type.py create mode 100644 portkey_ai/api_resources/types/thread_run_type.py create mode 100644 portkey_ai/api_resources/types/thread_type.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 2ed8ac2a..bb8cf8b9 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -9,8 +9,7 @@ CacheType, CacheLiteral, Message, - PortkeyResponse, - ChatCompletions, + PortkeyResponse, Completion, AsyncCompletion, Params, @@ -19,7 +18,6 @@ ChatCompletion, AsyncChatCompletion, ChatCompletionChunk, - TextCompletion, TextCompletionChunk, createHeaders, Prompts, @@ -72,7 +70,6 @@ "CacheType", "CacheLiteral", "Message", - "ChatCompletions", "Completion", "AsyncCompletion", "Params", @@ -80,7 +77,6 @@ "ChatCompletion", "AsyncChatCompletion", "ChatCompletionChunk", - "TextCompletion", "TextCompletionChunk", "Config", "api_key", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index d4c19e0a..d4e3cc4f 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -45,9 +45,7 @@ Params, Config, RetrySettings, - ChatCompletions, ChatCompletionChunk, - TextCompletion, TextCompletionChunk, ) from .client import Portkey, AsyncPortkey @@ -65,7 +63,6 @@ "CacheType", "CacheLiteral", "Message", - "ChatCompletions", "Completion", 
"AsyncCompletion", "Params", @@ -74,7 +71,6 @@ "ChatCompletion", "AsyncChatCompletion", "ChatCompletionChunk", - "TextCompletion", "TextCompletionChunk", "Generations", "AsyncGenerations", diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 98639622..2a97c2b7 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -1,14 +1,7 @@ import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ( - Assistant, - AssistantDeleted, - AssistantFile, - AssistantFileDeleted, - AssistantFileList, - AssistantList, -) +from portkey_ai.api_resources.types.assistant_type import Assistant, AssistantList, AssistantDeleted, AssistantFile, AssistantFileList, AssistantFileDeleted class Assistants(APIResource): diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 9b72df1d..38420e8c 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -11,13 +11,11 @@ Union, ) from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ( - ChatCompletionChunk, - ChatCompletions, -) +from portkey_ai.api_resources.types.chat_complete_type import ChatCompletions from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.utils import ChatCompletionChunk __all__ = ["ChatCompletion", "AsyncChatCompletion"] diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 0ab4cbb6..41f2f0fb 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,11 +1,8 @@ import json -from typing import Any, AsyncIterator, Iterator, Optional, Union +from typing import AsyncIterator, Iterator, Optional, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ( - TextCompletion, - TextCompletionChunk, -) - +from portkey_ai.api_resources.utils import TextCompletionChunk +from portkey_ai.api_resources.types.complete_type import TextCompletion from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource @@ -97,7 +94,7 @@ async def create( model: Optional[str] = "portkey-default", prompt: Optional[str] = None, **kwargs, - ) -> Any: + ) -> TextCompletion: if "stream" in kwargs and kwargs["stream"] is True: return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 36f0f06f..239f2373 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -2,7 +2,7 @@ from typing import Optional from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import GenericResponse +from portkey_ai.api_resources.types.embeddings_type import CreateEmbeddingResponse class Embeddings(APIResource): @@ -12,12 +12,12 @@ def __init__(self, client: Portkey) -> None: def create( self, *, input: str, model: Optional[str] = "portkey-default", **kwargs - ) -> GenericResponse: + ) -> 
CreateEmbeddingResponse: response = self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs # type: ignore ) - data = GenericResponse(**json.loads(response.text)) + data = CreateEmbeddingResponse(**json.loads(response.text)) data._headers = response.headers return data @@ -30,11 +30,11 @@ def __init__(self, client: AsyncPortkey) -> None: async def create( self, *, input: str, model: Optional[str] = "portkey-default", **kwargs - ) -> GenericResponse: + ) -> CreateEmbeddingResponse: response = await self.openai_client.with_raw_response.embeddings.create( input=input, model=model, **kwargs # type: ignore ) - data = GenericResponse(**json.loads(response.text)) + data = CreateEmbeddingResponse(**json.loads(response.text)) data._headers = response.headers return data diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index c019bcbf..1d63a56e 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,7 +1,7 @@ import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ImageResponse +from portkey_ai.api_resources.types.image_type import ImagesResponse class Images(APIResource): @@ -9,29 +9,29 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def generate(self, prompt: str, **kwargs) -> ImageResponse: + def generate(self, prompt: str, **kwargs) -> ImagesResponse: response = self.openai_client.with_raw_response.images.generate( prompt=prompt, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - def edit(self, prompt: str, image, **kwargs) -> ImageResponse: + def edit(self, prompt: str, image, **kwargs) -> ImagesResponse: response = self.openai_client.with_raw_response.images.edit( prompt=prompt, image=image, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - def create_variation(self, image, **kwargs) -> ImageResponse: + def create_variation(self, image, **kwargs) -> ImagesResponse: response = self.openai_client.with_raw_response.images.create_variation( image=image, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data @@ -42,29 +42,29 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def generate(self, prompt: str, **kwargs) -> ImageResponse: + async def generate(self, prompt: str, **kwargs) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.generate( prompt=prompt, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - async def edit(self, prompt: str, image, **kwargs) -> ImageResponse: + async def edit(self, prompt: str, image, **kwargs) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.edit( prompt=prompt, image=image, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - async def 
create_variation(self, image, **kwargs) -> ImageResponse: + async def create_variation(self, image, **kwargs) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.create_variation( image=image, **kwargs ) - data = ImageResponse(**json.loads(response.text)) + data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index 2311b23d..d7cacc67 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -2,11 +2,7 @@ from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ( - FileDeleted, - FileObject, - FileList, -) +from portkey_ai.api_resources.types.main_file_type import FileDeleted, FileList, FileObject class MainFiles(APIResource): diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index e5707c9c..e5fbe061 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -1,8 +1,7 @@ import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import Model, ModelDeleted, ModelList - +from portkey_ai.api_resources.types.models_type import Model, ModelDeleted, ModelList class Models(APIResource): def __init__(self, client: Portkey) -> None: diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 610cf54c..634855e1 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -2,17 +2,9 @@ from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import ( - MessageList, - Run, - RunList, - RunStep, - RunStepList, - Thread, - ThreadDeleted, - ThreadMessage, - ThreadMessageFileRetrieve, -) +from portkey_ai.api_resources.types.thread_message_type import MessageFile, MessageList, ThreadMessage +from portkey_ai.api_resources.types.thread_run_type import Run, RunList, RunStep, RunStepList +from portkey_ai.api_resources.types.thread_type import Thread, ThreadDeleted class Threads(APIResource): @@ -128,13 +120,13 @@ def list(self, thread_id, message_id, **kwargs) -> MessageList: def retrieve( self, thread_id, message_id, file_id, **kwargs - ) -> ThreadMessageFileRetrieve: + ) -> MessageFile: response = ( self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( - thread_id=thread_id, message_id=message_id, file_id=file_id**kwargs + thread_id=thread_id, message_id=message_id, file_id=file_id, **kwargs ) ) - data = ThreadMessageFileRetrieve(**json.loads(response.text)) + data = MessageFile(**json.loads(response.text)) data._headers = response.headers return data @@ -352,7 +344,7 @@ async def list(self, thread_id, message_id, **kwargs) -> MessageList: async def retrieve( self, thread_id, message_id, file_id, **kwargs - ) -> ThreadMessageFileRetrieve: + ) -> MessageFile: # fmt: off response = await self.openai_client\ .with_raw_response\ @@ -367,7 +359,7 @@ async def retrieve( **kwargs ) # fmt: off - data = ThreadMessageFileRetrieve(**json.loads( response.text)) + data = 
MessageFile(**json.loads( response.text)) data._headers = response.headers return data diff --git a/portkey_ai/api_resources/types/__init__.py b/portkey_ai/api_resources/types/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py new file mode 100644 index 00000000..ea72d384 --- /dev/null +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -0,0 +1,139 @@ +import json +from typing import Dict, Optional, Union +import httpx +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = [ + "Assistant", + "AssistantList", + "AssistantDeleted", + "AssistantFile", + "AssistantFileList", + "AssistantFileDeleted", + "ToolCodeInterpreter", + "ToolRetrieval", + "ToolFunction", + "Tool", +] + + +class ToolCodeInterpreter(BaseModel): + type: Optional[str] + + +class ToolRetrieval(BaseModel): + type: Optional[str] + + +class ToolFunction(BaseModel): + type: Optional[str] + + +Tool = Union[ToolCodeInterpreter, ToolRetrieval, ToolFunction] + + +class Assistant(BaseModel): + id: Optional[str] + created_at: Optional[int] + description: Optional[str] = None + file_ids: Optional[List[str]] + instructions: Optional[str] = None + metadata: Optional[object] = None + model: Optional[str] + name: Optional[str] = None + object: Optional[str] + tools: Optional[List[Tool]] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class AssistantList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[Assistant]] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class AssistantDeleted(BaseModel, extra="allow"): + id: Optional[str] + object: Optional[str] + deleted: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class AssistantFile(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] + created_at: Optional[int] + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class AssistantFileList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[AssistantFile]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class AssistantFileDeleted(BaseModel, 
extra="allow"): + id: Optional[str] + deleted: Optional[bool] + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/chat_complete_type.py b/portkey_ai/api_resources/types/chat_complete_type.py new file mode 100644 index 00000000..d0993437 --- /dev/null +++ b/portkey_ai/api_resources/types/chat_complete_type.py @@ -0,0 +1,90 @@ +import json +from typing import Dict, Optional +import httpx +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = [ + "ChatCompletions", + "ChatCompletionMessage", + "ChatCompletionMessageToolCall", + "FunctionCall", + "TopLogprob", + "ChatCompletionTokenLogprob", + "ChoiceLogprobs", + "Choice", + "Usage", +] + + +class Usage(BaseModel, extra="allow"): + prompt_tokens: Optional[int] = None + completion_tokens: Optional[int] = None + total_tokens: Optional[int] = None + + +class FunctionCall(BaseModel): + arguments: str + name: str + + +class ChatCompletionMessageToolCall(BaseModel): + id: Optional[str] + function: FunctionCall + type: Optional[str] + + +class ChatCompletionMessage(BaseModel): + content: Optional[str] = None + role: Optional[str] + function_call: Optional[FunctionCall] = None + tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None + + +class TopLogprob(BaseModel): + token: Optional[str] + bytes: Optional[List[int]] = None + logprob: Optional[float] + + +class ChatCompletionTokenLogprob(BaseModel): + token: Optional[str] + bytes: Optional[List[int]] = None + logprob: Optional[float] + top_logprobs: List[TopLogprob] + + +class ChoiceLogprobs(BaseModel): + content: Optional[List[ChatCompletionTokenLogprob]] = None + + +class Choice(BaseModel): + finish_reason: Optional[str] + index: Optional[int] + logprobs: Optional[ChoiceLogprobs] = None + message: ChatCompletionMessage + + +class ChatCompletions(BaseModel): + id: Optional[str] + choices: List[Choice] + created: Optional[int] + model: Optional[str] + object: Optional[str] + system_fingerprint: Optional[str] = None + usage: Optional[Usage] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/complete_type.py b/portkey_ai/api_resources/types/complete_type.py new file mode 100644 index 00000000..8d4eaaca --- /dev/null +++ b/portkey_ai/api_resources/types/complete_type.py @@ -0,0 +1,52 @@ +import json +from typing import Dict, Literal, Optional +import httpx +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = ["CompletionUsage", "Logprobs", "CompletionChoice", "TextCompletion"] + + +class CompletionUsage(BaseModel): + completion_tokens: Optional[int] + prompt_tokens: Optional[int] + total_tokens: Optional[int] + + +class Logprobs(BaseModel): + text_offset: Optional[List[int]] = None + token_logprobs: Optional[List[float]] = None + tokens: Optional[List[str]] = None + top_logprobs: Optional[List[Dict[str, float]]] = None + 
+ +class CompletionChoice(BaseModel): + finish_reason: Optional[str] + index: Optional[int] + logprobs: Optional[Logprobs] = None + text: Optional[str] + + +class TextCompletion(BaseModel): + id: Optional[str] + choices: List[CompletionChoice] + created: Optional[int] + model: Optional[str] + object: Optional[str] + system_fingerprint: Optional[str] = None + usage: Optional[CompletionUsage] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/embeddings_type.py b/portkey_ai/api_resources/types/embeddings_type.py new file mode 100644 index 00000000..81ef3920 --- /dev/null +++ b/portkey_ai/api_resources/types/embeddings_type.py @@ -0,0 +1,44 @@ +import json +from typing import Dict, Optional, Union +import httpx + +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = ["CreateEmbeddingResponse", "Usage", "Embedding"] + + +class Usage(BaseModel, extra="allow"): + prompt_tokens: Optional[int] = None + completion_tokens: Optional[int] = None + total_tokens: Optional[int] = None + + +class Embedding(BaseModel): + embedding: Union[List[float], str] + index: Optional[int] + object: Optional[str] + + +class CreateEmbeddingResponse(BaseModel): + success: Optional[bool] = None + warning: Optional[str] = None + data: List[Embedding] + model: Optional[str] + object: Optional[str] + usage: Usage + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/image_type.py b/portkey_ai/api_resources/types/image_type.py new file mode 100644 index 00000000..1dbedb0a --- /dev/null +++ b/portkey_ai/api_resources/types/image_type.py @@ -0,0 +1,33 @@ +import json +from typing import Dict, Optional +import httpx +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = ["ImagesResponse", "Image"] + + +class Image(BaseModel): + b64_json: Optional[str] = None + revised_prompt: Optional[str] = None + url: Optional[str] = None + + +class ImagesResponse(BaseModel): + created: Optional[int] + data: List[Image] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/main_file_type.py b/portkey_ai/api_resources/types/main_file_type.py new file mode 100644 index 00000000..52b7e733 --- /dev/null +++ b/portkey_ai/api_resources/types/main_file_type.py @@ -0,0 +1,72 @@ +import json +from typing import Dict, Optional +import httpx +from 
portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = ["FileObject", "FileList", "FileDeleted"] + + +class FileObject(BaseModel): + id: Optional[str] + bytes: Optional[int] + created_at: Optional[int] + filename: Optional[str] + object: Optional[str] + purpose: Optional[str] + status: Optional[str] + status_details: Optional[str] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FileList(BaseModel): + object: Optional[str] + data: List[FileObject] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class FileDeleted(BaseModel): + id: Optional[str] + deleted: Optional[bool] + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/models_type.py b/portkey_ai/api_resources/types/models_type.py new file mode 100644 index 00000000..5974f4e9 --- /dev/null +++ b/portkey_ai/api_resources/types/models_type.py @@ -0,0 +1,60 @@ +import json +from typing import Dict, Optional +import httpx +from portkey_ai.api_resources.utils import parse_headers +from typing import List, Any +from pydantic import BaseModel + +__all__ = ["Model", "ModelDeleted", "ModelList"] + +class Model(BaseModel): + id: Optional[str] + created: Optional[int] + object: Optional[str] + owned_by: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ModelList(BaseModel, extra="allow"): + object: Optional[str] + data: List[Model] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + +class ModelDeleted(BaseModel): + id: Optional[str] + deleted: Optional[bool] + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return 
parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py new file mode 100644 index 00000000..8134130e --- /dev/null +++ b/portkey_ai/api_resources/types/thread_message_type.py @@ -0,0 +1,122 @@ +import json +from typing import Dict, List, Optional, Union +import httpx +from portkey_ai.api_resources.utils import parse_headers +from pydantic import BaseModel + +__all__ = [ + "ThreadMessage", + "MessageList", + "Content", + "Text", + "TextAnnotation", + "TextAnnotationFileCitation", + "TextAnnotationFileCitationFileCitation", + "TextAnnotationFilePath", + "TextAnnotationFilePathFilePath", + "MessageContentImageFile", + "ImageFile", + "MessageContentText", +] + + +class TextAnnotationFilePathFilePath(BaseModel): + file_id: Optional[str] + + +class TextAnnotationFileCitationFileCitation(BaseModel): + file_id: Optional[str] + quote: Optional[str] + + +class TextAnnotationFilePath(BaseModel): + end_index: Optional[int] + file_path: Optional[TextAnnotationFilePathFilePath] + start_index: Optional[int] + text: Optional[str] + type: Optional[str] + + +class TextAnnotationFileCitation(BaseModel): + end_index: Optional[int] + file_citation: Optional[TextAnnotationFileCitationFileCitation] + start_index: Optional[int] + text: Optional[str] + type: Optional[str] + + +TextAnnotation = Union[TextAnnotationFileCitation, TextAnnotationFilePath] + + +class Text(BaseModel): + annotations: Optional[List[TextAnnotation]] + value: Optional[str] + + +class MessageContentText(BaseModel): + text: Optional[Text] + type: Optional[str] + + +class ImageFile(BaseModel): + file_id: Optional[str] + + +class MessageContentImageFile(BaseModel): + image_file: Optional[ImageFile] + type: Optional[str] + + +Content = Union[MessageContentImageFile, MessageContentText] + + +class ThreadMessage(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] = None + content: Optional[List[Content]] + created_at: Optional[int] + file_ids: Optional[List[str]] + metadata: Optional[object] = None + object: Optional[str] + role: Optional[str] + run_id: Optional[str] = None + thread_id: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class MessageList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[ThreadMessage]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class MessageFile(BaseModel, extra="allow"): + id: Optional[str] + object: Optional[str] + created_at: Optional[int] + message_id: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/thread_run_type.py b/portkey_ai/api_resources/types/thread_run_type.py new file mode 100644 index 00000000..d4633447 --- /dev/null +++ b/portkey_ai/api_resources/types/thread_run_type.py @@ -0,0 +1,252 @@ +import json +from typing import Dict, Literal, Optional, Union +import httpx +from 
portkey_ai.api_resources.utils import parse_headers +from typing import List +from pydantic import BaseModel + +__all__ = [ + "Run", + "Usage", + "LastError", + "Function", + "RequiredActionFunctionToolCall", + "RequiredActionSubmitToolOutputs", + "RequiredAction", + "FunctionDefinition", + "ToolAssistantToolsCode", + "ToolAssistantToolsRetrieval", + "ToolAssistantToolsFunction", + "Tool", + "RunList", + "RunStep", + "StepDetails", + "ToolCallsStepDetails", + "MessageCreationStepDetails", + "MessageCreation", + "ToolCall", + "CodeInterpreter", + "CodeInterpreterOutput", + "CodeInterpreterOutputLogs", + "CodeInterpreterOutputImage", + "CodeInterpreterOutputImageImage", + "CodeToolCall", + "RetrievalToolCall", + "FunctionToolCall", + "FunctionParameters", + "RunStepList", +] + + +class Function(BaseModel): + arguments: Optional[str] + name: Optional[str] + output: Optional[str] = None + + +class FunctionToolCall(BaseModel): + id: Optional[str] + function: Function + type: Literal["function"] + + +class RetrievalToolCall(BaseModel): + id: Optional[str] + retrieval: Optional[object] + type: Optional[str] + + +class CodeInterpreterOutputLogs(BaseModel): + logs: Optional[str] + type: Optional[str] + + +class CodeInterpreterOutputImageImage(BaseModel): + file_id: Optional[str] + + +class CodeInterpreterOutputImage(BaseModel): + image: CodeInterpreterOutputImageImage + type: Optional[str] + + +CodeInterpreterOutput = Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage] + + +class CodeInterpreter(BaseModel): + input: Optional[str] + outputs: List[CodeInterpreterOutput] + + +class CodeToolCall(BaseModel): + id: Optional[str] + code_interpreter: CodeInterpreter + type: Optional[str] + + +ToolCall = Union[CodeToolCall, RetrievalToolCall, FunctionToolCall] + + +class ToolCallsStepDetails(BaseModel): + tool_calls: Optional[List[ToolCall]] + type: Optional[str] + + +class MessageCreation(BaseModel): + message_id: Optional[str] + + +class MessageCreationStepDetails(BaseModel): + message_creation: Optional[MessageCreation] + + type: Optional[str] + + +StepDetails = Union[MessageCreationStepDetails, ToolCallsStepDetails] + + +class Usage(BaseModel): + completion_tokens: Optional[int] + prompt_tokens: Optional[int] + total_tokens: Optional[int] + + +class LastError(BaseModel): + code: Optional[str] + message: Optional[str] + + +class Function(BaseModel): + arguments: Optional[str] + name: Optional[str] + + +class RequiredActionFunctionToolCall(BaseModel): + id: Optional[str] + function: Optional[Function] + type: Optional[str] + + +class RequiredActionSubmitToolOutputs(BaseModel): + tool_calls: Optional[List[RequiredActionFunctionToolCall]] + + +class RequiredAction(BaseModel): + submit_tool_outputs: Optional[RequiredActionSubmitToolOutputs] + type: Optional[str] + + +FunctionParameters = Dict[str, object] + + +class FunctionDefinition(BaseModel): + name: Optional[str] + description: Optional[str] = None + parameters: Optional[FunctionParameters] = None + + +class ToolAssistantToolsCode(BaseModel): + type: Optional[str] + + +class ToolAssistantToolsRetrieval(BaseModel): + type: Optional[str] + + +class ToolAssistantToolsFunction(BaseModel): + function: Optional[FunctionDefinition] + type: Optional[str] + + +Tool = Union[ + ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction +] + + +class Run(BaseModel): + id: Optional[str] + assistant_id: Optional[str] + cancelled_at: Optional[int] = None + completed_at: Optional[int] = None + created_at: Optional[int] + expires_at: 
Optional[int] + failed_at: Optional[int] = None + file_ids: Optional[List[str]] + instructions: Optional[str] + last_error: Optional[LastError] = None + metadata: Optional[object] = None + model: Optional[str] + object: Optional[str] + required_action: Optional[RequiredAction] = None + started_at: Optional[int] = None + status: Optional[str] + thread_id: Optional[str] + tools: Optional[List[Tool]] + usage: Optional[Usage] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[Run]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunStep(BaseModel, extra="allow"): + id: Optional[str] + assistant_id: Optional[str] + cancelled_at: Optional[int] = None + completed_at: Optional[int] = None + created_at: Optional[int] + expired_at: Optional[int] = None + failed_at: Optional[int] = None + last_error: Optional[LastError] = None + metadata: Optional[object] = None + object: Optional[str] + run_id: Optional[str] + status: Optional[str] + step_details: Optional[StepDetails] + thread_id: Optional[str] + type: Optional[str] + usage: Optional[Usage] = None + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class RunStepList(BaseModel, extra="allow"): + object: Optional[str] + data: Optional[List[RunStep]] + first_id: Optional[str] + last_id: Optional[str] + has_more: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/types/thread_type.py b/portkey_ai/api_resources/types/thread_type.py new file mode 100644 index 00000000..4f047e1d --- /dev/null +++ b/portkey_ai/api_resources/types/thread_type.py @@ -0,0 +1,37 @@ +import json +from typing import Dict, Optional +import httpx +from portkey_ai.api_resources.utils import parse_headers +from pydantic import BaseModel + + +__all__ = ["Thread", "ThreadDeleted"] + + +class Thread(BaseModel, extra="allow"): + id: Optional[str] + created_at: Optional[int] + metadata: Optional[object] = None + object: Optional[str] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ThreadDeleted(BaseModel, extra="allow"): + id: Optional[str] + object: Optional[str] + deleted: Optional[bool] + _headers: Optional[httpx.Headers] = None + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index 28b4e3dd..e82b0dcb 100644 --- a/portkey_ai/api_resources/utils.py +++ 
b/portkey_ai/api_resources/utils.py @@ -465,158 +465,6 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - -class ImageResponse(BaseModel, extra="allow"): - created: int - data: List[Any] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class FileObject(BaseModel, extra="allow"): - id: Optional[str] - bytes: Optional[int] - created_at: Optional[int] - filename: Optional[str] - object: Optional[Literal["file"]] - purpose: Optional[ - Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"] - ] - status: Optional[Literal["uploaded", "processed", "error"]] - status_details: Optional[str] = None - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class FileList(BaseModel, extra="allow"): - object: str - data: List[Any] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class FileDeleted(BaseModel, extra="allow"): - id: Optional[str] - deleted: Optional[bool] - object: Optional[Literal["file"]] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class ModelDeleted(BaseModel, extra="allow"): - id: str - deleted: bool - object: str - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class Model(BaseModel, extra="allow"): - id: str - created: int - object: Literal["model"] - owned_by: str - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class ModelList(BaseModel, extra="allow"): - object: str - data: List[Any] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class Assistant(BaseModel, extra="allow"): - id: Optional[str] - created_at: Optional[int] - description: Optional[str] = None - file_ids: Optional[List[str]] - instructions: Optional[str] = None - metadata: Optional[object] = None - model: Optional[str] - name: Optional[str] = None - object: Optional[str] - tools: Optional[List[Any]] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class AssistantList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[Any]] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return 
parse_headers(self._headers) - - -class AssistantDeleted(BaseModel, extra="allow"): - id: Optional[str] - object: Optional[str] - deleted: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" if provider is None: @@ -735,218 +583,3 @@ def parse_headers(headers: Optional[httpx.Headers]) -> dict: _headers[k] = v return _headers - - -class AssistantFileDeleted(BaseModel, extra="allow"): - id: Optional[str] - deleted: Optional[bool] - object: Optional[str] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class AssistantFile(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] - created_at: Optional[int] - object: Optional[str] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class AssistantFileList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[Any]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class Thread(BaseModel, extra="allow"): - id: Optional[str] - created_at: Optional[int] - metadata: Optional[object] = None - object: Optional[str] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class ThreadDeleted(BaseModel, extra="allow"): - id: Optional[str] - object: Optional[str] - deleted: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class Run(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] - cancelled_at: Optional[int] = None - completed_at: Optional[int] = None - created_at: Optional[int] - expires_at: Optional[int] - failed_at: Optional[int] = None - file_ids: Optional[List[str]] = None - instructions: Optional[str] - last_error: Optional[Any] = None - metadata: Optional[object] = None - model: Optional[str] - object: Optional[str] - required_action: Optional[str] = None - started_at: Optional[int] = None - status: Optional[str] - thread_id: Optional[str] - tools: Optional[List[Any]] = None - usage: Optional[Any] = None - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class ThreadMessage(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] = None - content: Optional[List[Any]] - created_at: Optional[int] - file_ids: 
Optional[List[str]] - metadata: Optional[object] = None - object: Optional[str] - role: Optional[str] - run_id: Optional[str] = None - thread_id: Optional[str] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class MessageList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[Any]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class ThreadMessageFileRetrieve(BaseModel, extra="allow"): - id: Optional[str] - object: Optional[str] - created_at: Optional[int] - message_id: Optional[str] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class RunList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[Any]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class RunStep(BaseModel, extra="allow"): - id: Optional[str] - assistant_id: Optional[str] - cancelled_at: Optional[int] = None - completed_at: Optional[int] = None - created_at: Optional[int] - expired_at: Optional[int] = None - failed_at: Optional[int] = None - last_error: Optional[Any] = None - metadata: Optional[object] = None - object: Optional[str] - run_id: Optional[str] - status: Optional[str] - step_details: Optional[Any] - thread_id: Optional[str] - type: Optional[str] - usage: Optional[Any] = None - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -class RunStepList(BaseModel, extra="allow"): - object: Optional[str] - data: Optional[List[Any]] - first_id: Optional[str] - last_id: Optional[str] - has_more: Optional[bool] - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) From 666a167cbe126a595767e02f5d357584cfcbe623 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 20 Mar 2024 16:36:33 +0530 Subject: [PATCH 46/62] fix: linting issues --- portkey_ai/__init__.py | 2 +- portkey_ai/api_resources/apis/assistants.py | 9 +++++++- portkey_ai/api_resources/apis/main_files.py | 6 +++++- portkey_ai/api_resources/apis/models.py | 1 + portkey_ai/api_resources/apis/threads.py | 21 ++++++++++++------- .../api_resources/types/complete_type.py | 2 +- portkey_ai/api_resources/types/models_type.py | 4 +++- .../api_resources/types/thread_run_type.py | 4 ++-- portkey_ai/api_resources/utils.py | 1 + tests/test_assistants.py | 2 +- tests/test_async_chat_complete.py | 12 +++++------ tests/test_async_complete.py | 12 +++++------ tests/test_chat_complete.py | 12 +++++------ tests/test_complete.py | 12 +++++------ 
tests/test_threads.py | 2 +- 15 files changed, 61 insertions(+), 41 deletions(-) diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index bb8cf8b9..29a20e2c 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -9,7 +9,7 @@ CacheType, CacheLiteral, Message, - PortkeyResponse, + PortkeyResponse, Completion, AsyncCompletion, Params, diff --git a/portkey_ai/api_resources/apis/assistants.py b/portkey_ai/api_resources/apis/assistants.py index 2a97c2b7..40b2dc4a 100644 --- a/portkey_ai/api_resources/apis/assistants.py +++ b/portkey_ai/api_resources/apis/assistants.py @@ -1,7 +1,14 @@ import json from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.types.assistant_type import Assistant, AssistantList, AssistantDeleted, AssistantFile, AssistantFileList, AssistantFileDeleted +from portkey_ai.api_resources.types.assistant_type import ( + Assistant, + AssistantList, + AssistantDeleted, + AssistantFile, + AssistantFileList, + AssistantFileDeleted, +) class Assistants(APIResource): diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index d7cacc67..566d8736 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -2,7 +2,11 @@ from typing import Any from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.types.main_file_type import FileDeleted, FileList, FileObject +from portkey_ai.api_resources.types.main_file_type import ( + FileDeleted, + FileList, + FileObject, +) class MainFiles(APIResource): diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index e5fbe061..9574312c 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -3,6 +3,7 @@ from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.models_type import Model, ModelDeleted, ModelList + class Models(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 634855e1..3ba0aecc 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -2,8 +2,17 @@ from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.types.thread_message_type import MessageFile, MessageList, ThreadMessage -from portkey_ai.api_resources.types.thread_run_type import Run, RunList, RunStep, RunStepList +from portkey_ai.api_resources.types.thread_message_type import ( + MessageFile, + MessageList, + ThreadMessage, +) +from portkey_ai.api_resources.types.thread_run_type import ( + Run, + RunList, + RunStep, + RunStepList, +) from portkey_ai.api_resources.types.thread_type import Thread, ThreadDeleted @@ -118,9 +127,7 @@ def list(self, thread_id, message_id, **kwargs) -> MessageList: return data - def retrieve( - self, thread_id, message_id, file_id, **kwargs - ) -> MessageFile: + def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: response = ( self.openai_client.with_raw_response.beta.threads.messages.files.retrieve( thread_id=thread_id, 
message_id=message_id, file_id=file_id, **kwargs @@ -342,9 +349,7 @@ async def list(self, thread_id, message_id, **kwargs) -> MessageList: return data - async def retrieve( - self, thread_id, message_id, file_id, **kwargs - ) -> MessageFile: + async def retrieve(self, thread_id, message_id, file_id, **kwargs) -> MessageFile: # fmt: off response = await self.openai_client\ .with_raw_response\ diff --git a/portkey_ai/api_resources/types/complete_type.py b/portkey_ai/api_resources/types/complete_type.py index 8d4eaaca..075f3a1d 100644 --- a/portkey_ai/api_resources/types/complete_type.py +++ b/portkey_ai/api_resources/types/complete_type.py @@ -1,5 +1,5 @@ import json -from typing import Dict, Literal, Optional +from typing import Dict, Optional import httpx from portkey_ai.api_resources.utils import parse_headers from typing import List, Any diff --git a/portkey_ai/api_resources/types/models_type.py b/portkey_ai/api_resources/types/models_type.py index 5974f4e9..84d20f3c 100644 --- a/portkey_ai/api_resources/types/models_type.py +++ b/portkey_ai/api_resources/types/models_type.py @@ -7,6 +7,7 @@ __all__ = ["Model", "ModelDeleted", "ModelList"] + class Model(BaseModel): id: Optional[str] created: Optional[int] @@ -26,7 +27,7 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) - + class ModelList(BaseModel, extra="allow"): object: Optional[str] @@ -40,6 +41,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + class ModelDeleted(BaseModel): id: Optional[str] deleted: Optional[bool] diff --git a/portkey_ai/api_resources/types/thread_run_type.py b/portkey_ai/api_resources/types/thread_run_type.py index d4633447..eadb832e 100644 --- a/portkey_ai/api_resources/types/thread_run_type.py +++ b/portkey_ai/api_resources/types/thread_run_type.py @@ -116,14 +116,14 @@ class LastError(BaseModel): message: Optional[str] -class Function(BaseModel): +class FunctionRA(BaseModel): arguments: Optional[str] name: Optional[str] class RequiredActionFunctionToolCall(BaseModel): id: Optional[str] - function: Optional[Function] + function: Optional[FunctionRA] type: Optional[str] diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index e82b0dcb..c41f31dc 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -465,6 +465,7 @@ def __str__(self): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: env_key = f"{provider.upper().replace('-', '_')}_API_KEY" if provider is None: diff --git a/tests/test_assistants.py b/tests/test_assistants.py index 295ac91a..10e589ca 100644 --- a/tests/test_assistants.py +++ b/tests/test_assistants.py @@ -123,4 +123,4 @@ def test_method_all_params(self, client: Any, virtual_key: str) -> None: assert delete_assistant.id == assistant.id assert delete_assistant.object == "assistant.deleted" - assert delete_assistant.deleted == True + assert delete_assistant.deleted is True diff --git a/tests/test_async_chat_complete.py b/tests/test_async_chat_complete.py index ac82cdd4..3098bd73 100644 --- a/tests/test_async_chat_complete.py +++ b/tests/test_async_chat_complete.py @@ -291,7 +291,7 @@ async def test_method_single_with_vk_and_provider( ) async for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert 
check_chat_streaming_chunk(chunk) is True # -------------------------- # Test -2 @@ -343,7 +343,7 @@ async def test_method_single_with_basic_config( ) async for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-3 @@ -393,7 +393,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) async for chunk in cached_completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-4 @@ -422,7 +422,7 @@ async def test_method_loadbalance_with_two_apikeys( ) async for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-5 @@ -452,7 +452,7 @@ async def test_method_loadbalance_and_fallback( stream=True, ) async for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-6 @@ -477,4 +477,4 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: ) async for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True diff --git a/tests/test_async_complete.py b/tests/test_async_complete.py index 6a2471d0..cd063727 100644 --- a/tests/test_async_complete.py +++ b/tests/test_async_complete.py @@ -279,7 +279,7 @@ async def test_method_single_with_vk_and_provider( ) async for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test -2 @@ -328,7 +328,7 @@ async def test_method_single_with_basic_config( ) async for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-3 @@ -372,7 +372,7 @@ async def test_method_single_provider_with_vk_retry_cache( ) async for chunk in cached_completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-4 @@ -399,7 +399,7 @@ async def test_method_loadbalance_with_two_apikeys( ) async for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-5 @@ -424,7 +424,7 @@ async def test_method_loadbalance_and_fallback( ) async for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-6 @@ -447,4 +447,4 @@ async def test_method_single_provider(self, client: Any, config: Dict) -> None: ) async for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True diff --git a/tests/test_chat_complete.py b/tests/test_chat_complete.py index a244b012..48727950 100644 --- a/tests/test_chat_complete.py +++ b/tests/test_chat_complete.py @@ -281,7 +281,7 @@ def test_method_single_with_vk_and_provider( ) for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test -2 @@ -330,7 +330,7 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non ) for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert 
check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-3 @@ -379,7 +379,7 @@ def test_method_single_provider_with_vk_retry_cache( ) for chunk in cache_completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-4 @@ -407,7 +407,7 @@ def test_method_loadbalance_with_two_apikeys( ) for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-5 @@ -435,7 +435,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non ) for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True # -------------------------- # Test-6 @@ -459,4 +459,4 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: ) for chunk in completion: - assert check_chat_streaming_chunk(chunk) == True + assert check_chat_streaming_chunk(chunk) is True diff --git a/tests/test_complete.py b/tests/test_complete.py index d47104dc..218971a4 100644 --- a/tests/test_complete.py +++ b/tests/test_complete.py @@ -264,7 +264,7 @@ def test_method_single_with_vk_and_provider( ) for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test -2 @@ -310,7 +310,7 @@ def test_method_single_with_basic_config(self, client: Any, config: Dict) -> Non ) for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-3 @@ -353,7 +353,7 @@ def test_method_single_provider_with_vk_retry_cache( ) for chunk in cached_completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-4 @@ -379,7 +379,7 @@ def test_method_loadbalance_with_two_apikeys( ) for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-5 @@ -401,7 +401,7 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> Non ) for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True # -------------------------- # Test-6 @@ -423,4 +423,4 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None: ) for chunk in completion: - assert check_text_streaming_chunk(chunk) == True + assert check_text_streaming_chunk(chunk) is True diff --git a/tests/test_threads.py b/tests/test_threads.py index cdd455e9..9a7bb70f 100644 --- a/tests/test_threads.py +++ b/tests/test_threads.py @@ -81,4 +81,4 @@ def test_method_single_with_vk_and_provider( assert delete_thread.id == thread.id assert delete_thread.object == "thread.deleted" - assert delete_thread.deleted == True + assert delete_thread.deleted is True From 3a46397a7e561121f17b708c01f86356b4f8a43b Mon Sep 17 00:00:00 2001 From: visargD Date: Wed, 20 Mar 2024 18:26:45 +0530 Subject: [PATCH 47/62] fix: add await in async chat complete test case --- tests/test_async_chat_complete.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_async_chat_complete.py b/tests/test_async_chat_complete.py index 3098bd73..a0def363 100644 --- a/tests/test_async_chat_complete.py +++ b/tests/test_async_chat_complete.py @@ -386,7 +386,7 @@ async 
def test_method_single_provider_with_vk_retry_cache( config=config, ) - cached_completion = portkey_2.chat.completions.create( + cached_completion = await portkey_2.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-3.5-turbo", stream=True, From b0b6613ade720cb26cf94f0932f15a53d9e1996d Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Wed, 20 Mar 2024 19:33:59 +0530 Subject: [PATCH 48/62] fix: auto complete for chat and completion + utils for types directory --- portkey_ai/__init__.py | 4 - portkey_ai/api_resources/__init__.py | 4 - .../api_resources/apis/chat_complete.py | 13 +- portkey_ai/api_resources/apis/complete.py | 12 +- portkey_ai/api_resources/common_types.py | 4 +- .../api_resources/types/assistant_type.py | 2 +- .../api_resources/types/chat_complete_type.py | 56 +++++- .../api_resources/types/complete_type.py | 36 +++- .../api_resources/types/embeddings_type.py | 2 +- portkey_ai/api_resources/types/image_type.py | 2 +- .../api_resources/types/main_file_type.py | 2 +- portkey_ai/api_resources/types/models_type.py | 2 +- .../types/thread_message_type.py | 2 +- .../api_resources/types/thread_run_type.py | 2 +- portkey_ai/api_resources/types/thread_type.py | 2 +- portkey_ai/api_resources/types/utils.py | 18 ++ portkey_ai/api_resources/utils.py | 163 ++---------------- 17 files changed, 146 insertions(+), 180 deletions(-) create mode 100644 portkey_ai/api_resources/types/utils.py diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index 29a20e2c..f945a675 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -17,8 +17,6 @@ RetrySettings, ChatCompletion, AsyncChatCompletion, - ChatCompletionChunk, - TextCompletionChunk, createHeaders, Prompts, AsyncPrompts, @@ -76,8 +74,6 @@ "RetrySettings", "ChatCompletion", "AsyncChatCompletion", - "ChatCompletionChunk", - "TextCompletionChunk", "Config", "api_key", "base_url", diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index d4e3cc4f..f489f0bf 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -45,8 +45,6 @@ Params, Config, RetrySettings, - ChatCompletionChunk, - TextCompletionChunk, ) from .client import Portkey, AsyncPortkey @@ -70,8 +68,6 @@ "RetrySettings", "ChatCompletion", "AsyncChatCompletion", - "ChatCompletionChunk", - "TextCompletionChunk", "Generations", "AsyncGenerations", "Prompts", diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 38420e8c..c1b79dee 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -11,12 +11,13 @@ Union, ) from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.types.chat_complete_type import ChatCompletions +from portkey_ai.api_resources.types.chat_complete_type import ( + ChatCompletionChunk, + ChatCompletions, +) from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource -from portkey_ai.api_resources.utils import ChatCompletionChunk - __all__ = ["ChatCompletion", "AsyncChatCompletion"] @@ -75,7 +76,7 @@ def create( model: Optional[str] = "portkey-default", messages: Iterable[Any], **kwargs, - ) -> ChatCompletions: + ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: if "stream" in kwargs and kwargs["stream"] is True: return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: 
@@ -91,7 +92,7 @@ def __init__(self, client: AsyncPortkey) -> None: async def stream_create( self, model, messages, **kwargs - ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: # type: ignore + ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: async with self.openai_client.with_streaming_response.chat.completions.create( model=model, messages=messages, **kwargs ) as response: @@ -123,7 +124,7 @@ async def create( model: Optional[str] = "portkey-default", messages: Iterable[Any], **kwargs, - ) -> ChatCompletions: + ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: if "stream" in kwargs and kwargs["stream"] is True: return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 41f2f0fb..030b84c2 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,8 +1,12 @@ import json from typing import AsyncIterator, Iterator, Optional, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey -from portkey_ai.api_resources.utils import TextCompletionChunk -from portkey_ai.api_resources.types.complete_type import TextCompletion + +# from portkey_ai.api_resources.utils import TextCompletionChunk +from portkey_ai.api_resources.types.complete_type import ( + TextCompletion, + TextCompletionChunk, +) from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource @@ -46,7 +50,7 @@ def create( model: Optional[str] = "portkey-default", prompt: Optional[str] = None, **kwargs, - ) -> TextCompletion: + ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: if "stream" in kwargs and kwargs["stream"] is True: return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: @@ -94,7 +98,7 @@ async def create( model: Optional[str] = "portkey-default", prompt: Optional[str] = None, **kwargs, - ) -> TextCompletion: + ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: if "stream" in kwargs and kwargs["stream"] is True: return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore elif "stream" in kwargs and kwargs["stream"] is False: diff --git a/portkey_ai/api_resources/common_types.py b/portkey_ai/api_resources/common_types.py index 0a2af85f..ddb765ae 100644 --- a/portkey_ai/api_resources/common_types.py +++ b/portkey_ai/api_resources/common_types.py @@ -2,7 +2,9 @@ import httpx from .streaming import Stream, AsyncStream -from .utils import ChatCompletionChunk, TextCompletionChunk, GenericResponse +from .utils import GenericResponse +from .types.chat_complete_type import ChatCompletionChunk +from .types.complete_type import TextCompletionChunk StreamT = TypeVar( "StreamT", diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py index ea72d384..ebfa6b77 100644 --- a/portkey_ai/api_resources/types/assistant_type.py +++ b/portkey_ai/api_resources/types/assistant_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional, Union import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/chat_complete_type.py b/portkey_ai/api_resources/types/chat_complete_type.py index d0993437..fabc2f80 100644 --- 
a/portkey_ai/api_resources/types/chat_complete_type.py +++ b/portkey_ai/api_resources/types/chat_complete_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel @@ -15,6 +15,10 @@ "ChoiceLogprobs", "Choice", "Usage", + "DeltaToolCall", + "Delta", + "StreamChoice", + "ChatCompletionChunk", ] @@ -24,6 +28,39 @@ class Usage(BaseModel, extra="allow"): total_tokens: Optional[int] = None +class DeltaToolCallFunction(BaseModel): + arguments: Optional[str] = None + name: Optional[str] = None + + +class DeltaToolCall(BaseModel): + index: int + id: Optional[str] = None + function: Optional[DeltaToolCallFunction] = None + type: Optional[str] = None + + +class Delta(BaseModel): + role: Optional[str] = None + content: Optional[str] = "" + tool_calls: Optional[List[DeltaToolCall]] = None + + +class StreamChoice(BaseModel, extra="allow"): + index: Optional[int] = None + delta: Optional[Delta] + finish_reason: Optional[str] = None + + def __str__(self): + return json.dumps(self.dict(), indent=4) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def __getitem__(self, key): + return getattr(self, key, None) + + class FunctionCall(BaseModel): arguments: str name: str @@ -88,3 +125,20 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + + +class ChatCompletionChunk(BaseModel): + id: Optional[str] = None + object: Optional[str] = None + created: Optional[int] = None + model: Optional[str] = None + choices: Optional[List[StreamChoice]] + + def __str__(self): + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default diff --git a/portkey_ai/api_resources/types/complete_type.py b/portkey_ai/api_resources/types/complete_type.py index 075f3a1d..920cc0cd 100644 --- a/portkey_ai/api_resources/types/complete_type.py +++ b/portkey_ai/api_resources/types/complete_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel @@ -50,3 +50,37 @@ def get(self, key: str, default: Optional[Any] = None): def get_headers(self) -> Optional[Dict[str, str]]: return parse_headers(self._headers) + + +class TextChoice(BaseModel, extra="allow"): + index: Optional[int] = None + text: Optional[str] = None + logprobs: Optional[Logprobs] = None + finish_reason: Optional[str] = None + + def __str__(self): + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + +class TextCompletionChunk(BaseModel, extra="allow"): + id: Optional[str] = None + object: Optional[str] = None + created: Optional[int] = None + model: Optional[str] = None + provider: Optional[str] = None + choices: List[TextChoice] + + def __str__(self): + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default diff 
--git a/portkey_ai/api_resources/types/embeddings_type.py b/portkey_ai/api_resources/types/embeddings_type.py index 81ef3920..9e0bb4c3 100644 --- a/portkey_ai/api_resources/types/embeddings_type.py +++ b/portkey_ai/api_resources/types/embeddings_type.py @@ -2,7 +2,7 @@ from typing import Dict, Optional, Union import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/image_type.py b/portkey_ai/api_resources/types/image_type.py index 1dbedb0a..e66839af 100644 --- a/portkey_ai/api_resources/types/image_type.py +++ b/portkey_ai/api_resources/types/image_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/main_file_type.py b/portkey_ai/api_resources/types/main_file_type.py index 52b7e733..b7437d7a 100644 --- a/portkey_ai/api_resources/types/main_file_type.py +++ b/portkey_ai/api_resources/types/main_file_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/models_type.py b/portkey_ai/api_resources/types/models_type.py index 84d20f3c..ee08c467 100644 --- a/portkey_ai/api_resources/types/models_type.py +++ b/portkey_ai/api_resources/types/models_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List, Any from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py index 8134130e..9f45efa4 100644 --- a/portkey_ai/api_resources/types/thread_message_type.py +++ b/portkey_ai/api_resources/types/thread_message_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, List, Optional, Union import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from pydantic import BaseModel __all__ = [ diff --git a/portkey_ai/api_resources/types/thread_run_type.py b/portkey_ai/api_resources/types/thread_run_type.py index eadb832e..ad77287c 100644 --- a/portkey_ai/api_resources/types/thread_run_type.py +++ b/portkey_ai/api_resources/types/thread_run_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Literal, Optional, Union import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from typing import List from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/thread_type.py b/portkey_ai/api_resources/types/thread_type.py index 4f047e1d..c1905e0f 100644 --- a/portkey_ai/api_resources/types/thread_type.py +++ b/portkey_ai/api_resources/types/thread_type.py @@ -1,7 +1,7 @@ import json from typing import Dict, Optional import httpx -from portkey_ai.api_resources.utils import parse_headers +from .utils import parse_headers from pydantic import BaseModel diff --git a/portkey_ai/api_resources/types/utils.py b/portkey_ai/api_resources/types/utils.py new file mode 100644 index 00000000..876bc9a4 --- /dev/null +++ b/portkey_ai/api_resources/types/utils.py @@ -0,0 +1,18 @@ +from 
typing import Optional + +import httpx + +from portkey_ai.api_resources.global_constants import PORTKEY_HEADER_PREFIX + + +def parse_headers(headers: Optional[httpx.Headers]) -> dict: + if headers is None: + return {} + + _headers = {} + for k, v in headers.items(): + if k.startswith(PORTKEY_HEADER_PREFIX): + k = k.replace(PORTKEY_HEADER_PREFIX, "") + _headers[k] = v + + return _headers diff --git a/portkey_ai/api_resources/utils.py b/portkey_ai/api_resources/utils.py index c41f31dc..2308e5e6 100644 --- a/portkey_ai/api_resources/utils.py +++ b/portkey_ai/api_resources/utils.py @@ -6,6 +6,15 @@ import httpx import portkey_ai from pydantic import BaseModel, validator + +from portkey_ai.api_resources.types.chat_complete_type import ( + ChatCompletionChunk, + ChatCompletions, +) +from portkey_ai.api_resources.types.complete_type import ( + TextCompletionChunk, + TextCompletion, +) from .exceptions import ( APIStatusError, BadRequestError, @@ -47,7 +56,7 @@ class CacheType(str, Enum, metaclass=MetaEnum): ResponseT = TypeVar( "ResponseT", - bound="Union[ChatCompletionChunk, ChatCompletions, TextCompletionChunk, TextCompletion, GenericResponse, httpx.Response]", # noqa: E501 + bound="Union[ChatCompletionChunk, ChatCompletions, TextCompletion, TextCompletionChunk, GenericResponse, httpx.Response]", # noqa: E501 ) @@ -289,70 +298,6 @@ def __str__(self): return json.dumps(self.dict(), indent=4) -# Models for Chat Stream -class Delta(BaseModel, extra="allow"): - role: Optional[str] = None - content: Optional[str] = "" - tool_calls: Optional[List[DeltaToolCall]] = None - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - -class StreamChoice(BaseModel, extra="allow"): - index: Optional[int] = None - delta: Union[Delta, Dict[Any, Any]] = {} - finish_reason: Optional[str] = None - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - def __getitem__(self, key): - return getattr(self, key, None) - - -class ChatCompletionChunk(BaseModel, extra="allow"): - id: Optional[str] = None - object: Optional[str] = None - created: Optional[int] = None - model: Optional[str] = None - choices: Union[List[StreamChoice], Dict[Any, Any]] = {} - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - -# Models for Chat Non-stream -class ChatChoice(BaseModel, extra="allow"): - index: Optional[int] = None - message: Optional[Message] = None - finish_reason: Optional[str] = None - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - class Usage(BaseModel, extra="allow"): prompt_tokens: Optional[int] = None completion_tokens: Optional[int] = None @@ -368,90 +313,6 @@ def get(self, key: str, default: Optional[Any] = None): return getattr(self, key, None) or default -class ChatCompletions(BaseModel, extra="allow"): - id: Optional[str] = None - object: Optional[str] = None - created: Optional[int] = None - model: Optional[str] = None - choices: 
Union[List[ChatChoice], Dict[Any, Any]] = {} - usage: Optional[Usage] = None - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -# Models for text completion Non-stream -class TextChoice(BaseModel, extra="allow"): - index: Optional[int] = None - text: Optional[str] = None - logprobs: Optional[Any] = None - finish_reason: Optional[str] = None - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - -class TextCompletion(BaseModel, extra="allow"): - id: Optional[str] = None - object: Optional[str] = None - created: Optional[int] = None - model: Optional[str] = None - choices: Union[List[TextChoice], Dict[Any, Any]] = {} - usage: Optional[Usage] = None - _headers: Optional[httpx.Headers] = None - - def __str__(self): - del self._headers - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) - - -# Models for text completion stream - - -class TextCompletionChunk(BaseModel, extra="allow"): - id: Optional[str] = None - object: Optional[str] = None - created: Optional[int] = None - model: Optional[str] = None - provider: Optional[str] = None - choices: List[TextChoice] - - def __str__(self): - return json.dumps(self.dict(), indent=4) - - def __getitem__(self, key): - return getattr(self, key, None) - - def get(self, key: str, default: Optional[Any] = None): - return getattr(self, key, None) or default - - class GenericResponse(BaseModel, extra="allow"): success: Optional[bool] = None data: Optional[Any] = None @@ -463,7 +324,7 @@ def __str__(self): return json.dumps(self.dict(), indent=4) def get_headers(self) -> Optional[Dict[str, str]]: - return parse_headers(self._headers) + return parse_headers_generic(self._headers) def apikey_from_env(provider: Union[ProviderTypes, ProviderTypesLiteral, str]) -> str: @@ -573,7 +434,7 @@ def get_portkey_header(key: str) -> str: return f"{PORTKEY_HEADER_PREFIX}{key}" -def parse_headers(headers: Optional[httpx.Headers]) -> dict: +def parse_headers_generic(headers: Optional[httpx.Headers]) -> dict: if headers is None: return {} From 9ae3b79a0457a3d7ca88b1545496020431e797ad Mon Sep 17 00:00:00 2001 From: visargD Date: Wed, 20 Mar 2024 20:56:18 +0530 Subject: [PATCH 49/62] chore: update chat stream chunk assert check --- tests/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils.py b/tests/utils.py index 372a9b05..61ece057 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -9,7 +9,7 @@ def read_json_file(path: str) -> Dict[str, Any]: def check_chat_streaming_chunk(chunk) -> bool: stop_reason = chunk.choices[0].finish_reason if type(stop_reason) is str: - return chunk.choices[0].delta == {} + return chunk.choices[0].delta.content == "" else: return type(chunk.choices[0].delta.content) is str From 296e8a4ef7f44b8cb2fcd8ab2d9266acb1e9caa5 Mon Sep 17 00:00:00 2001 
From: csgulati09 Date: Thu, 21 Mar 2024 13:15:48 +0530 Subject: [PATCH 50/62] feat: suggestion for embedding and image.generate --- portkey_ai/api_resources/apis/embeddings.py | 38 +++++++++++++-- portkey_ai/api_resources/apis/images.py | 53 +++++++++++++++++++-- tests/utils.py | 2 +- 3 files changed, 83 insertions(+), 10 deletions(-) diff --git a/portkey_ai/api_resources/apis/embeddings.py b/portkey_ai/api_resources/apis/embeddings.py index 239f2373..0e112444 100644 --- a/portkey_ai/api_resources/apis/embeddings.py +++ b/portkey_ai/api_resources/apis/embeddings.py @@ -1,8 +1,10 @@ import json -from typing import Optional +from typing import Optional, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.embeddings_type import CreateEmbeddingResponse +from openai._types import NotGiven, NOT_GIVEN class Embeddings(APIResource): @@ -10,11 +12,24 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check def create( - self, *, input: str, model: Optional[str] = "portkey-default", **kwargs + self, + *, + input: str, + model: Optional[str] = "portkey-default", + dimensions: Union[int, NotGiven] = NOT_GIVEN, + encoding_format: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs ) -> CreateEmbeddingResponse: response = self.openai_client.with_raw_response.embeddings.create( - input=input, model=model, **kwargs # type: ignore + input=input, + model=model, + dimensions=dimensions, + encoding_format=encoding_format, + user=user, + **kwargs ) data = CreateEmbeddingResponse(**json.loads(response.text)) @@ -28,11 +43,24 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client + @typing.no_type_check async def create( - self, *, input: str, model: Optional[str] = "portkey-default", **kwargs + self, + *, + input: str, + model: Optional[str] = "portkey-default", + dimensions: Union[int, NotGiven] = NOT_GIVEN, + encoding_format: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs ) -> CreateEmbeddingResponse: response = await self.openai_client.with_raw_response.embeddings.create( - input=input, model=model, **kwargs # type: ignore + input=input, + model=model, + dimensions=dimensions, + encoding_format=encoding_format, + user=user, + **kwargs ) data = CreateEmbeddingResponse(**json.loads(response.text)) data._headers = response.headers diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index 1d63a56e..9a1659b3 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,7 +1,10 @@ import json +from typing import Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.image_type import ImagesResponse +from openai._types import NotGiven, NOT_GIVEN class Images(APIResource): @@ -9,9 +12,30 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def generate(self, prompt: str, **kwargs) -> ImagesResponse: + @typing.no_type_check + def generate( + self, + *, + prompt: str, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + 
quality: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + style: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ImagesResponse: response = self.openai_client.with_raw_response.images.generate( - prompt=prompt, **kwargs + prompt=prompt, + model=model, + n=n, + quality=quality, + response_format=response_format, + size=size, + user=user, + style=style, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers @@ -42,9 +66,30 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def generate(self, prompt: str, **kwargs) -> ImagesResponse: + @typing.no_type_check + async def generate( + self, + *, + prompt: str, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + quality: Union[str, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + style: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.generate( - prompt=prompt, **kwargs + prompt=prompt, + model=model, + n=n, + quality=quality, + response_format=response_format, + size=size, + user=user, + style=style, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers diff --git a/tests/utils.py b/tests/utils.py index 61ece057..60175eba 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -9,7 +9,7 @@ def read_json_file(path: str) -> Dict[str, Any]: def check_chat_streaming_chunk(chunk) -> bool: stop_reason = chunk.choices[0].finish_reason if type(stop_reason) is str: - return chunk.choices[0].delta.content == "" + return chunk.choices[0].delta.content == "" else: return type(chunk.choices[0].delta.content) is str From ca4793990c87567263d56e485198e189b1c2a56b Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 21 Mar 2024 13:37:54 +0530 Subject: [PATCH 51/62] feat: auto complete suggestions for complete route --- portkey_ai/api_resources/apis/complete.py | 101 ++++++++++++++++++---- 1 file changed, 83 insertions(+), 18 deletions(-) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 030b84c2..84686da7 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,6 +1,7 @@ import json from typing import AsyncIterator, Iterator, Optional, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from openai._types import NotGiven, NOT_GIVEN # from portkey_ai.api_resources.utils import TextCompletionChunk from portkey_ai.api_resources.types.complete_type import ( @@ -17,10 +18,16 @@ def __init__(self, client: Portkey) -> None: self.client = client def stream_create( - self, model, prompt, **kwargs + self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( - model=model, prompt=prompt, **kwargs + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) as response: for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -36,9 +43,17 @@ def stream_create( else: return "" - def normal_create(self, model, prompt, 
**kwargs) -> TextCompletion: + def normal_create( + self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + ) -> TextCompletion: response = self.openai_client.with_raw_response.completions.create( - model=model, prompt=prompt, **kwargs + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) data = TextCompletion(**json.loads(response.text)) data._headers = response.headers @@ -49,14 +64,32 @@ def create( self, *, model: Optional[str] = "portkey-default", prompt: Optional[str] = None, + stream: Union[bool, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + max_tokens: Union[int, NotGiven] = NOT_GIVEN, + top_p: Union[float, NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: - if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore - elif "stream" in kwargs and kwargs["stream"] is False: - return self.normal_create(model=model, prompt=prompt, **kwargs) + if stream is True: + return self.stream_create( + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) else: - return self.normal_create(model=model, prompt=prompt, **kwargs) + return self.normal_create( + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) class AsyncCompletion(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client async def stream_create( - self, model, prompt, **kwargs + self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: async with self.openai_client.with_streaming_response.completions.create( - model=model, prompt=prompt, **kwargs + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) as response: async for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -84,9 +123,17 @@ async def stream_create( else: pass - async def normal_create(self, model, prompt, **kwargs) -> TextCompletion: + async def normal_create( + self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + ) -> TextCompletion: response = await self.openai_client.with_raw_response.completions.create( - model=model, prompt=prompt, **kwargs + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) data = TextCompletion(**json.loads(response.text)) data._headers = response.headers @@ -97,11 +144,29 @@ async def create( self, *, model: Optional[str] = "portkey-default", prompt: Optional[str] = None, + stream: Union[bool, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + max_tokens: Union[int, NotGiven] = NOT_GIVEN, + top_p: Union[float, NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: - if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(model=model, prompt=prompt, **kwargs) # type: ignore - elif "stream" in kwargs and kwargs["stream"] is False: - return await self.normal_create(model=model, prompt=prompt, **kwargs) + if stream is True: + return self.stream_create( + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, +
max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) else: - return await self.normal_create(model=model, prompt=prompt, **kwargs) + return await self.normal_create( + model=model, + prompt=prompt, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) From 70762043407c5b7c7462ed43f4f4d97719e9e943 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 21 Mar 2024 13:52:07 +0530 Subject: [PATCH 52/62] feat: auto complete suggestions for chat complete --- .../api_resources/apis/chat_complete.py | 101 ++++++++++++++---- 1 file changed, 83 insertions(+), 18 deletions(-) diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index c1b79dee..2cae9381 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -17,6 +17,7 @@ ) from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from openai._types import NotGiven, NOT_GIVEN __all__ = ["ChatCompletion", "AsyncChatCompletion"] @@ -43,10 +44,16 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client def stream_create( - self, model, messages, **kwargs + self, model, messages, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: with self.openai_client.with_streaming_response.chat.completions.create( - model=model, messages=messages, **kwargs + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) as response: for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -62,9 +69,17 @@ def stream_create( else: return "" - def normal_create(self, model, messages, **kwargs) -> ChatCompletions: + def normal_create( + self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + ) -> ChatCompletions: response = self.openai_client.with_raw_response.chat.completions.create( - model=model, messages=messages, **kwargs + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) data = ChatCompletions(**json.loads(response.text)) data._headers = response.headers @@ -75,14 +90,32 @@ def create( *, model: Optional[str] = "portkey-default", messages: Iterable[Any], + stream: Union[bool, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + max_tokens: Union[int, NotGiven] = NOT_GIVEN, + top_p: Union[float, NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: - if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore - elif "stream" in kwargs and kwargs["stream"] is False: - return self.normal_create(model=model, messages=messages, **kwargs) + if stream is True: + return self.stream_create( + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) else: - return self.normal_create(model=model, messages=messages, **kwargs) + return self.normal_create( + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) class AsyncCompletions(AsyncAPIResource): @@ -91,10 +124,16 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def stream_create( - self, model, messages, **kwargs + 
self, model, messages, stream, temperature, max_tokens, top_p, **kwargs ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: async with self.openai_client.with_streaming_response.chat.completions.create( - model=model, messages=messages, **kwargs + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) as response: async for line in response.iter_lines(): json_string = line.replace("data: ", "") @@ -110,9 +149,17 @@ async def stream_create( else: pass - async def normal_create(self, model, messages, **kwargs) -> ChatCompletions: + async def normal_create( + self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + ) -> ChatCompletions: response = await self.openai_client.with_raw_response.chat.completions.create( - model=model, messages=messages, **kwargs + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, ) data = ChatCompletions(**json.loads(response.text)) data._headers = response.headers @@ -123,14 +170,32 @@ async def create( *, model: Optional[str] = "portkey-default", messages: Iterable[Any], + stream: Union[bool, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + max_tokens: Union[int, NotGiven] = NOT_GIVEN, + top_p: Union[float, NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: - if "stream" in kwargs and kwargs["stream"] is True: - return self.stream_create(model=model, messages=messages, **kwargs) # type: ignore - elif "stream" in kwargs and kwargs["stream"] is False: - return await self.normal_create(model=model, messages=messages, **kwargs) + if stream is True: + return self.stream_create( + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) else: - return await self.normal_create(model=model, messages=messages, **kwargs) + return await self.normal_create( + model=model, + messages=messages, + stream=stream, + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + **kwargs, + ) def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) From 04dfca316db6ada55be2d167a9d8d6f4107d470a Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 21 Mar 2024 14:32:00 +0530 Subject: [PATCH 53/62] feat: auto suggestion for all image routes --- portkey_ai/api_resources/apis/images.py | 90 ++++++++++++++++++++++--- 1 file changed, 81 insertions(+), 9 deletions(-) diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index 9a1659b3..5b4a6bb8 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,5 +1,5 @@ import json -from typing import Union +from typing import Union, Any import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -42,18 +42,54 @@ def generate( return data - def edit(self, prompt: str, image, **kwargs) -> ImagesResponse: + @typing.no_type_check + def edit( + self, + *, + prompt: str, + image, + mask: Union[Any, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> 
ImagesResponse: response = self.openai_client.with_raw_response.images.edit( - prompt=prompt, image=image, **kwargs + prompt=prompt, + image=image, + mask=mask, + model=model, + n=n, + response_format=response_format, + size=size, + user=user, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - def create_variation(self, image, **kwargs) -> ImagesResponse: + @typing.no_type_check + def create_variation( + self, + *, + image, + n: Union[int, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ImagesResponse: response = self.openai_client.with_raw_response.images.create_variation( - image=image, **kwargs + image=image, + n=n, + response_format=response_format, + size=size, + user=user, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers @@ -96,18 +132,54 @@ async def generate( return data - async def edit(self, prompt: str, image, **kwargs) -> ImagesResponse: + @typing.no_type_check + async def edit( + self, + *, + prompt: str, + image, + mask: Union[Any, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.edit( - prompt=prompt, image=image, **kwargs + prompt=prompt, + image=image, + mask=mask, + model=model, + n=n, + response_format=response_format, + size=size, + user=user, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers return data - async def create_variation(self, image, **kwargs) -> ImagesResponse: + @typing.no_type_check + async def create_variation( + self, + *, + image, + n: Union[int, NotGiven] = NOT_GIVEN, + response_format: Union[str, NotGiven] = NOT_GIVEN, + size: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> ImagesResponse: response = await self.openai_client.with_raw_response.images.create_variation( - image=image, **kwargs + image=image, + n=n, + response_format=response_format, + size=size, + user=user, + **kwargs ) data = ImagesResponse(**json.loads(response.text)) data._headers = response.headers From 08e8bc29e8c112aa7a7c2b600ba95f922028e504 Mon Sep 17 00:00:00 2001 From: csgulati09 Date: Thu, 21 Mar 2024 15:25:29 +0530 Subject: [PATCH 54/62] feat: auto complete suggestions for models route --- portkey_ai/api_resources/apis/models.py | 26 +++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index 9574312c..26857ed5 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -1,7 +1,9 @@ import json +from typing import Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.models_type import Model, ModelDeleted, ModelList +from openai._types import NotGiven, NOT_GIVEN class Models(APIResource): @@ -15,17 +17,21 @@ def list(self, **kwargs) -> ModelList: data._headers = response.headers return data - def retrieve(self, model, **kwargs) -> Model: + def retrieve( + self, model: str, *, 
From 08e8bc29e8c112aa7a7c2b600ba95f922028e504 Mon Sep 17 00:00:00 2001
From: csgulati09
Date: Thu, 21 Mar 2024 15:25:29 +0530
Subject: [PATCH 54/62] feat: auto complete suggestions for models route

---
 portkey_ai/api_resources/apis/models.py | 26 +++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py
index 9574312c..26857ed5 100644
--- a/portkey_ai/api_resources/apis/models.py
+++ b/portkey_ai/api_resources/apis/models.py
@@ -1,7 +1,9 @@
 import json
+from typing import Union
 from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource
 from portkey_ai.api_resources.client import AsyncPortkey, Portkey
 from portkey_ai.api_resources.types.models_type import Model, ModelDeleted, ModelList
+from openai._types import NotGiven, NOT_GIVEN
 
 
 class Models(APIResource):
@@ -15,17 +17,21 @@ def list(self, **kwargs) -> ModelList:
         data._headers = response.headers
         return data
 
-    def retrieve(self, model, **kwargs) -> Model:
+    def retrieve(
+        self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs
+    ) -> Model:
         response = self.openai_client.with_raw_response.models.retrieve(
-            model=model, **kwargs
+            model=model, timeout=timeout, **kwargs
         )
         data = Model(**json.loads(response.text))
         data._headers = response.headers
         return data
 
-    def delete(self, model, **kwargs) -> ModelDeleted:
+    def delete(
+        self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs
+    ) -> ModelDeleted:
         response = self.openai_client.with_raw_response.models.delete(
-            model=model, **kwargs
+            model=model, timeout=timeout, **kwargs
         )
         data = ModelDeleted(**json.loads(response.text))
         data._headers = response.headers
@@ -43,17 +49,21 @@ async def list(self, **kwargs) -> ModelList:
         data._headers = response.headers
         return data
 
-    async def retrieve(self, model, **kwargs) -> Model:
+    async def retrieve(
+        self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs
+    ) -> Model:
         response = await self.openai_client.with_raw_response.models.retrieve(
-            model=model, **kwargs
+            model=model, timeout=timeout, **kwargs
         )
         data = Model(**json.loads(response.text))
         data._headers = response.headers
         return data
 
-    async def delete(self, model, **kwargs) -> ModelDeleted:
+    async def delete(
+        self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs
+    ) -> ModelDeleted:
         response = await self.openai_client.with_raw_response.models.delete(
-            model=model, **kwargs
+            model=model, timeout=timeout, **kwargs
         )
         data = ModelDeleted(**json.loads(response.text))
         data._headers = response.headers

From f5cbce8f098c442e5bf0e962330559412549037a Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 16:11:02 +0530
Subject: [PATCH 55/62] chore: add loadbalance and fallback test cases for
 images

---
 .../segmind_n_openai.json             | 36 +++++++++++++++++++
 .../loadbalance_with_two_apikeys.json | 19 ++++++++++
 2 files changed, 55 insertions(+)
 create mode 100644 tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json
 create mode 100644 tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json

diff --git a/tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json b/tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json
new file mode 100644
index 00000000..644ec482
--- /dev/null
+++ b/tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json
@@ -0,0 +1,36 @@
+{
+    "strategy": {
+        "mode": "loadbalance"
+    },
+    "targets": [
+        {
+            "virtual_key": "openai-virtual-key",
+            "override_params": {
+                "model": "dall-e-2"
+            }
+        },
+        {
+            "strategy": {
+                "mode": "fallback",
+                "on_status_codes": [
+                    429,
+                    241
+                ]
+            },
+            "targets": [
+                {
+                    "virtual_key": "openai-virtual-key",
+                    "override_params": {
+                        "model": "dall-e-2"
+                    }
+                },
+                {
+                    "virtual_key": "segmind-virtual-key",
+                    "override_params": {
+                        "model": "sdxl1.0-newreality-lightning"
+                    }
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json
new file mode 100644
index 00000000..441e5291
--- /dev/null
+++ b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json
@@ -0,0 +1,19 @@
+{
+    "strategy": {
+        "mode": "loadbalance"
+    },
+    "targets": [
+        {
+            "virtual_key": "openai-virtual-key",
+            "override_params": {
+                "model": "dall-e-2"
+            }
+        },
+        {
+            "virtual_key": "segmind-virtual-key",
+            "override_params": {
+                "model": "sdxl1.0-newreality-lightning"
+            }
+        }
+    ]
+}
\ No newline at end of file
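The two fixtures above drive the image tests that follow. A gateway config is attached to the client as a whole, so every request it issues is routed through the configured strategy. A rough sketch of loading the loadbalance fixture and generating an image through it; the path and key names are assumptions for illustration, not part of the patch:

    import json
    from portkey_ai import Portkey

    # Load the loadbalance config added in this patch; the path is assumed.
    config_path = (
        "tests/configs/images/loadbalance_with_two_apikeys/"
        "loadbalance_with_two_apikeys.json"
    )
    with open(config_path) as f:
        config = json.load(f)

    # The client accepts the config as a dict (or JSON string); the gateway
    # then picks one of the two targets for each request.
    portkey = Portkey(api_key="PORTKEY_API_KEY", config=config)
    image = portkey.images.generate(prompt="A cute baby sea otter", n=1)
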
From 4e7ff8132ad1d7e107e9491394c566bd57d27edf Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 16:11:52 +0530
Subject: [PATCH 56/62] chore: fix loadbalance and fallback test cases for
 images

---
 tests/test_images.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/test_images.py b/tests/test_images.py
index 4f6dc400..8702510a 100644
--- a/tests/test_images.py
+++ b/tests/test_images.py
@@ -158,10 +158,10 @@ def test_method_loadbalance_with_two_apikeys(
         )
 
         image = portkey.images.generate(
-            prompt="A cute baby sea otter", n=1, size="1024x1024"
+            prompt="A cute baby sea otter", n=1, size="1024x1024", response_format="b64_json"
         )
 
-        assert type(image.data[0].url) is str
+        assert type(image.data[0].b64_json) is str
 
     # --------------------------
     # Test-5
@@ -179,10 +179,10 @@ def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> None
         )
 
         image = portkey.images.generate(
-            prompt="A cute baby sea otter", n=1, size="1024x1024"
+            prompt="A cute baby sea otter", n=1, size="1024x1024", response_format="b64_json"
         )
 
-        assert type(image.data[0].url) is str
+        assert type(image.data[0].b64_json) is str
 
     # --------------------------
     # Test-6
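Aside on the change just above: requesting response_format="b64_json" makes each target inline the image bytes in the response, so the assertion no longer depends on which provider the loadbalancer picked (a provider-hosted url field is not guaranteed across backends). A hedged helper sketch for the same check, assuming the response follows the OpenAI ImagesResponse shape:

    import base64

    def assert_b64_image(image) -> None:
        # data[0].b64_json holds a base64-encoded image when
        # response_format="b64_json" was requested.
        assert isinstance(image.data[0].b64_json, str)
        decoded = base64.b64decode(image.data[0].b64_json)
        assert len(decoded) > 0  # decodes to real bytes, not an empty payload
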
From 1856236ad4183fb8fa9606ad5957d55b20b9074e Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 16:16:51 +0530
Subject: [PATCH 57/62] chore: update assistants test cases

---
 tests/test_assistants.py | 79 +++++++++++++++++++++++++---------------
 1 file changed, 50 insertions(+), 29 deletions(-)

diff --git a/tests/test_assistants.py b/tests/test_assistants.py
index 10e589ca..ffe3d83e 100644
--- a/tests/test_assistants.py
+++ b/tests/test_assistants.py
@@ -39,35 +39,56 @@ def get_metadata(self):
             "random_id": str(uuid4()),
         }
 
-    # # --------------------------
-    # # Test-1
-
-    # t1_params = []
-    # t = []
-    # for k, v in models.items():
-    #     if k == "openai":
-    #         for i in v["chat"]:
-    #             if "vision" not in i:
-    #                 t.append((client, k, os.environ.get(v["env_variable"]), i))
-
-    # t1_params.extend(t)
-
-    # @pytest.mark.parametrize("client, provider, auth, model", t1_params)
-    # def test_method_single_with_vk_and_provider(
-    #     self, client: Any, provider: str, auth: str, model
-    # ) -> None:
-    #     portkey = client(
-    #         base_url=base_url,
-    #         api_key=api_key,
-    #         provider=f"{provider}",
-    #         Authorization=f"{auth}",
-    #         trace_id=str(uuid4()),
-    #         metadata=self.get_metadata(),
-    #     )
-    #     assistant = portkey.beta.assistants.create(
-    #         model=model,
-    #     )
-    #     print(assistant)
+    # --------------------------
+    # Test-1
+
+    t1_params = []
+    t = []
+    for k, v in models.items():
+        if k == "openai":
+            for i in v["chat"]:
+                if "vision" not in i:
+                    t.append((client, k, os.environ.get(v["env_variable"]), i))
+
+    t1_params.extend(t)
+
+    @pytest.mark.parametrize("client, provider, auth, model", t1_params)
+    def test_method_single_with_vk_and_provider(
+        self, client: Any, provider: str, auth: str, model
+    ) -> None:
+        portkey = client(
+            base_url=base_url,
+            api_key=api_key,
+            provider=f"{provider}",
+            Authorization=f"{auth}",
+            trace_id=str(uuid4()),
+            metadata=self.get_metadata(),
+        )
+        assistant = portkey.beta.assistants.create(
+            model=model,
+        )
+
+        assert type(assistant.id) is str
+        assert assistant.object == "assistant"
+        assert assistant.model == model
+
+        update_assistant = portkey.beta.assistants.update(
+            assistant.id, description="updated string"
+        )
+
+        assert update_assistant.description == "updated string"
+
+        retrieve_assistant = portkey.beta.assistants.retrieve(assistant.id)
+
+        assert retrieve_assistant.id == assistant.id
+        assert retrieve_assistant.object == "assistant"
+        assert retrieve_assistant.model == model
+
+        delete_assistant = portkey.beta.assistants.delete(assistant.id)
+
+        assert delete_assistant.id == assistant.id
+        assert delete_assistant.object == "assistant.deleted"
+        assert delete_assistant.deleted is True
 
     # --------------------------
     # Test-3

From ed44eaf95dfae89ce40a69b2bb68d428506285cc Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 16:17:27 +0530
Subject: [PATCH 58/62] chore: remove unused comments

---
 tests/test_async_chat_complete.py | 2 --
 tests/test_async_complete.py      | 2 --
 tests/test_async_images.py        | 1 -
 tests/test_chat_complete.py       | 2 --
 tests/test_complete.py            | 2 --
 tests/test_images.py              | 1 -
 6 files changed, 10 deletions(-)

diff --git a/tests/test_async_chat_complete.py b/tests/test_async_chat_complete.py
index a0def363..c0aaaaac 100644
--- a/tests/test_async_chat_complete.py
+++ b/tests/test_async_chat_complete.py
@@ -182,7 +182,6 @@ async def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
@@ -409,7 +408,6 @@ async def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
diff --git a/tests/test_async_complete.py b/tests/test_async_complete.py
index cd063727..c3a9060f 100644
--- a/tests/test_async_complete.py
+++ b/tests/test_async_complete.py
@@ -179,7 +179,6 @@ async def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
@@ -388,7 +387,6 @@ async def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
diff --git a/tests/test_async_images.py b/tests/test_async_images.py
index a8413865..6ebf9e18 100644
--- a/tests/test_async_images.py
+++ b/tests/test_async_images.py
@@ -157,7 +157,6 @@ async def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
diff --git a/tests/test_chat_complete.py b/tests/test_chat_complete.py
index 48727950..e7d309cf 100644
--- a/tests/test_chat_complete.py
+++ b/tests/test_chat_complete.py
@@ -177,7 +177,6 @@ def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
@@ -394,7 +393,6 @@ def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
diff --git a/tests/test_complete.py b/tests/test_complete.py
index 218971a4..a4c13726 100644
--- a/tests/test_complete.py
+++ b/tests/test_complete.py
@@ -170,7 +170,6 @@ def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
@@ -368,7 +367,6 @@ def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
diff --git a/tests/test_images.py b/tests/test_images.py
index 8702510a..1f4ea2d3 100644
--- a/tests/test_images.py
+++ b/tests/test_images.py
@@ -151,7 +151,6 @@ def test_method_loadbalance_with_two_apikeys(
         portkey = client(
             base_url=base_url,
             api_key=api_key,
-            # virtual_key=virtual_api_key,
             trace_id=str(uuid4()),
             metadata=self.get_metadata(),
             config=config,
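The fix in the next patch is the classic asyncio pitfall: without await, an async create call returns a coroutine object rather than a response, so downstream assertions pass or fail for the wrong reasons. A minimal standalone reproduction of the failure mode (plain asyncio, not Portkey-specific):

    import asyncio

    async def create() -> dict:
        # Stand-in for an async SDK call such as chat.completions.create.
        return {"id": "cmpl-123"}

    async def main() -> None:
        broken = create()             # a coroutine object; no request ever ran
        assert not isinstance(broken, dict)
        broken.close()                # avoid the "never awaited" warning

        fixed = await create()        # the actual response
        assert fixed["id"] == "cmpl-123"

    asyncio.run(main())
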
From 8cf27d6d8cafc42edf8c0f1fbae631ae8933b10d Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 17:39:22 +0530
Subject: [PATCH 59/62] fix: add missing await in async chat complete test

---
 tests/test_async_chat_complete.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_async_chat_complete.py b/tests/test_async_chat_complete.py
index c0aaaaac..967e6d90 100644
--- a/tests/test_async_chat_complete.py
+++ b/tests/test_async_chat_complete.py
@@ -162,7 +162,7 @@ async def test_method_single_provider_with_vk_retry_cache(
             config=config,
         )
 
-        cached_completion = portkey_2.chat.completions.create(
+        cached_completion = await portkey_2.chat.completions.create(
             messages=[{"role": "user", "content": "Say this is a test"}],
             model="gpt-3.5-turbo",
         )

From a2d2ed995b7f8939edaf66dd6ef37c12dae49562 Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 17:40:04 +0530
Subject: [PATCH 60/62] chore: add loadbalance fallback test for async images

---
 tests/test_async_images.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/test_async_images.py b/tests/test_async_images.py
index 6ebf9e18..11a2cbec 100644
--- a/tests/test_async_images.py
+++ b/tests/test_async_images.py
@@ -163,10 +163,10 @@ async def test_method_loadbalance_with_two_apikeys(
         )
 
         image = await portkey.images.generate(
-            model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024"
+            model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024", response_format="b64_json"
         )
 
-        assert type(image.data[0].url) is str
+        assert type(image.data[0].b64_json) is str
 
     # --------------------------
     # Test-5
@@ -187,10 +187,10 @@ async def test_method_loadbalance_and_fallback(
         self, client: Any, config
         )
 
         image = await portkey.images.generate(
-            model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024"
+            model="dall-e-3", prompt="A cute baby sea otter", n=1, size="1024x1024", response_format="b64_json"
         )
 
-        assert type(image.data[0].url) is str
+        assert type(image.data[0].b64_json) is str
 
     # --------------------------
     # Test-6
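The final two patches swap the Segmind target for Stability AI; only the virtual key and override model change, while the loadbalance shape stays identical. A sketch of the equivalent config expressed as a Python dict, with the virtual key names as placeholders (Portkey also accepts configs built in code, not just JSON fixtures):

    # Same loadbalance shape as the JSON fixtures below, with the Stability AI
    # target substituted in; both virtual key names are placeholders.
    loadbalance_config = {
        "strategy": {"mode": "loadbalance"},
        "targets": [
            {
                "virtual_key": "openai-virtual-key",
                "override_params": {"model": "dall-e-2"},
            },
            {
                "virtual_key": "stability-virtual-key",
                "override_params": {"model": "stable-diffusion-v1-6"},
            },
        ],
    }
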
From c4de831f1ae02d7f4f99088a77e59849d5c983a5 Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 17:41:09 +0530
Subject: [PATCH 61/62] chore: replace segmind with stability ai in test cases

---
 .../{segmind_n_openai.json => stability_n_openai.json}   | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
 rename tests/configs/images/loadbalance_and_fallback/{segmind_n_openai.json => stability_n_openai.json} (75%)

diff --git a/tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json b/tests/configs/images/loadbalance_and_fallback/stability_n_openai.json
similarity index 75%
rename from tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json
rename to tests/configs/images/loadbalance_and_fallback/stability_n_openai.json
index 644ec482..e0553dc5 100644
--- a/tests/configs/images/loadbalance_and_fallback/segmind_n_openai.json
+++ b/tests/configs/images/loadbalance_and_fallback/stability_n_openai.json
@@ -4,9 +4,9 @@
     },
     "targets": [
         {
-            "virtual_key": "openai-virtual-key",
+            "virtual_key": "stability-virtual-key",
             "override_params": {
-                "model": "dall-e-2"
+                "model": "stable-diffusion-v1-6"
             }
         },
         {
@@ -25,9 +25,9 @@
             }
         },
         {
-            "virtual_key": "segmind-virtual-key",
+            "virtual_key": "stability-virtual-key",
             "override_params": {
-                "model": "sdxl1.0-newreality-lightning"
+                "model": "stable-diffusion-v1-6"
             }
         }
     ]

From d3acb633f6ab8e0046c6066bdedc4a3a9353658c Mon Sep 17 00:00:00 2001
From: visargD
Date: Thu, 21 Mar 2024 17:41:43 +0530
Subject: [PATCH 62/62] chore: replace segmind with stability ai in test cases

---
 .../loadbalance_with_two_apikeys.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json
index 441e5291..193057b4 100644
--- a/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json
+++ b/tests/configs/images/loadbalance_with_two_apikeys/loadbalance_with_two_apikeys.json
@@ -10,9 +10,9 @@
             }
         },
         {
-            "virtual_key": "segmind-virtual-key",
+            "virtual_key": "stability-virtual-key",
             "override_params": {
-                "model": "sdxl1.0-newreality-lightning"
+                "model": "stable-diffusion-v1-6"
             }
         }
     ]